diff --git a/.github/labeler.yml b/.github/labeler.yml index 78366fb2097..ffe55984ac6 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -240,6 +240,10 @@ - changed-files: - any-glob-to-any-file: - "extensions/device-pair/**" +"extensions: acpx": + - changed-files: + - any-glob-to-any-file: + - "extensions/acpx/**" "extensions: minimax-portal-auth": - changed-files: - any-glob-to-any-file: diff --git a/.github/workflows/auto-response.yml b/.github/workflows/auto-response.yml index 1502456a251..faea8807df0 100644 --- a/.github/workflows/auto-response.yml +++ b/.github/workflows/auto-response.yml @@ -3,6 +3,8 @@ name: Auto response on: issues: types: [opened, edited, labeled] + issue_comment: + types: [created] pull_request_target: types: [labeled] @@ -42,6 +44,7 @@ jobs: { label: "r: testflight", close: true, + commentTriggers: ["testflight"], message: "Not available, build from source.", }, { @@ -55,11 +58,76 @@ jobs: close: true, lock: true, lockReason: "off-topic", + commentTriggers: ["moltbook"], message: "OpenClaw is not affiliated with Moltbook, and issues related to Moltbook should not be submitted here.", }, ]; + const maintainerTeam = "maintainer"; + const pingWarningMessage = + "Please don’t spam-ping multiple maintainers at once. 
Be patient, or join our community Discord for help: https://discord.gg/clawd"; + const mentionRegex = /@([A-Za-z0-9-]+)/g; + const maintainerCache = new Map(); + const normalizeLogin = (login) => login.toLowerCase(); + + const isMaintainer = async (login) => { + if (!login) { + return false; + } + const normalized = normalizeLogin(login); + if (maintainerCache.has(normalized)) { + return maintainerCache.get(normalized); + } + let isMember = false; + try { + const membership = await github.rest.teams.getMembershipForUserInOrg({ + org: context.repo.owner, + team_slug: maintainerTeam, + username: normalized, + }); + isMember = membership?.data?.state === "active"; + } catch (error) { + if (error?.status !== 404) { + throw error; + } + } + maintainerCache.set(normalized, isMember); + return isMember; + }; + + const countMaintainerMentions = async (body, authorLogin) => { + if (!body) { + return 0; + } + const normalizedAuthor = authorLogin ? normalizeLogin(authorLogin) : ""; + if (normalizedAuthor && (await isMaintainer(normalizedAuthor))) { + return 0; + } + + const haystack = body.toLowerCase(); + const teamMention = `@${context.repo.owner.toLowerCase()}/${maintainerTeam}`; + if (haystack.includes(teamMention)) { + return 3; + } + + const mentions = new Set(); + for (const match of body.matchAll(mentionRegex)) { + mentions.add(normalizeLogin(match[1])); + } + if (normalizedAuthor) { + mentions.delete(normalizedAuthor); + } + + let count = 0; + for (const login of mentions) { + if (await isMaintainer(login)) { + count += 1; + } + } + return count; + }; + const triggerLabel = "trigger-response"; const target = context.payload.issue ?? context.payload.pull_request; if (!target) { @@ -72,6 +140,63 @@ jobs: .filter((name) => typeof name === "string"), ); + const issue = context.payload.issue; + const pullRequest = context.payload.pull_request; + const comment = context.payload.comment; + if (comment) { + const authorLogin = comment.user?.login ?? 
""; + if (comment.user?.type === "Bot" || authorLogin.endsWith("[bot]")) { + return; + } + + const commentBody = comment.body ?? ""; + const responses = []; + const mentionCount = await countMaintainerMentions(commentBody, authorLogin); + if (mentionCount >= 3) { + responses.push(pingWarningMessage); + } + + const commentHaystack = commentBody.toLowerCase(); + const commentRule = rules.find((item) => + (item.commentTriggers ?? []).some((trigger) => + commentHaystack.includes(trigger), + ), + ); + if (commentRule) { + responses.push(commentRule.message); + } + + if (responses.length > 0) { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: target.number, + body: responses.join("\n\n"), + }); + } + return; + } + + if (issue) { + const action = context.payload.action; + if (action === "opened" || action === "edited") { + const issueText = `${issue.title ?? ""}\n${issue.body ?? ""}`.trim(); + const authorLogin = issue.user?.login ?? ""; + const mentionCount = await countMaintainerMentions( + issueText, + authorLogin, + ); + if (mentionCount >= 3) { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue.number, + body: pingWarningMessage, + }); + } + } + } + const hasTriggerLabel = labelSet.has(triggerLabel); if (hasTriggerLabel) { labelSet.delete(triggerLabel); @@ -94,7 +219,6 @@ jobs: return; } - const issue = context.payload.issue; if (issue) { const title = issue.title ?? ""; const body = issue.body ?? ""; @@ -136,7 +260,6 @@ jobs: const noisyPrMessage = "Closing this PR because it looks dirty (too many unrelated or unexpected changes). This usually happens when a branch picks up unrelated commits or a merge went sideways. 
Please recreate the PR from a clean branch."; - const pullRequest = context.payload.pull_request; if (pullRequest) { if (labelSet.has(dirtyLabel)) { await github.rest.issues.createComment({ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8de4f3882c8..e7bef285a7a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -418,12 +418,23 @@ jobs: include: - runtime: node task: lint + shard_index: 0 + shard_count: 1 command: pnpm lint - runtime: node task: test + shard_index: 1 + shard_count: 2 + command: pnpm canvas:a2ui:bundle && pnpm test + - runtime: node + task: test + shard_index: 2 + shard_count: 2 command: pnpm canvas:a2ui:bundle && pnpm test - runtime: node task: protocol + shard_index: 0 + shard_count: 1 command: pnpm protocol:check steps: - name: Checkout @@ -495,6 +506,12 @@ jobs: pnpm -v pnpm install --frozen-lockfile --ignore-scripts=false --config.engine-strict=false --config.enable-pre-post-scripts=true || pnpm install --frozen-lockfile --ignore-scripts=false --config.engine-strict=false --config.enable-pre-post-scripts=true + - name: Configure test shard (Windows) + if: matrix.task == 'test' + run: | + echo "OPENCLAW_TEST_SHARDS=${{ matrix.shard_count }}" >> "$GITHUB_ENV" + echo "OPENCLAW_TEST_SHARD_INDEX=${{ matrix.shard_index }}" >> "$GITHUB_ENV" + - name: Configure vitest JSON reports if: matrix.task == 'test' run: echo "OPENCLAW_VITEST_REPORT_DIR=$RUNNER_TEMP/vitest-reports" >> "$GITHUB_ENV" @@ -512,7 +529,7 @@ jobs: if: matrix.task == 'test' uses: actions/upload-artifact@v4 with: - name: vitest-reports-${{ runner.os }}-${{ matrix.runtime }} + name: vitest-reports-${{ runner.os }}-${{ matrix.runtime }}-shard${{ matrix.shard_index }}of${{ matrix.shard_count }} path: | ${{ env.OPENCLAW_VITEST_REPORT_DIR }} ${{ runner.temp }}/vitest-slowest.md diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index fc0d97d4091..eff0993b466 100644 --- a/.github/workflows/docker-release.yml +++ 
b/.github/workflows/docker-release.yml @@ -172,6 +172,9 @@ jobs: if [[ "${GITHUB_REF}" == refs/tags/v* ]]; then version="${GITHUB_REF#refs/tags/v}" tags+=("${IMAGE}:${version}") + if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9]+)?$ ]]; then + tags+=("${IMAGE}:latest") + fi fi if [[ ${#tags[@]} -eq 0 ]]; then echo "::error::No manifest tags resolved for ref ${GITHUB_REF}" diff --git a/AGENTS.md b/AGENTS.md index 09ed6423ac4..a0eca723170 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,6 +1,7 @@ # Repository Guidelines - Repo: https://github.com/openclaw/openclaw +- In chat replies, file references must be repo-root relative only (example: `extensions/bluebubbles/src/channel.ts:80`); never absolute paths or `~/...`. - GitHub issues/comments/PR comments: use literal multiline strings or `-F - <<'EOF'` (or $'...') for real newlines; never embed "\\n". - GitHub comment footgun: never use `gh issue/pr comment -b "..."` when body contains backticks or shell chars. Always use single-quoted heredoc (`-F - <<'EOF'`) so no command substitution/escaping corruption. - GitHub linking footgun: don’t wrap issue/PR refs like `#24643` in backticks when you want auto-linking. Use plain `#24643` (optionally add full URL). diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b3dacb2e26..90669ce9e20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,30 +2,177 @@ Docs: https://docs.openclaw.ai -## 2026.2.25 (Unreleased) +## 2026.2.26 + +### Changes + +- Highlight: External Secrets Management introduces a full `openclaw secrets` workflow (`audit`, `configure`, `apply`, `reload`) with runtime snapshot activation, strict `secrets apply` target-path validation, safer migration scrubbing, ref-only auth-profile support, and dedicated docs. (#26155) Thanks @joshavant. 
+- ACP/Thread-bound agents: make ACP agents first-class runtimes for thread sessions with `acp` spawn/send dispatch integration, acpx backend bridging, lifecycle controls, startup reconciliation, runtime cleanup, and coalesced thread replies. (#23580) thanks @osolmaz. +- Agents/Routing CLI: add `openclaw agents bindings`, `openclaw agents bind`, and `openclaw agents unbind` for account-scoped route management, including channel-only to account-scoped binding upgrades, role-aware binding identity handling, plugin-resolved binding account IDs, and optional account-binding prompts in `openclaw channels add`. (#27195) thanks @gumadeiras. +- Codex/WebSocket transport: make `openai-codex` WebSocket-first by default (`transport: "auto"` with SSE fallback), keep explicit per-model/runtime transport overrides, and add regression coverage + docs for transport selection. +- Onboarding/Plugins: let channel plugins own interactive onboarding flows with optional `configureInteractive` and `configureWhenConfigured` hooks while preserving the generic fallback path. (#27191) thanks @gumadeiras. +- Auth/Onboarding: add an explicit account-risk warning and confirmation gate before starting Gemini CLI OAuth, and document the caution in provider docs and the Gemini CLI auth plugin README. (#16683) Thanks @vincentkoc. +- Android/Nodes: add Android `device` capability plus `device.status` and `device.info` node commands, including runtime handler wiring and protocol/registry coverage for device status/info payloads. (#27664) Thanks @obviyus. +- Android/Nodes: add `notifications.list` support on Android nodes and expose `nodes notifications_list` in agent tooling for listing active device notifications. (#27344) thanks @obviyus. +- Docs/Contributing: add Nimrod Gutman to the maintainer roster in `CONTRIBUTING.md`. (#27840) Thanks @ngutman. 
+ +### Fixes + +- Telegram/DM allowlist runtime inheritance: enforce `dmPolicy: "allowlist"` `allowFrom` requirements using effective account-plus-parent config across account-capable channels (Telegram, Discord, Slack, Signal, iMessage, IRC, BlueBubbles, WhatsApp), and align `openclaw doctor` checks to the same inheritance logic so DM traffic is not silently dropped after upgrades. (#27936) Thanks @widingmarcus-cyber. +- Delivery queue/recovery backoff: prevent retry starvation by persisting `lastAttemptAt` on failed sends and deferring recovery retries until each entry's `lastAttemptAt + backoff` window is eligible, while continuing to recover ready entries behind deferred ones. Landed from contributor PR #27710 by @Jimmy-xuzimo. Thanks @Jimmy-xuzimo. +- Gemini OAuth/Auth flow: align OAuth project discovery metadata and endpoint fallback handling for Gemini CLI auth, including fallback coverage for environment-provided project IDs. (#16684) Thanks @vincentkoc. +- Google Chat/Lifecycle: keep Google Chat `startAccount` pending until abort in webhook mode so startup is no longer interpreted as immediate exit, preventing auto-restart loops and webhook-target churn. (#27384) thanks @junsuwhy. +- Temp dirs/Linux umask: force `0700` permissions after temp-dir creation and self-heal existing writable temp dirs before trust checks so `umask 0002` installs no longer crash-loop on startup. Landed from contributor PR #27860 by @stakeswky. (#27853) Thanks @stakeswky. +- Nextcloud Talk/Lifecycle: keep `startAccount` pending until abort and stop the webhook monitor on shutdown, preventing `EADDRINUSE` restart loops when the gateway manages account lifecycle. (#27897) +- Microsoft Teams/File uploads: acknowledge `fileConsent/invoke` immediately (`invokeResponse` before upload + file card send) so Teams no longer shows false "Something went wrong" timeout banners while upload completion continues asynchronously; includes updated async regression coverage. 
Landed from contributor PR #27641 by @scz2011. +- Queue/Drain/Cron reliability: harden lane draining with guaranteed `draining` flag reset on synchronous pump failures, reject new queue enqueues during gateway restart drain windows (instead of silently killing accepted tasks), add `/stop` queued-backlog cutoff metadata with stale-message skipping (while avoiding cross-session native-stop cutoff bleed), and raise isolated cron `agentTurn` outer safety timeout to avoid false 10-minute timeout races against longer agent session timeouts. (#27407, #27332, #27427) +- Typing/Main reply pipeline: always mark dispatch idle in `agent-runner` finalization so typing cleanup runs even when dispatcher `onIdle` does not fire, preventing stuck typing indicators after run completion. (#27250) Thanks @Sid-Qin. +- Typing/TTL safety net: add max-duration guardrails to shared typing callbacks so stuck lifecycle edges auto-stop typing indicators even when explicit idle/cleanup signals are missed. (#27428) Thanks @Crpdim. +- Typing/Cross-channel leakage: unify run-scoped typing suppression for cross-channel/internal-webchat routes, preserve current inbound origin as embedded run message channel context, harden shared typing keepalive with consecutive-failure circuit breaker edge-case handling, and enforce dispatcher completion/idle waits in extension dispatcher callsites (Feishu, Matrix, Mattermost, MSTeams) so typing indicators always clean up on success/error paths. Related: #27647, #27493, #27598. Supersedes/replaces draft PRs: #27640, #27593, #27540. +- Telegram/sendChatAction 401 handling: add bounded exponential backoff + temporary local typing suppression after repeated unauthorized failures to stop unbounded `sendChatAction` retry loops that can trigger Telegram abuse enforcement and bot deletion. (#27415) Thanks @widingmarcus-cyber. 
+- Telegram/Webhook startup: clarify webhook config guidance, allow `channels.telegram.webhookPort: 0` for ephemeral listener binding, and log both the local listener URL and Telegram-advertised webhook URL with the bound port. (#25732) thanks @huntharo. +- Config/Doctor allowlist safety: reject `dmPolicy: "allowlist"` configs with empty `allowFrom`, add Telegram account-level inheritance-aware validation, and teach `openclaw doctor --fix` to restore missing `allowFrom` entries from pairing-store files when present, preventing silent DM drops after upgrades. (#27936) Thanks @widingmarcus-cyber. +- Browser/Chrome extension handshake: bind relay WS message handling before `onopen` and add non-blocking `connect.challenge` response handling for gateway-style handshake frames, avoiding stuck `…` badge states when challenge frames arrive immediately on connect. Landed from contributor PR #22571 by @pandego. (#22553) +- Browser/Extension relay init: dedupe concurrent same-port relay startup with shared in-flight initialization promises so callers await one startup lifecycle and receive consistent success/failure results. Landed from contributor PR #21277 by @HOYALIM. (Related #20688) +- Browser/Fill relay + CLI parity: accept `act.fill` fields without explicit `type` by defaulting missing/empty `type` to `text` in both browser relay route parsing and `openclaw browser fill` CLI field parsing, so relay calls no longer fail when the model omits field type metadata. Landed from contributor PR #27662 by @Uface11. (#27296) Thanks @Uface11. +- Feishu/Permission error dispatch: merge sender-name permission notices into the main inbound dispatch so one user message produces one agent turn/reply (instead of a duplicate permission-notice turn), with regression coverage. (#27381) thanks @byungsker. 
+- Agents/Canvas default node resolution: when multiple connected canvas-capable nodes exist and no single `mac-*` candidate is selected, default to the first connected candidate instead of failing with `node required` for implicit-node canvas tool calls. Landed from contributor PR #27444 by @carbaj03. Thanks @carbaj03. +- TUI/stream assembly: preserve streamed text across real tool-boundary drops without keeping stale streamed text when non-text blocks appear only in the final payload. Landed from contributor PR #27711 by @scz2011. (#27674) +- Hooks/Internal `message:sent`: forward `sessionKey` on outbound sends from agent delivery, cron isolated delivery, gateway receipt acks, heartbeat sends, session-maintenance warnings, and restart-sentinel recovery so internal `message:sent` hooks consistently dispatch with session context, including `openclaw agent --deliver` runs resumed via `--session-id` (without explicit `--session-key`). Landed from contributor PR #27584 by @qualiobra. Thanks @qualiobra. +- Pi image-token usage: stop re-injecting history image blocks each turn, process image references from the current prompt only, and prune already-answered user-image blocks in stored history to prevent runaway token growth. (#27602) +- BlueBubbles/SSRF: auto-allowlist the configured `serverUrl` hostname for attachment fetches so localhost/private-IP BlueBubbles setups are no longer false-blocked by default SSRF checks. Landed from contributor PR #27648 by @lailoo. (#27599) Thanks @taylorhou for reporting. 
+- Agents/Compaction + onboarding safety: prevent destructive double-compaction by stripping stale assistant usage around compaction boundaries, skipping post-compaction custom metadata writes in the same attempt, and cancelling safeguard compaction when there are no real conversation messages to summarize; harden workspace/bootstrap detection for memory-backed workspaces; and change `openclaw onboard --reset` default scope to `config+creds+sessions` (workspace deletion now requires `--reset-scope full`). (#26458, #27314) Thanks @jaden-clovervnd, @Sid-Qin, and @widingmarcus-cyber for fix direction in #26502, #26529, and #27492. +- NO_REPLY suppression: suppress `NO_REPLY` before Slack API send and in sub-agent announce completion flow so sentinel text no longer leaks into user channels. Landed from contributor PRs #27529 (by @Sid-Qin) and #27535 (rewritten minimal landing by maintainers). (#27387, #27531) +- Matrix/Group sender identity: preserve sender labels in Matrix group inbound prompt text (`BodyForAgent`) for both channel and threaded messages, and align group envelopes with shared inbound sender-prefix formatting so first-person requests resolve against the current sender. (#27401) thanks @koushikxd. +- Auto-reply/Streaming: suppress only exact `NO_REPLY` final replies while still filtering streaming partial sentinel fragments (`NO_`, `NO_RE`, `HEARTBEAT_...`) so substantive replies ending with `NO_REPLY` are delivered and partial silent tokens do not leak during streaming. (#19576) Thanks @aldoeliacim. +- Auto-reply/Inbound metadata: add a readable `timestamp` field to conversation info and ignore invalid/out-of-range timestamp values so prompt assembly never crashes on malformed timestamp inputs. (#17017) thanks @liuy. 
+- Typing/Run completion race: prevent post-run keepalive ticks from re-triggering typing callbacks by guarding `triggerTyping()` with `runComplete`, with regression coverage for no-restart behavior during run-complete/dispatch-idle boundaries. (#27413) Thanks @widingmarcus-cyber. +- Typing/Dispatch idle: force typing cleanup when `markDispatchIdle` never arrives after run completion, avoiding leaked typing keepalive loops in cron/announce edges. Landed from contributor PR #27541 by @Sid-Qin. (#27493) +- Telegram/Inline buttons: allow callback-query button handling in groups (including `/models` follow-up buttons) when group policy authorizes the sender, by removing the redundant callback allowlist gate that blocked open-policy groups. (#27343) Thanks @GodsBoy. +- Telegram/Streaming preview: when finalizing without an existing preview message, prime pending preview text with final answer before stop-flush so users do not briefly see stale 1-2 word fragments (for example `no` before `no problem`). (#27449) Thanks @emanuelst for the original fix direction in #19673. +- Browser/Extension relay CORS: handle `/json*` `OPTIONS` preflight before auth checks, allow Chrome extension origins, and return extension-origin CORS headers on relay HTTP responses so extension token validation no longer fails cross-origin. Landed from contributor PR #23962 by @miloudbelarebia. (#23842) +- Browser/Extension relay auth: allow `?token=` query-param auth on relay `/json*` endpoints (consistent with relay WebSocket auth) so curl/devtools-style `/json/version` and `/json/list` probes work without requiring custom headers. Landed from contributor PR #26015 by @Sid-Qin. (#25928) +- Browser/Extension relay shutdown: flush pending extension-request timers/rejections during relay `stop()` before socket/server teardown so in-flight extension waits do not survive shutdown windows. Landed from contributor PR #24142 by @kevinWangSheng. 
+- Browser/Extension relay reconnect resilience: keep CDP clients alive across brief MV3 extension disconnect windows, wait briefly for extension reconnect before failing in-flight CDP commands, and only tear down relay target/client state after reconnect grace expires. Landed from contributor PR #27617 by @davidemanuelDEV. +- Browser/Route decode hardening: guard malformed percent-encoding in relay target action routes and browser route-param decoding so crafted `%` paths return `400` instead of crashing/unhandled URI decode failures. Landed from contributor PR #11880 by @Yida-Dev. +- Feishu/Inbound message metadata: include inbound `message_id` in `BodyForAgent` on a dedicated metadata line so agents can reliably correlate and act on media/message operations that require message IDs, with regression coverage. (#27253) thanks @xss925175263. +- Feishu/Doc tools: route `feishu_doc` and `feishu_app_scopes` through the active agent account context (with explicit `accountId` override support) so multi-account agents no longer default to the first configured app, with regression coverage for context routing and explicit override behavior. (#27338) thanks @AaronL725. +- LINE/Inline directives auth: gate directive parsing (`/model`, `/think`, `/verbose`, `/reasoning`, `/queue`) on resolved authorization (`command.isAuthorizedSender`) so `commands.allowFrom`-authorized LINE senders are not silently stripped when raw `CommandAuthorized` is unset. Landed from contributor PR #27248 by @kevinWangSheng. (#27240) +- Onboarding/Gateway: seed default Control UI `allowedOrigins` for non-loopback binds during onboarding (`localhost`/`127.0.0.1` plus custom bind host) so fresh non-loopback setups do not fail startup due to missing origin policy. (#26157) thanks @stakeswky. 
+- Docker/GCP onboarding: reduce first-build OOM risk by capping Node heap during `pnpm install`, reuse existing gateway token during `docker-setup.sh` reruns so `.env` stays aligned with config, auto-bootstrap Control UI allowed origins for non-loopback Docker binds, and add GCP docs guidance for tokenized dashboard links + pairing recovery commands. (#26253) Thanks @pandego. +- CLI/Gateway `--force` in non-root Docker: recover from `lsof` permission failures (`EACCES`/`EPERM`) by falling back to `fuser` kill + probe-based port checks, so `openclaw gateway --force` works for default container `node` user flows. (#27941) +- Gateway/Bind visibility: emit a startup warning when binding to non-loopback addresses so operators get explicit exposure guidance in runtime logs. (#25397) thanks @let5sne. +- Sessions cleanup/Doctor: add `openclaw sessions cleanup --fix-missing` to prune store entries whose transcript files are missing, including doctor guidance and CLI coverage. Landed from contributor PR #27508 by @Sid-Qin. (#27422) +- Doctor/State integrity: ignore metadata-only slash routing sessions when checking recent missing transcripts so `openclaw doctor` no longer reports false-positive transcript-missing warnings for `*:slash:*` keys. (#27375) thanks @gumadeiras. +- CLI/Gateway status: force local `gateway status` probe host to `127.0.0.1` for `bind=lan` so co-located probes do not trip non-loopback plaintext WebSocket checks. (#26997) thanks @chikko80. +- CLI/Gateway auth: align `gateway run --auth` parsing/help text with supported gateway auth modes by accepting `none` and `trusted-proxy` (in addition to `token`/`password`) for CLI overrides. (#27469) thanks @s1korrrr. +- CLI/Daemon status TLS probe: use `wss://` and forward local TLS certificate fingerprint for TLS-enabled gateway daemon probes so `openclaw daemon status` works with `gateway.bind=lan` + `gateway.tls.enabled=true`. (#24234) thanks @liuy. 
+- Podman/Default bind: change `run-openclaw-podman.sh` default gateway bind from `lan` to `loopback` and document explicit LAN opt-in with Control UI origin configuration. (#27491) thanks @robbyczgw-cla. +- Daemon/macOS launchd: forward proxy env vars into supervised service environments, keep LaunchAgent `KeepAlive=true` semantics, and harden restart sequencing to `print -> bootout -> wait old pid exit -> bootstrap -> kickstart`. (#27276) thanks @frankekn. +- Daemon/macOS TLS certs: default LaunchAgent service env `NODE_EXTRA_CA_CERTS` to `/etc/ssl/cert.pem` (while preserving explicit overrides) so HTTPS clients no longer fail with local-issuer errors under launchd. (#27915) Thanks @Lukavyi. +- Gateway/macOS restart-loop hardening: detect OpenClaw-managed supervisor markers during SIGUSR1 restart handoff, clean stale gateway PIDs before `/restart` launchctl/systemctl triggers, and set LaunchAgent `ThrottleInterval=60` to bound launchd retry storms during lock-release races. Landed from contributor PRs #27655 (@taw0002), #27448 (@Sid-Qin), and #27650 (@kevinWangSheng). (#27605, #27590, #26904, #26736) +- Models/MiniMax auth header defaults: set `authHeader: true` for both onboarding-generated MiniMax API providers and implicit built-in MiniMax (`minimax`, `minimax-portal`) provider templates so first requests no longer fail with MiniMax `401 authentication_error` due to missing `Authorization` header. Landed from contributor PRs #27622 by @riccoyuanft and #27631 by @kevinWangSheng. (#27600, #15303) +- Models/Google Antigravity IDs: normalize bare `gemini-3-pro`, `gemini-3.1-pro`, and `gemini-3-1-pro` model IDs to the default `-low` thinking tier so provider requests no longer fail with 404 when the tier suffix is omitted. (#24145) Thanks @byungsker. +- Auth/Auth profiles: normalize `auth-profiles.json` alias fields (`mode -> type`, `apiKey -> key`) before credential validation so entries copied from `openclaw.json` auth examples are no longer silently dropped. 
(#26950) thanks @byungsker. +- Models/Google Gemini: treat `google` (Gemini API key auth profile) as a reasoning-tag provider to prevent `` leakage, and add forward-compat model fallback for `google-gemini-cli` `gemini-3.1-pro*` / `gemini-3.1-flash*` IDs to avoid false unknown-model errors. (#26551, #26524) Thanks @byungsker. +- Models/Profile suffix parsing: centralize trailing `@profile` parsing and only treat `@` as a profile separator when it appears after the final `/`, preserving model IDs like `openai/@cf/...` and `openrouter/@preset/...` across `/model` directive parsing and allowlist model resolution, with regression coverage. +- Models/OpenAI Codex config schema parity: accept `openai-codex-responses` in the config model API schema and TypeScript `ModelApi` union, with regression coverage for config validation. Landed from contributor PR #27501 by @AytuncYildizli. Thanks @AytuncYildizli. +- Agents/Models config: preserve agent-level provider `apiKey` and `baseUrl` during merge-mode `models.json` updates when agent values are present. (#27293) thanks @Sid-Qin. +- Azure OpenAI Responses: force `store=true` for `azure-openai-responses` direct responses API calls to avoid multi-turn 400 failures. Landed from contributor PR #27499 by @polarbear-Yang. (#27497) +- Security/Node exec approvals: require structured `commandArgv` approvals for `host=node`, enforce versioned `systemRunBindingV1` matching for argv/cwd/session/agent/env context with fail-closed behavior on missing/mismatched bindings, and add `GIT_EXTERNAL_DIFF` to blocked host env keys. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Plugin channel HTTP auth: normalize protected `/api/channels` path checks against canonicalized request paths (case + percent-decoding + slash normalization), resolve encoded dot-segment traversal variants, and fail closed on malformed `%`-encoded channel prefixes so alternate-path variants cannot bypass gateway auth. 
This ships in the next npm release (`2026.2.26`). Thanks @zpbrent for reporting. +- Security/Gateway node pairing: pin paired-device `platform`/`deviceFamily` metadata across reconnects and bind those fields into device-auth signatures, so reconnect metadata spoofing cannot expand node command allowlists without explicit repair pairing. This ships in the next npm release (`2026.2.26`). Thanks @76embiid21 for reporting. +- Security/Sandbox path alias guard: reject broken symlink targets by resolving through existing ancestors and failing closed on out-of-root targets, preventing workspace-only `apply_patch` writes from escaping sandbox/workspace boundaries via dangling symlinks. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Workspace FS boundary aliases: harden canonical boundary resolution for non-existent-leaf symlink aliases while preserving valid in-root aliases, preventing first-write workspace escapes via out-of-root symlink targets. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Config includes: harden `$include` file loading with verified-open reads, reject hardlinked include aliases, and enforce include file-size guardrails so config include resolution remains bounded to trusted in-root files. This ships in the next npm release (`2026.2.26`). Thanks @zpbrent for reporting. +- Security/Node exec approvals hardening: freeze immutable approval-time execution plans (`argv`/`cwd`/`agentId`/`sessionKey`) via `system.run.prepare`, enforce those canonical plan values during approval forwarding/execution, and reject mutable parent-symlink cwd paths during approval-plan building to prevent approval bypass via symlink rebind. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. 
+- Security/Microsoft Teams media fetch: route Graph message/hosted-content/attachment fetches and auth-scope fallback attachment downloads through shared SSRF-guarded fetch paths, and centralize hostname-suffix allowlist policy helpers in the plugin SDK to remove channel/plugin drift. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Voice Call (Twilio): bind webhook replay + manager dedupe identity to authenticated request material, remove unsigned `i-twilio-idempotency-token` trust from replay/dedupe keys, and thread verified request identity through provider parse flow to harden cross-provider event dedupe. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Exec approvals forwarding: prefer turn-source channel/account/thread metadata when resolving approval delivery targets so stale session routes do not misroute approval prompts. +- Security/Pairing multi-account isolation: enforce account-scoped pairing allowlists and pending-request storage across core + extension message channels while preserving channel-scoped defaults for the default account. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting and @gumadeiras for implementation. +- Config/Plugins entries: treat unknown `plugins.entries.*` ids as startup warnings (ignored stale keys) instead of hard validation failures that can crash-loop gateway boot. Landed from contributor PR #27506 by @Sid-Qin. (#27455) +- Telegram native commands: degrade command registration on `BOT_COMMANDS_TOO_MUCH` by retrying with fewer commands instead of crash-looping startup sync. Landed from contributor PR #27512 by @Sid-Qin. 
(#27456) +- Web tools/Proxy: route `web_search` provider HTTP calls (Brave, Perplexity, xAI, Gemini, Kimi), redirect resolution, and `web_fetch` through a shared proxy-aware SSRF guard path so gateway installs behind `HTTP_PROXY`/`HTTPS_PROXY`/`ALL_PROXY` no longer fail with transport `fetch failed` errors. (#27430) thanks @kevinWangSheng. +- Android/Node invoke: remove native gateway WebSocket `Origin` header to avoid false origin rejections, unify invoke command registry/policy/error parsing paths, and keep command availability checks centralized to reduce dispatcher/advertisement drift. (#27257) Thanks @obviyus. +- Android/Camera clip: remove `camera.clip` HTTP-upload fallback to base64 so clip transport is deterministic and fail-loud, and reject non-positive `maxWidth` values so invalid inputs fall back to the safe resize default. (#28229) Thanks @obviyus. +- Gateway shared-auth scopes: preserve requested operator scopes for shared-token clients when device identity is unavailable, instead of clearing scopes during auth handling. Landed from contributor PR #27498 by @kevinWangSheng. (#27494) +- Cron/Hooks isolated routing: preserve canonical `agent:*` session keys in isolated runs so already-qualified keys are not double-prefixed (for example `agent:main:main` no longer becomes `agent:main:agent:main:main`). Landed from contributor PR #27333 by @MaheshBhushan. (#27289, #27282) +- Channels/Multi-account config: when adding a non-default channel account to a single-account top-level channel setup, move existing account-scoped top-level single-account values into `channels..accounts.default` before writing the new account so the original account keeps working without duplicated account values at channel root; `openclaw doctor --fix` now repairs previously mixed channel account shapes the same way. (#27334) thanks @gumadeiras. 
+- iOS/Talk mode: stop injecting the voice directive hint into iOS Talk prompts and remove the Voice Directive Hint setting, reducing model bias toward tool-style TTS directives and keeping relay responses text-first by default. (#27543) thanks @ngutman. +- CI/Windows: shard the Windows `checks-windows` test lane into two matrix jobs and honor explicit shard index overrides in `scripts/test-parallel.mjs` to reduce CI critical-path wall time. (#27234) Thanks @joshavant. + +## 2026.2.25 ### Changes - Android/Chat: improve streaming delivery handling and markdown rendering quality in the native Android chat UI, including better GitHub-flavored markdown behavior. (#26079) Thanks @obviyus. +- Android/Startup perf: defer foreground-service startup, move WebView debugging init out of critical startup, and add startup macrobenchmark + low-noise perf CLI scripts for deterministic cold-start tracking. (#26659) Thanks @obviyus. +- UI/Chat compose: add mobile stacked layout for compose action buttons on small screens to improve send/session controls usability. (#11167) Thanks @junyiz. +- Heartbeat/Config: replace heartbeat DM toggle with `agents.defaults.heartbeat.directPolicy` (`allow` | `block`; also supported per-agent via `agents.list[].heartbeat.directPolicy`) for clearer delivery semantics. +- Onboarding/Security: clarify onboarding security notices that OpenClaw is personal-by-default (single trusted operator boundary) and shared/multi-user setups require explicit lock-down/hardening. - Branding/Docs + Apple surfaces: replace remaining `bot.molt` launchd label, bundle-id, logging subsystem, and command examples with `ai.openclaw` across docs, iOS app surfaces, helper scripts, and CLI test fixtures. +- Agents/Config: remind agents to call `config.schema` before config edits or config-field questions to avoid guessing. Thanks @thewilloftheshadow. 
+- Dependencies: update workspace dependency pins and lockfile (Bedrock SDK `3.998.0`, `@mariozechner/pi-*` `0.55.1`, TypeScript native preview `7.0.0-dev.20260225.1`) while keeping `@buape/carbon` pinned. + +### Breaking + +- **BREAKING:** Heartbeat direct/DM delivery default is now `allow` again. To keep DM-blocked behavior from `2026.2.24`, set `agents.defaults.heartbeat.directPolicy: "block"` (or per-agent override). ### Fixes -- Security/Nextcloud Talk: reject unsigned webhook traffic before full body reads, reducing unauthenticated request-body exposure, with auth-order regression coverage. (#26118) Thanks @bmendonca3. -- Security/Nextcloud Talk: stop treating DM pairing-store entries as group allowlist senders, so group authorization remains bounded to configured group allowlists. (#26116) Thanks @bmendonca3. -- Security/IRC: keep pairing-store approvals DM-only and out of IRC group allowlist authorization, with policy regression tests for allowlist resolution. (#26112) Thanks @bmendonca3. -- Security/Microsoft Teams: isolate group allowlist and command authorization from DM pairing-store entries to prevent cross-context authorization bleed. (#26111) Thanks @bmendonca3. -- Security/LINE: cap unsigned webhook body reads before auth/signature handling to bound unauthenticated body processing. (#26095) Thanks @bmendonca3. -- Agents/Model fallback: keep explicit text + image fallback chains reachable even when `agents.defaults.models` allowlists are present, prefer explicit run `agentId` over session-key parsing for followup fallback override resolution (with session-key fallback), treat agent-level fallback overrides as configured in embedded runner preflight, and classify `model_cooldown` / `cooling down` errors as `rate_limit` so failover continues. 
(#11972, #24137, #17231) +- Agents/Subagents delivery: refactor subagent completion announce dispatch into an explicit queue/direct/fallback state machine, recover outbound channel-plugin resolution in cold/stale plugin-registry states across announce/message/gateway send paths, finalize cleanup bookkeeping when announce flow rejects, and treat Telegram sends without `message_id` as delivery failures (instead of false-success `"unknown"` IDs). (#26867, #25961, #26803, #25069, #26741) Thanks @SmithLabsLLC and @docaohieu2808. +- Telegram/Webhook: pre-initialize webhook bots, switch webhook processing to callback-mode JSON handling, and preserve full near-limit payload reads under delayed handlers to prevent webhook request hangs and dropped updates. (#26156) +- Slack/Session threads: prevent oversized parent-session inheritance from silently bricking new thread sessions, surface embedded context-overflow empty-result failures to users, and add configurable `session.parentForkMaxTokens` (default `100000`, `0` disables). (#26912) Thanks @markshields-tl. +- Cron/Message multi-account routing: honor explicit `delivery.accountId` for isolated cron delivery resolution, and when `message.send` omits `accountId`, fall back to the sending agent's bound channel account instead of defaulting to the global account. (#27015, #26975) Thanks @lbo728 and @stakeswky. +- Gateway/Message media roots: thread `agentId` through gateway `send` RPC and prefer explicit `agentId` over session/default resolution so non-default agent workspace media sends no longer fail with `LocalMediaAccessError`; added regression coverage for agent precedence and blank-agent fallback. (#23249) Thanks @Sid-Qin. - Followups/Routing: when explicit origin routing fails, allow same-channel fallback dispatch (while still blocking cross-channel fallback) so followup replies do not get dropped on transient origin-adapter failures. (#26109) Thanks @Sid-Qin. 
-- Agents/Model fallback: continue fallback traversal on unrecognized errors when candidates remain, while still throwing the original unknown error on the last candidate. (#26106) Thanks @Sid-Qin. +- Cron/Announce duplicate guard: track attempted announce/direct delivery separately from confirmed `delivered`, and suppress fallback main-session cron summaries when delivery was already attempted to avoid duplicate end-user sends in uncertain-ack paths. (#27018) +- LINE/Lifecycle: keep LINE `startAccount` pending until abort so webhook startup is no longer misread as immediate channel exit, preventing restart-loop storms on LINE provider boot. (#26528) Thanks @Sid-Qin. +- Discord/Gateway: capture and drain startup-time gateway `error` events before lifecycle listeners attach so early `Fatal Gateway error: 4014` closes surface as actionable intent guidance instead of uncaught gateway crashes. (#23832) Thanks @theotarr. +- Discord/Inbound text: preserve embed `title` + `description` fallback text in message and forwarded snapshot parsing so embed titles are not silently dropped from agent input. (#26946) Thanks @stakeswky. +- Slack/Inbound media fallback: deliver file-only messages even when Slack media downloads fail by adding a filename placeholder fallback, capping fallback names to the shared media-file limit, and normalizing empty filenames to `file` so attachment-only messages are not silently dropped. (#25181) Thanks @justinhuangcode. +- Telegram/Preview cleanup: keep finalized text previews when a later assistant message is media-only (for example mixed text plus voice turns) by skipping finalized preview archival at assistant-message boundaries, preventing cleanup from deleting already-visible final text messages. (#27042) - Telegram/Markdown spoilers: keep valid `||spoiler||` pairs while leaving unmatched trailing `||` delimiters as literal text, avoiding false all-or-nothing spoiler suppression. (#26105) Thanks @Sid-Qin. 
-- Hooks/Inbound metadata: include `guildId` and `channelName` in `message_received` metadata for both plugin and internal hook paths. (#26115) Thanks @davidrudduck. -- Discord/Component auth: evaluate guild component interactions with command-gating authorizers so unauthorized users no longer get `CommandAuthorized: true` on modal/button events. (#26119) Thanks @bmendonca3. +- Slack/Allowlist channels: match channel IDs case-insensitively during channel allowlist resolution so lowercase config keys (for example `c0abc12345`) correctly match Slack runtime IDs (`C0ABC12345`) under `groupPolicy: "allowlist"`, preventing silent channel-event drops. (#26878) Thanks @lbo728. - Discord/Typing indicator: prevent stuck typing indicators by sealing channel typing keepalive callbacks after idle/cleanup and ensuring Discord dispatch always marks typing idle even if preview-stream cleanup fails. (#26295) Thanks @ngutman. - Channels/Typing indicator: guard typing keepalive start callbacks after idle/cleanup close so post-close ticks cannot re-trigger stale typing indicators. (#26325) Thanks @win4r. +- Followups/Typing indicator: ensure followup turns mark dispatch idle on every exit path (including `NO_REPLY`, empty payloads, and agent errors) so typing keepalive cleanup always runs and channel typing indicators do not get stuck after queued/silent followups. (#26881) Thanks @codexGW. +- Voice-call/TTS tools: hide the `tts` tool when the message provider is `voice`, preventing voice-call runs from selecting self-playback TTS and falling into silent no-output loops. (#27025) +- Agents/Tools: normalize non-standard plugin tool results that omit `content` so embedded runs no longer crash with `Cannot read properties of undefined (reading 'filter')` after tool completion (including `tesseramemo_query`). 
(#27007) +- Cron/Model overrides: when isolated `payload.model` is no longer allowlisted, fall back to default model selection instead of failing the job, while still returning explicit errors for invalid model strings. (#26717) Thanks @Youyou972. +- Agents/Model fallback: keep explicit text + image fallback chains reachable even when `agents.defaults.models` allowlists are present, prefer explicit run `agentId` over session-key parsing for followup fallback override resolution (with session-key fallback), treat agent-level fallback overrides as configured in embedded runner preflight, and classify `model_cooldown` / `cooling down` errors as `rate_limit` so failover continues. (#11972, #24137, #17231) +- Agents/Model fallback: keep same-provider fallback chains active when session model differs from configured primary, infer cooldown reason from provider profile state (instead of `disabledReason` only), keep no-profile fallback providers eligible (env/models.json paths), and only relax same-provider cooldown fallback attempts for `rate_limit`. (#23816) thanks @ramezgaberiel. +- Agents/Model fallback: continue fallback traversal on unrecognized errors when candidates remain, while still throwing the original unknown error on the last candidate. (#26106) Thanks @Sid-Qin. +- Models/Auth probes: map permanent auth failover reasons (`auth_permanent`, for example revoked keys) into probe auth status instead of `unknown`, so `openclaw models status --probe` reports actionable auth failures. (#25754) thanks @rrenamed. +- Hooks/Inbound metadata: include `guildId` and `channelName` in `message_received` metadata for both plugin and internal hook paths. (#26115) Thanks @davidrudduck. +- Discord/Component auth: evaluate guild component interactions with command-gating authorizers so unauthorized users no longer get `CommandAuthorized: true` on modal/button events. (#26119) Thanks @bmendonca3. 
+- Security/Gateway auth: require pairing for operator device-identity sessions authenticated with shared token auth so unpaired devices cannot self-assign operator scopes. Thanks @tdjackey for reporting. +- Security/Gateway WebSocket auth: enforce origin checks for direct browser WebSocket clients beyond Control UI/Webchat, apply password-auth failure throttling to browser-origin loopback attempts (including localhost), and block silent auto-pairing for non-Control-UI browser clients to prevent cross-origin brute-force and session takeover chains. This ships in the next npm release (`2026.2.26`). Thanks @luz-oasis for reporting. +- Security/Gateway trusted proxy: require `operator` role for the Control UI trusted-proxy pairing bypass so unpaired `node` sessions can no longer connect via `client.id=control-ui` and invoke node event methods. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/macOS beta onboarding: remove Anthropic OAuth sign-in and the legacy `oauth.json` onboarding path that exposed the PKCE verifier via OAuth `state`; this impacted the macOS beta onboarding path only. Anthropic subscription auth is now setup-token-only and will ship in the next npm release (`2026.2.26`). Thanks @zdi-disclosures for reporting. +- Security/Microsoft Teams file consent: bind `fileConsent/invoke` upload acceptance/decline to the originating conversation before consuming pending uploads, preventing cross-conversation pending-file upload or cancellation via leaked `uploadId` values; includes regression coverage for match/mismatch invoke handling. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Gateway: harden `agents.files` path handling to block out-of-workspace symlink targets for `agents.files.get`/`agents.files.set`, keep in-workspace symlink targets supported, and add gateway regression coverage for both blocked escapes and allowed in-workspace symlinks. 
Thanks @tdjackey for reporting. +- Security/Workspace FS: reject hardlinked workspace file aliases in `tools.fs.workspaceOnly` and `tools.exec.applyPatch.workspaceOnly` boundary checks (including sandbox mount-root guards) to prevent out-of-workspace read/write via in-workspace hardlink paths. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Browser temp paths: harden trace/download output-path handling against symlink-root and symlink-parent escapes with realpath-based write-path checks plus secure fallback tmp-dir validation that fails closed on unsafe fallback links. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Browser uploads: revalidate upload paths at use-time in Playwright file-chooser and direct-input flows so missing/rebound paths are rejected before `setFiles`, with regression coverage for strict missing-path handling. +- Security/Exec approvals: bind `system.run` approval matching to exact argv identity and preserve argv whitespace in rendered command text, preventing trailing-space executable path swaps from reusing a mismatched approval. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Exec approvals: harden approval-bound `system.run` execution on node hosts by rejecting symlink `cwd` paths and canonicalizing path-like executable argv before spawn, blocking mutable-cwd symlink retarget chains between approval and execution. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Signal: enforce DM/group authorization before reaction-only notification enqueue so unauthorized senders can no longer inject Signal reaction system events under `dmPolicy`/`groupPolicy`; reaction notifications now require channel access checks first. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. 
+- Security/Discord reactions: enforce DM policy/allowlist authorization before reaction-event system enqueue in direct messages; Discord reaction handling now also honors DM/group-DM enablement and guild `groupPolicy` channel gating to keep reaction ingress aligned with normal message preflight. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Slack reactions + pins: gate `reaction_*` and `pin_*` system-event enqueue through shared sender authorization so DM `dmPolicy`/`allowFrom` and channel `users` allowlists are enforced consistently for non-message ingress, with regression coverage for denied/allowed sender paths. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Slack member + message subtype events: gate `member_*` plus `message_changed`/`message_deleted`/`thread_broadcast` system-event enqueue through shared sender authorization so DM `dmPolicy`/`allowFrom` and channel `users` allowlists are enforced consistently for non-message ingress; message subtype system events now fail closed when sender identity is missing, with regression coverage. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Telegram reactions: enforce `dmPolicy`/`allowFrom` and group allowlist authorization on `message_reaction` events before enqueueing reaction system events, preventing unauthorized reaction-triggered input in DMs and groups; ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Telegram group allowlist: fail closed for group sender authorization by removing DM pairing-store fallback from group allowlist evaluation; group sender access now requires explicit `groupAllowFrom` or per-group/per-topic `allowFrom`. (#25988) Thanks @bmendonca3. 
+- Security/DM-group allowlist boundaries: keep DM pairing-store approvals DM-only by removing pairing-store inheritance from group sender authorization in LINE and Mattermost message preflight, and by centralizing shared DM/group allowlist composition so group checks never include pairing-store entries. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Slack interactions: enforce channel/DM authorization and modal actor binding (`private_metadata.userId`) before enqueueing `block_action`/`view_submission`/`view_closed` system events, with regression coverage for unauthorized senders and missing/mismatched actor metadata. This ships in the next npm release (`2026.2.26`). Thanks @tdjackey for reporting. +- Security/Nextcloud Talk: drop replayed signed webhook events with persistent per-account replay dedupe across restarts, and reject unexpected webhook backend origins when account base URL is configured. Thanks @aristorechina for reporting. +- Security/Nextcloud Talk: reject unsigned webhook traffic before full body reads, reducing unauthenticated request-body exposure, with auth-order regression coverage. (#26118) Thanks @bmendonca3. +- Security/Nextcloud Talk: stop treating DM pairing-store entries as group allowlist senders, so group authorization remains bounded to configured group allowlists. (#26116) Thanks @bmendonca3. +- Security/LINE: cap unsigned webhook body reads before auth/signature handling to bound unauthenticated body processing. (#26095) Thanks @bmendonca3. +- Security/IRC: keep pairing-store approvals DM-only and out of IRC group allowlist authorization, with policy regression tests for allowlist resolution. (#26112) Thanks @bmendonca3. +- Security/Microsoft Teams: isolate group allowlist and command authorization from DM pairing-store entries to prevent cross-context authorization bleed. (#26111) Thanks @bmendonca3. 
+- Security/SSRF guard: classify IPv6 multicast literals (`ff00::/8`) as blocked/private-internal targets in shared SSRF IP checks, preventing multicast literals from bypassing URL-host preflight and DNS answer validation. This ships in the next npm release (`2026.2.26`). Thanks @zpbrent for reporting. - Tests/Low-memory stability: disable Vitest `vmForks` by default on low-memory local hosts (`<64 GiB`), keep low-profile extension lane parallelism at 4 workers, and align cron isolated-agent tests with `setSessionRuntimeModel` usage to avoid deterministic suite failures. (#26324) Thanks @ngutman. -- Slack/Inbound media fallback: deliver file-only messages even when Slack media downloads fail by adding a filename placeholder fallback, capping fallback names to the shared media-file limit, and normalizing empty filenames to `file` so attachment-only messages are not silently dropped. (#25181) Thanks @justinhuangcode. ## 2026.2.24 @@ -188,6 +335,8 @@ Docs: https://docs.openclaw.ai - Providers/Bedrock: disable prompt-cache retention for non-Anthropic Bedrock models so Nova/Mistral requests do not send unsupported cache metadata. (#20866) Thanks @pierreeurope. - Providers/Bedrock: apply Anthropic-Claude cacheRetention defaults and runtime pass-through for `amazon-bedrock/*anthropic.claude*` model refs, while keeping non-Anthropic Bedrock models excluded. (#22303) Thanks @snese. - Providers/OpenRouter: remove conflicting top-level `reasoning_effort` when injecting nested `reasoning.effort`, preventing OpenRouter 400 payload-validation failures for reasoning models. (#24120) thanks @tenequm. +- Plugins/Install: when npm install returns 404 for bundled channel npm specs, fall back to bundled channel sources and complete install/enable persistence instead of failing plugin install. (#12849) Thanks @vincentkoc. 
+- Gemini OAuth/Auth: resolve npm global shim install layouts while discovering Gemini CLI credentials, preventing false "Gemini CLI not found" onboarding/auth failures when shim paths are on `PATH`. (#27585) Thanks @ehgamemo and @vincentkoc. - Providers/Groq: avoid classifying Groq TPM limit errors as context overflow so throttling paths no longer trigger overflow recovery logic. (#16176) Thanks @dddabtc. - Gateway/WS: close repeated post-handshake `unauthorized role:*` request floods per connection and sample duplicate rejection logs, preventing a single misbehaving client from degrading gateway responsiveness. (#20168) Thanks @acy103, @vibecodooor, and @vincentkoc. - Gateway/Restart: treat child listener PIDs as owned by the service runtime PID during restart health checks to avoid false stale-process kills and restart timeouts on launchd/systemd. (#24696) Thanks @gumadeiras. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1386bc4881a..02085735456 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,6 +32,9 @@ Welcome to the lobster tank! 🦞 - **Mariano Belinky** - iOS app, Security - GitHub: [@mbelinky](https://github.com/mbelinky) · X: [@belimad](https://x.com/belimad) +- **Nimrod Gutman** - iOS app, macOS app and crustacean features + - GitHub: [@ngutman](https://github.com/ngutman) · X: [@theguti](https://x.com/theguti) + - **Vincent Koc** - Agents, Telemetry, Hooks, Security - GitHub: [@vincentkoc](https://github.com/vincentkoc) · X: [@vincent_koc](https://x.com/vincent_koc) @@ -50,6 +53,12 @@ Welcome to the lobster tank! 
🦞 - **Onur Solmaz** - Agents, dev workflows, ACP integrations, MS Teams - GitHub: [@onutc](https://github.com/onutc), [@osolmaz](https://github.com/osolmaz) · X: [@onusoz](https://x.com/onusoz) +- **Josh Avant** - Core, CLI, Gateway, Security, Agents + - GitHub: [@joshavant](https://github.com/joshavant) · X: [@joshavant](https://x.com/joshavant) + +- **Jonathan Taylor** - ACP subsystem, Gateway features/bugs, Gog/Mog/Sog CLIs, SEDMAT + - GitHub: [@visionik](https://github.com/visionik) · X: [@visionik](https://x.com/visionik) + ## How to Contribute 1. **Bugs & small fixes** → Open a PR! diff --git a/Dockerfile b/Dockerfile index 255340cb02b..2229a299a56 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,7 +23,9 @@ COPY --chown=node:node patches ./patches COPY --chown=node:node scripts ./scripts USER node -RUN pnpm install --frozen-lockfile +# Reduce OOM risk on low-memory hosts during dependency installation. +# Docker builds on small VMs may otherwise fail with "Killed" (exit 137). +RUN NODE_OPTIONS=--max-old-space-size=2048 pnpm install --frozen-lockfile # Optionally install Chromium and Xvfb for browser automation. # Build with: docker build --build-arg OPENCLAW_INSTALL_BROWSER=1 ... diff --git a/PR_STATUS.md b/PR_STATUS.md deleted file mode 100644 index 1887eca27d9..00000000000 --- a/PR_STATUS.md +++ /dev/null @@ -1,78 +0,0 @@ -# OpenClaw PR Submission Status - -> Auto-maintained by agent team. Last updated: 2026-02-22 - -## PR Plan Overview - -All PRs target upstream `openclaw/openclaw` via fork `kevinWangSheng/openclaw`. -Each PR follows [CONTRIBUTING.md](./CONTRIBUTING.md) and uses the [PR template](./.github/PULL_REQUEST_TEMPLATE.md). - -## Duplicate Check - -Before submission, each PR was cross-referenced against: - -- 100+ open upstream PRs (as of 2026-02-22) -- 50 recently merged PRs -- 50+ open issues - -No overlap found with existing PRs. 
- -## PR Status Table - -| # | Branch | Title | Type | Status | PR URL | -| --- | -------------------------------------- | --------------------------------------------------------------------------- | -------- | --------------- | --------------------------------------------------------- | -| 1 | `security/redos-safe-regex` | fix(security): add ReDoS protection for user-controlled regex patterns | Security | CI Pass | [#23670](https://github.com/openclaw/openclaw/pull/23670) | -| 2 | `security/session-slug-crypto-random` | fix(security): use crypto.randomInt for session slug generation | Security | CI Pass | [#23671](https://github.com/openclaw/openclaw/pull/23671) | -| 3 | `fix/json-parse-crash-guard` | fix(resilience): guard JSON.parse of external process output with try-catch | Bug fix | CI Pass | [#23672](https://github.com/openclaw/openclaw/pull/23672) | -| 4 | `refactor/console-to-subsystem-logger` | refactor(logging): migrate remaining console calls to subsystem logger | Refactor | CI Pass | [#23669](https://github.com/openclaw/openclaw/pull/23669) | -| 5 | `fix/sanitize-rpc-error-messages` | fix(security): sanitize RPC error messages in signal and imessage clients | Security | CI Pass | [#23724](https://github.com/openclaw/openclaw/pull/23724) | -| 6 | `fix/download-stream-cleanup` | fix(resilience): destroy write streams on download errors | Bug fix | CI Pass | [#23726](https://github.com/openclaw/openclaw/pull/23726) | -| 7 | `fix/telegram-status-reaction-cleanup` | fix(telegram): clear done reaction when removeAckAfterReply is true | Bug fix | CI Pass | [#23728](https://github.com/openclaw/openclaw/pull/23728) | -| 8 | `fix/session-cache-eviction` | fix(memory): add max size eviction to session manager cache | Bug fix | CI Pass (17/17) | [#23744](https://github.com/openclaw/openclaw/pull/23744) | -| 9 | `fix/fetch-missing-timeout` | fix(resilience): add timeout to unguarded fetch calls in browser subsystem | Bug fix | CI Pass (18/18) | 
[#23745](https://github.com/openclaw/openclaw/pull/23745) | -| 10 | `fix/skills-download-partial-cleanup` | fix(resilience): clean up partial file on skill download failure | Bug fix | CI Pass (19/19) | [#24141](https://github.com/openclaw/openclaw/pull/24141) | -| 11 | `fix/extension-relay-stop-cleanup` | fix(browser): flush pending extension timers on relay stop | Bug fix | CI Pass (20/20) | [#24142](https://github.com/openclaw/openclaw/pull/24142) | - -## Isolation Rules - -- Each agent works on a separate git worktree branch -- No two agents modify the same file -- File ownership: - - PR 1: `src/infra/exec-approval-forwarder.ts`, `src/discord/monitor/exec-approvals.ts` - - PR 2: `src/agents/session-slug.ts` - - PR 3: `src/infra/bonjour-discovery.ts`, `src/infra/outbound/delivery-queue.ts` - - PR 4: `src/infra/tailscale.ts`, `src/node-host/runner.ts` - - PR 5: `src/signal/client.ts`, `src/imessage/client.ts` - - PR 6: `src/media/store.ts`, `src/commands/signal-install.ts` - - PR 7: `src/telegram/bot-message-dispatch.ts` - - PR 8: `src/agents/pi-embedded-runner/session-manager-cache.ts` - - PR 9: `src/cli/nodes-camera.ts`, `src/browser/pw-session.ts` - - PR 10: `src/agents/skills-install-download.ts` - - PR 11: `src/browser/extension-relay.ts` - -## Verification Results - -### Batch 1 (PRs 1-4) — All CI Green - -- PR 1: 17 tests pass, check/build/tests all green -- PR 2: 3 tests pass, check/build/tests all green -- PR 3: 45 tests pass (3 new), check/build/tests all green -- PR 4: 12 tests pass, check/build/tests all green - -### Batch 2 (PRs 5-7) — CI Running - -- PR 5: 3 signal tests pass, check pass, awaiting full test suite -- PR 6: 38 tests pass (20 media + 18 signal-install), check pass, awaiting full suite -- PR 7: 47 tests pass (3 new), check pass, awaiting full suite - -### Batch 3 (PRs 8-9) — All CI Green - -- PR 8 & 9: Initially failed due to pre-existing upstream TS errors + Windows flaky test. 
Fixed by rebasing onto latest upstream/main and removing `yieldMs: 10` from flaky sandbox test. -- PR 8: 17/17 pass, check/build/tests/windows all green -- PR 9: 18/18 pass, check/build/tests/windows all green - -### Batch 4 (PRs 10-11) — All CI Green - -- PR 10 & 11: Initially failed Windows flaky test (`yieldMs: 10` race). Fixed by removing `yieldMs: 10` from flaky sandbox test (same fix as PRs 8-9). -- PR 10: 19/19 pass, check/build/tests/windows all green -- PR 11: 20/20 pass, check/build/tests/windows all green diff --git a/SECURITY.md b/SECURITY.md index eb42a335572..d7e4977e600 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -41,6 +41,7 @@ For fastest triage, include all of the following: - For exposed-secret reports: proof the credential is OpenClaw-owned (or grants access to OpenClaw-operated infrastructure/services). - Explicit statement that the report does not rely on adversarial operators sharing one gateway host/config. - Scope check explaining why the report is **not** covered by the Out of Scope section below. +- For command-risk/parity reports (for example obfuscation detection differences), a concrete boundary-bypass path is required (auth/approval/allowlist/sandbox). Parity-only findings are treated as hardening, not vulnerabilities. Reports that miss these requirements may be closed as `invalid` or `no-action`. @@ -53,10 +54,12 @@ These are frequently reported but are typically closed with no code change: - Authorized user-triggered local actions presented as privilege escalation. Example: an allowlisted/owner sender running `/export-session /absolute/path.html` to write on the host. In this trust model, authorized user actions are trusted host actions unless you demonstrate an auth/sandbox/boundary bypass. - Reports that only show a malicious plugin executing privileged actions after a trusted operator installs/enables it. - Reports that assume per-user multi-tenant authorization on a shared gateway host/config. 
+- Reports that only show differences in heuristic detection/parity (for example obfuscation-pattern detection on one exec path but not another, such as `node.invoke -> system.run` parity gaps) without demonstrating bypass of auth, approvals, allowlist enforcement, sandboxing, or other documented trust boundaries. - ReDoS/DoS claims that require trusted operator configuration input (for example catastrophic regex in `sessionFilter` or `logging.redactPatterns`) without a trust-boundary bypass. - Missing HSTS findings on default local/loopback deployments. - Slack webhook signature findings when HTTP mode already uses signing-secret verification. - Discord inbound webhook signature findings for paths not used by this repo's Discord integration. +- Claims that Microsoft Teams `fileConsent/invoke` `uploadInfo.uploadUrl` is attacker-controlled without demonstrating one of: auth boundary bypass, a real authenticated Teams/Bot Framework event carrying attacker-chosen URL, or compromise of the Microsoft/Bot trust path. - Scanner-only claims against stale/nonexistent paths, or claims without a working repro. ### Duplicate Report Handling @@ -113,8 +116,10 @@ Plugins/extensions are part of OpenClaw's trusted computing base for a gateway. - Reports where the only claim is that a trusted-installed/enabled plugin can execute with gateway/host privileges (documented trust model behavior). - Any report whose only claim is that an operator-enabled `dangerous*`/`dangerously*` config option weakens defaults (these are explicit break-glass tradeoffs by design) - Reports that depend on trusted operator-supplied configuration values to trigger availability impact (for example custom regex patterns). These may still be fixed as defense-in-depth hardening, but are not security-boundary bypasses. +- Reports whose only claim is heuristic/parity drift in command-risk detection (for example obfuscation-pattern checks) across exec surfaces, without a demonstrated trust-boundary bypass. 
These are hardening-only findings and are not vulnerabilities; triage may close them as `invalid`/`no-action` or track them separately as low/informational hardening. - Exposed secrets that are third-party/user-controlled credentials (not OpenClaw-owned and not granting access to OpenClaw-operated infrastructure/services) without demonstrated OpenClaw impact - Reports whose only claim is host-side exec when sandbox runtime is disabled/unavailable (documented default behavior in the trusted-operator model), without a boundary bypass. +- Reports whose only claim is that a platform-provided upload destination URL is untrusted (for example Microsoft Teams `fileConsent/invoke` `uploadInfo.uploadUrl`) without proving attacker control in an authenticated production flow. ## Deployment Assumptions @@ -150,6 +155,7 @@ OpenClaw separates routing from execution, but both remain inside the same opera - **Gateway** is the control plane. If a caller passes Gateway auth, they are treated as a trusted operator for that Gateway. - **Node** is an execution extension of the Gateway. Pairing a node grants operator-level remote capability on that node. - **Exec approvals** (allowlist/ask UI) are operator guardrails to reduce accidental command execution, not a multi-tenant authorization boundary. +- Differences in command-risk warning heuristics between exec surfaces (`gateway`, `node`, `sandbox`) do not, by themselves, constitute a security-boundary bypass. - For untrusted-user isolation, split by trust boundary: separate gateways and separate OS users/hosts per boundary. ## Workspace Memory Trust Boundary diff --git a/appcast.xml b/appcast.xml index 902d60972fd..b01defa5429 100644 --- a/appcast.xml +++ b/appcast.xml @@ -209,106 +209,106 @@ - 2026.2.24 - Wed, 25 Feb 2026 02:59:30 +0000 + 2026.2.26 + Thu, 26 Feb 2026 23:37:15 +0100 https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml - 14728 - 2026.2.24 + 15221 + 2026.2.26 15.0 - OpenClaw 2026.2.24 + OpenClaw 2026.2.26

Changes

    -
  • Auto-reply/Abort shortcuts: expand standalone stop phrases (stop openclaw, stop action, stop run, stop agent, please stop, and related variants), accept trailing punctuation (for example STOP OPENCLAW!!!), add multilingual stop keywords (including ES/FR/ZH/HI/AR/JP/DE/PT/RU forms), and treat the exact phrase "do not do that" as a stop trigger while preserving strict standalone matching. (#25103) Thanks @steipete and @vincentkoc.
  • -
  • Android/App UX: ship a native four-step onboarding flow, move post-onboarding into a five-tab shell (Connect, Chat, Voice, Screen, Settings), add a full Connect setup/manual mode screen, and refresh Android chat/settings surfaces for the new navigation model.
  • -
  • Talk/Gateway config: add provider-agnostic Talk configuration with legacy compatibility, and expose gateway Talk ElevenLabs config metadata for setup/status surfaces.
  • -
  • Security/Audit: add security.trust_model.multi_user_heuristic to flag likely shared-user ingress and clarify the personal-assistant trust model, with hardening guidance for intentional multi-user setups (sandbox.mode="all", workspace-scoped FS, reduced tool surface, no personal/private identities on shared runtimes).
  • -
  • Dependencies: refresh key runtime and tooling packages across the workspace (Bedrock SDK, pi runtime stack, OpenAI, Google auth, and oxlint/oxfmt), while intentionally keeping @buape/carbon pinned.
  • -
-

Breaking

-
    -
  • BREAKING: Heartbeat delivery now blocks direct/DM targets when destination parsing identifies a direct chat (for example user:, Telegram user chat IDs, or WhatsApp direct numbers/JIDs). Heartbeat runs still execute, but direct-message delivery is skipped and only non-DM destinations (for example channel/group targets) can receive outbound heartbeat messages.
  • -
  • BREAKING: Security/Sandbox: block Docker network: "container:" namespace-join mode by default for sandbox and sandbox-browser containers. To keep that behavior intentionally, set agents.defaults.sandbox.docker.dangerouslyAllowContainerNamespaceJoin: true (break-glass). Thanks @tdjackey for reporting.
  • +
  • Highlight: External Secrets Management introduces a full openclaw secrets workflow (audit, configure, apply, reload) with runtime snapshot activation, strict secrets apply target-path validation, safer migration scrubbing, ref-only auth-profile support, and dedicated docs. (#26155) Thanks @joshavant.
  • +
  • ACP/Thread-bound agents: make ACP agents first-class runtimes for thread sessions with acp spawn/send dispatch integration, acpx backend bridging, lifecycle controls, startup reconciliation, runtime cleanup, and coalesced thread replies. (#23580) Thanks @osolmaz.
  • +
  • Agents/Routing CLI: add openclaw agents bindings, openclaw agents bind, and openclaw agents unbind for account-scoped route management, including channel-only to account-scoped binding upgrades, role-aware binding identity handling, plugin-resolved binding account IDs, and optional account-binding prompts in openclaw channels add. (#27195) Thanks @gumadeiras.
  • +
  • Codex/WebSocket transport: make openai-codex WebSocket-first by default (transport: "auto" with SSE fallback), keep explicit per-model/runtime transport overrides, and add regression coverage + docs for transport selection.
  • +
  • Onboarding/Plugins: let channel plugins own interactive onboarding flows with optional configureInteractive and configureWhenConfigured hooks while preserving the generic fallback path. (#27191) Thanks @gumadeiras.
  • +
  • Android/Nodes: add Android device capability plus device.status and device.info node commands, including runtime handler wiring and protocol/registry coverage for device status/info payloads. (#27664) Thanks @obviyus.
  • +
  • Android/Nodes: add notifications.list support on Android nodes and expose nodes notifications_list in agent tooling for listing active device notifications. (#27344) Thanks @obviyus.
  • +
  • Docs/Contributing: add Nimrod Gutman to the maintainer roster in CONTRIBUTING.md. (#27840) Thanks @ngutman.

Fixes

    -
  • Routing/Session isolation: harden followup routing so explicit cross-channel origin replies never fall back to the active dispatcher on route failure, preserve queued overflow summary routing metadata (channel/to/thread) across followup drain, and prefer originating channel context over internal provider tags for embedded followup runs. This prevents webchat/control-ui context from hijacking Discord-targeted replies in shared sessions. (#25864) Thanks @Gamedesigner.
  • -
  • Security/Routing: fail closed for shared-session cross-channel replies by binding outbound target resolution to the current turn’s source channel metadata (instead of stale session route fallbacks), and wire those turn-source fields through gateway + command delivery planners with regression coverage. (#24571) Thanks @brandonwise.
  • -
  • Heartbeat routing: prevent heartbeat leakage/spam into Discord and other direct-message destinations by blocking direct-chat heartbeat delivery targets and keeping blocked-delivery cron/exec prompts internal-only. (#25871)
  • -
  • Heartbeat defaults/prompts: switch the implicit heartbeat delivery target from last to none (opt-in for external delivery), and use internal-only cron/exec heartbeat prompt wording when delivery is disabled so background checks do not nudge user-facing relay behavior. (#25871, #24638, #25851)
  • -
  • Auto-reply/Heartbeat queueing: drop heartbeat runs when a session already has an active run instead of enqueueing a stale followup, preventing duplicate heartbeat response branches after queue drain. (#25610, #25606) Thanks @mcaxtr.
  • -
  • Cron/Heartbeat delivery: stop inheriting cached session lastThreadId for heartbeat-mode target resolution unless a thread/topic is explicitly requested, so announce-mode cron and heartbeat deliveries stay on top-level destinations instead of leaking into active conversation threads. (#25730) Thanks @markshields-tl.
  • -
  • Messaging tool dedupe: treat originating channel metadata as authoritative for same-target message.send suppression in proactive runs (heartbeat/cron/exec-event), including synthetic-provider contexts, so delivery-mirror transcript entries no longer cause duplicate Telegram sends. (#25835) Thanks @jadeathena84-arch.
  • -
  • Channels/Typing keepalive: refresh channel typing callbacks on a keepalive interval during long replies and clear keepalive timers on idle/cleanup across core + extension dispatcher callsites so typing indicators do not expire mid-inference. (#25886, #25882) Thanks @stakeswky.
  • -
  • Agents/Model fallback: when a run is currently on a configured fallback model, keep traversing the configured fallback chain instead of collapsing straight to primary-only, preventing dead-end failures when primary stays in cooldown. (#25922, #25912) Thanks @Taskle.
  • -
  • Gateway/Models: honor explicit agents.defaults.models allowlist refs even when bundled model catalog data is stale, synthesize missing allowlist entries in models.list, and allow sessions.patch and /model selection for those refs without false model not allowed errors. (#20291) Thanks @kensipe, @nikolasdehor, and @vincentkoc.
  • -
  • Control UI/Agents: inherit agents.defaults.model.fallbacks in the Overview fallback input when no per-agent model entry exists, while preserving explicit per-agent fallback overrides (including empty lists). (#25729, #25710) Thanks @Suko.
  • -
  • Automation/Subagent/Cron reliability: honor ANNOUNCE_SKIP in sessions_spawn completion/direct announce flows (no user-visible token leaks), add transient direct-announce retries for channel unavailability (for example WhatsApp listener reconnect windows), and include cron in the coding tool profile so /tools/invoke can execute cron actions when explicitly allowed by gateway policy. (#25800, #25656, #25842, #25813, #25822, #25821) Thanks @astra-fer, @aaajiao, @dwight11232-coder, @kevinWangSheng, @widingmarcus-cyber, and @stakeswky.
  • -
  • Discord/Voice reliability: restore runtime DAVE dependency (@snazzah/davey), add configurable DAVE join options (channels.discord.voice.daveEncryption and channels.discord.voice.decryptionFailureTolerance), clean up voice listeners/session teardown, guard against stale connection events, and trigger controlled rejoin recovery after repeated decrypt failures to improve inbound STT stability under DAVE receive errors. (#25861, #25372, #24883, #24825, #23890, #23105, #22961, #23421, #23278, #23032)
  • -
  • Discord/Block streaming: restore block-streamed reply delivery by suppressing only reasoning payloads (instead of all block payloads), fixing missing Discord replies in channels.discord.streaming=block mode. (#25839, #25836, #25792) Thanks @pewallin.
  • -
  • Discord/Proxy + reactions + model picker: thread channel proxy fetch into inbound media/sticker downloads, use proxy-aware gateway metadata fetch for WSL/corporate proxy setups, wire messages.statusReactions.{emojis,timing} into Discord reaction lifecycle control, and compact model-picker custom_id keys to stay under Discord's 100-char limit while keeping backward-compatible parsing. (#25232, #25507, #25564, #25695) Thanks @openperf, @chilu18, @Yipsh, @lbo728, and @s1korrrr.
  • -
  • WhatsApp/Web reconnect: treat close status 440 as non-retryable (including string-form status values), stop reconnect loops immediately, and emit operator guidance to relink after resolving session conflicts. (#25858) Thanks @markmusson.
  • -
  • WhatsApp/Reasoning safety: suppress outbound payloads marked as reasoning and hard-drop text payloads that begin with Reasoning: before WhatsApp delivery, preventing hidden thinking blocks from leaking to end users through final-message paths. (#25804, #25214, #24328)
  • -
  • Matrix/Read receipts: send read receipts as soon as Matrix messages arrive (before handler pipeline work), so clients no longer show long-lived unread/sent states while replies are processing. (#25841, #25840) Thanks @joshjhall.
  • -
  • Telegram/Replies: when markdown formatting renders to empty HTML (for example syntax-only chunks in threaded replies), retry delivery with plain text, and fail loud when both formatted and plain payloads are empty to avoid false delivered states. (#25096, #25091) Thanks @Glucksberg.
  • -
  • Telegram/Media fetch: prioritize IPv4 before IPv6 in SSRF pinned DNS address ordering so media downloads still work on hosts with broken IPv6 routing. (#24295, #23975) Thanks @Glucksberg.
  • -
  • Telegram/Outbound API: replace Node 22's global undici dispatcher when applying Telegram autoSelectFamily decisions so outbound fetch calls inherit IPv4 fallback instead of staying pinned to stale dispatcher settings. (#25682, #25676) Thanks @lairtonlelis.
  • -
  • Onboarding/Telegram: keep core-channel onboarding available when plugin registry population is missing by falling back to built-in adapters and continuing wizard setup with actionable recovery guidance. (#25803) Thanks @Suko.
  • -
  • Android/Gateway auth: preserve Android gateway auth state across onboarding, use the native client id for operator sessions, retry with shared-token fallback after device-token auth failures, and avoid clearing tokens on transient connect errors.
  • -
  • Slack/DM routing: treat D* channel IDs as direct messages even when Slack sends an incorrect channel_type, preventing DM traffic from being misclassified as channel/group chats. (#25479) Thanks @mcaxtr.
  • -
  • Zalo/Group policy: enforce sender authorization for group messages with groupPolicy + groupAllowFrom (fallback to allowFrom), default runtime group behavior to fail-closed allowlist, and block unauthorized non-command group messages before dispatch. Thanks @tdjackey for reporting.
  • -
  • macOS/Voice input: guard all audio-input startup paths against missing default microphones (Voice Wake, Talk Mode, Push-to-Talk, mic-level monitor, tester) to avoid launch/runtime crashes on mic-less Macs and fail gracefully until input becomes available. (#25817) Thanks @sfo2001.
  • -
  • macOS/IME input: when marked text is active, treat Return as IME candidate confirmation first in both the voice overlay composer and shared chat composer to prevent accidental sends while composing CJK text. (#25178) Thanks @bottotl.
  • -
  • macOS/Voice wake routing: default forwarded voice-wake transcripts to the webchat channel (instead of ambiguous last routing) so local voice prompts stay pinned to the control chat surface unless explicitly overridden. (#25440) Thanks @chilu18.
  • -
  • macOS/Gateway launch: prefer an available openclaw binary before pnpm/node runtime fallback when resolving local gateway commands, so local startup no longer fails on hosts with broken runtime discovery. (#25512) Thanks @chilu18.
  • -
  • macOS/Menu bar: stop reusing the injector delegate for the "Usage cost (30 days)" submenu to prevent recursive submenu injection loops when opening cost history. (#25341) Thanks @yingchunbai.
  • -
  • macOS/WebChat panel: fix rounded-corner clipping by using panel-specific visual-effect blending and matching corner masking on both effect and hosting layers. (#22458) Thanks @apethree and @agisilaos.
  • -
  • Windows/Exec shell selection: prefer PowerShell 7 (pwsh) discovery (Program Files, ProgramW6432, PATH) before falling back to Windows PowerShell 5.1, fixing && command chaining failures on Windows hosts with PS7 installed. (#25684, #25638) Thanks @zerone0x.
  • -
  • Windows/Media safety checks: align async local-file identity validation with sync-safe-open behavior by treating win32 dev=0 stats as unknown-device fallbacks (while keeping strict dev checks when both sides are non-zero), fixing false Local media path is not safe to read drops for local attachments/TTS/images. (#25708, #21989, #25699, #25878) Thanks @kevinWangSheng.
  • -
  • iMessage/Reasoning safety: harden iMessage echo suppression with outbound messageId matching (plus scoped text fallback), and enforce reasoning-payload suppression on routed outbound delivery paths to prevent hidden thinking text from being sent as user-visible channel messages. (#25897, #1649, #25757) Thanks @rmarr and @Iranb.
  • -
  • Providers/OpenRouter/Auth profiles: bypass auth-profile cooldown/disable windows for OpenRouter, so provider failures no longer put OpenRouter profiles into local cooldown and stale legacy cooldown markers are ignored in fallback and status selection paths. (#25892) Thanks @alexanderatallah for raising this and @vincentkoc for the fix.
  • -
  • Providers/Google reasoning: sanitize invalid negative thinkingBudget payloads for Gemini 3.1 requests by dropping -1 budgets and mapping configured reasoning effort to thinkingLevel, preventing malformed reasoning payloads on google-generative-ai. (#25900)
  • -
  • Providers/SiliconFlow: normalize thinking="off" to thinking: null for Pro/* model payloads to avoid provider-side 400 loops and misleading compaction retries. (#25435) Thanks @Zjianru.
  • -
  • Models/Bedrock auth: normalize additional Bedrock provider aliases (bedrock, aws-bedrock, aws_bedrock, amazon bedrock) to canonical amazon-bedrock, ensuring auth-mode resolution consistently selects AWS SDK fallback. (#25756) Thanks @fwhite13.
  • -
  • Models/Providers: preserve explicit user reasoning overrides when merging provider model config with built-in catalog metadata, so reasoning: false is no longer overwritten by catalog defaults. (#25314) Thanks @lbo728.
  • -
  • Gateway/Auth: allow trusted-proxy authenticated Control UI websocket sessions to skip device pairing when device identity is absent, preventing false pairing required failures behind trusted reverse proxies. (#25428) Thanks @SidQin-cyber.
  • -
  • CLI/Memory search: accept --query for openclaw memory search (while keeping positional query support), and emit a clear error when neither form is provided. (#25904, #25857) Thanks @niceysam and @stakeswky.
  • -
  • CLI/Doctor: correct stale recovery hints to use valid commands (openclaw gateway status --deep and openclaw configure --section model). (#24485) Thanks @chilu18.
  • -
  • Doctor/Sandbox: when sandbox mode is enabled but Docker is unavailable, surface a clear actionable warning (including failure impact and remediation) instead of a mild “skip checks” note. (#25438) Thanks @mcaxtr.
  • -
  • Doctor/Plugins: auto-enable now resolves third-party channel plugins by manifest plugin id (not channel id), preventing invalid plugins.entries. writes when ids differ. (#25275) Thanks @zerone0x.
  • -
  • Config/Plugins: treat stale removed google-antigravity-auth plugin references as compatibility warnings (not hard validation errors) across plugins.entries, plugins.allow, plugins.deny, and plugins.slots.memory, so startup no longer fails after antigravity removal. (#25538, #25862) Thanks @chilu18.
  • -
  • Config/Meta: accept numeric meta.lastTouchedAt timestamps and coerce them to ISO strings, preserving compatibility with agent edits that write Date.now() values. (#25491) Thanks @mcaxtr.
  • -
  • Usage accounting: parse Moonshot/Kimi cached_tokens fields (including prompt_tokens_details.cached_tokens) into normalized cache-read usage metrics. (#25436) Thanks @Elarwei001.
  • -
  • Agents/Tool dispatch: await block-reply flush before tool execution starts so buffered block replies preserve message ordering around tool calls. (#25427) Thanks @SidQin-cyber.
  • -
  • Agents/Billing classification: prevent long assistant/user-facing text from being rewritten as billing failures while preserving explicit status/code/http 402 detection for oversized structured error payloads. (#25680, #25661) Thanks @lairtonlelis.
  • -
  • Sessions/Tool-result guard: avoid generating synthetic toolResult entries for assistant turns that ended with stopReason: "aborted" or "error", preventing orphaned tool-use IDs from triggering downstream API validation errors. (#25429) Thanks @mikaeldiakhate-cell.
  • -
  • Auto-reply/Reset hooks: guarantee native /new and /reset flows emit command/reset hooks even on early-return command paths, with dedupe protection to avoid double hook emission. (#25459) Thanks @chilu18.
  • -
  • Hooks/Slug generator: resolve session slug model from the agent’s effective model (including defaults/fallback resolution) instead of raw agent-primary config only. (#25485) Thanks @SudeepMalipeddi.
  • -
  • Sandbox/FS bridge tests: add regression coverage for dash-leading basenames to confirm sandbox file reads resolve to absolute container paths (and avoid shell-option misdiagnosis for dashed filenames). (#25891) Thanks @albertlieyingadrian.
  • -
  • Sandbox/FS bridge: build canonical-path shell scripts with newline separators (not ; joins) to avoid POSIX sh do; syntax errors that broke sandbox file/image read-write operations. (#25737, #25824, #25868) Thanks @DennisGoldfinger and @peteragility.
  • -
  • Sandbox/Config: preserve dangerouslyAllowReservedContainerTargets and dangerouslyAllowExternalBindSources during sandbox docker config resolution so explicit bind-mount break-glass overrides reach runtime validation. (#25410) Thanks @skyer-jian.
  • -
  • Gateway/Security: enforce gateway auth for the exact /api/channels plugin root path (plus /api/channels/ descendants), with regression coverage for query/trailing-slash variants and near-miss paths that must remain plugin-owned. (#25753) Thanks @bmendonca3.
  • -
  • Exec approvals: treat bare allowlist * as a true wildcard for parsed executables, including unresolved PATH lookups, so global opt-in allowlists work as configured. (#25250) Thanks @widingmarcus-cyber.
  • -
  • iOS/Signing: improve scripts/ios-team-id.sh for Xcode 16+ by falling back to Xcode-managed provisioning profiles, add actionable guidance when an Apple account exists but no Team ID can be resolved, and ignore Xcode xcodebuild output directories (apps/ios/build, apps/shared/OpenClawKit/build, Swabble/build). (#22773) Thanks @brianleach.
  • -
  • Control UI/Chat images: route image-click opens through a shared safe-open helper (allowing only safe URL schemes) and open new tabs with opener isolation to block tabnabbing. (#18685, #25444, #25847) Thanks @Mariana-Codebase and @shakkernerd.
  • -
  • Security/Exec: sanitize inherited host execution environment before merge, canonicalize inherited PATH handling, and strip dangerous keys (LD_*, DYLD_*, SSLKEYLOGFILE, and related injection vectors) from non-sandboxed exec runs. (#25755) Thanks @bmendonca3.
  • -
  • Security/Hooks: normalize hook session-key classification with trim/lowercase plus Unicode NFKC folding (for example full-width HOOK:...) so external-content wrapping cannot be bypassed by mixed-case or lookalike prefixes. (#25750) Thanks @bmendonca3.
  • -
  • Security/Voice Call: add Telnyx webhook replay detection and canonicalize replay-key signature encoding (Base64/Base64URL equivalent forms dedupe together), so duplicate signed webhook deliveries no longer re-trigger side effects. (#25832) Thanks @bmendonca3.
  • -
  • Security/Sandbox media: restrict sandbox media tmp-path allowances to OpenClaw-managed tmp roots instead of broad host os.tmpdir() trust, and add outbound/channel guardrails (tmp-path lint + media-root smoke tests) to prevent regressions in local media attachment reads. Thanks @tdjackey for reporting.
  • -
  • Security/Sandbox media: reject hard-linked OpenClaw tmp media aliases (including symlink-to-hardlink chains) during sandbox media path resolution to prevent out-of-sandbox inode alias reads. (#25820) Thanks @bmendonca3.
  • -
  • Security/Message actions: enforce local media root checks for sendAttachment and setGroupIcon when sandboxRoot is unset, preventing attachment hydration from reading arbitrary host files via local absolute paths. Thanks @GCXWLP for reporting.
  • -
  • Security/Telegram: enforce DM authorization before media download/write (including media groups) and move telegram inbound activity tracking after DM authorization, preventing unauthorized sender-triggered inbound media disk writes. Thanks @v8hid for reporting.
  • -
  • Security/Workspace FS: normalize @-prefixed paths before workspace-boundary checks (including workspace-only read/write/edit and sandbox mount path guards), preventing absolute-path escape attempts from bypassing guard validation. Thanks @tdjackey for reporting.
  • -
  • Security/Synology Chat: enforce fail-closed allowlist behavior for DM ingress so dmPolicy: "allowlist" with empty allowedUserIds rejects all senders instead of allowing unauthorized dispatch. (#25827) Thanks @bmendonca3 for the contribution and @tdjackey for reporting.
  • -
  • Security/Native images: enforce tools.fs.workspaceOnly for native prompt image auto-load (including history refs), preventing out-of-workspace sandbox mounts from being implicitly ingested as vision input. Thanks @tdjackey for reporting.
  • -
  • Security/Exec approvals: bind system.run command display/approval text to full argv when shell-wrapper inline payloads carry positional argv values, and reject payload-only rawCommand mismatches for those wrapper-carrier forms, preventing hidden command execution under misleading approval text. Thanks @tdjackey for reporting.
  • -
  • Security/Exec companion host: forward canonical system.run display text (not payload-only shell snippets) to the macOS exec host, and enforce rawCommand/argv consistency there for shell-wrapper positional-argv carriers and env-modifier preludes, preventing companion-side approval/display drift. Thanks @tdjackey for reporting.
  • -
  • Security/Exec approvals: fail closed when transparent dispatch-wrapper unwrapping exceeds the depth cap, so nested /usr/bin/env chains cannot bypass shell-wrapper approval gating in allowlist + ask=on-miss mode. Thanks @tdjackey for reporting.
  • -
  • Security/Exec: limit default safe-bin trusted directories to immutable system paths (/bin, /usr/bin) and require explicit opt-in (tools.exec.safeBinTrustedDirs) for package-manager/user bin paths (for example Homebrew), add security-audit findings for risky trusted-dir choices, warn at runtime when explicitly trusted dirs are group/world writable, and add doctor hints when configured safeBins resolve outside trusted dirs. Thanks @tdjackey for reporting.
  • -
  • Security/Sandbox: canonicalize bind-mount source paths via existing-ancestor realpath so symlink-parent + non-existent-leaf paths cannot bypass allowed-source-roots or blocked-path checks. Thanks @tdjackey.
  • +
  • Telegram/DM allowlist runtime inheritance: enforce dmPolicy: "allowlist" allowFrom requirements using effective account-plus-parent config across account-capable channels (Telegram, Discord, Slack, Signal, iMessage, IRC, BlueBubbles, WhatsApp), and align openclaw doctor checks to the same inheritance logic so DM traffic is not silently dropped after upgrades. (#27936) Thanks @widingmarcus-cyber.
  • +
  • Delivery queue/recovery backoff: prevent retry starvation by persisting lastAttemptAt on failed sends and deferring recovery retries until each entry's lastAttemptAt + backoff window is eligible, while continuing to recover ready entries behind deferred ones. Landed from contributor PR #27710 by @Jimmy-xuzimo. Thanks @Jimmy-xuzimo.
  • +
  • Google Chat/Lifecycle: keep Google Chat startAccount pending until abort in webhook mode so startup is no longer interpreted as immediate exit, preventing auto-restart loops and webhook-target churn. (#27384) Thanks @junsuwhy.
  • +
  • Temp dirs/Linux umask: force 0700 permissions after temp-dir creation and self-heal existing writable temp dirs before trust checks so umask 0002 installs no longer crash-loop on startup. Landed from contributor PR #27860 by @stakeswky. (#27853) Thanks @stakeswky.
  • +
  • Nextcloud Talk/Lifecycle: keep startAccount pending until abort and stop the webhook monitor on shutdown, preventing EADDRINUSE restart loops when the gateway manages account lifecycle. (#27897)
  • +
  • Microsoft Teams/File uploads: acknowledge fileConsent/invoke immediately (invokeResponse before upload + file card send) so Teams no longer shows false "Something went wrong" timeout banners while upload completion continues asynchronously; includes updated async regression coverage. Landed from contributor PR #27641 by @scz2011.
  • +
  • Queue/Drain/Cron reliability: harden lane draining with guaranteed draining flag reset on synchronous pump failures, reject new queue enqueues during gateway restart drain windows (instead of silently killing accepted tasks), add /stop queued-backlog cutoff metadata with stale-message skipping (while avoiding cross-session native-stop cutoff bleed), and raise isolated cron agentTurn outer safety timeout to avoid false 10-minute timeout races against longer agent session timeouts. (#27407, #27332, #27427)
  • +
  • Typing/Main reply pipeline: always mark dispatch idle in agent-runner finalization so typing cleanup runs even when dispatcher onIdle does not fire, preventing stuck typing indicators after run completion. (#27250) Thanks @Sid-Qin.
  • +
  • Typing/TTL safety net: add max-duration guardrails to shared typing callbacks so stuck lifecycle edges auto-stop typing indicators even when explicit idle/cleanup signals are missed. (#27428) Thanks @Crpdim.
  • +
  • Typing/Cross-channel leakage: unify run-scoped typing suppression for cross-channel/internal-webchat routes, preserve current inbound origin as embedded run message channel context, harden shared typing keepalive with consecutive-failure circuit breaker edge-case handling, and enforce dispatcher completion/idle waits in extension dispatcher callsites (Feishu, Matrix, Mattermost, MSTeams) so typing indicators always clean up on success/error paths. Related: #27647, #27493, #27598. Supersedes/replaces draft PRs: #27640, #27593, #27540.
  • +
  • Telegram/sendChatAction 401 handling: add bounded exponential backoff + temporary local typing suppression after repeated unauthorized failures to stop unbounded sendChatAction retry loops that can trigger Telegram abuse enforcement and bot deletion. (#27415) Thanks @widingmarcus-cyber.
  • +
  • Telegram/Webhook startup: clarify webhook config guidance, allow channels.telegram.webhookPort: 0 for ephemeral listener binding, and log both the local listener URL and Telegram-advertised webhook URL with the bound port. (#25732) Thanks @huntharo.
  • +
  • Browser/Chrome extension handshake: bind relay WS message handling before onopen and add non-blocking connect.challenge response handling for gateway-style handshake frames, avoiding stuck badge states when challenge frames arrive immediately on connect. Landed from contributor PR #22571 by @pandego. (#22553)
  • +
  • Browser/Extension relay init: dedupe concurrent same-port relay startup with shared in-flight initialization promises so callers await one startup lifecycle and receive consistent success/failure results. Landed from contributor PR #21277 by @HOYALIM. (Related #20688)
  • +
  • Browser/Fill relay + CLI parity: accept act.fill fields without explicit type by defaulting missing/empty type to text in both browser relay route parsing and openclaw browser fill CLI field parsing, so relay calls no longer fail when the model omits field type metadata. Landed from contributor PR #27662 by @Uface11. (#27296) Thanks @Uface11.
  • +
  • Feishu/Permission error dispatch: merge sender-name permission notices into the main inbound dispatch so one user message produces one agent turn/reply (instead of a duplicate permission-notice turn), with regression coverage. (#27381) Thanks @byungsker.
  • +
  • Agents/Canvas default node resolution: when multiple connected canvas-capable nodes exist and no single mac-* candidate is selected, default to the first connected candidate instead of failing with node required for implicit-node canvas tool calls. Landed from contributor PR #27444 by @carbaj03. Thanks @carbaj03.
  • +
  • TUI/stream assembly: preserve streamed text across real tool-boundary drops without keeping stale streamed text when non-text blocks appear only in the final payload. Landed from contributor PR #27711 by @scz2011. (#27674)
  • +
  • Hooks/Internal message:sent: forward sessionKey on outbound sends from agent delivery, cron isolated delivery, gateway receipt acks, heartbeat sends, session-maintenance warnings, and restart-sentinel recovery so internal message:sent hooks consistently dispatch with session context, including openclaw agent --deliver runs resumed via --session-id (without explicit --session-key). Landed from contributor PR #27584 by @qualiobra. Thanks @qualiobra.
  • +
  • Pi image-token usage: stop re-injecting history image blocks each turn, process image references from the current prompt only, and prune already-answered user-image blocks in stored history to prevent runaway token growth. (#27602)
  • +
  • BlueBubbles/SSRF: auto-allowlist the configured serverUrl hostname for attachment fetches so localhost/private-IP BlueBubbles setups are no longer false-blocked by default SSRF checks. Landed from contributor PR #27648 by @lailoo. (#27599) Thanks @taylorhou for reporting.
  • +
  • Agents/Compaction + onboarding safety: prevent destructive double-compaction by stripping stale assistant usage around compaction boundaries, skipping post-compaction custom metadata writes in the same attempt, and cancelling safeguard compaction when there are no real conversation messages to summarize; harden workspace/bootstrap detection for memory-backed workspaces; and change openclaw onboard --reset default scope to config+creds+sessions (workspace deletion now requires --reset-scope full). (#26458, #27314) Thanks @jaden-clovervnd, @Sid-Qin, and @widingmarcus-cyber for fix direction in #26502, #26529, and #27492.
  • +
  • NO_REPLY suppression: suppress NO_REPLY before Slack API send and in sub-agent announce completion flow so sentinel text no longer leaks into user channels. Landed from contributor PRs #27529 (by @Sid-Qin) and #27535 (rewritten minimal landing by maintainers). (#27387, #27531)
  • +
  • Matrix/Group sender identity: preserve sender labels in Matrix group inbound prompt text (BodyForAgent) for both channel and threaded messages, and align group envelopes with shared inbound sender-prefix formatting so first-person requests resolve against the current sender. (#27401) thanks @koushikxd.
  • +
  • Auto-reply/Streaming: suppress only exact NO_REPLY final replies while still filtering streaming partial sentinel fragments (NO_, NO_RE, HEARTBEAT_...) so substantive replies ending with NO_REPLY are delivered and partial silent tokens do not leak during streaming. (#19576) Thanks @aldoeliacim.
  • +
  • Auto-reply/Inbound metadata: add a readable timestamp field to conversation info and ignore invalid/out-of-range timestamp values so prompt assembly never crashes on malformed timestamp inputs. (#17017) thanks @liuy.
  • +
  • Typing/Run completion race: prevent post-run keepalive ticks from re-triggering typing callbacks by guarding triggerTyping() with runComplete, with regression coverage for no-restart behavior during run-complete/dispatch-idle boundaries. (#27413) Thanks @widingmarcus-cyber.
  • +
  • Typing/Dispatch idle: force typing cleanup when markDispatchIdle never arrives after run completion, avoiding leaked typing keepalive loops in cron/announce edges. Landed from contributor PR #27541 by @Sid-Qin. (#27493)
  • +
  • Telegram/Inline buttons: allow callback-query button handling in groups (including /models follow-up buttons) when group policy authorizes the sender, by removing the redundant callback allowlist gate that blocked open-policy groups. (#27343) Thanks @GodsBoy.
  • +
  • Telegram/Streaming preview: when finalizing without an existing preview message, prime pending preview text with final answer before stop-flush so users do not briefly see stale 1-2 word fragments (for example no before no problem). (#27449) Thanks @emanuelst for the original fix direction in #19673.
  • +
  • Browser/Extension relay CORS: handle /json* OPTIONS preflight before auth checks, allow Chrome extension origins, and return extension-origin CORS headers on relay HTTP responses so extension token validation no longer fails cross-origin. Landed from contributor PR #23962 by @miloudbelarebia. (#23842)
  • +
  • Browser/Extension relay auth: allow ?token= query-param auth on relay /json* endpoints (consistent with relay WebSocket auth) so curl/devtools-style /json/version and /json/list probes work without requiring custom headers. Landed from contributor PR #26015 by @Sid-Qin. (#25928)
  • +
  • Browser/Extension relay shutdown: flush pending extension-request timers/rejections during relay stop() before socket/server teardown so in-flight extension waits do not survive shutdown windows. Landed from contributor PR #24142 by @kevinWangSheng.
  • +
  • Browser/Extension relay reconnect resilience: keep CDP clients alive across brief MV3 extension disconnect windows, wait briefly for extension reconnect before failing in-flight CDP commands, and only tear down relay target/client state after reconnect grace expires. Landed from contributor PR #27617 by @davidemanuelDEV.
  • +
  • Browser/Route decode hardening: guard malformed percent-encoding in relay target action routes and browser route-param decoding so crafted % paths return 400 instead of crashing/unhandled URI decode failures. Landed from contributor PR #11880 by @Yida-Dev.
  • +
  • Feishu/Inbound message metadata: include inbound message_id in BodyForAgent on a dedicated metadata line so agents can reliably correlate and act on media/message operations that require message IDs, with regression coverage. (#27253) thanks @xss925175263.
  • +
  • Feishu/Doc tools: route feishu_doc and feishu_app_scopes through the active agent account context (with explicit accountId override support) so multi-account agents no longer default to the first configured app, with regression coverage for context routing and explicit override behavior. (#27338) thanks @AaronL725.
  • +
  • LINE/Inline directives auth: gate directive parsing (/model, /think, /verbose, /reasoning, /queue) on resolved authorization (command.isAuthorizedSender) so commands.allowFrom-authorized LINE senders are not silently stripped when raw CommandAuthorized is unset. Landed from contributor PR #27248 by @kevinWangSheng. (#27240)
  • +
  • Onboarding/Gateway: seed default Control UI allowedOrigins for non-loopback binds during onboarding (localhost/127.0.0.1 plus custom bind host) so fresh non-loopback setups do not fail startup due to missing origin policy. (#26157) thanks @stakeswky.
  • +
  • Docker/GCP onboarding: reduce first-build OOM risk by capping Node heap during pnpm install, reuse existing gateway token during docker-setup.sh reruns so .env stays aligned with config, auto-bootstrap Control UI allowed origins for non-loopback Docker binds, and add GCP docs guidance for tokenized dashboard links + pairing recovery commands. (#26253) Thanks @pandego.
  • +
  • CLI/Gateway --force in non-root Docker: recover from lsof permission failures (EACCES/EPERM) by falling back to fuser kill + probe-based port checks, so openclaw gateway --force works for default container node user flows. (#27941)
  • +
  • Gateway/Bind visibility: emit a startup warning when binding to non-loopback addresses so operators get explicit exposure guidance in runtime logs. (#25397) thanks @let5sne.
  • +
  • Sessions cleanup/Doctor: add openclaw sessions cleanup --fix-missing to prune store entries whose transcript files are missing, including doctor guidance and CLI coverage. Landed from contributor PR #27508 by @Sid-Qin. (#27422)
  • +
  • Doctor/State integrity: ignore metadata-only slash routing sessions when checking recent missing transcripts so openclaw doctor no longer reports false-positive transcript-missing warnings for *:slash:* keys. (#27375) thanks @gumadeiras.
  • +
  • CLI/Gateway status: force local gateway status probe host to 127.0.0.1 for bind=lan so co-located probes do not trip non-loopback plaintext WebSocket checks. (#26997) thanks @chikko80.
  • +
  • CLI/Gateway auth: align gateway run --auth parsing/help text with supported gateway auth modes by accepting none and trusted-proxy (in addition to token/password) for CLI overrides. (#27469) thanks @s1korrrr.
  • +
  • CLI/Daemon status TLS probe: use wss:// and forward local TLS certificate fingerprint for TLS-enabled gateway daemon probes so openclaw daemon status works with gateway.bind=lan + gateway.tls.enabled=true. (#24234) thanks @liuy.
  • +
  • Podman/Default bind: change run-openclaw-podman.sh default gateway bind from lan to loopback and document explicit LAN opt-in with Control UI origin configuration. (#27491) thanks @robbyczgw-cla.
  • +
  • Daemon/macOS launchd: forward proxy env vars into supervised service environments, keep LaunchAgent KeepAlive=true semantics, and harden restart sequencing to print -> bootout -> wait old pid exit -> bootstrap -> kickstart. (#27276) thanks @frankekn.
  • +
  • Gateway/macOS restart-loop hardening: detect OpenClaw-managed supervisor markers during SIGUSR1 restart handoff, clean stale gateway PIDs before /restart launchctl/systemctl triggers, and set LaunchAgent ThrottleInterval=60 to bound launchd retry storms during lock-release races. Landed from contributor PRs #27655 (@taw0002), #27448 (@Sid-Qin), and #27650 (@kevinWangSheng). (#27605, #27590, #26904, #26736)
  • +
  • Models/MiniMax auth header defaults: set authHeader: true for both onboarding-generated MiniMax API providers and implicit built-in MiniMax (minimax, minimax-portal) provider templates so first requests no longer fail with MiniMax 401 authentication_error due to missing Authorization header. Landed from contributor PRs #27622 by @riccoyuanft and #27631 by @kevinWangSheng. (#27600, #15303)
  • +
  • Auth/Auth profiles: normalize auth-profiles.json alias fields (mode -> type, apiKey -> key) before credential validation so entries copied from openclaw.json auth examples are no longer silently dropped. (#26950) thanks @byungsker.
  • +
  • Models/Profile suffix parsing: centralize trailing @profile parsing and only treat @ as a profile separator when it appears after the final /, preserving model IDs like openai/@cf/... and openrouter/@preset/... across /model directive parsing and allowlist model resolution, with regression coverage.
  • +
  • Models/OpenAI Codex config schema parity: accept openai-codex-responses in the config model API schema and TypeScript ModelApi union, with regression coverage for config validation. Landed from contributor PR #27501 by @AytuncYildizli. Thanks @AytuncYildizli.
  • +
  • Agents/Models config: preserve agent-level provider apiKey and baseUrl during merge-mode models.json updates when agent values are present. (#27293) thanks @Sid-Qin.
  • +
  • Azure OpenAI Responses: force store=true for azure-openai-responses direct responses API calls to avoid multi-turn 400 failures. Landed from contributor PR #27499 by @polarbear-Yang. (#27497)
  • +
  • Security/Node exec approvals: require structured commandArgv approvals for host=node, enforce versioned systemRunBindingV1 matching for argv/cwd/session/agent/env context with fail-closed behavior on missing/mismatched bindings, and add GIT_EXTERNAL_DIFF to blocked host env keys. This ships in the next npm release (2026.2.26). Thanks @tdjackey for reporting.
  • +
  • Security/Plugin channel HTTP auth: normalize protected /api/channels path checks against canonicalized request paths (case + percent-decoding + slash normalization), resolve encoded dot-segment traversal variants, and fail closed on malformed %-encoded channel prefixes so alternate-path variants cannot bypass gateway auth. This ships in the next npm release (2026.2.26). Thanks @zpbrent for reporting.
  • +
  • Security/Gateway node pairing: pin paired-device platform/deviceFamily metadata across reconnects and bind those fields into device-auth signatures, so reconnect metadata spoofing cannot expand node command allowlists without explicit repair pairing. This ships in the next npm release (2026.2.26). Thanks @76embiid21 for reporting.
  • +
  • Security/Sandbox path alias guard: reject broken symlink targets by resolving through existing ancestors and failing closed on out-of-root targets, preventing workspace-only apply_patch writes from escaping sandbox/workspace boundaries via dangling symlinks. This ships in the next npm release (2026.2.26). Thanks @tdjackey for reporting.
  • +
  • Security/Workspace FS boundary aliases: harden canonical boundary resolution for non-existent-leaf symlink aliases while preserving valid in-root aliases, preventing first-write workspace escapes via out-of-root symlink targets. This ships in the next npm release (2026.2.26). Thanks @tdjackey for reporting.
  • +
  • Security/Config includes: harden $include file loading with verified-open reads, reject hardlinked include aliases, and enforce include file-size guardrails so config include resolution remains bounded to trusted in-root files. This ships in the next npm release (2026.2.26). Thanks @zpbrent for reporting.
  • +
  • Security/Node exec approvals hardening: freeze immutable approval-time execution plans (argv/cwd/agentId/sessionKey) via system.run.prepare, enforce those canonical plan values during approval forwarding/execution, and reject mutable parent-symlink cwd paths during approval-plan building to prevent approval bypass via symlink rebind. This ships in the next npm release (2026.2.26). Thanks @tdjackey for reporting.
  • +
  • Security/Microsoft Teams media fetch: route Graph message/hosted-content/attachment fetches and auth-scope fallback attachment downloads through shared SSRF-guarded fetch paths, and centralize hostname-suffix allowlist policy helpers in the plugin SDK to remove channel/plugin drift. This ships in the next npm release (2026.2.26). Thanks @tdjackey for reporting.
  • +
  • Security/Voice Call (Twilio): bind webhook replay + manager dedupe identity to authenticated request material, remove unsigned i-twilio-idempotency-token trust from replay/dedupe keys, and thread verified request identity through provider parse flow to harden cross-provider event dedupe. This ships in the next npm release (2026.2.26). Thanks @tdjackey for reporting.
  • +
  • Security/Exec approvals forwarding: prefer turn-source channel/account/thread metadata when resolving approval delivery targets so stale session routes do not misroute approval prompts.
  • +
  • Security/Pairing multi-account isolation: enforce account-scoped pairing allowlists and pending-request storage across core + extension message channels while preserving channel-scoped defaults for the default account. This ships in the next npm release (2026.2.26). Thanks @tdjackey for reporting and @gumadeiras for implementation.
  • +
  • Config/Plugins entries: treat unknown plugins.entries.* ids as startup warnings (ignored stale keys) instead of hard validation failures that can crash-loop gateway boot. Landed from contributor PR #27506 by @Sid-Qin. (#27455)
  • +
  • Telegram native commands: degrade command registration on BOT_COMMANDS_TOO_MUCH by retrying with fewer commands instead of crash-looping startup sync. Landed from contributor PR #27512 by @Sid-Qin. (#27456)
  • +
  • Web tools/Proxy: route web_search provider HTTP calls (Brave, Perplexity, xAI, Gemini, Kimi), redirect resolution, and web_fetch through a shared proxy-aware SSRF guard path so gateway installs behind HTTP_PROXY/HTTPS_PROXY/ALL_PROXY no longer fail with transport fetch failed errors. (#27430) thanks @kevinWangSheng.
  • +
  • Android/Node invoke: remove native gateway WebSocket Origin header to avoid false origin rejections, unify invoke command registry/policy/error parsing paths, and keep command availability checks centralized to reduce dispatcher/advertisement drift. (#27257) Thanks @obviyus.
  • +
  • Gateway shared-auth scopes: preserve requested operator scopes for shared-token clients when device identity is unavailable, instead of clearing scopes during auth handling. Landed from contributor PR #27498 by @kevinWangSheng. (#27494)
  • +
  • Cron/Hooks isolated routing: preserve canonical agent:* session keys in isolated runs so already-qualified keys are not double-prefixed (for example agent:main:main no longer becomes agent:main:agent:main:main). Landed from contributor PR #27333 by @MaheshBhushan. (#27289, #27282)
  • +
  • Channels/Multi-account config: when adding a non-default channel account to a single-account top-level channel setup, move existing account-scoped top-level single-account values into channels..accounts.default before writing the new account so the original account keeps working without duplicated account values at channel root; openclaw doctor --fix now repairs previously mixed channel account shapes the same way. (#27334) thanks @gumadeiras.
  • +
  • iOS/Talk mode: stop injecting the voice directive hint into iOS Talk prompts and remove the Voice Directive Hint setting, reducing model bias toward tool-style TTS directives and keeping relay responses text-first by default. (#27543) thanks @ngutman.
  • +
  • CI/Windows: shard the Windows checks-windows test lane into two matrix jobs and honor explicit shard index overrides in scripts/test-parallel.mjs to reduce CI critical-path wall time. (#27234) Thanks @joshavant.

View full changelog

]]>
- +
- \ No newline at end of file + diff --git a/apps/android/README.md b/apps/android/README.md index 799109c0a0f..4a9951e6441 100644 --- a/apps/android/README.md +++ b/apps/android/README.md @@ -34,6 +34,40 @@ cd apps/android `gradlew` auto-detects the Android SDK at `~/Library/Android/sdk` (macOS default) if `ANDROID_SDK_ROOT` / `ANDROID_HOME` are unset. +## Macrobenchmark (Startup + Frame Timing) + +```bash +cd apps/android +./gradlew :benchmark:connectedDebugAndroidTest +``` + +Reports are written under: + +- `apps/android/benchmark/build/reports/androidTests/connected/` + +## Perf CLI (low-noise) + +Deterministic startup measurement + hotspot extraction with compact CLI output: + +```bash +cd apps/android +./scripts/perf-startup-benchmark.sh +./scripts/perf-startup-hotspots.sh +``` + +Benchmark script behavior: + +- Runs only `StartupMacrobenchmark#coldStartup` (10 iterations). +- Prints median/min/max/COV in one line. +- Writes timestamped snapshot JSON to `apps/android/benchmark/results/`. +- Auto-compares with previous local snapshot (or pass explicit baseline: `--baseline `). + +Hotspot script behavior: + +- Ensures debug app installed, captures startup `simpleperf` data for `.MainActivity`. +- Prints top DSOs, top symbols, and key app-path clues (Compose/MainActivity/WebView). +- Writes raw `perf.data` path for deeper follow-up if needed. + ## Run on a Real Android Phone (USB) 1) On phone, enable **Developer options** + **USB debugging**. 
diff --git a/apps/android/app/build.gradle.kts b/apps/android/app/build.gradle.kts index dda17320625..5e9a27d13eb 100644 --- a/apps/android/app/build.gradle.kts +++ b/apps/android/app/build.gradle.kts @@ -20,8 +20,8 @@ android { applicationId = "ai.openclaw.android" minSdk = 31 targetSdk = 36 - versionCode = 202602250 - versionName = "2026.2.25" + versionCode = 202602260 + versionName = "2026.2.26" ndk { // Support all major ABIs — native libs are tiny (~47 KB per ABI) abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64") @@ -146,6 +146,7 @@ dependencies { testImplementation("org.jetbrains.kotlinx:kotlinx-coroutines-test:1.10.2") testImplementation("io.kotest:kotest-runner-junit5-jvm:6.1.3") testImplementation("io.kotest:kotest-assertions-core-jvm:6.1.3") + testImplementation("com.squareup.okhttp3:mockwebserver:5.3.2") testImplementation("org.robolectric:robolectric:4.16.1") testRuntimeOnly("org.junit.vintage:junit-vintage-engine:6.0.2") } diff --git a/apps/android/app/src/main/AndroidManifest.xml b/apps/android/app/src/main/AndroidManifest.xml index 6b8dd7eedba..3d0b27f39e6 100644 --- a/apps/android/app/src/main/AndroidManifest.xml +++ b/apps/android/app/src/main/AndroidManifest.xml @@ -38,6 +38,15 @@ android:name=".NodeForegroundService" android:exported="false" android:foregroundServiceType="dataSync|microphone|mediaProjection" /> + + + + + micCapture.setMicEnabled(enabled) diff --git a/apps/android/app/src/main/java/ai/openclaw/android/SecurePrefs.kt b/apps/android/app/src/main/java/ai/openclaw/android/SecurePrefs.kt index f03e2b56e0b..96e4572955e 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/SecurePrefs.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/SecurePrefs.kt @@ -20,19 +20,21 @@ class SecurePrefs(context: Context) { val defaultWakeWords: List = listOf("openclaw", "claude") private const val displayNameKey = "node.displayName" private const val voiceWakeModeKey = "voiceWake.mode" + private const val plainPrefsName 
= "openclaw.node" + private const val securePrefsName = "openclaw.node.secure" } private val appContext = context.applicationContext private val json = Json { ignoreUnknownKeys = true } + private val plainPrefs: SharedPreferences = + appContext.getSharedPreferences(plainPrefsName, Context.MODE_PRIVATE) - private val masterKey = - MasterKey.Builder(context) + private val masterKey by lazy { + MasterKey.Builder(appContext) .setKeyScheme(MasterKey.KeyScheme.AES256_GCM) .build() - - private val prefs: SharedPreferences by lazy { - createPrefs(appContext, "openclaw.node.secure") } + private val securePrefs: SharedPreferences by lazy { createSecurePrefs(appContext, securePrefsName) } private val _instanceId = MutableStateFlow(loadOrCreateInstanceId()) val instanceId: StateFlow = _instanceId @@ -41,52 +43,51 @@ class SecurePrefs(context: Context) { MutableStateFlow(loadOrMigrateDisplayName(context = context)) val displayName: StateFlow = _displayName - private val _cameraEnabled = MutableStateFlow(prefs.getBoolean("camera.enabled", true)) + private val _cameraEnabled = MutableStateFlow(plainPrefs.getBoolean("camera.enabled", true)) val cameraEnabled: StateFlow = _cameraEnabled private val _locationMode = - MutableStateFlow(LocationMode.fromRawValue(prefs.getString("location.enabledMode", "off"))) + MutableStateFlow(LocationMode.fromRawValue(plainPrefs.getString("location.enabledMode", "off"))) val locationMode: StateFlow = _locationMode private val _locationPreciseEnabled = - MutableStateFlow(prefs.getBoolean("location.preciseEnabled", true)) + MutableStateFlow(plainPrefs.getBoolean("location.preciseEnabled", true)) val locationPreciseEnabled: StateFlow = _locationPreciseEnabled - private val _preventSleep = MutableStateFlow(prefs.getBoolean("screen.preventSleep", true)) + private val _preventSleep = MutableStateFlow(plainPrefs.getBoolean("screen.preventSleep", true)) val preventSleep: StateFlow = _preventSleep private val _manualEnabled = - 
MutableStateFlow(prefs.getBoolean("gateway.manual.enabled", false)) + MutableStateFlow(plainPrefs.getBoolean("gateway.manual.enabled", false)) val manualEnabled: StateFlow = _manualEnabled private val _manualHost = - MutableStateFlow(prefs.getString("gateway.manual.host", "") ?: "") + MutableStateFlow(plainPrefs.getString("gateway.manual.host", "") ?: "") val manualHost: StateFlow = _manualHost private val _manualPort = - MutableStateFlow(prefs.getInt("gateway.manual.port", 18789)) + MutableStateFlow(plainPrefs.getInt("gateway.manual.port", 18789)) val manualPort: StateFlow = _manualPort private val _manualTls = - MutableStateFlow(prefs.getBoolean("gateway.manual.tls", true)) + MutableStateFlow(plainPrefs.getBoolean("gateway.manual.tls", true)) val manualTls: StateFlow = _manualTls - private val _gatewayToken = - MutableStateFlow(prefs.getString("gateway.manual.token", "") ?: "") + private val _gatewayToken = MutableStateFlow("") val gatewayToken: StateFlow = _gatewayToken private val _onboardingCompleted = - MutableStateFlow(prefs.getBoolean("onboarding.completed", false)) + MutableStateFlow(plainPrefs.getBoolean("onboarding.completed", false)) val onboardingCompleted: StateFlow = _onboardingCompleted private val _lastDiscoveredStableId = MutableStateFlow( - prefs.getString("gateway.lastDiscoveredStableID", "") ?: "", + plainPrefs.getString("gateway.lastDiscoveredStableID", "") ?: "", ) val lastDiscoveredStableId: StateFlow = _lastDiscoveredStableId private val _canvasDebugStatusEnabled = - MutableStateFlow(prefs.getBoolean("canvas.debugStatusEnabled", false)) + MutableStateFlow(plainPrefs.getBoolean("canvas.debugStatusEnabled", false)) val canvasDebugStatusEnabled: StateFlow = _canvasDebugStatusEnabled private val _wakeWords = MutableStateFlow(loadWakeWords()) @@ -95,65 +96,65 @@ class SecurePrefs(context: Context) { private val _voiceWakeMode = MutableStateFlow(loadVoiceWakeMode()) val voiceWakeMode: StateFlow = _voiceWakeMode - private val _talkEnabled = 
MutableStateFlow(prefs.getBoolean("talk.enabled", false)) + private val _talkEnabled = MutableStateFlow(plainPrefs.getBoolean("talk.enabled", false)) val talkEnabled: StateFlow = _talkEnabled fun setLastDiscoveredStableId(value: String) { val trimmed = value.trim() - prefs.edit { putString("gateway.lastDiscoveredStableID", trimmed) } + plainPrefs.edit { putString("gateway.lastDiscoveredStableID", trimmed) } _lastDiscoveredStableId.value = trimmed } fun setDisplayName(value: String) { val trimmed = value.trim() - prefs.edit { putString(displayNameKey, trimmed) } + plainPrefs.edit { putString(displayNameKey, trimmed) } _displayName.value = trimmed } fun setCameraEnabled(value: Boolean) { - prefs.edit { putBoolean("camera.enabled", value) } + plainPrefs.edit { putBoolean("camera.enabled", value) } _cameraEnabled.value = value } fun setLocationMode(mode: LocationMode) { - prefs.edit { putString("location.enabledMode", mode.rawValue) } + plainPrefs.edit { putString("location.enabledMode", mode.rawValue) } _locationMode.value = mode } fun setLocationPreciseEnabled(value: Boolean) { - prefs.edit { putBoolean("location.preciseEnabled", value) } + plainPrefs.edit { putBoolean("location.preciseEnabled", value) } _locationPreciseEnabled.value = value } fun setPreventSleep(value: Boolean) { - prefs.edit { putBoolean("screen.preventSleep", value) } + plainPrefs.edit { putBoolean("screen.preventSleep", value) } _preventSleep.value = value } fun setManualEnabled(value: Boolean) { - prefs.edit { putBoolean("gateway.manual.enabled", value) } + plainPrefs.edit { putBoolean("gateway.manual.enabled", value) } _manualEnabled.value = value } fun setManualHost(value: String) { val trimmed = value.trim() - prefs.edit { putString("gateway.manual.host", trimmed) } + plainPrefs.edit { putString("gateway.manual.host", trimmed) } _manualHost.value = trimmed } fun setManualPort(value: Int) { - prefs.edit { putInt("gateway.manual.port", value) } + plainPrefs.edit { putInt("gateway.manual.port", 
value) } _manualPort.value = value } fun setManualTls(value: Boolean) { - prefs.edit { putBoolean("gateway.manual.tls", value) } + plainPrefs.edit { putBoolean("gateway.manual.tls", value) } _manualTls.value = value } fun setGatewayToken(value: String) { val trimmed = value.trim() - prefs.edit(commit = true) { putString("gateway.manual.token", trimmed) } + securePrefs.edit { putString("gateway.manual.token", trimmed) } _gatewayToken.value = trimmed } @@ -162,62 +163,67 @@ class SecurePrefs(context: Context) { } fun setOnboardingCompleted(value: Boolean) { - prefs.edit { putBoolean("onboarding.completed", value) } + plainPrefs.edit { putBoolean("onboarding.completed", value) } _onboardingCompleted.value = value } fun setCanvasDebugStatusEnabled(value: Boolean) { - prefs.edit { putBoolean("canvas.debugStatusEnabled", value) } + plainPrefs.edit { putBoolean("canvas.debugStatusEnabled", value) } _canvasDebugStatusEnabled.value = value } fun loadGatewayToken(): String? { - val manual = _gatewayToken.value.trim() + val manual = + _gatewayToken.value.trim().ifEmpty { + val stored = securePrefs.getString("gateway.manual.token", null)?.trim().orEmpty() + if (stored.isNotEmpty()) _gatewayToken.value = stored + stored + } if (manual.isNotEmpty()) return manual val key = "gateway.token.${_instanceId.value}" - val stored = prefs.getString(key, null)?.trim() + val stored = securePrefs.getString(key, null)?.trim() return stored?.takeIf { it.isNotEmpty() } } fun saveGatewayToken(token: String) { val key = "gateway.token.${_instanceId.value}" - prefs.edit { putString(key, token.trim()) } + securePrefs.edit { putString(key, token.trim()) } } fun loadGatewayPassword(): String? 
{ val key = "gateway.password.${_instanceId.value}" - val stored = prefs.getString(key, null)?.trim() + val stored = securePrefs.getString(key, null)?.trim() return stored?.takeIf { it.isNotEmpty() } } fun saveGatewayPassword(password: String) { val key = "gateway.password.${_instanceId.value}" - prefs.edit { putString(key, password.trim()) } + securePrefs.edit { putString(key, password.trim()) } } fun loadGatewayTlsFingerprint(stableId: String): String? { val key = "gateway.tls.$stableId" - return prefs.getString(key, null)?.trim()?.takeIf { it.isNotEmpty() } + return plainPrefs.getString(key, null)?.trim()?.takeIf { it.isNotEmpty() } } fun saveGatewayTlsFingerprint(stableId: String, fingerprint: String) { val key = "gateway.tls.$stableId" - prefs.edit { putString(key, fingerprint.trim()) } + plainPrefs.edit { putString(key, fingerprint.trim()) } } fun getString(key: String): String? { - return prefs.getString(key, null) + return securePrefs.getString(key, null) } fun putString(key: String, value: String) { - prefs.edit { putString(key, value) } + securePrefs.edit { putString(key, value) } } fun remove(key: String) { - prefs.edit { remove(key) } + securePrefs.edit { remove(key) } } - private fun createPrefs(context: Context, name: String): SharedPreferences { + private fun createSecurePrefs(context: Context, name: String): SharedPreferences { return EncryptedSharedPreferences.create( context, name, @@ -228,21 +234,21 @@ class SecurePrefs(context: Context) { } private fun loadOrCreateInstanceId(): String { - val existing = prefs.getString("node.instanceId", null)?.trim() + val existing = plainPrefs.getString("node.instanceId", null)?.trim() if (!existing.isNullOrBlank()) return existing val fresh = UUID.randomUUID().toString() - prefs.edit { putString("node.instanceId", fresh) } + plainPrefs.edit { putString("node.instanceId", fresh) } return fresh } private fun loadOrMigrateDisplayName(context: Context): String { - val existing = prefs.getString(displayNameKey, 
null)?.trim().orEmpty() + val existing = plainPrefs.getString(displayNameKey, null)?.trim().orEmpty() if (existing.isNotEmpty() && existing != "Android Node") return existing val candidate = DeviceNames.bestDefaultNodeName(context).trim() val resolved = candidate.ifEmpty { "Android Node" } - prefs.edit { putString(displayNameKey, resolved) } + plainPrefs.edit { putString(displayNameKey, resolved) } return resolved } @@ -250,34 +256,34 @@ class SecurePrefs(context: Context) { val sanitized = WakeWords.sanitize(words, defaultWakeWords) val encoded = JsonArray(sanitized.map { JsonPrimitive(it) }).toString() - prefs.edit { putString("voiceWake.triggerWords", encoded) } + plainPrefs.edit { putString("voiceWake.triggerWords", encoded) } _wakeWords.value = sanitized } fun setVoiceWakeMode(mode: VoiceWakeMode) { - prefs.edit { putString(voiceWakeModeKey, mode.rawValue) } + plainPrefs.edit { putString(voiceWakeModeKey, mode.rawValue) } _voiceWakeMode.value = mode } fun setTalkEnabled(value: Boolean) { - prefs.edit { putBoolean("talk.enabled", value) } + plainPrefs.edit { putBoolean("talk.enabled", value) } _talkEnabled.value = value } private fun loadVoiceWakeMode(): VoiceWakeMode { - val raw = prefs.getString(voiceWakeModeKey, null) + val raw = plainPrefs.getString(voiceWakeModeKey, null) val resolved = VoiceWakeMode.fromRawValue(raw) // Default ON (foreground) when unset. 
if (raw.isNullOrBlank()) { - prefs.edit { putString(voiceWakeModeKey, resolved.rawValue) } + plainPrefs.edit { putString(voiceWakeModeKey, resolved.rawValue) } } return resolved } private fun loadWakeWords(): List { - val raw = prefs.getString("voiceWake.triggerWords", null)?.trim() + val raw = plainPrefs.getString("voiceWake.triggerWords", null)?.trim() if (raw.isNullOrEmpty()) return defaultWakeWords return try { val element = json.parseToJsonElement(raw) @@ -295,5 +301,4 @@ class SecurePrefs(context: Context) { defaultWakeWords } } - } diff --git a/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceAuthPayload.kt b/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceAuthPayload.kt new file mode 100644 index 00000000000..9fecaa03b55 --- /dev/null +++ b/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceAuthPayload.kt @@ -0,0 +1,52 @@ +package ai.openclaw.android.gateway + +internal object DeviceAuthPayload { + fun buildV3( + deviceId: String, + clientId: String, + clientMode: String, + role: String, + scopes: List, + signedAtMs: Long, + token: String?, + nonce: String, + platform: String?, + deviceFamily: String?, + ): String { + val scopeString = scopes.joinToString(",") + val authToken = token.orEmpty() + val platformNorm = normalizeMetadataField(platform) + val deviceFamilyNorm = normalizeMetadataField(deviceFamily) + return listOf( + "v3", + deviceId, + clientId, + clientMode, + role, + scopeString, + signedAtMs.toString(), + authToken, + nonce, + platformNorm, + deviceFamilyNorm, + ).joinToString("|") + } + + internal fun normalizeMetadataField(value: String?): String { + val trimmed = value?.trim().orEmpty() + if (trimmed.isEmpty()) { + return "" + } + // Keep cross-runtime normalization deterministic (TS/Swift/Kotlin): + // lowercase ASCII A-Z only for auth payload metadata fields. 
+ val out = StringBuilder(trimmed.length) + for (ch in trimmed) { + if (ch in 'A'..'Z') { + out.append((ch.code + 32).toChar()) + } else { + out.append(ch) + } + } + return out.toString() + } +} diff --git a/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceAuthStore.kt b/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceAuthStore.kt index 810e029fba8..8ace62e087c 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceAuthStore.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceAuthStore.kt @@ -2,13 +2,18 @@ package ai.openclaw.android.gateway import ai.openclaw.android.SecurePrefs -class DeviceAuthStore(private val prefs: SecurePrefs) { - fun loadToken(deviceId: String, role: String): String? { +interface DeviceAuthTokenStore { + fun loadToken(deviceId: String, role: String): String? + fun saveToken(deviceId: String, role: String, token: String) +} + +class DeviceAuthStore(private val prefs: SecurePrefs) : DeviceAuthTokenStore { + override fun loadToken(deviceId: String, role: String): String? 
{ val key = tokenKey(deviceId, role) return prefs.getString(key)?.trim()?.takeIf { it.isNotEmpty() } } - fun saveToken(deviceId: String, role: String, token: String) { + override fun saveToken(deviceId: String, role: String, token: String) { val key = tokenKey(deviceId, role) prefs.putString(key, token.trim()) } diff --git a/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceIdentityStore.kt b/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceIdentityStore.kt index ff651c6c17b..68830772f9a 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceIdentityStore.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/gateway/DeviceIdentityStore.kt @@ -3,11 +3,7 @@ package ai.openclaw.android.gateway import android.content.Context import android.util.Base64 import java.io.File -import java.security.KeyFactory -import java.security.KeyPairGenerator import java.security.MessageDigest -import java.security.Signature -import java.security.spec.PKCS8EncodedKeySpec import kotlinx.serialization.Serializable import kotlinx.serialization.json.Json @@ -22,21 +18,26 @@ data class DeviceIdentity( class DeviceIdentityStore(context: Context) { private val json = Json { ignoreUnknownKeys = true } private val identityFile = File(context.filesDir, "openclaw/identity/device.json") + @Volatile private var cachedIdentity: DeviceIdentity? 
= null @Synchronized fun loadOrCreate(): DeviceIdentity { + cachedIdentity?.let { return it } val existing = load() if (existing != null) { val derived = deriveDeviceId(existing.publicKeyRawBase64) if (derived != null && derived != existing.deviceId) { val updated = existing.copy(deviceId = derived) save(updated) + cachedIdentity = updated return updated } + cachedIdentity = existing return existing } val fresh = generate() save(fresh) + cachedIdentity = fresh return fresh } @@ -151,22 +152,16 @@ class DeviceIdentityStore(context: Context) { } } - private fun stripSpkiPrefix(spki: ByteArray): ByteArray { - if (spki.size == ED25519_SPKI_PREFIX.size + 32 && - spki.copyOfRange(0, ED25519_SPKI_PREFIX.size).contentEquals(ED25519_SPKI_PREFIX) - ) { - return spki.copyOfRange(ED25519_SPKI_PREFIX.size, spki.size) - } - return spki - } - private fun sha256Hex(data: ByteArray): String { val digest = MessageDigest.getInstance("SHA-256").digest(data) - val out = StringBuilder(digest.size * 2) + val out = CharArray(digest.size * 2) + var i = 0 for (byte in digest) { - out.append(String.format("%02x", byte)) + val v = byte.toInt() and 0xff + out[i++] = HEX[v ushr 4] + out[i++] = HEX[v and 0x0f] } - return out.toString() + return String(out) } private fun base64UrlEncode(data: ByteArray): String { @@ -174,9 +169,6 @@ class DeviceIdentityStore(context: Context) { } companion object { - private val ED25519_SPKI_PREFIX = - byteArrayOf( - 0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00, - ) + private val HEX = "0123456789abcdef".toCharArray() } } diff --git a/apps/android/app/src/main/java/ai/openclaw/android/gateway/GatewaySession.kt b/apps/android/app/src/main/java/ai/openclaw/android/gateway/GatewaySession.kt index 92acf968954..7c8b13ec396 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/gateway/GatewaySession.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/gateway/GatewaySession.kt @@ -55,7 +55,7 @@ data class 
GatewayConnectOptions( class GatewaySession( private val scope: CoroutineScope, private val identityStore: DeviceIdentityStore, - private val deviceAuthStore: DeviceAuthStore, + private val deviceAuthStore: DeviceAuthTokenStore, private val onConnected: (serverName: String?, remoteAddress: String?, mainSessionKey: String?) -> Unit, private val onDisconnected: (message: String) -> Unit, private val onEvent: (event: String, payloadJson: String?) -> Unit, @@ -200,9 +200,7 @@ class GatewaySession( suspend fun connect() { val scheme = if (tls != null) "wss" else "ws" val url = "$scheme://${endpoint.host}:${endpoint.port}" - val httpScheme = if (tls != null) "https" else "http" - val origin = "$httpScheme://${endpoint.host}:${endpoint.port}" - val request = Request.Builder().url(url).header("Origin", origin).build() + val request = Request.Builder().url(url).build() socket = client.newWebSocket(request, Listener()) try { connectDeferred.await() @@ -374,7 +372,7 @@ class GatewaySession( val signedAtMs = System.currentTimeMillis() val payload = - buildDeviceAuthPayload( + DeviceAuthPayload.buildV3( deviceId = identity.deviceId, clientId = client.id, clientMode = client.mode, @@ -383,6 +381,8 @@ class GatewaySession( signedAtMs = signedAtMs, token = if (authToken.isNotEmpty()) authToken else null, nonce = connectNonce, + platform = client.platform, + deviceFamily = client.deviceFamily, ) val signature = identityStore.signPayload(payload, identity) val publicKey = identityStore.publicKeyBase64Url(identity) @@ -535,16 +535,8 @@ class GatewaySession( } private fun invokeErrorFromThrowable(err: Throwable): InvokeResult { - val msg = err.message?.trim().takeIf { !it.isNullOrEmpty() } ?: err::class.java.simpleName - val parts = msg.split(":", limit = 2) - if (parts.size == 2) { - val code = parts[0].trim() - val rest = parts[1].trim() - if (code.isNotEmpty() && code.all { it.isUpperCase() || it == '_' }) { - return InvokeResult.error(code = code, message = rest.ifEmpty { msg }) - 
} - } - return InvokeResult.error(code = "UNAVAILABLE", message = msg) + val parsed = parseInvokeErrorFromThrowable(err, fallbackMessage = err::class.java.simpleName) + return InvokeResult.error(code = parsed.code, message = parsed.message) } private fun failPending() { @@ -592,33 +584,6 @@ class GatewaySession( } } - private fun buildDeviceAuthPayload( - deviceId: String, - clientId: String, - clientMode: String, - role: String, - scopes: List, - signedAtMs: Long, - token: String?, - nonce: String, - ): String { - val scopeString = scopes.joinToString(",") - val authToken = token.orEmpty() - val parts = - mutableListOf( - "v2", - deviceId, - clientId, - clientMode, - role, - scopeString, - signedAtMs.toString(), - authToken, - nonce, - ) - return parts.joinToString("|") - } - private fun normalizeCanvasHostUrl( raw: String?, endpoint: GatewayEndpoint, diff --git a/apps/android/app/src/main/java/ai/openclaw/android/gateway/InvokeErrorParser.kt b/apps/android/app/src/main/java/ai/openclaw/android/gateway/InvokeErrorParser.kt new file mode 100644 index 00000000000..7242f4a5533 --- /dev/null +++ b/apps/android/app/src/main/java/ai/openclaw/android/gateway/InvokeErrorParser.kt @@ -0,0 +1,39 @@ +package ai.openclaw.android.gateway + +data class ParsedInvokeError( + val code: String, + val message: String, + val hadExplicitCode: Boolean, +) { + val prefixedMessage: String + get() = "$code: $message" +} + +fun parseInvokeErrorMessage(raw: String): ParsedInvokeError { + val trimmed = raw.trim() + if (trimmed.isEmpty()) { + return ParsedInvokeError(code = "UNAVAILABLE", message = "error", hadExplicitCode = false) + } + + val parts = trimmed.split(":", limit = 2) + if (parts.size == 2) { + val code = parts[0].trim() + val rest = parts[1].trim() + if (code.isNotEmpty() && code.all { it.isUpperCase() || it == '_' }) { + return ParsedInvokeError( + code = code, + message = rest.ifEmpty { trimmed }, + hadExplicitCode = true, + ) + } + } + return ParsedInvokeError(code = 
"UNAVAILABLE", message = trimmed, hadExplicitCode = false) +} + +fun parseInvokeErrorFromThrowable( + err: Throwable, + fallbackMessage: String = "error", +): ParsedInvokeError { + val raw = err.message?.trim().takeIf { !it.isNullOrEmpty() } ?: fallbackMessage + return parseInvokeErrorMessage(raw) +} diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/CameraCaptureManager.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/CameraCaptureManager.kt index 65bac915eff..c4d60cd17fd 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/node/CameraCaptureManager.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/CameraCaptureManager.kt @@ -30,6 +30,10 @@ import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.suspendCancellableCoroutine import kotlinx.coroutines.withTimeout import kotlinx.coroutines.withContext +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.contentOrNull import java.io.ByteArrayOutputStream import java.io.File import java.util.concurrent.Executor @@ -80,9 +84,10 @@ class CameraCaptureManager(private val context: Context) { withContext(Dispatchers.Main) { ensureCameraPermission() val owner = lifecycleOwner ?: throw IllegalStateException("UNAVAILABLE: camera not ready") - val facing = parseFacing(paramsJson) ?: "front" - val quality = (parseQuality(paramsJson) ?: 0.5).coerceIn(0.1, 1.0) - val maxWidth = parseMaxWidth(paramsJson) ?: 800 + val params = parseParamsObject(paramsJson) + val facing = parseFacing(params) ?: "front" + val quality = (parseQuality(params) ?: 0.95).coerceIn(0.1, 1.0) + val maxWidth = parseMaxWidth(params) ?: 1600 val provider = context.cameraProvider() val capture = ImageCapture.Builder().build() @@ -145,9 +150,10 @@ class CameraCaptureManager(private val context: Context) { withContext(Dispatchers.Main) { ensureCameraPermission() val owner = lifecycleOwner 
?: throw IllegalStateException("UNAVAILABLE: camera not ready") - val facing = parseFacing(paramsJson) ?: "front" - val durationMs = (parseDurationMs(paramsJson) ?: 3_000).coerceIn(200, 60_000) - val includeAudio = parseIncludeAudio(paramsJson) ?: true + val params = parseParamsObject(paramsJson) + val facing = parseFacing(params) ?: "front" + val durationMs = (parseDurationMs(params) ?: 3_000).coerceIn(200, 60_000) + val includeAudio = parseIncludeAudio(params) ?: true if (includeAudio) ensureMicPermission() android.util.Log.w("CameraCaptureManager", "clip: start facing=$facing duration=$durationMs audio=$includeAudio") @@ -270,46 +276,45 @@ class CameraCaptureManager(private val context: Context) { return rotated } - private fun parseFacing(paramsJson: String?): String? = - when { - paramsJson?.contains("\"front\"") == true -> "front" - paramsJson?.contains("\"back\"") == true -> "back" - else -> null + private fun parseParamsObject(paramsJson: String?): JsonObject? { + if (paramsJson.isNullOrBlank()) return null + return try { + Json.parseToJsonElement(paramsJson).asObjectOrNull() + } catch (_: Throwable) { + null } + } - private fun parseQuality(paramsJson: String?): Double? = - parseNumber(paramsJson, key = "quality")?.toDoubleOrNull() + private fun readPrimitive(params: JsonObject?, key: String): JsonPrimitive? = + params?.get(key) as? JsonPrimitive - private fun parseMaxWidth(paramsJson: String?): Int? = - parseNumber(paramsJson, key = "maxWidth")?.toIntOrNull() - - private fun parseDurationMs(paramsJson: String?): Int? = - parseNumber(paramsJson, key = "durationMs")?.toIntOrNull() - - private fun parseIncludeAudio(paramsJson: String?): Boolean? 
{ - val raw = paramsJson ?: return null - val key = "\"includeAudio\"" - val idx = raw.indexOf(key) - if (idx < 0) return null - val colon = raw.indexOf(':', idx + key.length) - if (colon < 0) return null - val tail = raw.substring(colon + 1).trimStart() - return when { - tail.startsWith("true") -> true - tail.startsWith("false") -> false + private fun parseFacing(params: JsonObject?): String? { + val value = readPrimitive(params, "facing")?.contentOrNull?.trim()?.lowercase() ?: return null + return when (value) { + "front", "back" -> value else -> null } } - private fun parseNumber(paramsJson: String?, key: String): String? { - val raw = paramsJson ?: return null - val needle = "\"$key\"" - val idx = raw.indexOf(needle) - if (idx < 0) return null - val colon = raw.indexOf(':', idx + needle.length) - if (colon < 0) return null - val tail = raw.substring(colon + 1).trimStart() - return tail.takeWhile { it.isDigit() || it == '.' } + private fun parseQuality(params: JsonObject?): Double? = + readPrimitive(params, "quality")?.contentOrNull?.toDoubleOrNull() + + private fun parseMaxWidth(params: JsonObject?): Int? = + readPrimitive(params, "maxWidth") + ?.contentOrNull + ?.toIntOrNull() + ?.takeIf { it > 0 } + + private fun parseDurationMs(params: JsonObject?): Int? = + readPrimitive(params, "durationMs")?.contentOrNull?.toIntOrNull() + + private fun parseIncludeAudio(params: JsonObject?): Boolean? 
{ + val value = readPrimitive(params, "includeAudio")?.contentOrNull?.trim()?.lowercase() + return when (value) { + "true" -> true + "false" -> false + else -> null + } } private fun Context.mainExecutor(): Executor = ContextCompat.getMainExecutor(this) diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/CameraHandler.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/CameraHandler.kt index 658c117ff31..ff1b8468cd6 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/node/CameraHandler.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/CameraHandler.kt @@ -3,20 +3,22 @@ package ai.openclaw.android.node import android.content.Context import ai.openclaw.android.CameraHudKind import ai.openclaw.android.BuildConfig -import ai.openclaw.android.SecurePrefs -import ai.openclaw.android.gateway.GatewayEndpoint import ai.openclaw.android.gateway.GatewaySession import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.flow.MutableStateFlow import kotlinx.coroutines.withContext -import okhttp3.MediaType.Companion.toMediaType -import okhttp3.RequestBody.Companion.asRequestBody +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.contentOrNull + +internal const val CAMERA_CLIP_MAX_RAW_BYTES: Long = 18L * 1024L * 1024L + +internal fun isCameraClipWithinPayloadLimit(rawBytes: Long): Boolean = + rawBytes in 0L..CAMERA_CLIP_MAX_RAW_BYTES class CameraHandler( private val appContext: Context, private val camera: CameraCaptureManager, - private val prefs: SecurePrefs, - private val connectedEndpoint: () -> GatewayEndpoint?, private val externalAudioCaptureActive: MutableStateFlow, private val showCameraHud: (message: String, kind: CameraHudKind, autoHideMs: Long?) 
-> Unit, private val triggerCameraFlash: () -> Unit, @@ -69,7 +71,7 @@ class CameraHandler( clipLogFile?.appendText("[CLIP $ts] $msg\n") android.util.Log.w("openclaw", "camera.clip: $msg") } - val includeAudio = paramsJson?.contains("\"includeAudio\":true") != false + val includeAudio = parseIncludeAudio(paramsJson) ?: true if (includeAudio) externalAudioCaptureActive.value = true try { clipLogFile?.writeText("") // clear @@ -89,62 +91,28 @@ class CameraHandler( showCameraHud(message, CameraHudKind.Error, 2400) return GatewaySession.InvokeResult.error(code = code, message = message) } - // Upload file via HTTP instead of base64 through WebSocket - clipLog("uploading via HTTP...") - val uploadUrl = try { - withContext(Dispatchers.IO) { - val ep = connectedEndpoint() - val gatewayHost = if (ep != null) { - val isHttps = ep.tlsEnabled || ep.port == 443 - if (!isHttps) { - clipLog("refusing to upload over plain HTTP — bearer token would be exposed; falling back to base64") - throw Exception("HTTPS required for upload (bearer token protection)") - } - if (ep.port == 443) "https://${ep.host}" else "https://${ep.host}:${ep.port}" - } else { - clipLog("error: no gateway endpoint connected, cannot upload") - throw Exception("no gateway endpoint connected") - } - val token = prefs.loadGatewayToken() ?: "" - val client = okhttp3.OkHttpClient.Builder() - .connectTimeout(10, java.util.concurrent.TimeUnit.SECONDS) - .writeTimeout(120, java.util.concurrent.TimeUnit.SECONDS) - .readTimeout(30, java.util.concurrent.TimeUnit.SECONDS) - .build() - val body = filePayload.file.asRequestBody("video/mp4".toMediaType()) - val req = okhttp3.Request.Builder() - .url("$gatewayHost/upload/clip.mp4") - .put(body) - .header("Authorization", "Bearer $token") - .build() - clipLog("uploading ${filePayload.file.length()} bytes to $gatewayHost/upload/clip.mp4") - val resp = client.newCall(req).execute() - val respBody = resp.body?.string() ?: "" - clipLog("upload response: ${resp.code} $respBody") - 
filePayload.file.delete() - if (!resp.isSuccessful) throw Exception("upload failed: HTTP ${resp.code}") - // Parse URL from response - val urlMatch = Regex("\"url\":\"([^\"]+)\"").find(respBody) - urlMatch?.groupValues?.get(1) ?: throw Exception("no url in response: $respBody") - } - } catch (err: Throwable) { - clipLog("upload failed: ${err.message}, falling back to base64") - // Fallback to base64 if upload fails - val bytes = withContext(Dispatchers.IO) { - val b = filePayload.file.readBytes() - filePayload.file.delete() - b - } - val base64 = android.util.Base64.encodeToString(bytes, android.util.Base64.NO_WRAP) - showCameraHud("Clip captured", CameraHudKind.Success, 1800) - return GatewaySession.InvokeResult.ok( - """{"format":"mp4","base64":"$base64","durationMs":${filePayload.durationMs},"hasAudio":${filePayload.hasAudio}}""" + val rawBytes = filePayload.file.length() + if (!isCameraClipWithinPayloadLimit(rawBytes)) { + clipLog("payload too large: bytes=$rawBytes max=$CAMERA_CLIP_MAX_RAW_BYTES") + withContext(Dispatchers.IO) { filePayload.file.delete() } + showCameraHud("Clip too large", CameraHudKind.Error, 2400) + return GatewaySession.InvokeResult.error( + code = "PAYLOAD_TOO_LARGE", + message = + "PAYLOAD_TOO_LARGE: camera clip is $rawBytes bytes; max is $CAMERA_CLIP_MAX_RAW_BYTES bytes. 
Reduce durationMs and retry.", ) } - clipLog("returning URL result: $uploadUrl") + + val bytes = withContext(Dispatchers.IO) { + val b = filePayload.file.readBytes() + filePayload.file.delete() + b + } + val base64 = android.util.Base64.encodeToString(bytes, android.util.Base64.NO_WRAP) + clipLog("returning base64 payload") showCameraHud("Clip captured", CameraHudKind.Success, 1800) return GatewaySession.InvokeResult.ok( - """{"format":"mp4","url":"$uploadUrl","durationMs":${filePayload.durationMs},"hasAudio":${filePayload.hasAudio}}""" + """{"format":"mp4","base64":"$base64","durationMs":${filePayload.durationMs},"hasAudio":${filePayload.hasAudio}}""" ) } catch (err: Throwable) { clipLog("outer error: ${err::class.java.simpleName}: ${err.message}") @@ -154,4 +122,24 @@ class CameraHandler( if (includeAudio) externalAudioCaptureActive.value = false } } + + private fun parseIncludeAudio(paramsJson: String?): Boolean? { + if (paramsJson.isNullOrBlank()) return null + val root = + try { + Json.parseToJsonElement(paramsJson).asObjectOrNull() + } catch (_: Throwable) { + null + } ?: return null + val value = + (root["includeAudio"] as? 
JsonPrimitive) + ?.contentOrNull + ?.trim() + ?.lowercase() + return when (value) { + "true" -> true + "false" -> false + else -> null + } + } } diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/ConnectionManager.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/ConnectionManager.kt index 9b449fc85f3..1c9a045c896 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/node/ConnectionManager.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/ConnectionManager.kt @@ -7,12 +7,6 @@ import ai.openclaw.android.gateway.GatewayClientInfo import ai.openclaw.android.gateway.GatewayConnectOptions import ai.openclaw.android.gateway.GatewayEndpoint import ai.openclaw.android.gateway.GatewayTlsParams -import ai.openclaw.android.protocol.OpenClawCanvasA2UICommand -import ai.openclaw.android.protocol.OpenClawCanvasCommand -import ai.openclaw.android.protocol.OpenClawCameraCommand -import ai.openclaw.android.protocol.OpenClawLocationCommand -import ai.openclaw.android.protocol.OpenClawScreenCommand -import ai.openclaw.android.protocol.OpenClawSmsCommand import ai.openclaw.android.protocol.OpenClawCapability import ai.openclaw.android.LocationMode import ai.openclaw.android.VoiceWakeMode @@ -80,37 +74,18 @@ class ConnectionManager( } fun buildInvokeCommands(): List = - buildList { - add(OpenClawCanvasCommand.Present.rawValue) - add(OpenClawCanvasCommand.Hide.rawValue) - add(OpenClawCanvasCommand.Navigate.rawValue) - add(OpenClawCanvasCommand.Eval.rawValue) - add(OpenClawCanvasCommand.Snapshot.rawValue) - add(OpenClawCanvasA2UICommand.Push.rawValue) - add(OpenClawCanvasA2UICommand.PushJSONL.rawValue) - add(OpenClawCanvasA2UICommand.Reset.rawValue) - add(OpenClawScreenCommand.Record.rawValue) - if (cameraEnabled()) { - add(OpenClawCameraCommand.Snap.rawValue) - add(OpenClawCameraCommand.Clip.rawValue) - } - if (locationMode() != LocationMode.Off) { - add(OpenClawLocationCommand.Get.rawValue) - } - if (smsAvailable()) { - 
add(OpenClawSmsCommand.Send.rawValue) - } - if (BuildConfig.DEBUG) { - add("debug.logs") - add("debug.ed25519") - } - add("app.update") - } + InvokeCommandRegistry.advertisedCommands( + cameraEnabled = cameraEnabled(), + locationEnabled = locationMode() != LocationMode.Off, + smsAvailable = smsAvailable(), + debugBuild = BuildConfig.DEBUG, + ) fun buildCapabilities(): List = buildList { add(OpenClawCapability.Canvas.rawValue) add(OpenClawCapability.Screen.rawValue) + add(OpenClawCapability.Device.rawValue) if (cameraEnabled()) add(OpenClawCapability.Camera.rawValue) if (smsAvailable()) add(OpenClawCapability.Sms.rawValue) if (voiceWakeMode() != VoiceWakeMode.Off && hasRecordAudioPermission()) { diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/DeviceHandler.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/DeviceHandler.kt new file mode 100644 index 00000000000..896d7c7c74c --- /dev/null +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/DeviceHandler.kt @@ -0,0 +1,171 @@ +package ai.openclaw.android.node + +import android.content.Context +import android.content.Intent +import android.content.IntentFilter +import android.net.ConnectivityManager +import android.net.NetworkCapabilities +import android.os.BatteryManager +import android.os.Build +import android.os.Environment +import android.os.PowerManager +import android.os.StatFs +import android.os.SystemClock +import ai.openclaw.android.BuildConfig +import ai.openclaw.android.gateway.GatewaySession +import java.util.Locale +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.buildJsonArray +import kotlinx.serialization.json.buildJsonObject +import kotlinx.serialization.json.put + +class DeviceHandler( + private val appContext: Context, +) { + fun handleDeviceStatus(_paramsJson: String?): GatewaySession.InvokeResult { + return GatewaySession.InvokeResult.ok(statusPayloadJson()) + } + + fun handleDeviceInfo(_paramsJson: String?): 
GatewaySession.InvokeResult { + return GatewaySession.InvokeResult.ok(infoPayloadJson()) + } + + private fun statusPayloadJson(): String { + val batteryIntent = appContext.registerReceiver(null, IntentFilter(Intent.ACTION_BATTERY_CHANGED)) + val batteryStatus = + batteryIntent?.getIntExtra(BatteryManager.EXTRA_STATUS, BatteryManager.BATTERY_STATUS_UNKNOWN) + ?: BatteryManager.BATTERY_STATUS_UNKNOWN + val batteryLevel = batteryLevelFraction(batteryIntent) + val powerManager = appContext.getSystemService(PowerManager::class.java) + val storage = StatFs(Environment.getDataDirectory().absolutePath) + val totalBytes = storage.totalBytes + val freeBytes = storage.availableBytes + val usedBytes = (totalBytes - freeBytes).coerceAtLeast(0L) + val connectivity = appContext.getSystemService(ConnectivityManager::class.java) + val activeNetwork = connectivity?.activeNetwork + val caps = activeNetwork?.let { connectivity.getNetworkCapabilities(it) } + val uptimeSeconds = SystemClock.elapsedRealtime() / 1_000.0 + + return buildJsonObject { + put( + "battery", + buildJsonObject { + batteryLevel?.let { put("level", JsonPrimitive(it)) } + put("state", JsonPrimitive(mapBatteryState(batteryStatus))) + put("lowPowerModeEnabled", JsonPrimitive(powerManager?.isPowerSaveMode == true)) + }, + ) + put( + "thermal", + buildJsonObject { + put("state", JsonPrimitive(mapThermalState(powerManager))) + }, + ) + put( + "storage", + buildJsonObject { + put("totalBytes", JsonPrimitive(totalBytes)) + put("freeBytes", JsonPrimitive(freeBytes)) + put("usedBytes", JsonPrimitive(usedBytes)) + }, + ) + put( + "network", + buildJsonObject { + put("status", JsonPrimitive(mapNetworkStatus(caps))) + put( + "isExpensive", + JsonPrimitive( + caps?.hasCapability(NetworkCapabilities.NET_CAPABILITY_NOT_METERED)?.not() ?: false, + ), + ) + put( + "isConstrained", + JsonPrimitive( + caps?.hasCapability(NetworkCapabilities.NET_CAPABILITY_NOT_RESTRICTED)?.not() ?: false, + ), + ) + put("interfaces", 
networkInterfacesJson(caps)) + }, + ) + put("uptimeSeconds", JsonPrimitive(uptimeSeconds)) + }.toString() + } + + private fun infoPayloadJson(): String { + val model = Build.MODEL?.trim().orEmpty() + val manufacturer = Build.MANUFACTURER?.trim().orEmpty() + val modelIdentifier = Build.DEVICE?.trim().orEmpty() + val systemVersion = Build.VERSION.RELEASE?.trim().orEmpty() + val locale = Locale.getDefault().toLanguageTag().trim() + val appVersion = BuildConfig.VERSION_NAME.trim() + val appBuild = BuildConfig.VERSION_CODE.toString() + + return buildJsonObject { + put("deviceName", JsonPrimitive(model.ifEmpty { "Android" })) + put("modelIdentifier", JsonPrimitive(modelIdentifier.ifEmpty { listOf(manufacturer, model).filter { it.isNotEmpty() }.joinToString(" ") })) + put("systemName", JsonPrimitive("Android")) + put("systemVersion", JsonPrimitive(systemVersion.ifEmpty { Build.VERSION.SDK_INT.toString() })) + put("appVersion", JsonPrimitive(appVersion.ifEmpty { "dev" })) + put("appBuild", JsonPrimitive(appBuild.ifEmpty { "0" })) + put("locale", JsonPrimitive(locale.ifEmpty { Locale.getDefault().toString() })) + }.toString() + } + + private fun batteryLevelFraction(intent: Intent?): Double? 
{ + val rawLevel = intent?.getIntExtra(BatteryManager.EXTRA_LEVEL, -1) ?: -1 + val rawScale = intent?.getIntExtra(BatteryManager.EXTRA_SCALE, -1) ?: -1 + if (rawLevel < 0 || rawScale <= 0) return null + return rawLevel.toDouble() / rawScale.toDouble() + } + + private fun mapBatteryState(status: Int): String { + return when (status) { + BatteryManager.BATTERY_STATUS_CHARGING -> "charging" + BatteryManager.BATTERY_STATUS_FULL -> "full" + BatteryManager.BATTERY_STATUS_DISCHARGING, BatteryManager.BATTERY_STATUS_NOT_CHARGING -> "unplugged" + else -> "unknown" + } + } + + private fun mapThermalState(powerManager: PowerManager?): String { + val thermal = powerManager?.currentThermalStatus ?: return "nominal" + return when (thermal) { + PowerManager.THERMAL_STATUS_NONE, PowerManager.THERMAL_STATUS_LIGHT -> "nominal" + PowerManager.THERMAL_STATUS_MODERATE -> "fair" + PowerManager.THERMAL_STATUS_SEVERE -> "serious" + PowerManager.THERMAL_STATUS_CRITICAL, + PowerManager.THERMAL_STATUS_EMERGENCY, + PowerManager.THERMAL_STATUS_SHUTDOWN -> "critical" + else -> "nominal" + } + } + + private fun mapNetworkStatus(caps: NetworkCapabilities?): String { + if (caps == null) return "unsatisfied" + return when { + caps.hasCapability(NetworkCapabilities.NET_CAPABILITY_VALIDATED) -> "satisfied" + caps.hasCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET) -> "requiresConnection" + else -> "unsatisfied" + } + } + + private fun networkInterfacesJson(caps: NetworkCapabilities?) 
= + buildJsonArray { + if (caps == null) return@buildJsonArray + var hasKnownTransport = false + if (caps.hasTransport(NetworkCapabilities.TRANSPORT_WIFI)) { + hasKnownTransport = true + add(JsonPrimitive("wifi")) + } + if (caps.hasTransport(NetworkCapabilities.TRANSPORT_CELLULAR)) { + hasKnownTransport = true + add(JsonPrimitive("cellular")) + } + if (caps.hasTransport(NetworkCapabilities.TRANSPORT_ETHERNET)) { + hasKnownTransport = true + add(JsonPrimitive("wired")) + } + if (!hasKnownTransport) add(JsonPrimitive("other")) + } +} diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/DeviceNotificationListenerService.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/DeviceNotificationListenerService.kt new file mode 100644 index 00000000000..709e9af5ec5 --- /dev/null +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/DeviceNotificationListenerService.kt @@ -0,0 +1,164 @@ +package ai.openclaw.android.node + +import android.app.Notification +import android.app.NotificationManager +import android.content.ComponentName +import android.content.Context +import android.os.Build +import android.service.notification.NotificationListenerService +import android.service.notification.StatusBarNotification + +private const val MAX_NOTIFICATION_TEXT_CHARS = 512 + +internal fun sanitizeNotificationText(value: CharSequence?): String? 
{ + val normalized = value?.toString()?.trim().orEmpty() + return normalized.take(MAX_NOTIFICATION_TEXT_CHARS).ifEmpty { null } +} + +data class DeviceNotificationEntry( + val key: String, + val packageName: String, + val title: String?, + val text: String?, + val subText: String?, + val category: String?, + val channelId: String?, + val postTimeMs: Long, + val isOngoing: Boolean, + val isClearable: Boolean, +) + +data class DeviceNotificationSnapshot( + val enabled: Boolean, + val connected: Boolean, + val notifications: List, +) + +private object DeviceNotificationStore { + private val lock = Any() + private var connected = false + private val byKey = LinkedHashMap() + + fun replace(entries: List) { + synchronized(lock) { + byKey.clear() + for (entry in entries) { + byKey[entry.key] = entry + } + } + } + + fun upsert(entry: DeviceNotificationEntry) { + synchronized(lock) { + byKey[entry.key] = entry + } + } + + fun remove(key: String) { + synchronized(lock) { + byKey.remove(key) + } + } + + fun setConnected(value: Boolean) { + synchronized(lock) { + connected = value + if (!value) { + byKey.clear() + } + } + } + + fun snapshot(enabled: Boolean): DeviceNotificationSnapshot { + val (isConnected, entries) = + synchronized(lock) { + connected to byKey.values.sortedByDescending { it.postTimeMs } + } + return DeviceNotificationSnapshot( + enabled = enabled, + connected = isConnected, + notifications = entries, + ) + } +} + +class DeviceNotificationListenerService : NotificationListenerService() { + override fun onListenerConnected() { + super.onListenerConnected() + DeviceNotificationStore.setConnected(true) + refreshActiveNotifications() + } + + override fun onListenerDisconnected() { + DeviceNotificationStore.setConnected(false) + super.onListenerDisconnected() + } + + override fun onNotificationPosted(sbn: StatusBarNotification?) 
{ + super.onNotificationPosted(sbn) + val entry = sbn?.toEntry() ?: return + DeviceNotificationStore.upsert(entry) + } + + override fun onNotificationRemoved(sbn: StatusBarNotification?) { + super.onNotificationRemoved(sbn) + val key = sbn?.key ?: return + DeviceNotificationStore.remove(key) + } + + private fun refreshActiveNotifications() { + val entries = + runCatching { + activeNotifications + ?.mapNotNull { it.toEntry() } + ?: emptyList() + }.getOrElse { emptyList() } + DeviceNotificationStore.replace(entries) + } + + private fun StatusBarNotification.toEntry(): DeviceNotificationEntry { + val extras = notification.extras + val keyValue = key.takeIf { it.isNotBlank() } ?: "$packageName:$id:$postTime" + val title = sanitizeNotificationText(extras?.getCharSequence(Notification.EXTRA_TITLE)) + val body = + sanitizeNotificationText(extras?.getCharSequence(Notification.EXTRA_BIG_TEXT)) + ?: sanitizeNotificationText(extras?.getCharSequence(Notification.EXTRA_TEXT)) + val subText = sanitizeNotificationText(extras?.getCharSequence(Notification.EXTRA_SUB_TEXT)) + return DeviceNotificationEntry( + key = keyValue, + packageName = packageName, + title = title, + text = body, + subText = subText, + category = notification.category?.trim()?.ifEmpty { null }, + channelId = notification.channelId?.trim()?.ifEmpty { null }, + postTimeMs = postTime, + isOngoing = isOngoing, + isClearable = isClearable, + ) + } + + companion object { + private fun serviceComponent(context: Context): ComponentName { + return ComponentName(context, DeviceNotificationListenerService::class.java) + } + + fun isAccessEnabled(context: Context): Boolean { + val manager = context.getSystemService(NotificationManager::class.java) ?: return false + return manager.isNotificationListenerAccessGranted(serviceComponent(context)) + } + + fun snapshot(context: Context, enabled: Boolean = isAccessEnabled(context)): DeviceNotificationSnapshot { + return DeviceNotificationStore.snapshot(enabled = enabled) + } + + 
fun requestServiceRebind(context: Context) { + if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) { + return + } + runCatching { + NotificationListenerService.requestRebind(serviceComponent(context)) + } + } + } +} diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/InvokeCommandRegistry.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/InvokeCommandRegistry.kt new file mode 100644 index 00000000000..8d37794df4c --- /dev/null +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/InvokeCommandRegistry.kt @@ -0,0 +1,125 @@ +package ai.openclaw.android.node + +import ai.openclaw.android.protocol.OpenClawCanvasA2UICommand +import ai.openclaw.android.protocol.OpenClawCanvasCommand +import ai.openclaw.android.protocol.OpenClawCameraCommand +import ai.openclaw.android.protocol.OpenClawDeviceCommand +import ai.openclaw.android.protocol.OpenClawLocationCommand +import ai.openclaw.android.protocol.OpenClawNotificationsCommand +import ai.openclaw.android.protocol.OpenClawScreenCommand +import ai.openclaw.android.protocol.OpenClawSmsCommand + +enum class InvokeCommandAvailability { + Always, + CameraEnabled, + LocationEnabled, + SmsAvailable, + DebugBuild, +} + +data class InvokeCommandSpec( + val name: String, + val requiresForeground: Boolean = false, + val availability: InvokeCommandAvailability = InvokeCommandAvailability.Always, +) + +object InvokeCommandRegistry { + val all: List<InvokeCommandSpec> = + listOf( + InvokeCommandSpec( + name = OpenClawCanvasCommand.Present.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name = OpenClawCanvasCommand.Hide.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name = OpenClawCanvasCommand.Navigate.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name = OpenClawCanvasCommand.Eval.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name = OpenClawCanvasCommand.Snapshot.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name =
OpenClawCanvasA2UICommand.Push.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name = OpenClawCanvasA2UICommand.PushJSONL.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name = OpenClawCanvasA2UICommand.Reset.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name = OpenClawScreenCommand.Record.rawValue, + requiresForeground = true, + ), + InvokeCommandSpec( + name = OpenClawCameraCommand.Snap.rawValue, + requiresForeground = true, + availability = InvokeCommandAvailability.CameraEnabled, + ), + InvokeCommandSpec( + name = OpenClawCameraCommand.Clip.rawValue, + requiresForeground = true, + availability = InvokeCommandAvailability.CameraEnabled, + ), + InvokeCommandSpec( + name = OpenClawLocationCommand.Get.rawValue, + availability = InvokeCommandAvailability.LocationEnabled, + ), + InvokeCommandSpec( + name = OpenClawDeviceCommand.Status.rawValue, + ), + InvokeCommandSpec( + name = OpenClawDeviceCommand.Info.rawValue, + ), + InvokeCommandSpec( + name = OpenClawNotificationsCommand.List.rawValue, + ), + InvokeCommandSpec( + name = OpenClawSmsCommand.Send.rawValue, + availability = InvokeCommandAvailability.SmsAvailable, + ), + InvokeCommandSpec( + name = "debug.logs", + availability = InvokeCommandAvailability.DebugBuild, + ), + InvokeCommandSpec( + name = "debug.ed25519", + availability = InvokeCommandAvailability.DebugBuild, + ), + InvokeCommandSpec(name = "app.update"), + ) + + private val byNameInternal: Map<String, InvokeCommandSpec> = all.associateBy { it.name } + + fun find(command: String): InvokeCommandSpec?
= byNameInternal[command] + + fun advertisedCommands( + cameraEnabled: Boolean, + locationEnabled: Boolean, + smsAvailable: Boolean, + debugBuild: Boolean, + ): List<String> { + return all + .filter { spec -> + when (spec.availability) { + InvokeCommandAvailability.Always -> true + InvokeCommandAvailability.CameraEnabled -> cameraEnabled + InvokeCommandAvailability.LocationEnabled -> locationEnabled + InvokeCommandAvailability.SmsAvailable -> smsAvailable + InvokeCommandAvailability.DebugBuild -> debugBuild + } + } + .map { it.name } + } +} diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/InvokeDispatcher.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/InvokeDispatcher.kt index 91e9da8add1..fb88aef03a8 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/node/InvokeDispatcher.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/InvokeDispatcher.kt @@ -4,7 +4,9 @@ import ai.openclaw.android.gateway.GatewaySession import ai.openclaw.android.protocol.OpenClawCanvasA2UICommand import ai.openclaw.android.protocol.OpenClawCanvasCommand import ai.openclaw.android.protocol.OpenClawCameraCommand +import ai.openclaw.android.protocol.OpenClawDeviceCommand import ai.openclaw.android.protocol.OpenClawLocationCommand +import ai.openclaw.android.protocol.OpenClawNotificationsCommand import ai.openclaw.android.protocol.OpenClawScreenCommand import ai.openclaw.android.protocol.OpenClawSmsCommand @@ -12,6 +14,8 @@ class InvokeDispatcher( private val canvas: CanvasController, private val cameraHandler: CameraHandler, private val locationHandler: LocationHandler, + private val deviceHandler: DeviceHandler, + private val notificationsHandler: NotificationsHandler, private val screenHandler: ScreenHandler, private val smsHandler: SmsHandler, private val a2uiHandler: A2UIHandler, @@ -20,40 +24,25 @@ class InvokeDispatcher( private val isForeground: () -> Boolean, private val cameraEnabled: () -> Boolean, private val locationEnabled: () ->
Boolean, + private val smsAvailable: () -> Boolean, + private val debugBuild: () -> Boolean, private val onCanvasA2uiPush: () -> Unit, private val onCanvasA2uiReset: () -> Unit, ) { suspend fun handleInvoke(command: String, paramsJson: String?): GatewaySession.InvokeResult { - // Check foreground requirement for canvas/camera/screen commands - if ( - command.startsWith(OpenClawCanvasCommand.NamespacePrefix) || - command.startsWith(OpenClawCanvasA2UICommand.NamespacePrefix) || - command.startsWith(OpenClawCameraCommand.NamespacePrefix) || - command.startsWith(OpenClawScreenCommand.NamespacePrefix) - ) { - if (!isForeground()) { - return GatewaySession.InvokeResult.error( - code = "NODE_BACKGROUND_UNAVAILABLE", - message = "NODE_BACKGROUND_UNAVAILABLE: canvas/camera/screen commands require foreground", + val spec = + InvokeCommandRegistry.find(command) + ?: return GatewaySession.InvokeResult.error( + code = "INVALID_REQUEST", + message = "INVALID_REQUEST: unknown command", ) - } - } - - // Check camera enabled - if (command.startsWith(OpenClawCameraCommand.NamespacePrefix) && !cameraEnabled()) { + if (spec.requiresForeground && !isForeground()) { return GatewaySession.InvokeResult.error( - code = "CAMERA_DISABLED", - message = "CAMERA_DISABLED: enable Camera in Settings", - ) - } - - // Check location enabled - if (command.startsWith(OpenClawLocationCommand.NamespacePrefix) && !locationEnabled()) { - return GatewaySession.InvokeResult.error( - code = "LOCATION_DISABLED", - message = "LOCATION_DISABLED: enable Location in Settings", + code = "NODE_BACKGROUND_UNAVAILABLE", + message = "NODE_BACKGROUND_UNAVAILABLE: canvas/camera/screen commands require foreground", ) } + availabilityError(spec.availability)?.let { return it } return when (command) { // Canvas commands @@ -75,53 +64,33 @@ class InvokeDispatcher( code = "INVALID_REQUEST", message = "INVALID_REQUEST: javaScript required", ) - val result = - try { - canvas.eval(js) - } catch (err: Throwable) { - return 
GatewaySession.InvokeResult.error( - code = "NODE_BACKGROUND_UNAVAILABLE", - message = "NODE_BACKGROUND_UNAVAILABLE: canvas unavailable", - ) - } - GatewaySession.InvokeResult.ok("""{"result":${result.toJsonString()}}""") + withCanvasAvailable { + val result = canvas.eval(js) + GatewaySession.InvokeResult.ok("""{"result":${result.toJsonString()}}""") + } } OpenClawCanvasCommand.Snapshot.rawValue -> { val snapshotParams = CanvasController.parseSnapshotParams(paramsJson) - val base64 = - try { + withCanvasAvailable { + val base64 = canvas.snapshotBase64( format = snapshotParams.format, quality = snapshotParams.quality, maxWidth = snapshotParams.maxWidth, ) - } catch (err: Throwable) { - return GatewaySession.InvokeResult.error( - code = "NODE_BACKGROUND_UNAVAILABLE", - message = "NODE_BACKGROUND_UNAVAILABLE: canvas unavailable", - ) - } - GatewaySession.InvokeResult.ok("""{"format":"${snapshotParams.format.rawValue}","base64":"$base64"}""") + GatewaySession.InvokeResult.ok("""{"format":"${snapshotParams.format.rawValue}","base64":"$base64"}""") + } } // A2UI commands - OpenClawCanvasA2UICommand.Reset.rawValue -> { - val a2uiUrl = a2uiHandler.resolveA2uiHostUrl() - ?: return GatewaySession.InvokeResult.error( - code = "A2UI_HOST_NOT_CONFIGURED", - message = "A2UI_HOST_NOT_CONFIGURED: gateway did not advertise canvas host", - ) - val ready = a2uiHandler.ensureA2uiReady(a2uiUrl) - if (!ready) { - return GatewaySession.InvokeResult.error( - code = "A2UI_HOST_UNAVAILABLE", - message = "A2UI host not reachable", - ) + OpenClawCanvasA2UICommand.Reset.rawValue -> + withReadyA2ui { + withCanvasAvailable { + val res = canvas.eval(A2UIHandler.a2uiResetJS) + onCanvasA2uiReset() + GatewaySession.InvokeResult.ok(res) + } } - val res = canvas.eval(A2UIHandler.a2uiResetJS) - onCanvasA2uiReset() - GatewaySession.InvokeResult.ok(res) - } OpenClawCanvasA2UICommand.Push.rawValue, OpenClawCanvasA2UICommand.PushJSONL.rawValue -> { val messages = try { @@ -132,22 +101,14 @@ class 
InvokeDispatcher( message = err.message ?: "invalid A2UI payload" ) } - val a2uiUrl = a2uiHandler.resolveA2uiHostUrl() - ?: return GatewaySession.InvokeResult.error( - code = "A2UI_HOST_NOT_CONFIGURED", - message = "A2UI_HOST_NOT_CONFIGURED: gateway did not advertise canvas host", - ) - val ready = a2uiHandler.ensureA2uiReady(a2uiUrl) - if (!ready) { - return GatewaySession.InvokeResult.error( - code = "A2UI_HOST_UNAVAILABLE", - message = "A2UI host not reachable", - ) + withReadyA2ui { + withCanvasAvailable { + val js = A2UIHandler.a2uiApplyMessagesJS(messages) + val res = canvas.eval(js) + onCanvasA2uiPush() + GatewaySession.InvokeResult.ok(res) + } } - val js = A2UIHandler.a2uiApplyMessagesJS(messages) - val res = canvas.eval(js) - onCanvasA2uiPush() - GatewaySession.InvokeResult.ok(res) } // Camera commands @@ -157,6 +118,13 @@ class InvokeDispatcher( // Location command OpenClawLocationCommand.Get.rawValue -> locationHandler.handleLocationGet(paramsJson) + // Device commands + OpenClawDeviceCommand.Status.rawValue -> deviceHandler.handleDeviceStatus(paramsJson) + OpenClawDeviceCommand.Info.rawValue -> deviceHandler.handleDeviceInfo(paramsJson) + + // Notifications command + OpenClawNotificationsCommand.List.rawValue -> notificationsHandler.handleNotificationsList(paramsJson) + // Screen command OpenClawScreenCommand.Record.rawValue -> screenHandler.handleScreenRecord(paramsJson) @@ -170,11 +138,80 @@ class InvokeDispatcher( // App update "app.update" -> appUpdateHandler.handleUpdate(paramsJson) - else -> - GatewaySession.InvokeResult.error( - code = "INVALID_REQUEST", - message = "INVALID_REQUEST: unknown command", - ) + else -> GatewaySession.InvokeResult.error(code = "INVALID_REQUEST", message = "INVALID_REQUEST: unknown command") + } + } + + private suspend fun withReadyA2ui( + block: suspend () -> GatewaySession.InvokeResult, + ): GatewaySession.InvokeResult { + val a2uiUrl = a2uiHandler.resolveA2uiHostUrl() + ?: return GatewaySession.InvokeResult.error( + 
code = "A2UI_HOST_NOT_CONFIGURED", + message = "A2UI_HOST_NOT_CONFIGURED: gateway did not advertise canvas host", + ) + val ready = a2uiHandler.ensureA2uiReady(a2uiUrl) + if (!ready) { + return GatewaySession.InvokeResult.error( + code = "A2UI_HOST_UNAVAILABLE", + message = "A2UI host not reachable", + ) + } + return block() + } + + private suspend fun withCanvasAvailable( + block: suspend () -> GatewaySession.InvokeResult, + ): GatewaySession.InvokeResult { + return try { + block() + } catch (_: Throwable) { + GatewaySession.InvokeResult.error( + code = "NODE_BACKGROUND_UNAVAILABLE", + message = "NODE_BACKGROUND_UNAVAILABLE: canvas unavailable", + ) + } + } + + private fun availabilityError(availability: InvokeCommandAvailability): GatewaySession.InvokeResult? { + return when (availability) { + InvokeCommandAvailability.Always -> null + InvokeCommandAvailability.CameraEnabled -> + if (cameraEnabled()) { + null + } else { + GatewaySession.InvokeResult.error( + code = "CAMERA_DISABLED", + message = "CAMERA_DISABLED: enable Camera in Settings", + ) + } + InvokeCommandAvailability.LocationEnabled -> + if (locationEnabled()) { + null + } else { + GatewaySession.InvokeResult.error( + code = "LOCATION_DISABLED", + message = "LOCATION_DISABLED: enable Location in Settings", + ) + } + InvokeCommandAvailability.SmsAvailable -> + if (smsAvailable()) { + null + } else { + GatewaySession.InvokeResult.error( + code = "SMS_UNAVAILABLE", + message = "SMS_UNAVAILABLE: SMS not available on this device", + ) + } + InvokeCommandAvailability.DebugBuild -> + if (debugBuild()) { + null + } else { + GatewaySession.InvokeResult.error( + code = "INVALID_REQUEST", + message = "INVALID_REQUEST: unknown command", + ) + } } } } diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/NodeUtils.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/NodeUtils.kt index 8ba5ad276d5..c3f463174a4 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/node/NodeUtils.kt +++ 
b/apps/android/app/src/main/java/ai/openclaw/android/node/NodeUtils.kt @@ -1,5 +1,6 @@ package ai.openclaw.android.node +import ai.openclaw.android.gateway.parseInvokeErrorFromThrowable import kotlinx.serialization.json.JsonElement import kotlinx.serialization.json.JsonNull import kotlinx.serialization.json.JsonObject @@ -37,14 +38,9 @@ fun parseHexColorArgb(raw: String?): Long? { } fun invokeErrorFromThrowable(err: Throwable): Pair<String, String> { - val raw = (err.message ?: "").trim() - if (raw.isEmpty()) return "UNAVAILABLE" to "UNAVAILABLE: error" - - val idx = raw.indexOf(':') - if (idx <= 0) return "UNAVAILABLE" to raw - val code = raw.substring(0, idx).trim().ifEmpty { "UNAVAILABLE" } - val message = raw.substring(idx + 1).trim().ifEmpty { raw } - return code to "$code: $message" + val parsed = parseInvokeErrorFromThrowable(err, fallbackMessage = "UNAVAILABLE: error") + val message = if (parsed.hadExplicitCode) parsed.prefixedMessage else parsed.message + return parsed.code to message } fun normalizeMainKey(raw: String?): String?
{ diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/NotificationsHandler.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/NotificationsHandler.kt new file mode 100644 index 00000000000..0216e19208c --- /dev/null +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/NotificationsHandler.kt @@ -0,0 +1,81 @@ +package ai.openclaw.android.node + +import android.content.Context +import ai.openclaw.android.gateway.GatewaySession +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.buildJsonObject +import kotlinx.serialization.json.put + +internal interface NotificationsStateProvider { + fun readSnapshot(context: Context): DeviceNotificationSnapshot + + fun requestServiceRebind(context: Context) +} + +private object SystemNotificationsStateProvider : NotificationsStateProvider { + override fun readSnapshot(context: Context): DeviceNotificationSnapshot { + val enabled = DeviceNotificationListenerService.isAccessEnabled(context) + if (!enabled) { + return DeviceNotificationSnapshot( + enabled = false, + connected = false, + notifications = emptyList(), + ) + } + return DeviceNotificationListenerService.snapshot(context, enabled = true) + } + + override fun requestServiceRebind(context: Context) { + DeviceNotificationListenerService.requestServiceRebind(context) + } +} + +class NotificationsHandler private constructor( + private val appContext: Context, + private val stateProvider: NotificationsStateProvider, +) { + constructor(appContext: Context) : this(appContext = appContext, stateProvider = SystemNotificationsStateProvider) + + suspend fun handleNotificationsList(_paramsJson: String?): GatewaySession.InvokeResult { + val snapshot = stateProvider.readSnapshot(appContext) + if (snapshot.enabled && !snapshot.connected) { + stateProvider.requestServiceRebind(appContext) + } + return GatewaySession.InvokeResult.ok(snapshotPayloadJson(snapshot)) + } + + private 
fun snapshotPayloadJson(snapshot: DeviceNotificationSnapshot): String { + return buildJsonObject { + put("enabled", JsonPrimitive(snapshot.enabled)) + put("connected", JsonPrimitive(snapshot.connected)) + put("count", JsonPrimitive(snapshot.notifications.size)) + put( + "notifications", + JsonArray( + snapshot.notifications.map { entry -> + buildJsonObject { + put("key", JsonPrimitive(entry.key)) + put("packageName", JsonPrimitive(entry.packageName)) + put("postTimeMs", JsonPrimitive(entry.postTimeMs)) + put("isOngoing", JsonPrimitive(entry.isOngoing)) + put("isClearable", JsonPrimitive(entry.isClearable)) + entry.title?.let { put("title", JsonPrimitive(it)) } + entry.text?.let { put("text", JsonPrimitive(it)) } + entry.subText?.let { put("subText", JsonPrimitive(it)) } + entry.category?.let { put("category", JsonPrimitive(it)) } + entry.channelId?.let { put("channelId", JsonPrimitive(it)) } + } + }, + ), + ) + }.toString() + } + + companion object { + internal fun forTesting( + appContext: Context, + stateProvider: NotificationsStateProvider, + ): NotificationsHandler = NotificationsHandler(appContext = appContext, stateProvider = stateProvider) + } +} diff --git a/apps/android/app/src/main/java/ai/openclaw/android/node/ScreenRecordManager.kt b/apps/android/app/src/main/java/ai/openclaw/android/node/ScreenRecordManager.kt index 337a953866a..98a3e4d9593 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/node/ScreenRecordManager.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/node/ScreenRecordManager.kt @@ -10,6 +10,10 @@ import ai.openclaw.android.ScreenCaptureRequester import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.delay import kotlinx.coroutines.withContext +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.contentOrNull import java.io.File import kotlin.math.roundToInt @@ -35,12 +39,13 @@ class 
ScreenRecordManager(private val context: Context) { "SCREEN_PERMISSION_REQUIRED: grant Screen Recording permission", ) - val durationMs = (parseDurationMs(paramsJson) ?: 10_000).coerceIn(250, 60_000) - val fps = (parseFps(paramsJson) ?: 10.0).coerceIn(1.0, 60.0) + val params = parseParamsObject(paramsJson) + val durationMs = (parseDurationMs(params) ?: 10_000).coerceIn(250, 60_000) + val fps = (parseFps(params) ?: 10.0).coerceIn(1.0, 60.0) val fpsInt = fps.roundToInt().coerceIn(1, 60) - val screenIndex = parseScreenIndex(paramsJson) - val includeAudio = parseIncludeAudio(paramsJson) ?: true - val format = parseString(paramsJson, key = "format") + val screenIndex = parseScreenIndex(params) + val includeAudio = parseIncludeAudio(params) ?: true + val format = parseString(params, key = "format") if (format != null && format.lowercase() != "mp4") { throw IllegalArgumentException("INVALID_REQUEST: screen format must be mp4") } @@ -141,55 +146,38 @@ class ScreenRecordManager(private val context: Context) { } } - private fun parseDurationMs(paramsJson: String?): Int? = - parseNumber(paramsJson, key = "durationMs")?.toIntOrNull() + private fun parseParamsObject(paramsJson: String?): JsonObject? { + if (paramsJson.isNullOrBlank()) return null + return try { + Json.parseToJsonElement(paramsJson).asObjectOrNull() + } catch (_: Throwable) { + null + } + } - private fun parseFps(paramsJson: String?): Double? = - parseNumber(paramsJson, key = "fps")?.toDoubleOrNull() + private fun readPrimitive(params: JsonObject?, key: String): JsonPrimitive? = + params?.get(key) as? JsonPrimitive - private fun parseScreenIndex(paramsJson: String?): Int? = - parseNumber(paramsJson, key = "screenIndex")?.toIntOrNull() + private fun parseDurationMs(params: JsonObject?): Int? = + readPrimitive(params, "durationMs")?.contentOrNull?.toIntOrNull() - private fun parseIncludeAudio(paramsJson: String?): Boolean? 
{ - val raw = paramsJson ?: return null - val key = "\"includeAudio\"" - val idx = raw.indexOf(key) - if (idx < 0) return null - val colon = raw.indexOf(':', idx + key.length) - if (colon < 0) return null - val tail = raw.substring(colon + 1).trimStart() - return when { - tail.startsWith("true") -> true - tail.startsWith("false") -> false + private fun parseFps(params: JsonObject?): Double? = + readPrimitive(params, "fps")?.contentOrNull?.toDoubleOrNull() + + private fun parseScreenIndex(params: JsonObject?): Int? = + readPrimitive(params, "screenIndex")?.contentOrNull?.toIntOrNull() + + private fun parseIncludeAudio(params: JsonObject?): Boolean? { + val value = readPrimitive(params, "includeAudio")?.contentOrNull?.trim()?.lowercase() + return when (value) { + "true" -> true + "false" -> false else -> null } } - private fun parseNumber(paramsJson: String?, key: String): String? { - val raw = paramsJson ?: return null - val needle = "\"$key\"" - val idx = raw.indexOf(needle) - if (idx < 0) return null - val colon = raw.indexOf(':', idx + needle.length) - if (colon < 0) return null - val tail = raw.substring(colon + 1).trimStart() - return tail.takeWhile { it.isDigit() || it == '.' || it == '-' } - } - - private fun parseString(paramsJson: String?, key: String): String? { - val raw = paramsJson ?: return null - val needle = "\"$key\"" - val idx = raw.indexOf(needle) - if (idx < 0) return null - val colon = raw.indexOf(':', idx + needle.length) - if (colon < 0) return null - val tail = raw.substring(colon + 1).trimStart() - if (!tail.startsWith('\"')) return null - val rest = tail.drop(1) - val end = rest.indexOf('\"') - if (end < 0) return null - return rest.substring(0, end) - } + private fun parseString(params: JsonObject?, key: String): String? 
= + readPrimitive(params, key)?.contentOrNull private fun estimateBitrate(width: Int, height: Int, fps: Int): Int { val pixels = width.toLong() * height.toLong() diff --git a/apps/android/app/src/main/java/ai/openclaw/android/protocol/OpenClawProtocolConstants.kt b/apps/android/app/src/main/java/ai/openclaw/android/protocol/OpenClawProtocolConstants.kt index ccca40c4c35..7dd48941331 100644 --- a/apps/android/app/src/main/java/ai/openclaw/android/protocol/OpenClawProtocolConstants.kt +++ b/apps/android/app/src/main/java/ai/openclaw/android/protocol/OpenClawProtocolConstants.kt @@ -7,6 +7,7 @@ enum class OpenClawCapability(val rawValue: String) { Sms("sms"), VoiceWake("voiceWake"), Location("location"), + Device("device"), } enum class OpenClawCanvasCommand(val rawValue: String) { @@ -69,3 +70,22 @@ enum class OpenClawLocationCommand(val rawValue: String) { const val NamespacePrefix: String = "location." } } + +enum class OpenClawDeviceCommand(val rawValue: String) { + Status("device.status"), + Info("device.info"), + ; + + companion object { + const val NamespacePrefix: String = "device." + } +} + +enum class OpenClawNotificationsCommand(val rawValue: String) { + List("notifications.list"), + ; + + companion object { + const val NamespacePrefix: String = "notifications." 
+ } +} diff --git a/apps/android/app/src/test/java/ai/openclaw/android/gateway/DeviceAuthPayloadTest.kt b/apps/android/app/src/test/java/ai/openclaw/android/gateway/DeviceAuthPayloadTest.kt new file mode 100644 index 00000000000..95e145fb11f --- /dev/null +++ b/apps/android/app/src/test/java/ai/openclaw/android/gateway/DeviceAuthPayloadTest.kt @@ -0,0 +1,35 @@ +package ai.openclaw.android.gateway + +import org.junit.Assert.assertEquals +import org.junit.Test + +class DeviceAuthPayloadTest { + @Test + fun buildV3_matchesCanonicalVector() { + val payload = + DeviceAuthPayload.buildV3( + deviceId = "dev-1", + clientId = "openclaw-macos", + clientMode = "ui", + role = "operator", + scopes = listOf("operator.admin", "operator.read"), + signedAtMs = 1_700_000_000_000, + token = "tok-123", + nonce = "nonce-abc", + platform = " IOS ", + deviceFamily = " iPhone ", + ) + + assertEquals( + "v3|dev-1|openclaw-macos|ui|operator|operator.admin,operator.read|1700000000000|tok-123|nonce-abc|ios|iphone", + payload, + ) + } + + @Test + fun normalizeMetadataField_asciiOnlyLowercase() { + assertEquals("İos", DeviceAuthPayload.normalizeMetadataField(" İOS ")) + assertEquals("mac", DeviceAuthPayload.normalizeMetadataField(" MAC ")) + assertEquals("", DeviceAuthPayload.normalizeMetadataField(null)) + } +} diff --git a/apps/android/app/src/test/java/ai/openclaw/android/gateway/GatewaySessionInvokeTest.kt b/apps/android/app/src/test/java/ai/openclaw/android/gateway/GatewaySessionInvokeTest.kt new file mode 100644 index 00000000000..e8a37aef21b --- /dev/null +++ b/apps/android/app/src/test/java/ai/openclaw/android/gateway/GatewaySessionInvokeTest.kt @@ -0,0 +1,442 @@ +package ai.openclaw.android.gateway + +import kotlinx.coroutines.CompletableDeferred +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.SupervisorJob +import kotlinx.coroutines.cancelAndJoin +import kotlinx.coroutines.runBlocking +import kotlinx.coroutines.withTimeout 
+import kotlinx.coroutines.withTimeoutOrNull +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.jsonObject +import kotlinx.serialization.json.jsonPrimitive +import okhttp3.Response +import okhttp3.WebSocket +import okhttp3.WebSocketListener +import okhttp3.mockwebserver.Dispatcher +import okhttp3.mockwebserver.MockResponse +import okhttp3.mockwebserver.MockWebServer +import okhttp3.mockwebserver.RecordedRequest +import org.junit.Assert.assertEquals +import org.junit.Assert.assertNull +import org.junit.Test +import org.junit.runner.RunWith +import org.robolectric.RobolectricTestRunner +import org.robolectric.RuntimeEnvironment +import org.robolectric.annotation.Config +import java.util.concurrent.atomic.AtomicReference + +private class InMemoryDeviceAuthStore : DeviceAuthTokenStore { + private val tokens = mutableMapOf<String, String>() + + override fun loadToken(deviceId: String, role: String): String? = tokens["${deviceId.trim()}|${role.trim()}"]?.trim()?.takeIf { it.isNotEmpty() } + + override fun saveToken(deviceId: String, role: String, token: String) { + tokens["${deviceId.trim()}|${role.trim()}"] = token.trim() + } +} + +@RunWith(RobolectricTestRunner::class) +@Config(sdk = [34]) +class GatewaySessionInvokeTest { + @Test + fun nodeInvokeRequest_roundTripsInvokeResult() = runBlocking { + val json = Json { ignoreUnknownKeys = true } + val connected = CompletableDeferred<Unit>() + val invokeRequest = CompletableDeferred<GatewaySession.InvokeRequest>() + val invokeResultParams = CompletableDeferred<String>() + val handshakeOrigin = AtomicReference<String?>(null) + val lastDisconnect = AtomicReference("") + val server = + MockWebServer().apply { + dispatcher = + object : Dispatcher() { + override fun dispatch(request: RecordedRequest): MockResponse { + handshakeOrigin.compareAndSet(null, request.getHeader("Origin")) + return MockResponse().withWebSocketUpgrade( + object : WebSocketListener() { + override fun onOpen(webSocket: WebSocket, response: Response) { + webSocket.send(
"""{"type":"event","event":"connect.challenge","payload":{"nonce":"android-test-nonce"}}""", + ) + } + + override fun onMessage(webSocket: WebSocket, text: String) { + val frame = json.parseToJsonElement(text).jsonObject + if (frame["type"]?.jsonPrimitive?.content != "req") return + val id = frame["id"]?.jsonPrimitive?.content ?: return + val method = frame["method"]?.jsonPrimitive?.content ?: return + when (method) { + "connect" -> { + webSocket.send( + """{"type":"res","id":"$id","ok":true,"payload":{"snapshot":{"sessionDefaults":{"mainSessionKey":"main"}}}}""", + ) + webSocket.send( + """{"type":"event","event":"node.invoke.request","payload":{"id":"invoke-1","nodeId":"node-1","command":"debug.ping","params":{"ping":"pong"},"timeoutMs":5000}}""", + ) + } + "node.invoke.result" -> { + if (!invokeResultParams.isCompleted) { + invokeResultParams.complete(frame["params"]?.toString().orEmpty()) + } + webSocket.send("""{"type":"res","id":"$id","ok":true,"payload":{"ok":true}}""") + webSocket.close(1000, "done") + } + } + } + }, + ) + } + } + start() + } + + val app = RuntimeEnvironment.getApplication() + val sessionJob = SupervisorJob() + val deviceAuthStore = InMemoryDeviceAuthStore() + val session = + GatewaySession( + scope = CoroutineScope(sessionJob + Dispatchers.Default), + identityStore = DeviceIdentityStore(app), + deviceAuthStore = deviceAuthStore, + onConnected = { _, _, _ -> + if (!connected.isCompleted) connected.complete(Unit) + }, + onDisconnected = { message -> + lastDisconnect.set(message) + }, + onEvent = { _, _ -> }, + onInvoke = { req -> + if (!invokeRequest.isCompleted) invokeRequest.complete(req) + GatewaySession.InvokeResult.ok("""{"handled":true}""") + }, + ) + + try { + session.connect( + endpoint = + GatewayEndpoint( + stableId = "manual|127.0.0.1|${server.port}", + name = "test", + host = "127.0.0.1", + port = server.port, + tlsEnabled = false, + ), + token = "test-token", + password = null, + options = + GatewayConnectOptions( + role = 
"node", + scopes = listOf("node:invoke"), + caps = emptyList(), + commands = emptyList(), + permissions = emptyMap(), + client = + GatewayClientInfo( + id = "openclaw-android-test", + displayName = "Android Test", + version = "1.0.0-test", + platform = "android", + mode = "node", + instanceId = "android-test-instance", + deviceFamily = "android", + modelIdentifier = "test", + ), + ), + tls = null, + ) + + val connectedWithinTimeout = withTimeoutOrNull(8_000) { + connected.await() + true + } == true + if (!connectedWithinTimeout) { + throw AssertionError("never connected; lastDisconnect=${lastDisconnect.get()}; requests=${server.requestCount}") + } + val req = withTimeout(8_000) { invokeRequest.await() } + val resultParamsJson = withTimeout(8_000) { invokeResultParams.await() } + val resultParams = json.parseToJsonElement(resultParamsJson).jsonObject + + assertEquals("invoke-1", req.id) + assertEquals("node-1", req.nodeId) + assertEquals("debug.ping", req.command) + assertEquals("""{"ping":"pong"}""", req.paramsJson) + assertNull(handshakeOrigin.get()) + assertEquals("invoke-1", resultParams["id"]?.jsonPrimitive?.content) + assertEquals("node-1", resultParams["nodeId"]?.jsonPrimitive?.content) + assertEquals(true, resultParams["ok"]?.jsonPrimitive?.content?.toBooleanStrict()) + assertEquals( + true, + resultParams["payload"]?.jsonObject?.get("handled")?.jsonPrimitive?.content?.toBooleanStrict(), + ) + } finally { + session.disconnect() + sessionJob.cancelAndJoin() + server.shutdown() + } + } + + @Test + fun nodeInvokeRequest_usesParamsJsonWhenProvided() = runBlocking { + val json = Json { ignoreUnknownKeys = true } + val connected = CompletableDeferred() + val invokeRequest = CompletableDeferred() + val invokeResultParams = CompletableDeferred() + val lastDisconnect = AtomicReference("") + val server = + MockWebServer().apply { + dispatcher = + object : Dispatcher() { + override fun dispatch(request: RecordedRequest): MockResponse { + return 
MockResponse().withWebSocketUpgrade( + object : WebSocketListener() { + override fun onOpen(webSocket: WebSocket, response: Response) { + webSocket.send( + """{"type":"event","event":"connect.challenge","payload":{"nonce":"android-test-nonce"}}""", + ) + } + + override fun onMessage(webSocket: WebSocket, text: String) { + val frame = json.parseToJsonElement(text).jsonObject + if (frame["type"]?.jsonPrimitive?.content != "req") return + val id = frame["id"]?.jsonPrimitive?.content ?: return + val method = frame["method"]?.jsonPrimitive?.content ?: return + when (method) { + "connect" -> { + webSocket.send( + """{"type":"res","id":"$id","ok":true,"payload":{"snapshot":{"sessionDefaults":{"mainSessionKey":"main"}}}}""", + ) + webSocket.send( + """{"type":"event","event":"node.invoke.request","payload":{"id":"invoke-2","nodeId":"node-2","command":"debug.raw","paramsJSON":"{\"raw\":true}","params":{"ignored":1},"timeoutMs":5000}}""", + ) + } + "node.invoke.result" -> { + if (!invokeResultParams.isCompleted) { + invokeResultParams.complete(frame["params"]?.toString().orEmpty()) + } + webSocket.send("""{"type":"res","id":"$id","ok":true,"payload":{"ok":true}}""") + webSocket.close(1000, "done") + } + } + } + }, + ) + } + } + start() + } + + val app = RuntimeEnvironment.getApplication() + val sessionJob = SupervisorJob() + val deviceAuthStore = InMemoryDeviceAuthStore() + val session = + GatewaySession( + scope = CoroutineScope(sessionJob + Dispatchers.Default), + identityStore = DeviceIdentityStore(app), + deviceAuthStore = deviceAuthStore, + onConnected = { _, _, _ -> + if (!connected.isCompleted) connected.complete(Unit) + }, + onDisconnected = { message -> + lastDisconnect.set(message) + }, + onEvent = { _, _ -> }, + onInvoke = { req -> + if (!invokeRequest.isCompleted) invokeRequest.complete(req) + GatewaySession.InvokeResult.ok("""{"handled":true}""") + }, + ) + + try { + session.connect( + endpoint = + GatewayEndpoint( + stableId = "manual|127.0.0.1|${server.port}", 
+ name = "test", + host = "127.0.0.1", + port = server.port, + tlsEnabled = false, + ), + token = "test-token", + password = null, + options = + GatewayConnectOptions( + role = "node", + scopes = listOf("node:invoke"), + caps = emptyList(), + commands = emptyList(), + permissions = emptyMap(), + client = + GatewayClientInfo( + id = "openclaw-android-test", + displayName = "Android Test", + version = "1.0.0-test", + platform = "android", + mode = "node", + instanceId = "android-test-instance", + deviceFamily = "android", + modelIdentifier = "test", + ), + ), + tls = null, + ) + + val connectedWithinTimeout = withTimeoutOrNull(8_000) { + connected.await() + true + } == true + if (!connectedWithinTimeout) { + throw AssertionError("never connected; lastDisconnect=${lastDisconnect.get()}; requests=${server.requestCount}") + } + + val req = withTimeout(8_000) { invokeRequest.await() } + val resultParamsJson = withTimeout(8_000) { invokeResultParams.await() } + val resultParams = json.parseToJsonElement(resultParamsJson).jsonObject + + assertEquals("invoke-2", req.id) + assertEquals("node-2", req.nodeId) + assertEquals("debug.raw", req.command) + assertEquals("""{"raw":true}""", req.paramsJson) + assertEquals("invoke-2", resultParams["id"]?.jsonPrimitive?.content) + assertEquals("node-2", resultParams["nodeId"]?.jsonPrimitive?.content) + assertEquals(true, resultParams["ok"]?.jsonPrimitive?.content?.toBooleanStrict()) + } finally { + session.disconnect() + sessionJob.cancelAndJoin() + server.shutdown() + } + } + + @Test + fun nodeInvokeRequest_mapsCodePrefixedErrorsIntoInvokeResult() = runBlocking { + val json = Json { ignoreUnknownKeys = true } + val connected = CompletableDeferred() + val invokeResultParams = CompletableDeferred() + val lastDisconnect = AtomicReference("") + val server = + MockWebServer().apply { + dispatcher = + object : Dispatcher() { + override fun dispatch(request: RecordedRequest): MockResponse { + return MockResponse().withWebSocketUpgrade( + 
object : WebSocketListener() { + override fun onOpen(webSocket: WebSocket, response: Response) { + webSocket.send( + """{"type":"event","event":"connect.challenge","payload":{"nonce":"android-test-nonce"}}""", + ) + } + + override fun onMessage(webSocket: WebSocket, text: String) { + val frame = json.parseToJsonElement(text).jsonObject + if (frame["type"]?.jsonPrimitive?.content != "req") return + val id = frame["id"]?.jsonPrimitive?.content ?: return + val method = frame["method"]?.jsonPrimitive?.content ?: return + when (method) { + "connect" -> { + webSocket.send( + """{"type":"res","id":"$id","ok":true,"payload":{"snapshot":{"sessionDefaults":{"mainSessionKey":"main"}}}}""", + ) + webSocket.send( + """{"type":"event","event":"node.invoke.request","payload":{"id":"invoke-3","nodeId":"node-3","command":"camera.snap","params":{"facing":"front"},"timeoutMs":5000}}""", + ) + } + "node.invoke.result" -> { + if (!invokeResultParams.isCompleted) { + invokeResultParams.complete(frame["params"]?.toString().orEmpty()) + } + webSocket.send("""{"type":"res","id":"$id","ok":true,"payload":{"ok":true}}""") + webSocket.close(1000, "done") + } + } + } + }, + ) + } + } + start() + } + + val app = RuntimeEnvironment.getApplication() + val sessionJob = SupervisorJob() + val deviceAuthStore = InMemoryDeviceAuthStore() + val session = + GatewaySession( + scope = CoroutineScope(sessionJob + Dispatchers.Default), + identityStore = DeviceIdentityStore(app), + deviceAuthStore = deviceAuthStore, + onConnected = { _, _, _ -> + if (!connected.isCompleted) connected.complete(Unit) + }, + onDisconnected = { message -> + lastDisconnect.set(message) + }, + onEvent = { _, _ -> }, + onInvoke = { + throw IllegalStateException("CAMERA_PERMISSION_REQUIRED: grant Camera permission") + }, + ) + + try { + session.connect( + endpoint = + GatewayEndpoint( + stableId = "manual|127.0.0.1|${server.port}", + name = "test", + host = "127.0.0.1", + port = server.port, + tlsEnabled = false, + ), + token = 
"test-token", + password = null, + options = + GatewayConnectOptions( + role = "node", + scopes = listOf("node:invoke"), + caps = emptyList(), + commands = emptyList(), + permissions = emptyMap(), + client = + GatewayClientInfo( + id = "openclaw-android-test", + displayName = "Android Test", + version = "1.0.0-test", + platform = "android", + mode = "node", + instanceId = "android-test-instance", + deviceFamily = "android", + modelIdentifier = "test", + ), + ), + tls = null, + ) + + val connectedWithinTimeout = withTimeoutOrNull(8_000) { + connected.await() + true + } == true + if (!connectedWithinTimeout) { + throw AssertionError("never connected; lastDisconnect=${lastDisconnect.get()}; requests=${server.requestCount}") + } + + val resultParamsJson = withTimeout(8_000) { invokeResultParams.await() } + val resultParams = json.parseToJsonElement(resultParamsJson).jsonObject + + assertEquals("invoke-3", resultParams["id"]?.jsonPrimitive?.content) + assertEquals("node-3", resultParams["nodeId"]?.jsonPrimitive?.content) + assertEquals(false, resultParams["ok"]?.jsonPrimitive?.content?.toBooleanStrict()) + assertEquals( + "CAMERA_PERMISSION_REQUIRED", + resultParams["error"]?.jsonObject?.get("code")?.jsonPrimitive?.content, + ) + assertEquals( + "grant Camera permission", + resultParams["error"]?.jsonObject?.get("message")?.jsonPrimitive?.content, + ) + } finally { + session.disconnect() + sessionJob.cancelAndJoin() + server.shutdown() + } + } +} diff --git a/apps/android/app/src/test/java/ai/openclaw/android/gateway/InvokeErrorParserTest.kt b/apps/android/app/src/test/java/ai/openclaw/android/gateway/InvokeErrorParserTest.kt new file mode 100644 index 00000000000..ca8e8f21424 --- /dev/null +++ b/apps/android/app/src/test/java/ai/openclaw/android/gateway/InvokeErrorParserTest.kt @@ -0,0 +1,33 @@ +package ai.openclaw.android.gateway + +import org.junit.Assert.assertEquals +import org.junit.Assert.assertFalse +import org.junit.Assert.assertTrue +import org.junit.Test + 
+class InvokeErrorParserTest { + @Test + fun parseInvokeErrorMessage_parsesUppercaseCodePrefix() { + val parsed = parseInvokeErrorMessage("CAMERA_PERMISSION_REQUIRED: grant Camera permission") + assertEquals("CAMERA_PERMISSION_REQUIRED", parsed.code) + assertEquals("grant Camera permission", parsed.message) + assertTrue(parsed.hadExplicitCode) + assertEquals("CAMERA_PERMISSION_REQUIRED: grant Camera permission", parsed.prefixedMessage) + } + + @Test + fun parseInvokeErrorMessage_rejectsNonCanonicalCodePrefix() { + val parsed = parseInvokeErrorMessage("IllegalStateException: boom") + assertEquals("UNAVAILABLE", parsed.code) + assertEquals("IllegalStateException: boom", parsed.message) + assertFalse(parsed.hadExplicitCode) + } + + @Test + fun parseInvokeErrorFromThrowable_usesFallbackWhenMessageMissing() { + val parsed = parseInvokeErrorFromThrowable(IllegalStateException(), fallbackMessage = "fallback") + assertEquals("UNAVAILABLE", parsed.code) + assertEquals("fallback", parsed.message) + assertFalse(parsed.hadExplicitCode) + } +} diff --git a/apps/android/app/src/test/java/ai/openclaw/android/node/CameraHandlerTest.kt b/apps/android/app/src/test/java/ai/openclaw/android/node/CameraHandlerTest.kt new file mode 100644 index 00000000000..470f925a7d4 --- /dev/null +++ b/apps/android/app/src/test/java/ai/openclaw/android/node/CameraHandlerTest.kt @@ -0,0 +1,25 @@ +package ai.openclaw.android.node + +import org.junit.Assert.assertEquals +import org.junit.Assert.assertFalse +import org.junit.Assert.assertTrue +import org.junit.Test + +class CameraHandlerTest { + @Test + fun isCameraClipWithinPayloadLimit_allowsZeroAndLimit() { + assertTrue(isCameraClipWithinPayloadLimit(0L)) + assertTrue(isCameraClipWithinPayloadLimit(CAMERA_CLIP_MAX_RAW_BYTES)) + } + + @Test + fun isCameraClipWithinPayloadLimit_rejectsNegativeAndTooLarge() { + assertFalse(isCameraClipWithinPayloadLimit(-1L)) + assertFalse(isCameraClipWithinPayloadLimit(CAMERA_CLIP_MAX_RAW_BYTES + 1L)) + } + + @Test + 
fun cameraClipMaxRawBytes_matchesExpectedBudget() { + assertEquals(18L * 1024L * 1024L, CAMERA_CLIP_MAX_RAW_BYTES) + } +} diff --git a/apps/android/app/src/test/java/ai/openclaw/android/node/DeviceHandlerTest.kt b/apps/android/app/src/test/java/ai/openclaw/android/node/DeviceHandlerTest.kt new file mode 100644 index 00000000000..046f610bf5b --- /dev/null +++ b/apps/android/app/src/test/java/ai/openclaw/android/node/DeviceHandlerTest.kt @@ -0,0 +1,82 @@ +package ai.openclaw.android.node + +import android.content.Context +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.boolean +import kotlinx.serialization.json.double +import kotlinx.serialization.json.jsonArray +import kotlinx.serialization.json.jsonObject +import kotlinx.serialization.json.jsonPrimitive +import org.junit.Assert.assertEquals +import org.junit.Assert.assertTrue +import org.junit.Test +import org.junit.runner.RunWith +import org.robolectric.RobolectricTestRunner +import org.robolectric.RuntimeEnvironment + +@RunWith(RobolectricTestRunner::class) +class DeviceHandlerTest { + @Test + fun handleDeviceInfo_returnsStablePayload() { + val handler = DeviceHandler(appContext()) + + val result = handler.handleDeviceInfo(null) + + assertTrue(result.ok) + val payload = parsePayload(result.payloadJson) + assertEquals("Android", payload.getValue("systemName").jsonPrimitive.content) + assertTrue(payload.getValue("deviceName").jsonPrimitive.content.isNotBlank()) + assertTrue(payload.getValue("modelIdentifier").jsonPrimitive.content.isNotBlank()) + assertTrue(payload.getValue("systemVersion").jsonPrimitive.content.isNotBlank()) + assertTrue(payload.getValue("appVersion").jsonPrimitive.content.isNotBlank()) + assertTrue(payload.getValue("appBuild").jsonPrimitive.content.isNotBlank()) + assertTrue(payload.getValue("locale").jsonPrimitive.content.isNotBlank()) + } + + @Test + fun handleDeviceStatus_returnsExpectedShape() { + val handler = 
DeviceHandler(appContext()) + + val result = handler.handleDeviceStatus(null) + + assertTrue(result.ok) + val payload = parsePayload(result.payloadJson) + val battery = payload.getValue("battery").jsonObject + val storage = payload.getValue("storage").jsonObject + val thermal = payload.getValue("thermal").jsonObject + val network = payload.getValue("network").jsonObject + + val state = battery.getValue("state").jsonPrimitive.content + assertTrue(state in setOf("unknown", "unplugged", "charging", "full")) + battery["level"]?.jsonPrimitive?.double?.let { level -> + assertTrue(level in 0.0..1.0) + } + battery.getValue("lowPowerModeEnabled").jsonPrimitive.boolean + + val totalBytes = storage.getValue("totalBytes").jsonPrimitive.content.toLong() + val freeBytes = storage.getValue("freeBytes").jsonPrimitive.content.toLong() + val usedBytes = storage.getValue("usedBytes").jsonPrimitive.content.toLong() + assertTrue(totalBytes >= 0L) + assertTrue(freeBytes >= 0L) + assertTrue(usedBytes >= 0L) + assertEquals((totalBytes - freeBytes).coerceAtLeast(0L), usedBytes) + + val thermalState = thermal.getValue("state").jsonPrimitive.content + assertTrue(thermalState in setOf("nominal", "fair", "serious", "critical")) + + val networkStatus = network.getValue("status").jsonPrimitive.content + assertTrue(networkStatus in setOf("satisfied", "unsatisfied", "requiresConnection")) + val interfaces = network.getValue("interfaces").jsonArray.map { it.jsonPrimitive.content } + assertTrue(interfaces.all { it in setOf("wifi", "cellular", "wired", "other") }) + + assertTrue(payload.getValue("uptimeSeconds").jsonPrimitive.double >= 0.0) + } + + private fun appContext(): Context = RuntimeEnvironment.getApplication() + + private fun parsePayload(payloadJson: String?): JsonObject { + val jsonString = payloadJson ?: error("expected payload") + return Json.parseToJsonElement(jsonString).jsonObject + } +} diff --git a/apps/android/app/src/test/java/ai/openclaw/android/node/InvokeCommandRegistryTest.kt 
b/apps/android/app/src/test/java/ai/openclaw/android/node/InvokeCommandRegistryTest.kt new file mode 100644 index 00000000000..148d3866346 --- /dev/null +++ b/apps/android/app/src/test/java/ai/openclaw/android/node/InvokeCommandRegistryTest.kt @@ -0,0 +1,56 @@ +package ai.openclaw.android.node + +import ai.openclaw.android.protocol.OpenClawCameraCommand +import ai.openclaw.android.protocol.OpenClawDeviceCommand +import ai.openclaw.android.protocol.OpenClawLocationCommand +import ai.openclaw.android.protocol.OpenClawNotificationsCommand +import ai.openclaw.android.protocol.OpenClawSmsCommand +import org.junit.Assert.assertFalse +import org.junit.Assert.assertTrue +import org.junit.Test + +class InvokeCommandRegistryTest { + @Test + fun advertisedCommands_respectsFeatureAvailability() { + val commands = + InvokeCommandRegistry.advertisedCommands( + cameraEnabled = false, + locationEnabled = false, + smsAvailable = false, + debugBuild = false, + ) + + assertFalse(commands.contains(OpenClawCameraCommand.Snap.rawValue)) + assertFalse(commands.contains(OpenClawCameraCommand.Clip.rawValue)) + assertFalse(commands.contains(OpenClawLocationCommand.Get.rawValue)) + assertTrue(commands.contains(OpenClawDeviceCommand.Status.rawValue)) + assertTrue(commands.contains(OpenClawDeviceCommand.Info.rawValue)) + assertTrue(commands.contains(OpenClawNotificationsCommand.List.rawValue)) + assertFalse(commands.contains(OpenClawSmsCommand.Send.rawValue)) + assertFalse(commands.contains("debug.logs")) + assertFalse(commands.contains("debug.ed25519")) + assertTrue(commands.contains("app.update")) + } + + @Test + fun advertisedCommands_includesFeatureCommandsWhenEnabled() { + val commands = + InvokeCommandRegistry.advertisedCommands( + cameraEnabled = true, + locationEnabled = true, + smsAvailable = true, + debugBuild = true, + ) + + assertTrue(commands.contains(OpenClawCameraCommand.Snap.rawValue)) + assertTrue(commands.contains(OpenClawCameraCommand.Clip.rawValue)) + 
assertTrue(commands.contains(OpenClawLocationCommand.Get.rawValue)) + assertTrue(commands.contains(OpenClawDeviceCommand.Status.rawValue)) + assertTrue(commands.contains(OpenClawDeviceCommand.Info.rawValue)) + assertTrue(commands.contains(OpenClawNotificationsCommand.List.rawValue)) + assertTrue(commands.contains(OpenClawSmsCommand.Send.rawValue)) + assertTrue(commands.contains("debug.logs")) + assertTrue(commands.contains("debug.ed25519")) + assertTrue(commands.contains("app.update")) + } +} diff --git a/apps/android/app/src/test/java/ai/openclaw/android/node/NotificationsHandlerTest.kt b/apps/android/app/src/test/java/ai/openclaw/android/node/NotificationsHandlerTest.kt new file mode 100644 index 00000000000..7768e6e25da --- /dev/null +++ b/apps/android/app/src/test/java/ai/openclaw/android/node/NotificationsHandlerTest.kt @@ -0,0 +1,146 @@ +package ai.openclaw.android.node + +import android.content.Context +import ai.openclaw.android.gateway.GatewaySession +import kotlinx.coroutines.test.runTest +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.boolean +import kotlinx.serialization.json.int +import kotlinx.serialization.json.jsonArray +import kotlinx.serialization.json.jsonObject +import kotlinx.serialization.json.jsonPrimitive +import org.junit.Assert.assertEquals +import org.junit.Assert.assertFalse +import org.junit.Assert.assertNull +import org.junit.Assert.assertTrue +import org.junit.Test +import org.junit.runner.RunWith +import org.robolectric.RobolectricTestRunner +import org.robolectric.RuntimeEnvironment + +@RunWith(RobolectricTestRunner::class) +class NotificationsHandlerTest { + @Test + fun notificationsListReturnsStatusPayloadWhenDisabled() = + runTest { + val provider = + FakeNotificationsStateProvider( + DeviceNotificationSnapshot( + enabled = false, + connected = false, + notifications = emptyList(), + ), + ) + val handler = NotificationsHandler.forTesting(appContext = 
appContext(), stateProvider = provider) + + val result = handler.handleNotificationsList(null) + + assertTrue(result.ok) + assertNull(result.error) + val payload = parsePayload(result) + assertFalse(payload.getValue("enabled").jsonPrimitive.boolean) + assertFalse(payload.getValue("connected").jsonPrimitive.boolean) + assertEquals(0, payload.getValue("count").jsonPrimitive.int) + assertEquals(0, payload.getValue("notifications").jsonArray.size) + assertEquals(0, provider.rebindRequests) + } + + @Test + fun notificationsListRequestsRebindWhenEnabledButDisconnected() = + runTest { + val provider = + FakeNotificationsStateProvider( + DeviceNotificationSnapshot( + enabled = true, + connected = false, + notifications = listOf(sampleEntry("n1")), + ), + ) + val handler = NotificationsHandler.forTesting(appContext = appContext(), stateProvider = provider) + + val result = handler.handleNotificationsList(null) + + assertTrue(result.ok) + assertNull(result.error) + val payload = parsePayload(result) + assertTrue(payload.getValue("enabled").jsonPrimitive.boolean) + assertFalse(payload.getValue("connected").jsonPrimitive.boolean) + assertEquals(1, payload.getValue("count").jsonPrimitive.int) + assertEquals(1, payload.getValue("notifications").jsonArray.size) + assertEquals(1, provider.rebindRequests) + } + + @Test + fun notificationsListDoesNotRequestRebindWhenConnected() = + runTest { + val provider = + FakeNotificationsStateProvider( + DeviceNotificationSnapshot( + enabled = true, + connected = true, + notifications = listOf(sampleEntry("n2")), + ), + ) + val handler = NotificationsHandler.forTesting(appContext = appContext(), stateProvider = provider) + + val result = handler.handleNotificationsList(null) + + assertTrue(result.ok) + assertNull(result.error) + val payload = parsePayload(result) + assertTrue(payload.getValue("enabled").jsonPrimitive.boolean) + assertTrue(payload.getValue("connected").jsonPrimitive.boolean) + assertEquals(1, 
payload.getValue("count").jsonPrimitive.int) + assertEquals(0, provider.rebindRequests) + } + + @Test + fun sanitizeNotificationTextReturnsNullForBlankInput() { + assertNull(sanitizeNotificationText(null)) + assertNull(sanitizeNotificationText(" ")) + } + + @Test + fun sanitizeNotificationTextTrimsAndTruncates() { + val value = " ${"x".repeat(600)} " + val sanitized = sanitizeNotificationText(value) + + assertEquals(512, sanitized?.length) + assertTrue((sanitized ?: "").all { it == 'x' }) + } + + private fun parsePayload(result: GatewaySession.InvokeResult): JsonObject { + val payloadJson = result.payloadJson ?: error("expected payload") + return Json.parseToJsonElement(payloadJson).jsonObject + } + + private fun appContext(): Context = RuntimeEnvironment.getApplication() + + private fun sampleEntry(key: String): DeviceNotificationEntry = + DeviceNotificationEntry( + key = key, + packageName = "com.example.app", + title = "Title", + text = "Text", + subText = null, + category = null, + channelId = null, + postTimeMs = 123L, + isOngoing = false, + isClearable = true, + ) +} + +private class FakeNotificationsStateProvider( + private val snapshot: DeviceNotificationSnapshot, +) : NotificationsStateProvider { + var rebindRequests: Int = 0 + private set + + override fun readSnapshot(context: Context): DeviceNotificationSnapshot = snapshot + + override fun requestServiceRebind(context: Context) { + rebindRequests += 1 + } +} diff --git a/apps/android/app/src/test/java/ai/openclaw/android/protocol/OpenClawProtocolConstantsTest.kt b/apps/android/app/src/test/java/ai/openclaw/android/protocol/OpenClawProtocolConstantsTest.kt index 10ab733ae53..41a9a7514e8 100644 --- a/apps/android/app/src/test/java/ai/openclaw/android/protocol/OpenClawProtocolConstantsTest.kt +++ b/apps/android/app/src/test/java/ai/openclaw/android/protocol/OpenClawProtocolConstantsTest.kt @@ -26,10 +26,24 @@ class OpenClawProtocolConstantsTest { assertEquals("camera", OpenClawCapability.Camera.rawValue) 
assertEquals("screen", OpenClawCapability.Screen.rawValue) assertEquals("voiceWake", OpenClawCapability.VoiceWake.rawValue) + assertEquals("location", OpenClawCapability.Location.rawValue) + assertEquals("sms", OpenClawCapability.Sms.rawValue) + assertEquals("device", OpenClawCapability.Device.rawValue) } @Test fun screenCommandsUseStableStrings() { assertEquals("screen.record", OpenClawScreenCommand.Record.rawValue) } + + @Test + fun notificationsCommandsUseStableStrings() { + assertEquals("notifications.list", OpenClawNotificationsCommand.List.rawValue) + } + + @Test + fun deviceCommandsUseStableStrings() { + assertEquals("device.status", OpenClawDeviceCommand.Status.rawValue) + assertEquals("device.info", OpenClawDeviceCommand.Info.rawValue) + } } diff --git a/apps/android/benchmark/build.gradle.kts b/apps/android/benchmark/build.gradle.kts new file mode 100644 index 00000000000..99d1d8e4c60 --- /dev/null +++ b/apps/android/benchmark/build.gradle.kts @@ -0,0 +1,36 @@ +plugins { + id("com.android.test") +} + +android { + namespace = "ai.openclaw.android.benchmark" + compileSdk = 36 + + defaultConfig { + minSdk = 31 + targetSdk = 36 + testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner" + testInstrumentationRunnerArguments["androidx.benchmark.suppressErrors"] = "DEBUGGABLE,EMULATOR" + } + + targetProjectPath = ":app" + experimentalProperties["android.experimental.self-instrumenting"] = true + + compileOptions { + sourceCompatibility = JavaVersion.VERSION_17 + targetCompatibility = JavaVersion.VERSION_17 + } +} + +kotlin { + compilerOptions { + jvmTarget.set(org.jetbrains.kotlin.gradle.dsl.JvmTarget.JVM_17) + allWarningsAsErrors.set(true) + } +} + +dependencies { + implementation("androidx.benchmark:benchmark-macro-junit4:1.4.1") + implementation("androidx.test.ext:junit:1.2.1") + implementation("androidx.test.uiautomator:uiautomator:2.4.0-alpha06") +} diff --git 
a/apps/android/benchmark/src/main/java/ai/openclaw/android/benchmark/StartupMacrobenchmark.kt b/apps/android/benchmark/src/main/java/ai/openclaw/android/benchmark/StartupMacrobenchmark.kt new file mode 100644 index 00000000000..46181f6a9a1 --- /dev/null +++ b/apps/android/benchmark/src/main/java/ai/openclaw/android/benchmark/StartupMacrobenchmark.kt @@ -0,0 +1,76 @@ +package ai.openclaw.android.benchmark + +import androidx.benchmark.macro.CompilationMode +import androidx.benchmark.macro.FrameTimingMetric +import androidx.benchmark.macro.StartupMode +import androidx.benchmark.macro.StartupTimingMetric +import androidx.benchmark.macro.junit4.MacrobenchmarkRule +import androidx.test.ext.junit.runners.AndroidJUnit4 +import androidx.test.platform.app.InstrumentationRegistry +import androidx.test.uiautomator.UiDevice +import org.junit.Assume.assumeTrue +import org.junit.Rule +import org.junit.Test +import org.junit.runner.RunWith + +@RunWith(AndroidJUnit4::class) +class StartupMacrobenchmark { + @get:Rule + val benchmarkRule = MacrobenchmarkRule() + + private val packageName = "ai.openclaw.android" + + @Test + fun coldStartup() { + runBenchmarkOrSkip { + benchmarkRule.measureRepeated( + packageName = packageName, + metrics = listOf(StartupTimingMetric()), + startupMode = StartupMode.COLD, + compilationMode = CompilationMode.None(), + iterations = 10, + ) { + pressHome() + startActivityAndWait() + } + } + } + + @Test + fun startupAndScrollFrameTiming() { + runBenchmarkOrSkip { + benchmarkRule.measureRepeated( + packageName = packageName, + metrics = listOf(FrameTimingMetric()), + startupMode = StartupMode.WARM, + compilationMode = CompilationMode.None(), + iterations = 10, + ) { + startActivityAndWait() + val device = UiDevice.getInstance(InstrumentationRegistry.getInstrumentation()) + val x = device.displayWidth / 2 + val yStart = (device.displayHeight * 0.8f).toInt() + val yEnd = (device.displayHeight * 0.25f).toInt() + repeat(4) { + device.swipe(x, yStart, x, yEnd, 24) 
+ device.waitForIdle() + } + } + } + } + + private fun runBenchmarkOrSkip(run: () -> Unit) { + try { + run() + } catch (err: IllegalStateException) { + val message = err.message.orEmpty() + val knownDeviceIssue = + message.contains("Unable to confirm activity launch completion") || + message.contains("no renderthread slices", ignoreCase = true) + if (knownDeviceIssue) { + assumeTrue("Skipping benchmark on this device: $message", false) + } + throw err + } + } +} diff --git a/apps/android/build.gradle.kts b/apps/android/build.gradle.kts index bea7b46b2c2..1d191c9e375 100644 --- a/apps/android/build.gradle.kts +++ b/apps/android/build.gradle.kts @@ -1,5 +1,6 @@ plugins { id("com.android.application") version "9.0.1" apply false + id("com.android.test") version "9.0.1" apply false id("org.jetbrains.kotlin.plugin.compose") version "2.2.21" apply false id("org.jetbrains.kotlin.plugin.serialization") version "2.2.21" apply false } diff --git a/apps/android/scripts/perf-startup-benchmark.sh b/apps/android/scripts/perf-startup-benchmark.sh new file mode 100755 index 00000000000..70342d3cba4 --- /dev/null +++ b/apps/android/scripts/perf-startup-benchmark.sh @@ -0,0 +1,124 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +ANDROID_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)" +RESULTS_DIR="$ANDROID_DIR/benchmark/results" +CLASS_FILTER="ai.openclaw.android.benchmark.StartupMacrobenchmark#coldStartup" +BASELINE_JSON="" + +usage() { + cat <<'EOF' +Usage: + ./scripts/perf-startup-benchmark.sh [--baseline ] + +Runs cold-start macrobenchmark only, then prints a compact summary. +Also saves a timestamped snapshot JSON under benchmark/results/. +If --baseline is omitted, compares against latest previous snapshot when available. 
+EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --baseline) + BASELINE_JSON="${2:-}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown arg: $1" >&2 + usage >&2 + exit 2 + ;; + esac +done + +if ! command -v jq >/dev/null 2>&1; then + echo "jq required but missing." >&2 + exit 1 +fi + +if ! command -v adb >/dev/null 2>&1; then + echo "adb required but missing." >&2 + exit 1 +fi + +device_count="$(adb devices | awk 'NR>1 && $2=="device" {c+=1} END {print c+0}')" +if [[ "$device_count" -lt 1 ]]; then + echo "No connected Android device (adb state=device)." >&2 + exit 1 +fi + +mkdir -p "$RESULTS_DIR" + +run_log="$(mktemp -t openclaw-android-bench.XXXXXX.log)" +trap 'rm -f "$run_log"' EXIT + +cd "$ANDROID_DIR" + +./gradlew :benchmark:connectedDebugAndroidTest \ + -Pandroid.testInstrumentationRunnerArguments.class="$CLASS_FILTER" \ + --console=plain \ + >"$run_log" 2>&1 + +latest_json="$( + find "$ANDROID_DIR/benchmark/build/outputs/connected_android_test_additional_output/debug/connected" \ + -name '*benchmarkData.json' -type f \ + | while IFS= read -r file; do + printf '%s\t%s\n' "$(stat -f '%m' "$file")" "$file" + done \ + | sort -nr \ + | head -n1 \ + | cut -f2- +)" + +if [[ -z "$latest_json" || ! -f "$latest_json" ]]; then + echo "benchmarkData.json not found after run." 
>&2 + tail -n 120 "$run_log" >&2 + exit 1 +fi + +timestamp="$(date +%Y%m%d-%H%M%S)" +snapshot_json="$RESULTS_DIR/startup-$timestamp.json" +cp "$latest_json" "$snapshot_json" + +median_ms="$(jq -r '.benchmarks[] | select(.name=="coldStartup") | .metrics.timeToInitialDisplayMs.median' "$snapshot_json")" +min_ms="$(jq -r '.benchmarks[] | select(.name=="coldStartup") | .metrics.timeToInitialDisplayMs.minimum' "$snapshot_json")" +max_ms="$(jq -r '.benchmarks[] | select(.name=="coldStartup") | .metrics.timeToInitialDisplayMs.maximum' "$snapshot_json")" +cov="$(jq -r '.benchmarks[] | select(.name=="coldStartup") | .metrics.timeToInitialDisplayMs.coefficientOfVariation' "$snapshot_json")" +device="$(jq -r '.context.build.model' "$snapshot_json")" +sdk="$(jq -r '.context.build.version.sdk' "$snapshot_json")" +runs_count="$(jq -r '.benchmarks[] | select(.name=="coldStartup") | .metrics.timeToInitialDisplayMs.runs | length' "$snapshot_json")" + +printf 'startup.cold.median_ms=%.3f min_ms=%.3f max_ms=%.3f cov=%.4f runs=%s device=%s sdk=%s\n' \ + "$median_ms" "$min_ms" "$max_ms" "$cov" "$runs_count" "$device" "$sdk" +echo "snapshot_json=$snapshot_json" + +if [[ -z "$BASELINE_JSON" ]]; then + BASELINE_JSON="$( + find "$RESULTS_DIR" -name 'startup-*.json' -type f \ + | while IFS= read -r file; do + if [[ "$file" == "$snapshot_json" ]]; then + continue + fi + printf '%s\t%s\n' "$(stat -f '%m' "$file")" "$file" + done \ + | sort -nr \ + | head -n1 \ + | cut -f2- + )" +fi + +if [[ -n "$BASELINE_JSON" ]]; then + if [[ ! 
-f "$BASELINE_JSON" ]]; then + echo "Baseline file missing: $BASELINE_JSON" >&2 + exit 1 + fi + base_median="$(jq -r '.benchmarks[] | select(.name=="coldStartup") | .metrics.timeToInitialDisplayMs.median' "$BASELINE_JSON")" + delta_ms="$(awk -v a="$median_ms" -v b="$base_median" 'BEGIN { printf "%.3f", (a-b) }')" + delta_pct="$(awk -v a="$median_ms" -v b="$base_median" 'BEGIN { if (b==0) { print "nan" } else { printf "%.2f", ((a-b)/b)*100 } }')" + echo "baseline_median_ms=$base_median delta_ms=$delta_ms delta_pct=$delta_pct%" +fi diff --git a/apps/android/scripts/perf-startup-hotspots.sh b/apps/android/scripts/perf-startup-hotspots.sh new file mode 100755 index 00000000000..787d5fac300 --- /dev/null +++ b/apps/android/scripts/perf-startup-hotspots.sh @@ -0,0 +1,154 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +ANDROID_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)" + +PACKAGE="ai.openclaw.android" +ACTIVITY=".MainActivity" +DURATION_SECONDS="10" +OUTPUT_PERF_DATA="" + +usage() { + cat <<'EOF' +Usage: + ./scripts/perf-startup-hotspots.sh [--package ] [--activity ] [--duration ] [--out ] + +Captures startup CPU profile via simpleperf (app_profiler.py), then prints concise hotspot summaries. +Default package/activity target OpenClaw Android startup. +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --package) + PACKAGE="${2:-}" + shift 2 + ;; + --activity) + ACTIVITY="${2:-}" + shift 2 + ;; + --duration) + DURATION_SECONDS="${2:-}" + shift 2 + ;; + --out) + OUTPUT_PERF_DATA="${2:-}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown arg: $1" >&2 + usage >&2 + exit 2 + ;; + esac +done + +if ! command -v uv >/dev/null 2>&1; then + echo "uv required but missing." >&2 + exit 1 +fi + +if ! command -v adb >/dev/null 2>&1; then + echo "adb required but missing." 
>&2 + exit 1 +fi + +if [[ -z "$OUTPUT_PERF_DATA" ]]; then + OUTPUT_PERF_DATA="/tmp/openclaw-startup-$(date +%Y%m%d-%H%M%S).perf.data" +fi + +device_count="$(adb devices | awk 'NR>1 && $2=="device" {c+=1} END {print c+0}')" +if [[ "$device_count" -lt 1 ]]; then + echo "No connected Android device (adb state=device)." >&2 + exit 1 +fi + +simpleperf_dir="" +if [[ -n "${ANDROID_NDK_HOME:-}" && -f "${ANDROID_NDK_HOME}/simpleperf/app_profiler.py" ]]; then + simpleperf_dir="${ANDROID_NDK_HOME}/simpleperf" +elif [[ -n "${ANDROID_NDK_ROOT:-}" && -f "${ANDROID_NDK_ROOT}/simpleperf/app_profiler.py" ]]; then + simpleperf_dir="${ANDROID_NDK_ROOT}/simpleperf" +else + latest_simpleperf="$(ls -d "${HOME}/Library/Android/sdk/ndk/"*/simpleperf 2>/dev/null | sort -V | tail -n1 || true)" + if [[ -n "$latest_simpleperf" && -f "$latest_simpleperf/app_profiler.py" ]]; then + simpleperf_dir="$latest_simpleperf" + fi +fi + +if [[ -z "$simpleperf_dir" ]]; then + echo "simpleperf not found. Set ANDROID_NDK_HOME or install NDK under ~/Library/Android/sdk/ndk/." >&2 + exit 1 +fi + +app_profiler="$simpleperf_dir/app_profiler.py" +report_py="$simpleperf_dir/report.py" +ndk_path="$(cd -- "$simpleperf_dir/.." && pwd)" + +tmp_dir="$(mktemp -d -t openclaw-android-hotspots.XXXXXX)" +trap 'rm -rf "$tmp_dir"' EXIT + +capture_log="$tmp_dir/capture.log" +dso_csv="$tmp_dir/dso.csv" +symbols_csv="$tmp_dir/symbols.csv" +children_txt="$tmp_dir/children.txt" + +cd "$ANDROID_DIR" +./gradlew :app:installDebug --console=plain >"$tmp_dir/install.log" 2>&1 + +if ! uv run --no-project python3 "$app_profiler" \ + -p "$PACKAGE" \ + -a "$ACTIVITY" \ + -o "$OUTPUT_PERF_DATA" \ + --ndk_path "$ndk_path" \ + -r "-e task-clock:u -f 1000 -g --duration $DURATION_SECONDS" \ + >"$capture_log" 2>&1; then + echo "simpleperf capture failed. 
tail(capture_log):" >&2 + tail -n 120 "$capture_log" >&2 + exit 1 +fi + +uv run --no-project python3 "$report_py" \ + -i "$OUTPUT_PERF_DATA" \ + --sort dso \ + --csv \ + --csv-separator "|" \ + --include-process-name "$PACKAGE" \ + >"$dso_csv" 2>"$tmp_dir/report-dso.err" + +uv run --no-project python3 "$report_py" \ + -i "$OUTPUT_PERF_DATA" \ + --sort dso,symbol \ + --csv \ + --csv-separator "|" \ + --include-process-name "$PACKAGE" \ + >"$symbols_csv" 2>"$tmp_dir/report-symbols.err" + +uv run --no-project python3 "$report_py" \ + -i "$OUTPUT_PERF_DATA" \ + --children \ + --sort dso,symbol \ + -n \ + --percent-limit 0.2 \ + --include-process-name "$PACKAGE" \ + >"$children_txt" 2>"$tmp_dir/report-children.err" + +clean_csv() { + awk 'BEGIN{print_on=0} /^Overhead\|/{print_on=1} print_on==1{print}' "$1" +} + +echo "perf_data=$OUTPUT_PERF_DATA" +echo +echo "top_dso_self:" +clean_csv "$dso_csv" | tail -n +2 | awk -F'|' 'NR<=10 {printf " %s %s\n", $1, $2}' +echo +echo "top_symbols_self:" +clean_csv "$symbols_csv" | tail -n +2 | awk -F'|' 'NR<=20 {printf " %s %s :: %s\n", $1, $2, $3}' +echo +echo "app_path_clues_children:" +rg 'androidx\.compose|MainActivity|NodeRuntime|NodeForegroundService|SecurePrefs|WebView|libwebviewchromium' "$children_txt" | awk 'NR<=20 {print}' || true diff --git a/apps/android/settings.gradle.kts b/apps/android/settings.gradle.kts index b3b43a44550..25e5d09bbe1 100644 --- a/apps/android/settings.gradle.kts +++ b/apps/android/settings.gradle.kts @@ -16,3 +16,4 @@ dependencyResolutionManagement { rootProject.name = "OpenClawNodeAndroid" include(":app") +include(":benchmark") diff --git a/apps/ios/ShareExtension/Info.plist b/apps/ios/ShareExtension/Info.plist index aedea62a5e1..6fcad4635b0 100644 --- a/apps/ios/ShareExtension/Info.plist +++ b/apps/ios/ShareExtension/Info.plist @@ -17,9 +17,9 @@ CFBundlePackageType XPC! 
CFBundleShortVersionString - 2026.2.23 + 2026.2.26 CFBundleVersion - 20260223 + 20260226 NSExtension NSExtensionAttributes diff --git a/apps/ios/Sources/Chat/IOSGatewayChatTransport.swift b/apps/ios/Sources/Chat/IOSGatewayChatTransport.swift index 9571839059d..67f01138803 100644 --- a/apps/ios/Sources/Chat/IOSGatewayChatTransport.swift +++ b/apps/ios/Sources/Chat/IOSGatewayChatTransport.swift @@ -54,7 +54,12 @@ struct IOSGatewayChatTransport: OpenClawChatTransport, Sendable { idempotencyKey: String, attachments: [OpenClawChatAttachmentPayload]) async throws -> OpenClawChatSendResponse { - Self.logger.info("chat.send start sessionKey=\(sessionKey, privacy: .public) len=\(message.count, privacy: .public) attachments=\(attachments.count, privacy: .public)") + let startLogMessage = + "chat.send start sessionKey=\(sessionKey) " + + "len=\(message.count) attachments=\(attachments.count)" + Self.logger.info( + "\(startLogMessage, privacy: .public)" + ) struct Params: Codable { var sessionKey: String var message: String diff --git a/apps/ios/Sources/Gateway/GatewayConnectionController.swift b/apps/ios/Sources/Gateway/GatewayConnectionController.swift index a770fcb2c6f..53e32684988 100644 --- a/apps/ios/Sources/Gateway/GatewayConnectionController.swift +++ b/apps/ios/Sources/Gateway/GatewayConnectionController.swift @@ -212,7 +212,7 @@ final class GatewayConnectionController { await self.connectManual(host: host, port: port, useTLS: useTLS) case let .discovered(stableID, _): guard let gateway = self.gateways.first(where: { $0.stableID == stableID }) else { return } - await self.connectDiscoveredGateway(gateway) + _ = await self.connectDiscoveredGateway(gateway) } } @@ -399,7 +399,7 @@ final class GatewayConnectionController { self.didAutoConnect = true Task { [weak self] in guard let self else { return } - await self.connectDiscoveredGateway(target) + _ = await self.connectDiscoveredGateway(target) } return } @@ -411,7 +411,7 @@ final class GatewayConnectionController { 
self.didAutoConnect = true Task { [weak self] in guard let self else { return } - await self.connectDiscoveredGateway(gateway) + _ = await self.connectDiscoveredGateway(gateway) } return } @@ -632,7 +632,8 @@ final class GatewayConnectionController { 0, NI_NUMERICHOST) guard rc == 0 else { return nil } - return String(cString: buffer) + let bytes = buffer.prefix { $0 != 0 }.map { UInt8(bitPattern: $0) } + return String(bytes: bytes, encoding: .utf8) } if let host, !host.isEmpty { @@ -889,11 +890,9 @@ final class GatewayConnectionController { permissions["contacts"] = contactsStatus == .authorized || contactsStatus == .limited let calendarStatus = EKEventStore.authorizationStatus(for: .event) - permissions["calendar"] = - calendarStatus == .authorized || calendarStatus == .fullAccess || calendarStatus == .writeOnly + permissions["calendar"] = Self.hasEventKitAccess(calendarStatus) let remindersStatus = EKEventStore.authorizationStatus(for: .reminder) - permissions["reminders"] = - remindersStatus == .authorized || remindersStatus == .fullAccess || remindersStatus == .writeOnly + permissions["reminders"] = Self.hasEventKitAccess(remindersStatus) let motionStatus = CMMotionActivityManager.authorizationStatus() let pedometerStatus = CMPedometer.authorizationStatus() @@ -911,13 +910,17 @@ final class GatewayConnectionController { private static func isLocationAuthorized(status: CLAuthorizationStatus) -> Bool { switch status { - case .authorizedAlways, .authorizedWhenInUse, .authorized: + case .authorizedAlways, .authorizedWhenInUse: return true default: return false } } + private static func hasEventKitAccess(_ status: EKAuthorizationStatus) -> Bool { + status == .fullAccess || status == .writeOnly + } + private static func motionAvailable() -> Bool { CMMotionActivityManager.isActivityAvailable() || CMPedometer.isStepCountingAvailable() } @@ -986,7 +989,7 @@ extension GatewayConnectionController { } #endif -private final class GatewayTLSFingerprintProbe: NSObject, 
URLSessionDelegate { +private final class GatewayTLSFingerprintProbe: NSObject, URLSessionDelegate, @unchecked Sendable { private let url: URL private let timeoutSeconds: Double private let onComplete: (String?) -> Void diff --git a/apps/ios/Sources/Info.plist b/apps/ios/Sources/Info.plist index bcb8c251a02..12d340594f3 100644 --- a/apps/ios/Sources/Info.plist +++ b/apps/ios/Sources/Info.plist @@ -19,7 +19,7 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.2.25 + 2026.2.26 CFBundleURLTypes @@ -32,7 +32,7 @@ CFBundleVersion - 20260225 + 20260226 NSAppTransportSecurity NSAllowsArbitraryLoadsInWebContent diff --git a/apps/ios/Sources/Model/NodeAppModel.swift b/apps/ios/Sources/Model/NodeAppModel.swift index d763a3b908f..ca9c3f9d0c3 100644 --- a/apps/ios/Sources/Model/NodeAppModel.swift +++ b/apps/ios/Sources/Model/NodeAppModel.swift @@ -46,6 +46,7 @@ private enum IOSDeepLinkAgentPolicy { @MainActor @Observable +// swiftlint:disable type_body_length file_length final class NodeAppModel { struct AgentDeepLinkPrompt: Identifiable, Equatable { let id: String @@ -414,8 +415,10 @@ final class NodeAppModel { } let wasSuppressed = self.backgroundReconnectSuppressed self.backgroundReconnectSuppressed = false - self.pushWakeLogger.info( - "Background reconnect lease reason=\(reason, privacy: .public) seconds=\(leaseSeconds, privacy: .public) wasSuppressed=\(wasSuppressed, privacy: .public)") + let leaseLogMessage = + "Background reconnect lease reason=\(reason) " + + "seconds=\(leaseSeconds) wasSuppressed=\(wasSuppressed)" + self.pushWakeLogger.info("\(leaseLogMessage, privacy: .public)") } private func suppressBackgroundReconnect(reason: String, disconnectIfNeeded: Bool) { @@ -425,8 +428,10 @@ final class NodeAppModel { self.backgroundReconnectLeaseUntil = nil self.backgroundReconnectSuppressed = true guard changed else { return } - self.pushWakeLogger.info( - "Background reconnect suppressed reason=\(reason, privacy: .public) disconnect=\(disconnectIfNeeded, 
privacy: .public)") + let suppressLogMessage = + "Background reconnect suppressed reason=\(reason) " + + "disconnect=\(disconnectIfNeeded)" + self.pushWakeLogger.info("\(suppressLogMessage, privacy: .public)") guard disconnectIfNeeded else { return } Task { [weak self] in guard let self else { return } @@ -607,7 +612,7 @@ final class NodeAppModel { self.voiceWakeSyncTask = Task { [weak self] in guard let self else { return } - if !(await self.isGatewayHealthMonitorDisabled()) { + if !self.isGatewayHealthMonitorDisabled() { await self.refreshWakeWordsFromGateway() } @@ -662,9 +667,13 @@ final class NodeAppModel { self.gatewayHealthMonitor.start( check: { [weak self] in guard let self else { return false } - if await self.isGatewayHealthMonitorDisabled() { return true } + if await MainActor.run(body: { self.isGatewayHealthMonitorDisabled() }) { return true } do { - let data = try await self.operatorGateway.request(method: "health", paramsJSON: nil, timeoutSeconds: 6) + let data = try await self.operatorGateway.request( + method: "health", + paramsJSON: nil, + timeoutSeconds: 6 + ) guard let decoded = try? JSONDecoder().decode(OpenClawGatewayHealthOK.self, from: data) else { return false } @@ -1765,7 +1774,10 @@ private extension NodeAppModel { try? await Task.sleep(nanoseconds: 1_000_000_000) continue } - if self.shouldPauseReconnectLoopInBackground(source: "operator_loop") { try? await Task.sleep(nanoseconds: 2_000_000_000); continue } + if self.shouldPauseReconnectLoopInBackground(source: "operator_loop") { + try? await Task.sleep(nanoseconds: 2_000_000_000) + continue + } if await self.isOperatorConnected() { try? await Task.sleep(nanoseconds: 1_000_000_000) continue @@ -1830,6 +1842,8 @@ private extension NodeAppModel { } } + // Legacy reconnect state machine; follow-up refactor needed to split into helpers. 
+ // swiftlint:disable:next function_body_length func startNodeGatewayLoop( url: URL, stableID: String, @@ -1854,7 +1868,10 @@ private extension NodeAppModel { try? await Task.sleep(nanoseconds: 1_000_000_000) continue } - if self.shouldPauseReconnectLoopInBackground(source: "node_loop") { try? await Task.sleep(nanoseconds: 2_000_000_000); continue } + if self.shouldPauseReconnectLoopInBackground(source: "node_loop") { + try? await Task.sleep(nanoseconds: 2_000_000_000) + continue + } if await self.isGatewayConnected() { try? await Task.sleep(nanoseconds: 1_000_000_000) continue @@ -1898,7 +1915,10 @@ private extension NodeAppModel { sessionKey: relayData.sessionKey, deliveryChannel: relayData.deliveryChannel, deliveryTo: relayData.deliveryTo)) - GatewayDiagnostics.log("gateway connected host=\(url.host ?? "?") scheme=\(url.scheme ?? "?")") + GatewayDiagnostics.log( + "gateway connected host=\(url.host ?? "?") " + + "scheme=\(url.scheme ?? "?")" + ) if let addr = await self.nodeGateway.currentRemoteAddress() { await MainActor.run { self.gatewayRemoteAddress = addr } } @@ -1993,9 +2013,11 @@ private extension NodeAppModel { self.gatewayPairingRequestId = requestId if let requestId, !requestId.isEmpty { self.gatewayStatusText = - "Pairing required (requestId: \(requestId)). Approve on gateway and return to OpenClaw." + "Pairing required (requestId: \(requestId)). " + + "Approve on gateway and return to OpenClaw." } else { - self.gatewayStatusText = "Pairing required. Approve on gateway and return to OpenClaw." + self.gatewayStatusText = + "Pairing required. Approve on gateway and return to OpenClaw." 
} } // Hard stop the underlying WebSocket watchdog reconnects so the UI stays stable and @@ -2213,12 +2235,16 @@ extension NodeAppModel { key: event.replyId) do { try await self.sendAgentRequest(link: link) - self.watchReplyLogger.info( - "watch reply forwarded replyId=\(event.replyId, privacy: .public) action=\(event.actionId, privacy: .public)") + let forwardedMessage = + "watch reply forwarded replyId=\(event.replyId) " + + "action=\(event.actionId)" + self.watchReplyLogger.info("\(forwardedMessage, privacy: .public)") self.openChatRequestID &+= 1 } catch { - self.watchReplyLogger.error( - "watch reply forwarding failed replyId=\(event.replyId, privacy: .public) error=\(error.localizedDescription, privacy: .public)") + let failedMessage = + "watch reply forwarding failed replyId=\(event.replyId) " + + "error=\(error.localizedDescription)" + self.watchReplyLogger.error("\(failedMessage, privacy: .public)") self.queuedWatchReplies.insert(event, at: 0) } } @@ -2252,21 +2278,37 @@ extension NodeAppModel { return false } let pushKind = Self.openclawPushKind(userInfo) - self.pushWakeLogger.info( - "Silent push received wakeId=\(wakeId, privacy: .public) kind=\(pushKind, privacy: .public) backgrounded=\(self.isBackgrounded, privacy: .public) autoReconnect=\(self.gatewayAutoReconnectEnabled, privacy: .public)") + let receivedMessage = + "Silent push received wakeId=\(wakeId) " + + "kind=\(pushKind) " + + "backgrounded=\(self.isBackgrounded) " + + "autoReconnect=\(self.gatewayAutoReconnectEnabled)" + self.pushWakeLogger.info("\(receivedMessage, privacy: .public)") let result = await self.reconnectGatewaySessionsForSilentPushIfNeeded(wakeId: wakeId) - self.pushWakeLogger.info( - "Silent push outcome wakeId=\(wakeId, privacy: .public) applied=\(result.applied, privacy: .public) reason=\(result.reason, privacy: .public) durationMs=\(result.durationMs, privacy: .public)") + let outcomeMessage = + "Silent push outcome wakeId=\(wakeId) " + + "applied=\(result.applied) " + + 
"reason=\(result.reason) " + + "durationMs=\(result.durationMs)" + self.pushWakeLogger.info("\(outcomeMessage, privacy: .public)") return result.applied } func handleBackgroundRefreshWake(trigger: String = "bg_app_refresh") async -> Bool { let wakeId = Self.makePushWakeAttemptID() - self.pushWakeLogger.info( - "Background refresh wake received wakeId=\(wakeId, privacy: .public) trigger=\(trigger, privacy: .public) backgrounded=\(self.isBackgrounded, privacy: .public) autoReconnect=\(self.gatewayAutoReconnectEnabled, privacy: .public)") + let receivedMessage = + "Background refresh wake received wakeId=\(wakeId) " + + "trigger=\(trigger) " + + "backgrounded=\(self.isBackgrounded) " + + "autoReconnect=\(self.gatewayAutoReconnectEnabled)" + self.pushWakeLogger.info("\(receivedMessage, privacy: .public)") let result = await self.reconnectGatewaySessionsForSilentPushIfNeeded(wakeId: wakeId) - self.pushWakeLogger.info( - "Background refresh wake outcome wakeId=\(wakeId, privacy: .public) applied=\(result.applied, privacy: .public) reason=\(result.reason, privacy: .public) durationMs=\(result.durationMs, privacy: .public)") + let outcomeMessage = + "Background refresh wake outcome wakeId=\(wakeId) " + + "applied=\(result.applied) " + + "reason=\(result.reason) " + + "durationMs=\(result.durationMs)" + self.pushWakeLogger.info("\(outcomeMessage, privacy: .public)") return result.applied } @@ -2283,17 +2325,26 @@ extension NodeAppModel { if let last = self.lastSignificantLocationWakeAt, now.timeIntervalSince(last) < throttleWindowSeconds { - self.locationWakeLogger.info( - "Location wake throttled wakeId=\(wakeId, privacy: .public) elapsedSec=\(now.timeIntervalSince(last), privacy: .public)") + let throttledMessage = + "Location wake throttled wakeId=\(wakeId) " + + "elapsedSec=\(now.timeIntervalSince(last))" + self.locationWakeLogger.info("\(throttledMessage, privacy: .public)") return } self.lastSignificantLocationWakeAt = now - self.locationWakeLogger.info( - "Location 
wake begin wakeId=\(wakeId, privacy: .public) backgrounded=\(self.isBackgrounded, privacy: .public) autoReconnect=\(self.gatewayAutoReconnectEnabled, privacy: .public)") + let beginMessage = + "Location wake begin wakeId=\(wakeId) " + + "backgrounded=\(self.isBackgrounded) " + + "autoReconnect=\(self.gatewayAutoReconnectEnabled)" + self.locationWakeLogger.info("\(beginMessage, privacy: .public)") let result = await self.reconnectGatewaySessionsForSilentPushIfNeeded(wakeId: wakeId) - self.locationWakeLogger.info( - "Location wake trigger wakeId=\(wakeId, privacy: .public) applied=\(result.applied, privacy: .public) reason=\(result.reason, privacy: .public) durationMs=\(result.durationMs, privacy: .public)") + let triggerMessage = + "Location wake trigger wakeId=\(wakeId) " + + "applied=\(result.applied) " + + "reason=\(result.reason) " + + "durationMs=\(result.durationMs)" + self.locationWakeLogger.info("\(triggerMessage, privacy: .public)") guard result.applied else { return } let connected = await self.waitForGatewayConnection(timeoutMs: 5000, pollMs: 250) @@ -2451,14 +2502,18 @@ extension NodeAppModel { extension NodeAppModel { private func refreshWakeWordsFromGateway() async { do { - let data = try await self.operatorGateway.request(method: "voicewake.get", paramsJSON: "{}", timeoutSeconds: 8) + let data = try await self.operatorGateway.request( + method: "voicewake.get", + paramsJSON: "{}", + timeoutSeconds: 8 + ) guard let triggers = VoiceWakePreferences.decodeGatewayTriggers(from: data) else { return } VoiceWakePreferences.saveTriggerWords(triggers) } catch { if let gatewayError = error as? 
GatewayResponseError { let lower = gatewayError.message.lowercased() if lower.contains("unauthorized role") || lower.contains("missing scope") { - await self.setGatewayHealthMonitorDisabled(true) + self.setGatewayHealthMonitorDisabled(true) return } } @@ -2513,7 +2568,8 @@ extension NodeAppModel { ) if message.count > IOSDeepLinkAgentPolicy.maxMessageChars { - self.screen.errorText = "Deep link too large (message exceeds \(IOSDeepLinkAgentPolicy.maxMessageChars) characters)." + self.screen.errorText = "Deep link too large (message exceeds " + + "\(IOSDeepLinkAgentPolicy.maxMessageChars) characters)." self.recordShareEvent("Rejected: message too large (\(message.count) chars).") return } @@ -2728,3 +2784,4 @@ extension NodeAppModel { } } #endif +// swiftlint:enable type_body_length file_length diff --git a/apps/ios/Sources/Motion/MotionService.swift b/apps/ios/Sources/Motion/MotionService.swift index f108e0b560b..e126b3bd20d 100644 --- a/apps/ios/Sources/Motion/MotionService.swift +++ b/apps/ios/Sources/Motion/MotionService.swift @@ -20,7 +20,7 @@ final class MotionService: MotionServicing { let limit = max(1, min(params.limit ?? 
200, 1000)) let manager = CMMotionActivityManager() - let mapped = try await withCheckedThrowingContinuation { (cont: CheckedContinuation<[OpenClawMotionActivityEntry], Error>) in + let mapped: [OpenClawMotionActivityEntry] = try await withCheckedThrowingContinuation { cont in manager.queryActivityStarting(from: start, to: end, to: OperationQueue()) { activity, error in if let error { cont.resume(throwing: error) @@ -62,7 +62,7 @@ final class MotionService: MotionServicing { let (start, end) = Self.resolveRange(startISO: params.startISO, endISO: params.endISO) let pedometer = CMPedometer() - let payload = try await withCheckedThrowingContinuation { (cont: CheckedContinuation) in + let payload: OpenClawPedometerPayload = try await withCheckedThrowingContinuation { cont in pedometer.queryPedometerData(from: start, to: end) { data, error in if let error { cont.resume(throwing: error) diff --git a/apps/ios/Sources/Onboarding/OnboardingWizardView.swift b/apps/ios/Sources/Onboarding/OnboardingWizardView.swift index c0e872b2ceb..b0dbdc13639 100644 --- a/apps/ios/Sources/Onboarding/OnboardingWizardView.swift +++ b/apps/ios/Sources/Onboarding/OnboardingWizardView.swift @@ -134,7 +134,10 @@ struct OnboardingWizardView: View { Button("Done") { UIApplication.shared.sendAction( #selector(UIResponder.resignFirstResponder), - to: nil, from: nil, for: nil) + to: nil, + from: nil, + for: nil + ) } } } @@ -716,8 +719,10 @@ struct OnboardingWizardView: View { private func detectQRCode(from data: Data) -> String? { guard let ciImage = CIImage(data: data) else { return nil } let detector = CIDetector( - ofType: CIDetectorTypeQRCode, context: nil, - options: [CIDetectorAccuracy: CIDetectorAccuracyHigh]) + ofType: CIDetectorTypeQRCode, + context: nil, + options: [CIDetectorAccuracy: CIDetectorAccuracyHigh] + ) let features = detector?.features(in: ciImage) ?? [] for feature in features { if let qr = feature as? 
CIQRCodeFeature, let message = qr.messageString { diff --git a/apps/ios/Sources/OpenClawApp.swift b/apps/ios/Sources/OpenClawApp.swift index 0dc0c4cac26..27f7f5e02ca 100644 --- a/apps/ios/Sources/OpenClawApp.swift +++ b/apps/ios/Sources/OpenClawApp.swift @@ -4,7 +4,7 @@ import OpenClawKit import os import UIKit import BackgroundTasks -import UserNotifications +@preconcurrency import UserNotifications private struct PendingWatchPromptAction { var promptId: String? @@ -119,11 +119,19 @@ final class OpenClawAppDelegate: NSObject, UIApplicationDelegate, @preconcurrenc request.earliestBeginDate = Date().addingTimeInterval(max(60, delay)) do { try BGTaskScheduler.shared.submit(request) + let scheduledLogMessage = + "Scheduled background wake refresh reason=\(reason) " + + "delaySeconds=\(max(60, delay))" self.backgroundWakeLogger.info( - "Scheduled background wake refresh reason=\(reason, privacy: .public) delaySeconds=\(max(60, delay), privacy: .public)") + "\(scheduledLogMessage, privacy: .public)" + ) } catch { + let failedLogMessage = + "Failed scheduling background wake refresh reason=\(reason) " + + "error=\(error.localizedDescription)" self.backgroundWakeLogger.error( - "Failed scheduling background wake refresh reason=\(reason, privacy: .public) error=\(error.localizedDescription, privacy: .public)") + "\(failedLogMessage, privacy: .public)" + ) } } @@ -418,7 +426,9 @@ enum WatchPromptNotificationBridge { } } - private static func notificationAuthorizationStatus(center: UNUserNotificationCenter) async -> UNAuthorizationStatus { + private static func notificationAuthorizationStatus( + center: UNUserNotificationCenter + ) async -> UNAuthorizationStatus { await withCheckedContinuation { continuation in center.getNotificationSettings { settings in continuation.resume(returning: settings.authorizationStatus) @@ -440,7 +450,10 @@ enum WatchPromptNotificationBridge { } } - private static func addNotificationRequest(_ request: UNNotificationRequest, center: 
UNUserNotificationCenter) async throws { + private static func addNotificationRequest( + _ request: UNNotificationRequest, + center: UNUserNotificationCenter + ) async throws { try await withCheckedThrowingContinuation { (continuation: CheckedContinuation) in center.add(request) { error in if let error { diff --git a/apps/ios/Sources/Reminders/RemindersService.swift b/apps/ios/Sources/Reminders/RemindersService.swift index 249f439fb17..8c347b2282b 100644 --- a/apps/ios/Sources/Reminders/RemindersService.swift +++ b/apps/ios/Sources/Reminders/RemindersService.swift @@ -17,7 +17,7 @@ final class RemindersService: RemindersServicing { let statusFilter = params.status ?? .incomplete let predicate = store.predicateForReminders(in: nil) - let payload = try await withCheckedThrowingContinuation { (cont: CheckedContinuation<[OpenClawReminderPayload], Error>) in + let payload: [OpenClawReminderPayload] = try await withCheckedThrowingContinuation { cont in store.fetchReminders(matching: predicate) { items in let formatter = ISO8601DateFormatter() let filtered = (items ?? 
[]).filter { reminder in diff --git a/apps/ios/Sources/Services/NodeServiceProtocols.swift b/apps/ios/Sources/Services/NodeServiceProtocols.swift index 27ee7cc2776..1eba72e7d6a 100644 --- a/apps/ios/Sources/Services/NodeServiceProtocols.swift +++ b/apps/ios/Sources/Services/NodeServiceProtocols.swift @@ -3,10 +3,13 @@ import Foundation import OpenClawKit import UIKit +typealias OpenClawCameraSnapResult = (format: String, base64: String, width: Int, height: Int) +typealias OpenClawCameraClipResult = (format: String, base64: String, durationMs: Int, hasAudio: Bool) + protocol CameraServicing: Sendable { func listDevices() async -> [CameraController.CameraDeviceInfo] - func snap(params: OpenClawCameraSnapParams) async throws -> (format: String, base64: String, width: Int, height: Int) - func clip(params: OpenClawCameraClipParams) async throws -> (format: String, base64: String, durationMs: Int, hasAudio: Bool) + func snap(params: OpenClawCameraSnapParams) async throws -> OpenClawCameraSnapResult + func clip(params: OpenClawCameraClipParams) async throws -> OpenClawCameraClipResult } protocol ScreenRecordingServicing: Sendable { diff --git a/apps/ios/Sources/Services/WatchMessagingService.swift b/apps/ios/Sources/Services/WatchMessagingService.swift index 3511a06c2db..e173a63c8e2 100644 --- a/apps/ios/Sources/Services/WatchMessagingService.swift +++ b/apps/ios/Sources/Services/WatchMessagingService.swift @@ -148,11 +148,15 @@ final class WatchMessagingService: NSObject, WatchMessagingServicing, @unchecked private func sendReachableMessage(_ payload: [String: Any], with session: WCSession) async throws { try await withCheckedThrowingContinuation { continuation in - session.sendMessage(payload, replyHandler: { _ in - continuation.resume() - }, errorHandler: { error in - continuation.resume(throwing: error) - }) + session.sendMessage( + payload, + replyHandler: { _ in + continuation.resume() + }, + errorHandler: { error in + continuation.resume(throwing: error) + } + ) } 
} diff --git a/apps/ios/Sources/Settings/SettingsTab.swift b/apps/ios/Sources/Settings/SettingsTab.swift index 3ff2ed465c3..7186c7205b5 100644 --- a/apps/ios/Sources/Settings/SettingsTab.swift +++ b/apps/ios/Sources/Settings/SettingsTab.swift @@ -5,6 +5,7 @@ import os import SwiftUI import UIKit +// swiftlint:disable type_body_length struct SettingsTab: View { private struct FeatureHelp: Identifiable { let id = UUID() @@ -22,7 +23,6 @@ struct SettingsTab: View { @AppStorage("talk.enabled") private var talkEnabled: Bool = false @AppStorage("talk.button.enabled") private var talkButtonEnabled: Bool = true @AppStorage("talk.background.enabled") private var talkBackgroundEnabled: Bool = false - @AppStorage("talk.voiceDirectiveHint.enabled") private var talkVoiceDirectiveHintEnabled: Bool = true @AppStorage("camera.enabled") private var cameraEnabled: Bool = true @AppStorage("location.enabledMode") private var locationEnabledModeRaw: String = OpenClawLocationMode.off.rawValue @AppStorage("screen.preventSleep") private var preventSleep: Bool = true @@ -229,7 +229,10 @@ struct SettingsTab: View { .foregroundStyle(.secondary) .frame(maxWidth: .infinity, alignment: .leading) .padding(10) - .background(.thinMaterial, in: RoundedRectangle(cornerRadius: 10, style: .continuous)) + .background( + .thinMaterial, + in: RoundedRectangle(cornerRadius: 10, style: .continuous) + ) } } } label: { @@ -276,7 +279,9 @@ struct SettingsTab: View { self.featureToggle( "Allow Camera", isOn: self.$cameraEnabled, - help: "Allows the gateway to request photos or short video clips while OpenClaw is foregrounded.") + help: "Allows the gateway to request photos or short video clips " + + "while OpenClaw is foregrounded." + ) HStack(spacing: 8) { Text("Location Access") @@ -284,7 +289,11 @@ struct SettingsTab: View { Button { self.activeFeatureHelp = FeatureHelp( title: "Location Access", - message: "Controls location permissions for OpenClaw. 
Off disables location tools, While Using enables foreground location, and Always enables background location.") + message: "Controls location permissions for OpenClaw. " + + "Off disables location tools, While Using enables " + + "foreground location, and Always enables " + + "background location." + ) } label: { Image(systemName: "info.circle") .foregroundStyle(.secondary) @@ -314,7 +323,11 @@ struct SettingsTab: View { LabeledContent( "API Key", value: self.appModel.talkMode.gatewayTalkConfigLoaded - ? (self.appModel.talkMode.gatewayTalkApiKeyConfigured ? "Configured" : "Not configured") + ? ( + self.appModel.talkMode.gatewayTalkApiKeyConfigured + ? "Configured" + : "Not configured" + ) : "Not loaded") LabeledContent( "Default Model", @@ -326,10 +339,6 @@ struct SettingsTab: View { .font(.footnote) .foregroundStyle(.secondary) } - self.featureToggle( - "Voice Directive Hint", - isOn: self.$talkVoiceDirectiveHintEnabled, - help: "Adds voice-switching instructions to Talk prompts. Disable to reduce prompt size.") self.featureToggle( "Show Talk Button", isOn: self.$talkButtonEnabled, @@ -345,7 +354,9 @@ struct SettingsTab: View { Button { self.activeFeatureHelp = FeatureHelp( title: "Default Share Instruction", - message: "Appends this instruction when sharing content into OpenClaw from iOS.") + message: "Appends this instruction when sharing content " + + "into OpenClaw from iOS." + ) } label: { Image(systemName: "info.circle") .foregroundStyle(.secondary) @@ -398,7 +409,9 @@ struct SettingsTab: View { Button("Cancel", role: .cancel) {} } message: { Text( - "This will disconnect, clear saved gateway connection + credentials, and reopen the onboarding wizard.") + "This will disconnect, clear saved gateway connection + credentials, " + + "and reopen the onboarding wizard." 
+ ) } .alert(item: self.$activeFeatureHelp) { help in Alert( @@ -706,7 +719,9 @@ struct SettingsTab: View { let hasToken = !self.gatewayToken.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty let hasPassword = !self.gatewayPassword.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty GatewayDiagnostics.log( - "setup code applied host=\(host) port=\(resolvedPort ?? -1) tls=\(self.manualGatewayTLS) token=\(hasToken) password=\(hasPassword)") + "setup code applied host=\(host) port=\(resolvedPort ?? -1) " + + "tls=\(self.manualGatewayTLS) token=\(hasToken) password=\(hasPassword)" + ) guard let port = resolvedPort else { self.setupStatusText = "Failed: invalid port" return @@ -1014,3 +1029,4 @@ struct SettingsTab: View { return lines } } +// swiftlint:enable type_body_length diff --git a/apps/ios/Sources/Status/StatusPill.swift b/apps/ios/Sources/Status/StatusPill.swift index ea5e425c49d..8c0885fc516 100644 --- a/apps/ios/Sources/Status/StatusPill.swift +++ b/apps/ios/Sources/Status/StatusPill.swift @@ -51,7 +51,11 @@ struct StatusPill: View { Circle() .fill(self.gateway.color) .frame(width: 9, height: 9) - .scaleEffect(self.gateway == .connecting && !self.reduceMotion ? (self.pulse ? 1.15 : 0.85) : 1.0) + .scaleEffect( + self.gateway == .connecting && !self.reduceMotion + ? (self.pulse ? 1.15 : 0.85) + : 1.0 + ) .opacity(self.gateway == .connecting && !self.reduceMotion ? (self.pulse ? 1.0 : 0.6) : 1.0) Text(self.gateway.title) diff --git a/apps/ios/Sources/Voice/TalkModeManager.swift b/apps/ios/Sources/Voice/TalkModeManager.swift index 0f8a7e6461b..5210921a5a7 100644 --- a/apps/ios/Sources/Voice/TalkModeManager.swift +++ b/apps/ios/Sources/Voice/TalkModeManager.swift @@ -10,7 +10,7 @@ import Speech // This file intentionally centralizes talk mode state + behavior. // It's large, and splitting would force `private` -> `fileprivate` across many members. // We'll refactor into smaller files when the surface stabilizes. 
-// swiftlint:disable type_body_length +// swiftlint:disable type_body_length file_length @MainActor @Observable final class TalkModeManager: NSObject { @@ -156,9 +156,7 @@ final class TalkModeManager: NSObject { let micOk = await Self.requestMicrophonePermission() guard micOk else { self.logger.warning("start blocked: microphone permission denied") - self.statusText = Self.permissionMessage( - kind: "Microphone", - status: AVAudioSession.sharedInstance().recordPermission) + self.statusText = "Microphone permission denied" return } let speechOk = await Self.requestSpeechPermission() @@ -300,9 +298,7 @@ final class TalkModeManager: NSObject { if !self.allowSimulatorCapture { let micOk = await Self.requestMicrophonePermission() guard micOk else { - self.statusText = Self.permissionMessage( - kind: "Microphone", - status: AVAudioSession.sharedInstance().recordPermission) + self.statusText = "Microphone permission denied" throw NSError(domain: "TalkMode", code: 4, userInfo: [ NSLocalizedDescriptionKey: "Microphone permission denied", ]) @@ -470,14 +466,15 @@ final class TalkModeManager: NSObject { private func startRecognition() throws { #if targetEnvironment(simulator) + if self.allowSimulatorCapture { + self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest() + self.recognitionRequest?.shouldReportPartialResults = true + return + } if !self.allowSimulatorCapture { throw NSError(domain: "TalkMode", code: 2, userInfo: [ NSLocalizedDescriptionKey: "Talk mode is not supported on the iOS simulator", ]) - } else { - self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest() - self.recognitionRequest?.shouldReportPartialResults = true - return } #endif @@ -525,7 +522,9 @@ final class TalkModeManager: NSObject { self.noiseFloorSamples.removeAll(keepingCapacity: true) let threshold = min(0.35, max(0.12, avg + 0.10)) GatewayDiagnostics.log( - "talk audio: noiseFloor=\(String(format: "%.3f", avg)) threshold=\(String(format: "%.3f", threshold))") + "talk audio: 
noiseFloor=\(String(format: "%.3f", avg)) " + + "threshold=\(String(format: "%.3f", threshold))" + ) } } @@ -549,7 +548,9 @@ final class TalkModeManager: NSObject { self.loggedPartialThisCycle = false GatewayDiagnostics.log( - "talk speech: recognition started mode=\(String(describing: self.captureMode)) engineRunning=\(self.audioEngine.isRunning)") + "talk speech: recognition started mode=\(String(describing: self.captureMode)) " + + "engineRunning=\(self.audioEngine.isRunning)" + ) self.recognitionTask = recognizer.recognitionTask(with: request) { [weak self] result, error in guard let self else { return } if let error { @@ -850,11 +851,10 @@ final class TalkModeManager: NSObject { private func buildPrompt(transcript: String) -> String { let interrupted = self.lastInterruptedAtSeconds self.lastInterruptedAtSeconds = nil - let includeVoiceDirectiveHint = (UserDefaults.standard.object(forKey: "talk.voiceDirectiveHint.enabled") as? Bool) ?? true return TalkPromptBuilder.build( transcript: transcript, interruptedAtSeconds: interrupted, - includeVoiceDirectiveHint: includeVoiceDirectiveHint) + includeVoiceDirectiveHint: false) } private enum ChatCompletionState: CustomStringConvertible { @@ -1317,11 +1317,11 @@ final class TalkModeManager: NSObject { try Task.checkCancellation() chunks.append(chunk) } - await self?.completeIncrementalPrefetch(id: id, chunks: chunks) + self?.completeIncrementalPrefetch(id: id, chunks: chunks) } catch is CancellationError { - await self?.clearIncrementalPrefetch(id: id) + self?.clearIncrementalPrefetch(id: id) } catch { - await self?.failIncrementalPrefetch(id: id, error: error) + self?.failIncrementalPrefetch(id: id, error: error) } } self.incrementalSpeechPrefetch = IncrementalSpeechPrefetchState( @@ -1427,7 +1427,10 @@ final class TalkModeManager: NSObject { for await evt in stream { if Task.isCancelled { return } guard evt.event == "agent", let payload = evt.payload else { continue } - guard let agentEvent = try? 
GatewayPayloadDecoding.decode(payload, as: OpenClawAgentEventPayload.self) else { + guard let agentEvent = try? GatewayPayloadDecoding.decode( + payload, + as: OpenClawAgentEventPayload.self + ) else { continue } guard agentEvent.runId == runId, agentEvent.stream == "assistant" else { continue } @@ -1727,23 +1730,20 @@ private struct IncrementalSpeechBuffer { extension TalkModeManager { nonisolated static func requestMicrophonePermission() async -> Bool { - let session = AVAudioSession.sharedInstance() - switch session.recordPermission { + switch AVAudioApplication.shared.recordPermission { case .granted: return true case .denied: return false case .undetermined: - break + return await self.requestPermissionWithTimeout { completion in + AVAudioApplication.requestRecordPermission(completionHandler: { ok in + completion(ok) + }) + } @unknown default: return false } - - return await self.requestPermissionWithTimeout { completion in - AVAudioSession.sharedInstance().requestRecordPermission { ok in - completion(ok) - } - } } nonisolated static func requestSpeechPermission() async -> Bool { @@ -1767,7 +1767,7 @@ extension TalkModeManager { } private nonisolated static func requestPermissionWithTimeout( - _ operation: @escaping @Sendable (@escaping (Bool) -> Void) -> Void) async -> Bool + _ operation: @escaping @Sendable (@escaping @Sendable (Bool) -> Void) -> Void) async -> Bool { do { return try await AsyncTimeout.withTimeout( @@ -1911,7 +1911,7 @@ extension TalkModeManager { } let providerID = Self.normalizedTalkProviderID(rawProvider) ?? - normalizedProviders.keys.sorted().first ?? + normalizedProviders.keys.min() ?? 
Self.defaultTalkProvider return TalkProviderConfigSelection( provider: providerID, @@ -1921,7 +1921,11 @@ extension TalkModeManager { func reloadConfig() async { guard let gateway else { return } do { - let res = try await gateway.request(method: "talk.config", paramsJSON: "{\"includeSecrets\":true}", timeoutSeconds: 8) + let res = try await gateway.request( + method: "talk.config", + paramsJSON: "{\"includeSecrets\":true}", + timeoutSeconds: 8 + ) guard let json = try JSONSerialization.jsonObject(with: res) as? [String: Any] else { return } guard let config = json["config"] as? [String: Any] else { return } let talk = config["talk"] as? [String: Any] @@ -2008,10 +2012,18 @@ extension TalkModeManager { private static func describeAudioSession() -> String { let session = AVAudioSession.sharedInstance() - let inputs = session.currentRoute.inputs.map { "\($0.portType.rawValue):\($0.portName)" }.joined(separator: ",") - let outputs = session.currentRoute.outputs.map { "\($0.portType.rawValue):\($0.portName)" }.joined(separator: ",") - let available = session.availableInputs?.map { "\($0.portType.rawValue):\($0.portName)" }.joined(separator: ",") ?? "" - return "category=\(session.category.rawValue) mode=\(session.mode.rawValue) opts=\(session.categoryOptions.rawValue) inputAvail=\(session.isInputAvailable) routeIn=[\(inputs)] routeOut=[\(outputs)] availIn=[\(available)]" + let inputs = session.currentRoute.inputs + .map { "\($0.portType.rawValue):\($0.portName)" } + .joined(separator: ",") + let outputs = session.currentRoute.outputs + .map { "\($0.portType.rawValue):\($0.portName)" } + .joined(separator: ",") + let available = session.availableInputs? + .map { "\($0.portType.rawValue):\($0.portName)" } + .joined(separator: ",") ?? 
"" + return "category=\(session.category.rawValue) mode=\(session.mode.rawValue) " + + "opts=\(session.categoryOptions.rawValue) inputAvail=\(session.isInputAvailable) " + + "routeIn=[\(inputs)] routeOut=[\(outputs)] availIn=[\(available)]" } } @@ -2079,7 +2091,9 @@ private final class AudioTapDiagnostics: @unchecked Sendable { guard shouldLog else { return } GatewayDiagnostics.log( - "\(label) mic: buffers=\(count) frames=\(frames) rate=\(Int(rate))Hz ch=\(ch) rms=\(String(format: "%.4f", resolvedRms)) max=\(String(format: "%.4f", maxRms))") + "\(label) mic: buffers=\(count) frames=\(frames) rate=\(Int(rate))Hz ch=\(ch) " + + "rms=\(String(format: "%.4f", resolvedRms)) max=\(String(format: "%.4f", maxRms))" + ) } } @@ -2136,4 +2150,4 @@ private struct IncrementalPrefetchedAudio { let outputFormat: String? } -// swiftlint:enable type_body_length +// swiftlint:enable type_body_length file_length diff --git a/apps/ios/Tests/Info.plist b/apps/ios/Tests/Info.plist index c273b1923d1..b1a0354205c 100644 --- a/apps/ios/Tests/Info.plist +++ b/apps/ios/Tests/Info.plist @@ -17,8 +17,8 @@ CFBundlePackageType BNDL CFBundleShortVersionString - 2026.2.25 + 2026.2.26 CFBundleVersion - 20260225 + 20260226 diff --git a/apps/ios/WatchApp/Info.plist b/apps/ios/WatchApp/Info.plist index 4e309b031a6..3551b0af6f4 100644 --- a/apps/ios/WatchApp/Info.plist +++ b/apps/ios/WatchApp/Info.plist @@ -17,9 +17,9 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.2.23 + 2026.2.26 CFBundleVersion - 20260223 + 20260226 WKCompanionAppBundleIdentifier $(OPENCLAW_APP_BUNDLE_ID) WKWatchKitApp diff --git a/apps/ios/WatchExtension/Info.plist b/apps/ios/WatchExtension/Info.plist index 1b5f28dfc43..70451f55eb5 100644 --- a/apps/ios/WatchExtension/Info.plist +++ b/apps/ios/WatchExtension/Info.plist @@ -15,9 +15,9 @@ CFBundleName $(PRODUCT_NAME) CFBundleShortVersionString - 2026.2.23 + 2026.2.26 CFBundleVersion - 20260223 + 20260226 NSExtension NSExtensionAttributes diff --git 
a/apps/ios/project.yml b/apps/ios/project.yml index a4d5928d820..63a959d0f18 100644 --- a/apps/ios/project.yml +++ b/apps/ios/project.yml @@ -92,8 +92,8 @@ targets: - CFBundleURLName: ai.openclaw.ios CFBundleURLSchemes: - openclaw - CFBundleShortVersionString: "2026.2.23" - CFBundleVersion: "20260223" + CFBundleShortVersionString: "2026.2.26" + CFBundleVersion: "20260226" UILaunchScreen: {} UIApplicationSceneManifest: UIApplicationSupportsMultipleScenes: false @@ -133,11 +133,13 @@ targets: - path: ShareExtension dependencies: - package: OpenClawKit + - sdk: AppIntents.framework settings: base: CODE_SIGN_IDENTITY: "Apple Development" CODE_SIGN_STYLE: "$(OPENCLAW_CODE_SIGN_STYLE)" DEVELOPMENT_TEAM: "$(OPENCLAW_DEVELOPMENT_TEAM)" + ENABLE_APPINTENTS_METADATA: NO PRODUCT_BUNDLE_IDENTIFIER: "$(OPENCLAW_SHARE_BUNDLE_ID)" PROVISIONING_PROFILE_SPECIFIER: "$(OPENCLAW_SHARE_PROFILE)" SWIFT_VERSION: "6.0" @@ -146,8 +148,8 @@ targets: path: ShareExtension/Info.plist properties: CFBundleDisplayName: OpenClaw Share - CFBundleShortVersionString: "2026.2.23" - CFBundleVersion: "20260223" + CFBundleShortVersionString: "2026.2.26" + CFBundleVersion: "20260226" NSExtension: NSExtensionPointIdentifier: com.apple.share-services NSExtensionPrincipalClass: "$(PRODUCT_MODULE_NAME).ShareViewController" @@ -171,13 +173,14 @@ targets: Release: Config/Signing.xcconfig settings: base: + ENABLE_APPINTENTS_METADATA: NO PRODUCT_BUNDLE_IDENTIFIER: "$(OPENCLAW_WATCH_APP_BUNDLE_ID)" info: path: WatchApp/Info.plist properties: CFBundleDisplayName: OpenClaw - CFBundleShortVersionString: "2026.2.23" - CFBundleVersion: "20260223" + CFBundleShortVersionString: "2026.2.26" + CFBundleVersion: "20260226" WKCompanionAppBundleIdentifier: "$(OPENCLAW_APP_BUNDLE_ID)" WKWatchKitApp: true @@ -200,8 +203,8 @@ targets: path: WatchExtension/Info.plist properties: CFBundleDisplayName: OpenClaw - CFBundleShortVersionString: "2026.2.23" - CFBundleVersion: "20260223" + CFBundleShortVersionString: "2026.2.26" + 
CFBundleVersion: "20260226" NSExtension: NSExtensionAttributes: WKAppBundleIdentifier: "$(OPENCLAW_WATCH_APP_BUNDLE_ID)" @@ -234,5 +237,5 @@ targets: path: Tests/Info.plist properties: CFBundleDisplayName: OpenClawTests - CFBundleShortVersionString: "2026.2.23" - CFBundleVersion: "20260223" + CFBundleShortVersionString: "2026.2.26" + CFBundleVersion: "20260226" diff --git a/apps/macos/Sources/OpenClaw/AnthropicAuthControls.swift b/apps/macos/Sources/OpenClaw/AnthropicAuthControls.swift deleted file mode 100644 index 06f107d6c6e..00000000000 --- a/apps/macos/Sources/OpenClaw/AnthropicAuthControls.swift +++ /dev/null @@ -1,234 +0,0 @@ -import AppKit -import Combine -import SwiftUI - -@MainActor -struct AnthropicAuthControls: View { - let connectionMode: AppState.ConnectionMode - - @State private var oauthStatus: OpenClawOAuthStore.AnthropicOAuthStatus = OpenClawOAuthStore.anthropicOAuthStatus() - @State private var pkce: AnthropicOAuth.PKCE? - @State private var code: String = "" - @State private var busy = false - @State private var statusText: String? - @State private var autoDetectClipboard = true - @State private var autoConnectClipboard = true - @State private var lastPasteboardChangeCount = NSPasteboard.general.changeCount - - private static let clipboardPoll: AnyPublisher = { - if ProcessInfo.processInfo.isRunningTests { - return Empty(completeImmediately: false).eraseToAnyPublisher() - } - return Timer.publish(every: 0.4, on: .main, in: .common) - .autoconnect() - .eraseToAnyPublisher() - }() - - var body: some View { - VStack(alignment: .leading, spacing: 10) { - if self.connectionMode != .local { - Text("Gateway isn’t running locally; OAuth must be created on the gateway host.") - .font(.footnote) - .foregroundStyle(.secondary) - .fixedSize(horizontal: false, vertical: true) - } - - HStack(spacing: 10) { - Circle() - .fill(self.oauthStatus.isConnected ? 
Color.green : Color.orange) - .frame(width: 8, height: 8) - Text(self.oauthStatus.shortDescription) - .font(.footnote.weight(.semibold)) - .foregroundStyle(.secondary) - Spacer() - Button("Reveal") { - NSWorkspace.shared.activateFileViewerSelecting([OpenClawOAuthStore.oauthURL()]) - } - .buttonStyle(.bordered) - .disabled(!FileManager().fileExists(atPath: OpenClawOAuthStore.oauthURL().path)) - - Button("Refresh") { - self.refresh() - } - .buttonStyle(.bordered) - } - - Text(OpenClawOAuthStore.oauthURL().path) - .font(.caption.monospaced()) - .foregroundStyle(.secondary) - .lineLimit(1) - .truncationMode(.middle) - .textSelection(.enabled) - - HStack(spacing: 12) { - Button { - self.startOAuth() - } label: { - if self.busy { - ProgressView().controlSize(.small) - } else { - Text(self.oauthStatus.isConnected ? "Re-auth (OAuth)" : "Open sign-in (OAuth)") - } - } - .buttonStyle(.borderedProminent) - .disabled(self.connectionMode != .local || self.busy) - - if self.pkce != nil { - Button("Cancel") { - self.pkce = nil - self.code = "" - self.statusText = nil - } - .buttonStyle(.bordered) - .disabled(self.busy) - } - } - - if self.pkce != nil { - VStack(alignment: .leading, spacing: 8) { - Text("Paste `code#state`") - .font(.footnote.weight(.semibold)) - .foregroundStyle(.secondary) - - TextField("code#state", text: self.$code) - .textFieldStyle(.roundedBorder) - .disabled(self.busy) - - Toggle("Auto-detect from clipboard", isOn: self.$autoDetectClipboard) - .font(.footnote) - .foregroundStyle(.secondary) - .disabled(self.busy) - - Toggle("Auto-connect when detected", isOn: self.$autoConnectClipboard) - .font(.footnote) - .foregroundStyle(.secondary) - .disabled(self.busy) - - Button("Connect") { - Task { await self.finishOAuth() } - } - .buttonStyle(.bordered) - .disabled(self.busy || self.connectionMode != .local || self.code - .trimmingCharacters(in: .whitespacesAndNewlines) - .isEmpty) - } - } - - if let statusText, !statusText.isEmpty { - Text(statusText) - 
.font(.footnote) - .foregroundStyle(.secondary) - .fixedSize(horizontal: false, vertical: true) - } - } - .onAppear { - self.refresh() - } - .onReceive(Self.clipboardPoll) { _ in - self.pollClipboardIfNeeded() - } - } - - private func refresh() { - let imported = OpenClawOAuthStore.importLegacyAnthropicOAuthIfNeeded() - self.oauthStatus = OpenClawOAuthStore.anthropicOAuthStatus() - if imported != nil { - self.statusText = "Imported existing OAuth credentials." - } - } - - private func startOAuth() { - guard self.connectionMode == .local else { return } - guard !self.busy else { return } - self.busy = true - defer { self.busy = false } - - do { - let pkce = try AnthropicOAuth.generatePKCE() - self.pkce = pkce - let url = AnthropicOAuth.buildAuthorizeURL(pkce: pkce) - NSWorkspace.shared.open(url) - self.statusText = "Browser opened. After approving, paste the `code#state` value here." - } catch { - self.statusText = "Failed to start OAuth: \(error.localizedDescription)" - } - } - - @MainActor - private func finishOAuth() async { - guard self.connectionMode == .local else { return } - guard !self.busy else { return } - guard let pkce = self.pkce else { return } - self.busy = true - defer { self.busy = false } - - guard let parsed = AnthropicOAuthCodeState.parse(from: self.code) else { - self.statusText = "OAuth failed: missing or invalid code/state." - return - } - - do { - let creds = try await AnthropicOAuth.exchangeCode( - code: parsed.code, - state: parsed.state, - verifier: pkce.verifier) - try OpenClawOAuthStore.saveAnthropicOAuth(creds) - self.refresh() - self.pkce = nil - self.code = "" - self.statusText = "Connected. OpenClaw can now use Claude via OAuth." 
- } catch { - self.statusText = "OAuth failed: \(error.localizedDescription)" - } - } - - private func pollClipboardIfNeeded() { - guard self.connectionMode == .local else { return } - guard self.pkce != nil else { return } - guard !self.busy else { return } - guard self.autoDetectClipboard else { return } - - let pb = NSPasteboard.general - let changeCount = pb.changeCount - guard changeCount != self.lastPasteboardChangeCount else { return } - self.lastPasteboardChangeCount = changeCount - - guard let raw = pb.string(forType: .string), !raw.isEmpty else { return } - guard let parsed = AnthropicOAuthCodeState.parse(from: raw) else { return } - guard let pkce = self.pkce, parsed.state == pkce.verifier else { return } - - let next = "\(parsed.code)#\(parsed.state)" - if self.code != next { - self.code = next - self.statusText = "Detected `code#state` from clipboard." - } - - guard self.autoConnectClipboard else { return } - Task { await self.finishOAuth() } - } -} - -#if DEBUG -extension AnthropicAuthControls { - init( - connectionMode: AppState.ConnectionMode, - oauthStatus: OpenClawOAuthStore.AnthropicOAuthStatus, - pkce: AnthropicOAuth.PKCE? = nil, - code: String = "", - busy: Bool = false, - statusText: String? 
= nil, - autoDetectClipboard: Bool = true, - autoConnectClipboard: Bool = true) - { - self.connectionMode = connectionMode - self._oauthStatus = State(initialValue: oauthStatus) - self._pkce = State(initialValue: pkce) - self._code = State(initialValue: code) - self._busy = State(initialValue: busy) - self._statusText = State(initialValue: statusText) - self._autoDetectClipboard = State(initialValue: autoDetectClipboard) - self._autoConnectClipboard = State(initialValue: autoConnectClipboard) - self._lastPasteboardChangeCount = State(initialValue: NSPasteboard.general.changeCount) - } -} -#endif diff --git a/apps/macos/Sources/OpenClaw/AnthropicOAuth.swift b/apps/macos/Sources/OpenClaw/AnthropicOAuth.swift deleted file mode 100644 index f594cc04c31..00000000000 --- a/apps/macos/Sources/OpenClaw/AnthropicOAuth.swift +++ /dev/null @@ -1,383 +0,0 @@ -import CryptoKit -import Foundation -import OSLog -import Security - -struct AnthropicOAuthCredentials: Codable { - let type: String - let refresh: String - let access: String - let expires: Int64 -} - -enum AnthropicAuthMode: Equatable { - case oauthFile - case oauthEnv - case apiKeyEnv - case missing - - var shortLabel: String { - switch self { - case .oauthFile: "OAuth (OpenClaw token file)" - case .oauthEnv: "OAuth (env var)" - case .apiKeyEnv: "API key (env var)" - case .missing: "Missing credentials" - } - } - - var isConfigured: Bool { - switch self { - case .missing: false - case .oauthFile, .oauthEnv, .apiKeyEnv: true - } - } -} - -enum AnthropicAuthResolver { - static func resolve( - environment: [String: String] = ProcessInfo.processInfo.environment, - oauthStatus: OpenClawOAuthStore.AnthropicOAuthStatus = OpenClawOAuthStore - .anthropicOAuthStatus()) -> AnthropicAuthMode - { - if oauthStatus.isConnected { return .oauthFile } - - if let token = environment["ANTHROPIC_OAUTH_TOKEN"]?.trimmingCharacters(in: .whitespacesAndNewlines), - !token.isEmpty - { - return .oauthEnv - } - - if let key = 
environment["ANTHROPIC_API_KEY"]?.trimmingCharacters(in: .whitespacesAndNewlines), - !key.isEmpty - { - return .apiKeyEnv - } - - return .missing - } -} - -enum AnthropicOAuth { - private static let logger = Logger(subsystem: "ai.openclaw", category: "anthropic-oauth") - - private static let clientId = "9d1c250a-e61b-44d9-88ed-5944d1962f5e" - private static let authorizeURL = URL(string: "https://claude.ai/oauth/authorize")! - private static let tokenURL = URL(string: "https://console.anthropic.com/v1/oauth/token")! - private static let redirectURI = "https://console.anthropic.com/oauth/code/callback" - private static let scopes = "org:create_api_key user:profile user:inference" - - struct PKCE { - let verifier: String - let challenge: String - } - - static func generatePKCE() throws -> PKCE { - var bytes = [UInt8](repeating: 0, count: 32) - let status = SecRandomCopyBytes(kSecRandomDefault, bytes.count, &bytes) - guard status == errSecSuccess else { - throw NSError(domain: NSOSStatusErrorDomain, code: Int(status)) - } - let verifier = Data(bytes).base64URLEncodedString() - let hash = SHA256.hash(data: Data(verifier.utf8)) - let challenge = Data(hash).base64URLEncodedString() - return PKCE(verifier: verifier, challenge: challenge) - } - - static func buildAuthorizeURL(pkce: PKCE) -> URL { - var components = URLComponents(url: self.authorizeURL, resolvingAgainstBaseURL: false)! - components.queryItems = [ - URLQueryItem(name: "code", value: "true"), - URLQueryItem(name: "client_id", value: self.clientId), - URLQueryItem(name: "response_type", value: "code"), - URLQueryItem(name: "redirect_uri", value: self.redirectURI), - URLQueryItem(name: "scope", value: self.scopes), - URLQueryItem(name: "code_challenge", value: pkce.challenge), - URLQueryItem(name: "code_challenge_method", value: "S256"), - // Match legacy flow: state is the verifier. - URLQueryItem(name: "state", value: pkce.verifier), - ] - return components.url! 
- } - - static func exchangeCode( - code: String, - state: String, - verifier: String) async throws -> AnthropicOAuthCredentials - { - let payload: [String: Any] = [ - "grant_type": "authorization_code", - "client_id": self.clientId, - "code": code, - "state": state, - "redirect_uri": self.redirectURI, - "code_verifier": verifier, - ] - let body = try JSONSerialization.data(withJSONObject: payload, options: []) - - var request = URLRequest(url: self.tokenURL) - request.httpMethod = "POST" - request.httpBody = body - request.setValue("application/json", forHTTPHeaderField: "Content-Type") - - let (data, response) = try await URLSession.shared.data(for: request) - guard let http = response as? HTTPURLResponse else { - throw URLError(.badServerResponse) - } - guard (200..<300).contains(http.statusCode) else { - let text = String(data: data, encoding: .utf8) ?? "" - throw NSError( - domain: "AnthropicOAuth", - code: http.statusCode, - userInfo: [NSLocalizedDescriptionKey: "Token exchange failed: \(text)"]) - } - - let decoded = try JSONSerialization.jsonObject(with: data) as? [String: Any] - let access = decoded?["access_token"] as? String - let refresh = decoded?["refresh_token"] as? String - let expiresIn = decoded?["expires_in"] as? Double - guard let access, let refresh, let expiresIn else { - throw NSError(domain: "AnthropicOAuth", code: 0, userInfo: [ - NSLocalizedDescriptionKey: "Unexpected token response.", - ]) - } - - // Match legacy flow: expiresAt = now + expires_in - 5 minutes. 
- let expiresAtMs = Int64(Date().timeIntervalSince1970 * 1000) - + Int64(expiresIn * 1000) - - Int64(5 * 60 * 1000) - - self.logger.info("Anthropic OAuth exchange ok; expiresAtMs=\(expiresAtMs, privacy: .public)") - return AnthropicOAuthCredentials(type: "oauth", refresh: refresh, access: access, expires: expiresAtMs) - } - - static func refresh(refreshToken: String) async throws -> AnthropicOAuthCredentials { - let payload: [String: Any] = [ - "grant_type": "refresh_token", - "client_id": self.clientId, - "refresh_token": refreshToken, - ] - let body = try JSONSerialization.data(withJSONObject: payload, options: []) - - var request = URLRequest(url: self.tokenURL) - request.httpMethod = "POST" - request.httpBody = body - request.setValue("application/json", forHTTPHeaderField: "Content-Type") - - let (data, response) = try await URLSession.shared.data(for: request) - guard let http = response as? HTTPURLResponse else { - throw URLError(.badServerResponse) - } - guard (200..<300).contains(http.statusCode) else { - let text = String(data: data, encoding: .utf8) ?? "" - throw NSError( - domain: "AnthropicOAuth", - code: http.statusCode, - userInfo: [NSLocalizedDescriptionKey: "Token refresh failed: \(text)"]) - } - - let decoded = try JSONSerialization.jsonObject(with: data) as? [String: Any] - let access = decoded?["access_token"] as? String - let refresh = (decoded?["refresh_token"] as? String) ?? refreshToken - let expiresIn = decoded?["expires_in"] as? 
Double - guard let access, let expiresIn else { - throw NSError(domain: "AnthropicOAuth", code: 0, userInfo: [ - NSLocalizedDescriptionKey: "Unexpected token response.", - ]) - } - - let expiresAtMs = Int64(Date().timeIntervalSince1970 * 1000) - + Int64(expiresIn * 1000) - - Int64(5 * 60 * 1000) - - self.logger.info("Anthropic OAuth refresh ok; expiresAtMs=\(expiresAtMs, privacy: .public)") - return AnthropicOAuthCredentials(type: "oauth", refresh: refresh, access: access, expires: expiresAtMs) - } -} - -enum OpenClawOAuthStore { - static let oauthFilename = "oauth.json" - private static let providerKey = "anthropic" - private static let openclawOAuthDirEnv = "OPENCLAW_OAUTH_DIR" - private static let legacyPiDirEnv = "PI_CODING_AGENT_DIR" - - enum AnthropicOAuthStatus: Equatable { - case missingFile - case unreadableFile - case invalidJSON - case missingProviderEntry - case missingTokens - case connected(expiresAtMs: Int64?) - - var isConnected: Bool { - if case .connected = self { return true } - return false - } - - var shortDescription: String { - switch self { - case .missingFile: "OpenClaw OAuth token file not found" - case .unreadableFile: "OpenClaw OAuth token file not readable" - case .invalidJSON: "OpenClaw OAuth token file invalid" - case .missingProviderEntry: "No Anthropic entry in OpenClaw OAuth token file" - case .missingTokens: "Anthropic entry missing tokens" - case .connected: "OpenClaw OAuth credentials found" - } - } - } - - static func oauthDir() -> URL { - if let override = ProcessInfo.processInfo.environment[self.openclawOAuthDirEnv]? 
- .trimmingCharacters(in: .whitespacesAndNewlines), - !override.isEmpty - { - let expanded = NSString(string: override).expandingTildeInPath - return URL(fileURLWithPath: expanded, isDirectory: true) - } - let home = FileManager().homeDirectoryForCurrentUser - return home.appendingPathComponent(".openclaw", isDirectory: true) - .appendingPathComponent("credentials", isDirectory: true) - } - - static func oauthURL() -> URL { - self.oauthDir().appendingPathComponent(self.oauthFilename) - } - - static func legacyOAuthURLs() -> [URL] { - var urls: [URL] = [] - let env = ProcessInfo.processInfo.environment - if let override = env[self.legacyPiDirEnv]?.trimmingCharacters(in: .whitespacesAndNewlines), - !override.isEmpty - { - let expanded = NSString(string: override).expandingTildeInPath - urls.append(URL(fileURLWithPath: expanded, isDirectory: true).appendingPathComponent(self.oauthFilename)) - } - - let home = FileManager().homeDirectoryForCurrentUser - urls.append(home.appendingPathComponent(".pi/agent/\(self.oauthFilename)")) - urls.append(home.appendingPathComponent(".claude/\(self.oauthFilename)")) - urls.append(home.appendingPathComponent(".config/claude/\(self.oauthFilename)")) - urls.append(home.appendingPathComponent(".config/anthropic/\(self.oauthFilename)")) - - var seen = Set() - return urls.filter { url in - let path = url.standardizedFileURL.path - if seen.contains(path) { return false } - seen.insert(path) - return true - } - } - - static func importLegacyAnthropicOAuthIfNeeded() -> URL? 
{ - let dest = self.oauthURL() - guard !FileManager().fileExists(atPath: dest.path) else { return nil } - - for url in self.legacyOAuthURLs() { - guard FileManager().fileExists(atPath: url.path) else { continue } - guard self.anthropicOAuthStatus(at: url).isConnected else { continue } - guard let storage = self.loadStorage(at: url) else { continue } - do { - try self.saveStorage(storage) - return url - } catch { - continue - } - } - - return nil - } - - static func anthropicOAuthStatus() -> AnthropicOAuthStatus { - self.anthropicOAuthStatus(at: self.oauthURL()) - } - - static func hasAnthropicOAuth() -> Bool { - self.anthropicOAuthStatus().isConnected - } - - static func anthropicOAuthStatus(at url: URL) -> AnthropicOAuthStatus { - guard FileManager().fileExists(atPath: url.path) else { return .missingFile } - - guard let data = try? Data(contentsOf: url) else { return .unreadableFile } - guard let json = try? JSONSerialization.jsonObject(with: data, options: []) else { return .invalidJSON } - guard let storage = json as? [String: Any] else { return .invalidJSON } - guard let rawEntry = storage[self.providerKey] else { return .missingProviderEntry } - guard let entry = rawEntry as? [String: Any] else { return .invalidJSON } - - let refresh = self.firstString(in: entry, keys: ["refresh", "refresh_token", "refreshToken"]) - let access = self.firstString(in: entry, keys: ["access", "access_token", "accessToken"]) - guard refresh?.isEmpty == false, access?.isEmpty == false else { return .missingTokens } - - let expiresAny = entry["expires"] ?? entry["expires_at"] ?? entry["expiresAt"] - let expiresAtMs: Int64? = if let ms = expiresAny as? Int64 { - ms - } else if let number = expiresAny as? NSNumber { - number.int64Value - } else if let ms = expiresAny as? Double { - Int64(ms) - } else { - nil - } - - return .connected(expiresAtMs: expiresAtMs) - } - - static func loadAnthropicOAuthRefreshToken() -> String? 
{ - let url = self.oauthURL() - guard let storage = self.loadStorage(at: url) else { return nil } - guard let rawEntry = storage[self.providerKey] as? [String: Any] else { return nil } - let refresh = self.firstString(in: rawEntry, keys: ["refresh", "refresh_token", "refreshToken"]) - return refresh?.trimmingCharacters(in: .whitespacesAndNewlines) - } - - private static func firstString(in dict: [String: Any], keys: [String]) -> String? { - for key in keys { - if let value = dict[key] as? String { return value } - } - return nil - } - - private static func loadStorage(at url: URL) -> [String: Any]? { - guard let data = try? Data(contentsOf: url) else { return nil } - guard let json = try? JSONSerialization.jsonObject(with: data, options: []) else { return nil } - return json as? [String: Any] - } - - static func saveAnthropicOAuth(_ creds: AnthropicOAuthCredentials) throws { - let url = self.oauthURL() - let existing: [String: Any] = self.loadStorage(at: url) ?? [:] - - var updated = existing - updated[self.providerKey] = [ - "type": creds.type, - "refresh": creds.refresh, - "access": creds.access, - "expires": creds.expires, - ] - - try self.saveStorage(updated) - } - - private static func saveStorage(_ storage: [String: Any]) throws { - let dir = self.oauthDir() - try FileManager().createDirectory( - at: dir, - withIntermediateDirectories: true, - attributes: [.posixPermissions: 0o700]) - - let url = self.oauthURL() - let data = try JSONSerialization.data( - withJSONObject: storage, - options: [.prettyPrinted, .sortedKeys]) - try data.write(to: url, options: [.atomic]) - try FileManager().setAttributes([.posixPermissions: 0o600], ofItemAtPath: url.path) - } -} - -extension Data { - fileprivate func base64URLEncodedString() -> String { - self.base64EncodedString() - .replacingOccurrences(of: "+", with: "-") - .replacingOccurrences(of: "/", with: "_") - .replacingOccurrences(of: "=", with: "") - } -} diff --git 
a/apps/macos/Sources/OpenClaw/AnthropicOAuthCodeState.swift b/apps/macos/Sources/OpenClaw/AnthropicOAuthCodeState.swift deleted file mode 100644 index 2a88898c34d..00000000000 --- a/apps/macos/Sources/OpenClaw/AnthropicOAuthCodeState.swift +++ /dev/null @@ -1,59 +0,0 @@ -import Foundation - -enum AnthropicOAuthCodeState { - struct Parsed: Equatable { - let code: String - let state: String - } - - /// Extracts a `code#state` payload from arbitrary text. - /// - /// Supports: - /// - raw `code#state` - /// - OAuth callback URLs containing `code=` and `state=` query params - /// - surrounding text/backticks from instructions pages - static func extract(from raw: String) -> String? { - let text = raw.trimmingCharacters(in: .whitespacesAndNewlines) - .trimmingCharacters(in: CharacterSet(charactersIn: "`")) - if text.isEmpty { return nil } - - if let fromURL = self.extractFromURL(text) { return fromURL } - if let fromToken = self.extractFromToken(text) { return fromToken } - return nil - } - - static func parse(from raw: String) -> Parsed? { - guard let extracted = self.extract(from: raw) else { return nil } - let parts = extracted.split(separator: "#", maxSplits: 1).map(String.init) - let code = parts.first ?? "" - let state = parts.count > 1 ? parts[1] : "" - guard !code.isEmpty, !state.isEmpty else { return nil } - return Parsed(code: code, state: state) - } - - private static func extractFromURL(_ text: String) -> String? { - // Users might copy the callback URL from the browser address bar. - guard let components = URLComponents(string: text), - let items = components.queryItems, - let code = items.first(where: { $0.name == "code" })?.value, - let state = items.first(where: { $0.name == "state" })?.value, - !code.isEmpty, !state.isEmpty - else { return nil } - - return "\(code)#\(state)" - } - - private static func extractFromToken(_ text: String) -> String? { - // Base64url-ish tokens; keep this fairly strict to avoid false positives. 
- let pattern = #"([A-Za-z0-9._~-]{8,})#([A-Za-z0-9._~-]{8,})"# - guard let re = try? NSRegularExpression(pattern: pattern) else { return nil } - - let range = NSRange(text.startIndex.. ExecHostResponse { let validatedRequest: ExecHostValidatedRequest switch ExecHostRequestEvaluator.validateRequest(request) { - case .success(let request): + case let .success(request): validatedRequest = request - case .failure(let error): + case let .failure(error): return self.errorResponse(error) } @@ -370,7 +370,7 @@ private enum ExecHostExecutor { context: context, approvalDecision: request.approvalDecision) { - case .deny(let error): + case let .deny(error): return self.errorResponse(error) case .allow: break @@ -401,7 +401,7 @@ private enum ExecHostExecutor { context: context, approvalDecision: followupDecision) { - case .deny(let error): + case let .deny(error): return self.errorResponse(error) case .allow: break diff --git a/apps/macos/Sources/OpenClaw/ExecHostRequestEvaluator.swift b/apps/macos/Sources/OpenClaw/ExecHostRequestEvaluator.swift index fe38d7ea18f..4e0ff4173de 100644 --- a/apps/macos/Sources/OpenClaw/ExecHostRequestEvaluator.swift +++ b/apps/macos/Sources/OpenClaw/ExecHostRequestEvaluator.swift @@ -26,9 +26,9 @@ enum ExecHostRequestEvaluator { command: command, rawCommand: request.rawCommand) switch validatedCommand { - case .ok(let resolved): + case let .ok(resolved): return .success(ExecHostValidatedRequest(command: command, displayCommand: resolved.displayCommand)) - case .invalid(let message): + case let .invalid(message): return .failure( ExecHostError( code: "INVALID_REQUEST", diff --git a/apps/macos/Sources/OpenClaw/HostEnvSanitizer.swift b/apps/macos/Sources/OpenClaw/HostEnvSanitizer.swift index b9b993299a9..e1c4f5b8531 100644 --- a/apps/macos/Sources/OpenClaw/HostEnvSanitizer.swift +++ b/apps/macos/Sources/OpenClaw/HostEnvSanitizer.swift @@ -1,36 +1,11 @@ import Foundation enum HostEnvSanitizer { - /// Keep in sync with 
src/infra/host-env-security-policy.json. + /// Generated from src/infra/host-env-security-policy.json via scripts/generate-host-env-security-policy-swift.mjs. /// Parity is validated by src/infra/host-env-security.policy-parity.test.ts. - private static let blockedKeys: Set = [ - "NODE_OPTIONS", - "NODE_PATH", - "PYTHONHOME", - "PYTHONPATH", - "PERL5LIB", - "PERL5OPT", - "RUBYLIB", - "RUBYOPT", - "BASH_ENV", - "ENV", - "SHELL", - "SHELLOPTS", - "PS4", - "GCONV_PATH", - "IFS", - "SSLKEYLOGFILE", - ] - - private static let blockedPrefixes: [String] = [ - "DYLD_", - "LD_", - "BASH_FUNC_", - ] - private static let blockedOverrideKeys: Set = [ - "HOME", - "ZDOTDIR", - ] + private static let blockedKeys = HostEnvSecurityPolicy.blockedKeys + private static let blockedPrefixes = HostEnvSecurityPolicy.blockedPrefixes + private static let blockedOverrideKeys = HostEnvSecurityPolicy.blockedOverrideKeys private static let shellWrapperAllowedOverrideKeys: Set = [ "TERM", "LANG", diff --git a/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift b/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift new file mode 100644 index 00000000000..b126d03de21 --- /dev/null +++ b/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift @@ -0,0 +1,38 @@ +// Generated file. Do not edit directly. 
+// Source: src/infra/host-env-security-policy.json +// Regenerate: node scripts/generate-host-env-security-policy-swift.mjs --write + +import Foundation + +enum HostEnvSecurityPolicy { + static let blockedKeys: Set = [ + "NODE_OPTIONS", + "NODE_PATH", + "PYTHONHOME", + "PYTHONPATH", + "PERL5LIB", + "PERL5OPT", + "RUBYLIB", + "RUBYOPT", + "BASH_ENV", + "ENV", + "GIT_EXTERNAL_DIFF", + "SHELL", + "SHELLOPTS", + "PS4", + "GCONV_PATH", + "IFS", + "SSLKEYLOGFILE" + ] + + static let blockedOverrideKeys: Set = [ + "HOME", + "ZDOTDIR" + ] + + static let blockedPrefixes: [String] = [ + "DYLD_", + "LD_", + "BASH_FUNC_" + ] +} diff --git a/apps/macos/Sources/OpenClaw/Onboarding.swift b/apps/macos/Sources/OpenClaw/Onboarding.swift index b8a6377b419..4eae7e092b0 100644 --- a/apps/macos/Sources/OpenClaw/Onboarding.swift +++ b/apps/macos/Sources/OpenClaw/Onboarding.swift @@ -1,5 +1,4 @@ import AppKit -import Combine import Observation import OpenClawChatUI import OpenClawDiscovery @@ -69,22 +68,6 @@ struct OnboardingView: View { @State var workspacePath: String = "" @State var workspaceStatus: String? @State var workspaceApplying = false - @State var anthropicAuthPKCE: AnthropicOAuth.PKCE? - @State var anthropicAuthCode: String = "" - @State var anthropicAuthStatus: String? - @State var anthropicAuthBusy = false - @State var anthropicAuthConnected = false - @State var anthropicAuthVerifying = false - @State var anthropicAuthVerified = false - @State var anthropicAuthVerificationAttempted = false - @State var anthropicAuthVerificationFailed = false - @State var anthropicAuthVerifiedAt: Date? - @State var anthropicAuthDetectedStatus: OpenClawOAuthStore.AnthropicOAuthStatus = .missingFile - @State var anthropicAuthAutoDetectClipboard = true - @State var anthropicAuthAutoConnectClipboard = true - @State var anthropicAuthLastPasteboardChangeCount = NSPasteboard.general.changeCount - @State var monitoringAuth = false - @State var authMonitorTask: Task? 
@State var needsBootstrap = false @State var didAutoKickoff = false @State var showAdvancedConnection = false @@ -104,19 +87,9 @@ struct OnboardingView: View { let pageWidth: CGFloat = Self.windowWidth let contentHeight: CGFloat = 460 let connectionPageIndex = 1 - let anthropicAuthPageIndex = 2 let wizardPageIndex = 3 let onboardingChatPageIndex = 8 - static let clipboardPoll: AnyPublisher = { - if ProcessInfo.processInfo.isRunningTests { - return Empty(completeImmediately: false).eraseToAnyPublisher() - } - return Timer.publish(every: 0.4, on: .main, in: .common) - .autoconnect() - .eraseToAnyPublisher() - }() - let permissionsPageIndex = 5 static func pageOrder( for mode: AppState.ConnectionMode, diff --git a/apps/macos/Sources/OpenClaw/OnboardingView+Actions.swift b/apps/macos/Sources/OpenClaw/OnboardingView+Actions.swift index bcd5bd6d44d..a521926ddb9 100644 --- a/apps/macos/Sources/OpenClaw/OnboardingView+Actions.swift +++ b/apps/macos/Sources/OpenClaw/OnboardingView+Actions.swift @@ -78,70 +78,4 @@ extension OnboardingView { self.copied = true DispatchQueue.main.asyncAfter(deadline: .now() + 1.2) { self.copied = false } } - - func startAnthropicOAuth() { - guard !self.anthropicAuthBusy else { return } - self.anthropicAuthBusy = true - defer { self.anthropicAuthBusy = false } - - do { - let pkce = try AnthropicOAuth.generatePKCE() - self.anthropicAuthPKCE = pkce - let url = AnthropicOAuth.buildAuthorizeURL(pkce: pkce) - NSWorkspace.shared.open(url) - self.anthropicAuthStatus = "Browser opened. After approving, paste the `code#state` value here." 
- } catch { - self.anthropicAuthStatus = "Failed to start OAuth: \(error.localizedDescription)" - } - } - - @MainActor - func finishAnthropicOAuth() async { - guard !self.anthropicAuthBusy else { return } - guard let pkce = self.anthropicAuthPKCE else { return } - self.anthropicAuthBusy = true - defer { self.anthropicAuthBusy = false } - - guard let parsed = AnthropicOAuthCodeState.parse(from: self.anthropicAuthCode) else { - self.anthropicAuthStatus = "OAuth failed: missing or invalid code/state." - return - } - - do { - let creds = try await AnthropicOAuth.exchangeCode( - code: parsed.code, - state: parsed.state, - verifier: pkce.verifier) - try OpenClawOAuthStore.saveAnthropicOAuth(creds) - self.refreshAnthropicOAuthStatus() - self.anthropicAuthStatus = "Connected. OpenClaw can now use Claude." - } catch { - self.anthropicAuthStatus = "OAuth failed: \(error.localizedDescription)" - } - } - - func pollAnthropicClipboardIfNeeded() { - guard self.currentPage == self.anthropicAuthPageIndex else { return } - guard self.anthropicAuthPKCE != nil else { return } - guard !self.anthropicAuthBusy else { return } - guard self.anthropicAuthAutoDetectClipboard else { return } - - let pb = NSPasteboard.general - let changeCount = pb.changeCount - guard changeCount != self.anthropicAuthLastPasteboardChangeCount else { return } - self.anthropicAuthLastPasteboardChangeCount = changeCount - - guard let raw = pb.string(forType: .string), !raw.isEmpty else { return } - guard let parsed = AnthropicOAuthCodeState.parse(from: raw) else { return } - guard let pkce = self.anthropicAuthPKCE, parsed.state == pkce.verifier else { return } - - let next = "\(parsed.code)#\(parsed.state)" - if self.anthropicAuthCode != next { - self.anthropicAuthCode = next - self.anthropicAuthStatus = "Detected `code#state` from clipboard." 
- } - - guard self.anthropicAuthAutoConnectClipboard else { return } - Task { await self.finishAnthropicOAuth() } - } } diff --git a/apps/macos/Sources/OpenClaw/OnboardingView+Layout.swift b/apps/macos/Sources/OpenClaw/OnboardingView+Layout.swift index ce87e211ce4..9b0e45e205c 100644 --- a/apps/macos/Sources/OpenClaw/OnboardingView+Layout.swift +++ b/apps/macos/Sources/OpenClaw/OnboardingView+Layout.swift @@ -53,7 +53,6 @@ extension OnboardingView { .onDisappear { self.stopPermissionMonitoring() self.stopDiscovery() - self.stopAuthMonitoring() Task { await self.onboardingWizard.cancelIfRunning() } } .task { @@ -61,7 +60,6 @@ extension OnboardingView { self.refreshCLIStatus() await self.loadWorkspaceDefaults() await self.ensureDefaultWorkspace() - self.refreshAnthropicOAuthStatus() self.refreshBootstrapStatus() self.preferredGatewayID = GatewayDiscoveryPreferences.preferredStableID() } diff --git a/apps/macos/Sources/OpenClaw/OnboardingView+Monitoring.swift b/apps/macos/Sources/OpenClaw/OnboardingView+Monitoring.swift index dfbdf91d44d..efe37f31673 100644 --- a/apps/macos/Sources/OpenClaw/OnboardingView+Monitoring.swift +++ b/apps/macos/Sources/OpenClaw/OnboardingView+Monitoring.swift @@ -47,7 +47,6 @@ extension OnboardingView { func updateMonitoring(for pageIndex: Int) { self.updatePermissionMonitoring(for: pageIndex) self.updateDiscoveryMonitoring(for: pageIndex) - self.updateAuthMonitoring(for: pageIndex) self.maybeKickoffOnboardingChat(for: pageIndex) } @@ -63,33 +62,6 @@ extension OnboardingView { self.gatewayDiscovery.stop() } - func updateAuthMonitoring(for pageIndex: Int) { - let shouldMonitor = pageIndex == self.anthropicAuthPageIndex && self.state.connectionMode == .local - if shouldMonitor, !self.monitoringAuth { - self.monitoringAuth = true - self.startAuthMonitoring() - } else if !shouldMonitor, self.monitoringAuth { - self.stopAuthMonitoring() - } - } - - func startAuthMonitoring() { - self.refreshAnthropicOAuthStatus() - self.authMonitorTask?.cancel() 
- self.authMonitorTask = Task { - while !Task.isCancelled { - await MainActor.run { self.refreshAnthropicOAuthStatus() } - try? await Task.sleep(nanoseconds: 1_000_000_000) - } - } - } - - func stopAuthMonitoring() { - self.monitoringAuth = false - self.authMonitorTask?.cancel() - self.authMonitorTask = nil - } - func installCLI() async { guard !self.installingCLI else { return } self.installingCLI = true @@ -125,54 +97,4 @@ extension OnboardingView { expected: expected) } } - - func refreshAnthropicOAuthStatus() { - _ = OpenClawOAuthStore.importLegacyAnthropicOAuthIfNeeded() - let previous = self.anthropicAuthDetectedStatus - let status = OpenClawOAuthStore.anthropicOAuthStatus() - self.anthropicAuthDetectedStatus = status - self.anthropicAuthConnected = status.isConnected - - if previous != status { - self.anthropicAuthVerified = false - self.anthropicAuthVerificationAttempted = false - self.anthropicAuthVerificationFailed = false - self.anthropicAuthVerifiedAt = nil - } - } - - @MainActor - func verifyAnthropicOAuthIfNeeded(force: Bool = false) async { - guard self.state.connectionMode == .local else { return } - guard self.anthropicAuthDetectedStatus.isConnected else { return } - if self.anthropicAuthVerified, !force { return } - if self.anthropicAuthVerifying { return } - if self.anthropicAuthVerificationAttempted, !force { return } - - self.anthropicAuthVerificationAttempted = true - self.anthropicAuthVerifying = true - self.anthropicAuthVerificationFailed = false - defer { self.anthropicAuthVerifying = false } - - guard let refresh = OpenClawOAuthStore.loadAnthropicOAuthRefreshToken(), !refresh.isEmpty else { - self.anthropicAuthStatus = "OAuth verification failed: missing refresh token." 
- self.anthropicAuthVerificationFailed = true - return - } - - do { - let updated = try await AnthropicOAuth.refresh(refreshToken: refresh) - try OpenClawOAuthStore.saveAnthropicOAuth(updated) - self.refreshAnthropicOAuthStatus() - self.anthropicAuthVerified = true - self.anthropicAuthVerifiedAt = Date() - self.anthropicAuthVerificationFailed = false - self.anthropicAuthStatus = "OAuth detected and verified." - } catch { - self.anthropicAuthVerified = false - self.anthropicAuthVerifiedAt = nil - self.anthropicAuthVerificationFailed = true - self.anthropicAuthStatus = "OAuth verification failed: \(error.localizedDescription)" - } - } } diff --git a/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift b/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift index ed40bd2ed58..4f942dfe8a4 100644 --- a/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift +++ b/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift @@ -12,8 +12,6 @@ extension OnboardingView { self.welcomePage() case 1: self.connectionPage() - case 2: - self.anthropicAuthPage() case 3: self.wizardPage() case 5: @@ -340,170 +338,6 @@ extension OnboardingView { .buttonStyle(.plain) } - func anthropicAuthPage() -> some View { - self.onboardingPage { - Text("Connect Claude") - .font(.largeTitle.weight(.semibold)) - Text("Give your model the token it needs!") - .font(.body) - .foregroundStyle(.secondary) - .multilineTextAlignment(.center) - .frame(maxWidth: 540) - .fixedSize(horizontal: false, vertical: true) - Text("OpenClaw supports any model — we strongly recommend Opus 4.6 for the best experience.") - .font(.callout) - .foregroundStyle(.secondary) - .multilineTextAlignment(.center) - .frame(maxWidth: 540) - .fixedSize(horizontal: false, vertical: true) - - self.onboardingCard(spacing: 12, padding: 16) { - HStack(alignment: .center, spacing: 10) { - Circle() - .fill(self.anthropicAuthVerified ? Color.green : Color.orange) - .frame(width: 10, height: 10) - Text( - self.anthropicAuthConnected - ? 
(self.anthropicAuthVerified - ? "Claude connected (OAuth) — verified" - : "Claude connected (OAuth)") - : "Not connected yet") - .font(.headline) - Spacer() - } - - if self.anthropicAuthConnected, self.anthropicAuthVerifying { - Text("Verifying OAuth…") - .font(.caption) - .foregroundStyle(.secondary) - .fixedSize(horizontal: false, vertical: true) - } else if !self.anthropicAuthConnected { - Text(self.anthropicAuthDetectedStatus.shortDescription) - .font(.caption) - .foregroundStyle(.secondary) - .fixedSize(horizontal: false, vertical: true) - } else if self.anthropicAuthVerified, let date = self.anthropicAuthVerifiedAt { - Text("Detected working OAuth (\(date.formatted(date: .abbreviated, time: .shortened))).") - .font(.caption) - .foregroundStyle(.secondary) - .fixedSize(horizontal: false, vertical: true) - } - - Text( - "This lets OpenClaw use Claude immediately. Credentials are stored at " + - "`~/.openclaw/credentials/oauth.json` (owner-only).") - .font(.subheadline) - .foregroundStyle(.secondary) - .fixedSize(horizontal: false, vertical: true) - - HStack(spacing: 12) { - Text(OpenClawOAuthStore.oauthURL().path) - .font(.caption) - .foregroundStyle(.secondary) - .lineLimit(1) - .truncationMode(.middle) - - Spacer() - - Button("Reveal") { - NSWorkspace.shared.activateFileViewerSelecting([OpenClawOAuthStore.oauthURL()]) - } - .buttonStyle(.bordered) - - Button("Refresh") { - self.refreshAnthropicOAuthStatus() - } - .buttonStyle(.bordered) - } - - Divider().padding(.vertical, 2) - - HStack(spacing: 12) { - if !self.anthropicAuthVerified { - if self.anthropicAuthConnected { - Button("Verify") { - Task { await self.verifyAnthropicOAuthIfNeeded(force: true) } - } - .buttonStyle(.borderedProminent) - .disabled(self.anthropicAuthBusy || self.anthropicAuthVerifying) - - if self.anthropicAuthVerificationFailed { - Button("Re-auth (OAuth)") { - self.startAnthropicOAuth() - } - .buttonStyle(.bordered) - .disabled(self.anthropicAuthBusy || self.anthropicAuthVerifying) - } 
- } else { - Button { - self.startAnthropicOAuth() - } label: { - if self.anthropicAuthBusy { - ProgressView() - } else { - Text("Open Claude sign-in (OAuth)") - } - } - .buttonStyle(.borderedProminent) - .disabled(self.anthropicAuthBusy) - } - } - } - - if !self.anthropicAuthVerified, self.anthropicAuthPKCE != nil { - VStack(alignment: .leading, spacing: 8) { - Text("Paste the `code#state` value") - .font(.headline) - TextField("code#state", text: self.$anthropicAuthCode) - .textFieldStyle(.roundedBorder) - - Toggle("Auto-detect from clipboard", isOn: self.$anthropicAuthAutoDetectClipboard) - .font(.caption) - .foregroundStyle(.secondary) - .disabled(self.anthropicAuthBusy) - - Toggle("Auto-connect when detected", isOn: self.$anthropicAuthAutoConnectClipboard) - .font(.caption) - .foregroundStyle(.secondary) - .disabled(self.anthropicAuthBusy) - - Button("Connect") { - Task { await self.finishAnthropicOAuth() } - } - .buttonStyle(.bordered) - .disabled( - self.anthropicAuthBusy || - self.anthropicAuthCode.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty) - } - .onReceive(Self.clipboardPoll) { _ in - self.pollAnthropicClipboardIfNeeded() - } - } - - self.onboardingCard(spacing: 8, padding: 12) { - Text("API key (advanced)") - .font(.headline) - Text( - "You can also use an Anthropic API key, but this UI is instructions-only for now " + - "(GUI apps don’t automatically inherit your shell env vars like `ANTHROPIC_API_KEY`).") - .font(.subheadline) - .foregroundStyle(.secondary) - .fixedSize(horizontal: false, vertical: true) - } - .shadow(color: .clear, radius: 0) - .background(Color.clear) - - if let status = self.anthropicAuthStatus { - Text(status) - .font(.caption) - .foregroundStyle(.secondary) - .fixedSize(horizontal: false, vertical: true) - } - } - } - .task { await self.verifyAnthropicOAuthIfNeeded() } - } - func permissionsPage() -> some View { self.onboardingPage { Text("Grant permissions") diff --git 
a/apps/macos/Sources/OpenClaw/OnboardingView+Testing.swift b/apps/macos/Sources/OpenClaw/OnboardingView+Testing.swift index cf8c3d0c78f..2bd9c525ad4 100644 --- a/apps/macos/Sources/OpenClaw/OnboardingView+Testing.swift +++ b/apps/macos/Sources/OpenClaw/OnboardingView+Testing.swift @@ -37,18 +37,9 @@ extension OnboardingView { view.cliStatus = "Installed" view.workspacePath = "/tmp/openclaw" view.workspaceStatus = "Saved workspace" - view.anthropicAuthPKCE = AnthropicOAuth.PKCE(verifier: "verifier", challenge: "challenge") - view.anthropicAuthCode = "code#state" - view.anthropicAuthStatus = "Connected" - view.anthropicAuthDetectedStatus = .connected(expiresAtMs: 1_700_000_000_000) - view.anthropicAuthConnected = true - view.anthropicAuthAutoDetectClipboard = false - view.anthropicAuthAutoConnectClipboard = false - view.state.connectionMode = .local _ = view.welcomePage() _ = view.connectionPage() - _ = view.anthropicAuthPage() _ = view.wizardPage() _ = view.permissionsPage() _ = view.cliPage() diff --git a/apps/macos/Sources/OpenClaw/Resources/Info.plist b/apps/macos/Sources/OpenClaw/Resources/Info.plist index 5abb959dc8e..f1eb6f463ae 100644 --- a/apps/macos/Sources/OpenClaw/Resources/Info.plist +++ b/apps/macos/Sources/OpenClaw/Resources/Info.plist @@ -15,9 +15,9 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.2.25 + 2026.2.26 CFBundleVersion - 202602250 + 202602260 CFBundleIconFile OpenClaw CFBundleURLTypes diff --git a/apps/macos/Sources/OpenClawMacCLI/WizardCommand.swift b/apps/macos/Sources/OpenClawMacCLI/WizardCommand.swift index ebe3e8ae626..f75ef05fdb2 100644 --- a/apps/macos/Sources/OpenClawMacCLI/WizardCommand.swift +++ b/apps/macos/Sources/OpenClawMacCLI/WizardCommand.swift @@ -280,19 +280,17 @@ actor GatewayWizardClient { let connectNonce = try await self.waitForConnectChallenge() let identity = DeviceIdentityStore.loadOrCreate() let signedAtMs = Int(Date().timeIntervalSince1970 * 1000) - let scopesValue = scopes.joined(separator: ",") - 
let payloadParts = [ - "v2", - identity.deviceId, - clientId, - clientMode, - role, - scopesValue, - String(signedAtMs), - self.token ?? "", - connectNonce, - ] - let payload = payloadParts.joined(separator: "|") + let payload = GatewayDeviceAuthPayload.buildV3( + deviceId: identity.deviceId, + clientId: clientId, + clientMode: clientMode, + role: role, + scopes: scopes, + signedAtMs: signedAtMs, + token: self.token, + nonce: connectNonce, + platform: platform, + deviceFamily: "Mac") if let signature = DeviceIdentityStore.signPayload(payload, identity: identity), let publicKey = DeviceIdentityStore.publicKeyBase64Url(identity) { diff --git a/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift b/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift index 4e766514def..a7aaa7d3914 100644 --- a/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift +++ b/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift @@ -408,6 +408,7 @@ public struct SendParams: Codable, Sendable { public let gifplayback: Bool? public let channel: String? public let accountid: String? + public let agentid: String? public let threadid: String? public let sessionkey: String? 
public let idempotencykey: String @@ -420,6 +421,7 @@ public struct SendParams: Codable, Sendable { gifplayback: Bool?, channel: String?, accountid: String?, + agentid: String?, threadid: String?, sessionkey: String?, idempotencykey: String) @@ -431,6 +433,7 @@ public struct SendParams: Codable, Sendable { self.gifplayback = gifplayback self.channel = channel self.accountid = accountid + self.agentid = agentid self.threadid = threadid self.sessionkey = sessionkey self.idempotencykey = idempotencykey @@ -444,6 +447,7 @@ public struct SendParams: Codable, Sendable { case gifplayback = "gifPlayback" case channel case accountid = "accountId" + case agentid = "agentId" case threadid = "threadId" case sessionkey = "sessionKey" case idempotencykey = "idempotencyKey" @@ -2805,6 +2809,9 @@ public struct ExecApprovalsSnapshot: Codable, Sendable { public struct ExecApprovalRequestParams: Codable, Sendable { public let id: String? public let command: String + public let commandargv: [String]? + public let systemrunplanv2: [String: AnyCodable]? + public let env: [String: AnyCodable]? public let cwd: AnyCodable? public let nodeid: AnyCodable? public let host: AnyCodable? @@ -2813,12 +2820,19 @@ public struct ExecApprovalRequestParams: Codable, Sendable { public let agentid: AnyCodable? public let resolvedpath: AnyCodable? public let sessionkey: AnyCodable? + public let turnsourcechannel: AnyCodable? + public let turnsourceto: AnyCodable? + public let turnsourceaccountid: AnyCodable? + public let turnsourcethreadid: AnyCodable? public let timeoutms: Int? public let twophase: Bool? 
public init( id: String?, command: String, + commandargv: [String]?, + systemrunplanv2: [String: AnyCodable]?, + env: [String: AnyCodable]?, cwd: AnyCodable?, nodeid: AnyCodable?, host: AnyCodable?, @@ -2827,11 +2841,18 @@ public struct ExecApprovalRequestParams: Codable, Sendable { agentid: AnyCodable?, resolvedpath: AnyCodable?, sessionkey: AnyCodable?, + turnsourcechannel: AnyCodable?, + turnsourceto: AnyCodable?, + turnsourceaccountid: AnyCodable?, + turnsourcethreadid: AnyCodable?, timeoutms: Int?, twophase: Bool?) { self.id = id self.command = command + self.commandargv = commandargv + self.systemrunplanv2 = systemrunplanv2 + self.env = env self.cwd = cwd self.nodeid = nodeid self.host = host @@ -2840,6 +2861,10 @@ public struct ExecApprovalRequestParams: Codable, Sendable { self.agentid = agentid self.resolvedpath = resolvedpath self.sessionkey = sessionkey + self.turnsourcechannel = turnsourcechannel + self.turnsourceto = turnsourceto + self.turnsourceaccountid = turnsourceaccountid + self.turnsourcethreadid = turnsourcethreadid self.timeoutms = timeoutms self.twophase = twophase } @@ -2847,6 +2872,9 @@ public struct ExecApprovalRequestParams: Codable, Sendable { private enum CodingKeys: String, CodingKey { case id case command + case commandargv = "commandArgv" + case systemrunplanv2 = "systemRunPlanV2" + case env case cwd case nodeid = "nodeId" case host @@ -2855,6 +2883,10 @@ public struct ExecApprovalRequestParams: Codable, Sendable { case agentid = "agentId" case resolvedpath = "resolvedPath" case sessionkey = "sessionKey" + case turnsourcechannel = "turnSourceChannel" + case turnsourceto = "turnSourceTo" + case turnsourceaccountid = "turnSourceAccountId" + case turnsourcethreadid = "turnSourceThreadId" case timeoutms = "timeoutMs" case twophase = "twoPhase" } @@ -2968,6 +3000,7 @@ public struct DevicePairRequestedEvent: Codable, Sendable { public let publickey: String public let displayname: String? public let platform: String? 
+ public let devicefamily: String? public let clientid: String? public let clientmode: String? public let role: String? @@ -2984,6 +3017,7 @@ public struct DevicePairRequestedEvent: Codable, Sendable { publickey: String, displayname: String?, platform: String?, + devicefamily: String?, clientid: String?, clientmode: String?, role: String?, @@ -2999,6 +3033,7 @@ public struct DevicePairRequestedEvent: Codable, Sendable { self.publickey = publickey self.displayname = displayname self.platform = platform + self.devicefamily = devicefamily self.clientid = clientid self.clientmode = clientmode self.role = role @@ -3016,6 +3051,7 @@ public struct DevicePairRequestedEvent: Codable, Sendable { case publickey = "publicKey" case displayname = "displayName" case platform + case devicefamily = "deviceFamily" case clientid = "clientId" case clientmode = "clientMode" case role diff --git a/apps/macos/Tests/OpenClawIPCTests/AnthropicAuthControlsSmokeTests.swift b/apps/macos/Tests/OpenClawIPCTests/AnthropicAuthControlsSmokeTests.swift deleted file mode 100644 index 84c61833932..00000000000 --- a/apps/macos/Tests/OpenClawIPCTests/AnthropicAuthControlsSmokeTests.swift +++ /dev/null @@ -1,29 +0,0 @@ -import Testing -@testable import OpenClaw - -@Suite(.serialized) -@MainActor -struct AnthropicAuthControlsSmokeTests { - @Test func anthropicAuthControlsBuildsBodyLocal() { - let pkce = AnthropicOAuth.PKCE(verifier: "verifier", challenge: "challenge") - let view = AnthropicAuthControls( - connectionMode: .local, - oauthStatus: .connected(expiresAtMs: 1_700_000_000_000), - pkce: pkce, - code: "code#state", - statusText: "Detected code", - autoDetectClipboard: false, - autoConnectClipboard: false) - _ = view.body - } - - @Test func anthropicAuthControlsBuildsBodyRemote() { - let view = AnthropicAuthControls( - connectionMode: .remote, - oauthStatus: .missingFile, - pkce: nil, - code: "", - statusText: nil) - _ = view.body - } -} diff --git 
a/apps/macos/Tests/OpenClawIPCTests/AnthropicAuthResolverTests.swift b/apps/macos/Tests/OpenClawIPCTests/AnthropicAuthResolverTests.swift deleted file mode 100644 index c41b7f64be4..00000000000 --- a/apps/macos/Tests/OpenClawIPCTests/AnthropicAuthResolverTests.swift +++ /dev/null @@ -1,52 +0,0 @@ -import Foundation -import Testing -@testable import OpenClaw - -@Suite -struct AnthropicAuthResolverTests { - @Test - func prefersOAuthFileOverEnv() throws { - let dir = FileManager().temporaryDirectory - .appendingPathComponent("openclaw-oauth-\(UUID().uuidString)", isDirectory: true) - try FileManager().createDirectory(at: dir, withIntermediateDirectories: true) - let oauthFile = dir.appendingPathComponent("oauth.json") - let payload = [ - "anthropic": [ - "type": "oauth", - "refresh": "r1", - "access": "a1", - "expires": 1_234_567_890, - ], - ] - let data = try JSONSerialization.data(withJSONObject: payload, options: [.prettyPrinted, .sortedKeys]) - try data.write(to: oauthFile, options: [.atomic]) - - let status = OpenClawOAuthStore.anthropicOAuthStatus(at: oauthFile) - let mode = AnthropicAuthResolver.resolve(environment: [ - "ANTHROPIC_API_KEY": "sk-ant-ignored", - ], oauthStatus: status) - #expect(mode == .oauthFile) - } - - @Test - func reportsOAuthEnvWhenPresent() { - let mode = AnthropicAuthResolver.resolve(environment: [ - "ANTHROPIC_OAUTH_TOKEN": "token", - ], oauthStatus: .missingFile) - #expect(mode == .oauthEnv) - } - - @Test - func reportsAPIKeyEnvWhenPresent() { - let mode = AnthropicAuthResolver.resolve(environment: [ - "ANTHROPIC_API_KEY": "sk-ant-key", - ], oauthStatus: .missingFile) - #expect(mode == .apiKeyEnv) - } - - @Test - func reportsMissingWhenNothingConfigured() { - let mode = AnthropicAuthResolver.resolve(environment: [:], oauthStatus: .missingFile) - #expect(mode == .missing) - } -} diff --git a/apps/macos/Tests/OpenClawIPCTests/AnthropicOAuthCodeStateTests.swift b/apps/macos/Tests/OpenClawIPCTests/AnthropicOAuthCodeStateTests.swift deleted 
file mode 100644 index 3d337c2b279..00000000000 --- a/apps/macos/Tests/OpenClawIPCTests/AnthropicOAuthCodeStateTests.swift +++ /dev/null @@ -1,31 +0,0 @@ -import Testing -@testable import OpenClaw - -@Suite -struct AnthropicOAuthCodeStateTests { - @Test - func parsesRawToken() { - let parsed = AnthropicOAuthCodeState.parse(from: "abcDEF1234#stateXYZ9876") - #expect(parsed == .init(code: "abcDEF1234", state: "stateXYZ9876")) - } - - @Test - func parsesBacktickedToken() { - let parsed = AnthropicOAuthCodeState.parse(from: "`abcDEF1234#stateXYZ9876`") - #expect(parsed == .init(code: "abcDEF1234", state: "stateXYZ9876")) - } - - @Test - func parsesCallbackURL() { - let raw = "https://console.anthropic.com/oauth/code/callback?code=abcDEF1234&state=stateXYZ9876" - let parsed = AnthropicOAuthCodeState.parse(from: raw) - #expect(parsed == .init(code: "abcDEF1234", state: "stateXYZ9876")) - } - - @Test - func extractsFromSurroundingText() { - let raw = "Paste the code#state value: abcDEF1234#stateXYZ9876 then return." 
- let parsed = AnthropicOAuthCodeState.parse(from: raw) - #expect(parsed == .init(code: "abcDEF1234", state: "stateXYZ9876")) - } -} diff --git a/apps/macos/Tests/OpenClawIPCTests/OpenClawOAuthStoreTests.swift b/apps/macos/Tests/OpenClawIPCTests/OpenClawOAuthStoreTests.swift deleted file mode 100644 index b34e9c3008a..00000000000 --- a/apps/macos/Tests/OpenClawIPCTests/OpenClawOAuthStoreTests.swift +++ /dev/null @@ -1,97 +0,0 @@ -import Foundation -import Testing -@testable import OpenClaw - -@Suite -struct OpenClawOAuthStoreTests { - @Test - func returnsMissingWhenFileAbsent() { - let url = FileManager().temporaryDirectory - .appendingPathComponent("openclaw-oauth-\(UUID().uuidString)") - .appendingPathComponent("oauth.json") - #expect(OpenClawOAuthStore.anthropicOAuthStatus(at: url) == .missingFile) - } - - @Test - func usesEnvOverrideForOpenClawOAuthDir() throws { - let key = "OPENCLAW_OAUTH_DIR" - let previous = ProcessInfo.processInfo.environment[key] - defer { - if let previous { - setenv(key, previous, 1) - } else { - unsetenv(key) - } - } - - let dir = FileManager().temporaryDirectory - .appendingPathComponent("openclaw-oauth-\(UUID().uuidString)", isDirectory: true) - setenv(key, dir.path, 1) - - #expect(OpenClawOAuthStore.oauthDir().standardizedFileURL == dir.standardizedFileURL) - } - - @Test - func acceptsPiFormatTokens() throws { - let url = try self.writeOAuthFile([ - "anthropic": [ - "type": "oauth", - "refresh": "r1", - "access": "a1", - "expires": 1_234_567_890, - ], - ]) - - #expect(OpenClawOAuthStore.anthropicOAuthStatus(at: url).isConnected) - } - - @Test - func acceptsTokenKeyVariants() throws { - let url = try self.writeOAuthFile([ - "anthropic": [ - "type": "oauth", - "refresh_token": "r1", - "access_token": "a1", - ], - ]) - - #expect(OpenClawOAuthStore.anthropicOAuthStatus(at: url).isConnected) - } - - @Test - func reportsMissingProviderEntry() throws { - let url = try self.writeOAuthFile([ - "other": [ - "type": "oauth", - "refresh": "r1", 
- "access": "a1", - ], - ]) - - #expect(OpenClawOAuthStore.anthropicOAuthStatus(at: url) == .missingProviderEntry) - } - - @Test - func reportsMissingTokens() throws { - let url = try self.writeOAuthFile([ - "anthropic": [ - "type": "oauth", - "refresh": "", - "access": "a1", - ], - ]) - - #expect(OpenClawOAuthStore.anthropicOAuthStatus(at: url) == .missingTokens) - } - - private func writeOAuthFile(_ json: [String: Any]) throws -> URL { - let dir = FileManager().temporaryDirectory - .appendingPathComponent("openclaw-oauth-\(UUID().uuidString)", isDirectory: true) - try FileManager().createDirectory(at: dir, withIntermediateDirectories: true) - - let url = dir.appendingPathComponent("oauth.json") - let data = try JSONSerialization.data(withJSONObject: json, options: [.prettyPrinted, .sortedKeys]) - try data.write(to: url, options: [.atomic]) - return url - } -} diff --git a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatMarkdownPreprocessor.swift b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatMarkdownPreprocessor.swift index a96e288d7f4..0b012586672 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatMarkdownPreprocessor.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatMarkdownPreprocessor.swift @@ -105,7 +105,9 @@ enum ChatMarkdownPreprocessor { outputLines.append(currentLine) } - return outputLines.joined(separator: "\n").replacingOccurrences(of: #"^\n+"#, with: "", options: .regularExpression) + return outputLines + .joined(separator: "\n") + .replacingOccurrences(of: #"^\n+"#, with: "", options: .regularExpression) } private static func stripPrefixedTimestamps(_ raw: String) -> String { diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/DeviceAuthPayload.swift b/apps/shared/OpenClawKit/Sources/OpenClawKit/DeviceAuthPayload.swift new file mode 100644 index 00000000000..858ef457c7e --- /dev/null +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/DeviceAuthPayload.swift @@ -0,0 +1,55 @@ +import Foundation + +public enum 
GatewayDeviceAuthPayload { + public static func buildV3( + deviceId: String, + clientId: String, + clientMode: String, + role: String, + scopes: [String], + signedAtMs: Int, + token: String?, + nonce: String, + platform: String?, + deviceFamily: String?) -> String + { + let scopeString = scopes.joined(separator: ",") + let authToken = token ?? "" + let normalizedPlatform = normalizeMetadataField(platform) + let normalizedDeviceFamily = normalizeMetadataField(deviceFamily) + return [ + "v3", + deviceId, + clientId, + clientMode, + role, + scopeString, + String(signedAtMs), + authToken, + nonce, + normalizedPlatform, + normalizedDeviceFamily, + ].joined(separator: "|") + } + + static func normalizeMetadataField(_ value: String?) -> String { + guard let value else { return "" } + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines) + if trimmed.isEmpty { + return "" + } + // Keep cross-runtime normalization deterministic (TS/Swift/Kotlin): + // lowercase ASCII A-Z only for auth payload metadata fields. + var output = String() + output.reserveCapacity(trimmed.count) + for scalar in trimmed.unicodeScalars { + let codePoint = scalar.value + if codePoint >= 65, codePoint <= 90, let lowered = UnicodeScalar(codePoint + 32) { + output.unicodeScalars.append(lowered) + } else { + output.unicodeScalars.append(scalar) + } + } + return output + } +} diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift index 30935df79d4..e8a53412cd1 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift @@ -398,20 +398,18 @@ public actor GatewayChannelActor { } let signedAtMs = Int(Date().timeIntervalSince1970 * 1000) let connectNonce = try await self.waitForConnectChallenge() - let scopesValue = scopes.joined(separator: ",") - let payloadParts = [ - "v2", - identity?.deviceId ?? 
"", - clientId, - clientMode, - role, - scopesValue, - String(signedAtMs), - authToken ?? "", - connectNonce, - ] - let payload = payloadParts.joined(separator: "|") if includeDeviceIdentity, let identity { + let payload = GatewayDeviceAuthPayload.buildV3( + deviceId: identity.deviceId, + clientId: clientId, + clientMode: clientMode, + role: role, + scopes: scopes, + signedAtMs: signedAtMs, + token: authToken, + nonce: connectNonce, + platform: platform, + deviceFamily: InstanceIdentity.deviceFamily) if let signature = DeviceIdentityStore.signPayload(payload, identity: identity), let publicKey = DeviceIdentityStore.publicKeyBase64Url(identity) { let device: [String: ProtoAnyCodable] = [ diff --git a/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift b/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift index 4e766514def..a7aaa7d3914 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift @@ -408,6 +408,7 @@ public struct SendParams: Codable, Sendable { public let gifplayback: Bool? public let channel: String? public let accountid: String? + public let agentid: String? public let threadid: String? public let sessionkey: String? 
public let idempotencykey: String @@ -420,6 +421,7 @@ public struct SendParams: Codable, Sendable { gifplayback: Bool?, channel: String?, accountid: String?, + agentid: String?, threadid: String?, sessionkey: String?, idempotencykey: String) @@ -431,6 +433,7 @@ public struct SendParams: Codable, Sendable { self.gifplayback = gifplayback self.channel = channel self.accountid = accountid + self.agentid = agentid self.threadid = threadid self.sessionkey = sessionkey self.idempotencykey = idempotencykey @@ -444,6 +447,7 @@ public struct SendParams: Codable, Sendable { case gifplayback = "gifPlayback" case channel case accountid = "accountId" + case agentid = "agentId" case threadid = "threadId" case sessionkey = "sessionKey" case idempotencykey = "idempotencyKey" @@ -2805,6 +2809,9 @@ public struct ExecApprovalsSnapshot: Codable, Sendable { public struct ExecApprovalRequestParams: Codable, Sendable { public let id: String? public let command: String + public let commandargv: [String]? + public let systemrunplanv2: [String: AnyCodable]? + public let env: [String: AnyCodable]? public let cwd: AnyCodable? public let nodeid: AnyCodable? public let host: AnyCodable? @@ -2813,12 +2820,19 @@ public struct ExecApprovalRequestParams: Codable, Sendable { public let agentid: AnyCodable? public let resolvedpath: AnyCodable? public let sessionkey: AnyCodable? + public let turnsourcechannel: AnyCodable? + public let turnsourceto: AnyCodable? + public let turnsourceaccountid: AnyCodable? + public let turnsourcethreadid: AnyCodable? public let timeoutms: Int? public let twophase: Bool? 
public init( id: String?, command: String, + commandargv: [String]?, + systemrunplanv2: [String: AnyCodable]?, + env: [String: AnyCodable]?, cwd: AnyCodable?, nodeid: AnyCodable?, host: AnyCodable?, @@ -2827,11 +2841,18 @@ public struct ExecApprovalRequestParams: Codable, Sendable { agentid: AnyCodable?, resolvedpath: AnyCodable?, sessionkey: AnyCodable?, + turnsourcechannel: AnyCodable?, + turnsourceto: AnyCodable?, + turnsourceaccountid: AnyCodable?, + turnsourcethreadid: AnyCodable?, timeoutms: Int?, twophase: Bool?) { self.id = id self.command = command + self.commandargv = commandargv + self.systemrunplanv2 = systemrunplanv2 + self.env = env self.cwd = cwd self.nodeid = nodeid self.host = host @@ -2840,6 +2861,10 @@ public struct ExecApprovalRequestParams: Codable, Sendable { self.agentid = agentid self.resolvedpath = resolvedpath self.sessionkey = sessionkey + self.turnsourcechannel = turnsourcechannel + self.turnsourceto = turnsourceto + self.turnsourceaccountid = turnsourceaccountid + self.turnsourcethreadid = turnsourcethreadid self.timeoutms = timeoutms self.twophase = twophase } @@ -2847,6 +2872,9 @@ public struct ExecApprovalRequestParams: Codable, Sendable { private enum CodingKeys: String, CodingKey { case id case command + case commandargv = "commandArgv" + case systemrunplanv2 = "systemRunPlanV2" + case env case cwd case nodeid = "nodeId" case host @@ -2855,6 +2883,10 @@ public struct ExecApprovalRequestParams: Codable, Sendable { case agentid = "agentId" case resolvedpath = "resolvedPath" case sessionkey = "sessionKey" + case turnsourcechannel = "turnSourceChannel" + case turnsourceto = "turnSourceTo" + case turnsourceaccountid = "turnSourceAccountId" + case turnsourcethreadid = "turnSourceThreadId" case timeoutms = "timeoutMs" case twophase = "twoPhase" } @@ -2968,6 +3000,7 @@ public struct DevicePairRequestedEvent: Codable, Sendable { public let publickey: String public let displayname: String? public let platform: String? 
+ public let devicefamily: String? public let clientid: String? public let clientmode: String? public let role: String? @@ -2984,6 +3017,7 @@ public struct DevicePairRequestedEvent: Codable, Sendable { publickey: String, displayname: String?, platform: String?, + devicefamily: String?, clientid: String?, clientmode: String?, role: String?, @@ -2999,6 +3033,7 @@ public struct DevicePairRequestedEvent: Codable, Sendable { self.publickey = publickey self.displayname = displayname self.platform = platform + self.devicefamily = devicefamily self.clientid = clientid self.clientmode = clientmode self.role = role @@ -3016,6 +3051,7 @@ public struct DevicePairRequestedEvent: Codable, Sendable { case publickey = "publicKey" case displayname = "displayName" case platform + case devicefamily = "deviceFamily" case clientid = "clientId" case clientmode = "clientMode" case role diff --git a/apps/shared/OpenClawKit/Tests/OpenClawKitTests/DeviceAuthPayloadTests.swift b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/DeviceAuthPayloadTests.swift new file mode 100644 index 00000000000..46a814f81a6 --- /dev/null +++ b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/DeviceAuthPayloadTests.swift @@ -0,0 +1,30 @@ +import Testing +@testable import OpenClawKit + +@Suite("DeviceAuthPayload") +struct DeviceAuthPayloadTests { + @Test("builds canonical v3 payload vector") + func buildsCanonicalV3PayloadVector() { + let payload = GatewayDeviceAuthPayload.buildV3( + deviceId: "dev-1", + clientId: "openclaw-macos", + clientMode: "ui", + role: "operator", + scopes: ["operator.admin", "operator.read"], + signedAtMs: 1_700_000_000_000, + token: "tok-123", + nonce: "nonce-abc", + platform: " IOS ", + deviceFamily: " iPhone ") + #expect( + payload + == "v3|dev-1|openclaw-macos|ui|operator|operator.admin,operator.read|1700000000000|tok-123|nonce-abc|ios|iphone") + } + + @Test("normalizes metadata with ASCII-only lowercase") + func normalizesMetadataWithAsciiLowercase() { + 
#expect(GatewayDeviceAuthPayload.normalizeMetadataField(" İOS ") == "İos") + #expect(GatewayDeviceAuthPayload.normalizeMetadataField(" MAC ") == "mac") + #expect(GatewayDeviceAuthPayload.normalizeMetadataField(nil) == "") + } +} diff --git a/assets/chrome-extension/background.js b/assets/chrome-extension/background.js index 60f50d6551e..c78f2c7c452 100644 --- a/assets/chrome-extension/background.js +++ b/assets/chrome-extension/background.js @@ -13,6 +13,9 @@ const BADGE = { let relayWs = null /** @type {Promise|null} */ let relayConnectPromise = null +let relayGatewayToken = '' +/** @type {string|null} */ +let relayConnectRequestId = null let nextSession = 1 @@ -143,6 +146,13 @@ async function ensureRelayConnection() { const ws = new WebSocket(wsUrl) relayWs = ws + relayGatewayToken = gatewayToken + // Bind message handler before open so an immediate first frame (for example + // gateway connect.challenge) cannot be missed. + ws.onmessage = (event) => { + if (ws !== relayWs) return + void whenReady(() => onRelayMessage(String(event.data || ''))) + } await new Promise((resolve, reject) => { const t = setTimeout(() => reject(new Error('WebSocket connect timeout')), 5000) @@ -162,10 +172,6 @@ async function ensureRelayConnection() { // Bind permanent handlers. Guard against stale socket: if this WS was // replaced before its close fires, the handler is a no-op. - ws.onmessage = (event) => { - if (ws !== relayWs) return - void whenReady(() => onRelayMessage(String(event.data || ''))) - } ws.onclose = () => { if (ws !== relayWs) return onRelayClosed('closed') @@ -188,6 +194,8 @@ async function ensureRelayConnection() { // Debugger sessions are kept alive so they survive transient WS drops. 
function onRelayClosed(reason) { relayWs = null + relayGatewayToken = '' + relayConnectRequestId = null for (const [id, p] of pending.entries()) { pending.delete(id) @@ -308,6 +316,33 @@ function sendToRelay(payload) { ws.send(JSON.stringify(payload)) } +function ensureGatewayHandshakeStarted(payload) { + if (relayConnectRequestId) return + const nonce = typeof payload?.nonce === 'string' ? payload.nonce.trim() : '' + relayConnectRequestId = `ext-connect-${Date.now()}-${Math.random().toString(16).slice(2, 8)}` + sendToRelay({ + type: 'req', + id: relayConnectRequestId, + method: 'connect', + params: { + minProtocol: 3, + maxProtocol: 3, + client: { + id: 'chrome-relay-extension', + version: '1.0.0', + platform: 'chrome-extension', + mode: 'webchat', + }, + role: 'operator', + scopes: ['operator.read', 'operator.write'], + caps: [], + commands: [], + nonce: nonce || undefined, + auth: relayGatewayToken ? { token: relayGatewayToken } : undefined, + }, + }) +} + async function maybeOpenHelpOnce() { try { const stored = await chrome.storage.local.get(['helpOnErrorShown']) @@ -349,6 +384,33 @@ async function onRelayMessage(text) { return } + if (msg && msg.type === 'event' && msg.event === 'connect.challenge') { + try { + ensureGatewayHandshakeStarted(msg.payload) + } catch (err) { + console.warn('gateway connect handshake start failed', err instanceof Error ? 
err.message : String(err)) + relayConnectRequestId = null + const ws = relayWs + if (ws && ws.readyState === WebSocket.OPEN) { + ws.close(1008, 'gateway connect failed') + } + } + return + } + + if (msg && msg.type === 'res' && relayConnectRequestId && msg.id === relayConnectRequestId) { + relayConnectRequestId = null + if (!msg.ok) { + const detail = msg?.error?.message || msg?.error || 'gateway connect failed' + console.warn('gateway connect handshake rejected', String(detail)) + const ws = relayWs + if (ws && ws.readyState === WebSocket.OPEN) { + ws.close(1008, 'gateway connect failed') + } + } + return + } + if (msg && msg.method === 'ping') { try { sendToRelay({ method: 'pong' }) diff --git a/changelog/fragments/README.md b/changelog/fragments/README.md new file mode 100644 index 00000000000..93bb5b65d70 --- /dev/null +++ b/changelog/fragments/README.md @@ -0,0 +1,13 @@ +# Changelog Fragments + +Use this directory when a PR should not edit `CHANGELOG.md` directly. + +- One fragment file per PR. +- File name recommendation: `pr-.md`. +- Include at least one line with both `#` and `thanks @`. + +Example: + +```md +- Fix LINE monitor lifecycle wait ownership (#27001) (thanks @alice) +``` diff --git a/docker-setup.sh b/docker-setup.sh index 8c67dc0962d..1f6e51cd75d 100755 --- a/docker-setup.sh +++ b/docker-setup.sh @@ -20,6 +20,78 @@ require_cmd() { fi } +read_config_gateway_token() { + local config_path="$OPENCLAW_CONFIG_DIR/openclaw.json" + if [[ ! 
-f "$config_path" ]]; then + return 0 + fi + if command -v python3 >/dev/null 2>&1; then + python3 - "$config_path" <<'PY' +import json +import sys + +path = sys.argv[1] +try: + with open(path, "r", encoding="utf-8") as f: + cfg = json.load(f) +except Exception: + raise SystemExit(0) + +gateway = cfg.get("gateway") +if not isinstance(gateway, dict): + raise SystemExit(0) +auth = gateway.get("auth") +if not isinstance(auth, dict): + raise SystemExit(0) +token = auth.get("token") +if isinstance(token, str): + token = token.strip() + if token: + print(token) +PY + return 0 + fi + if command -v node >/dev/null 2>&1; then + node - "$config_path" <<'NODE' +const fs = require("node:fs"); +const configPath = process.argv[2]; +try { + const cfg = JSON.parse(fs.readFileSync(configPath, "utf8")); + const token = cfg?.gateway?.auth?.token; + if (typeof token === "string" && token.trim().length > 0) { + process.stdout.write(token.trim()); + } +} catch { + // Keep docker-setup resilient when config parsing fails. +} +NODE + fi +} + +ensure_control_ui_allowed_origins() { + if [[ "${OPENCLAW_GATEWAY_BIND}" == "loopback" ]]; then + return 0 + fi + + local allowed_origin_json + local current_allowed_origins + allowed_origin_json="$(printf '["http://127.0.0.1:%s"]' "$OPENCLAW_GATEWAY_PORT")" + current_allowed_origins="$( + docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \ + config get gateway.controlUi.allowedOrigins 2>/dev/null || true + )" + current_allowed_origins="${current_allowed_origins//$'\r'/}" + + if [[ -n "$current_allowed_origins" && "$current_allowed_origins" != "null" && "$current_allowed_origins" != "[]" ]]; then + echo "Control UI allowlist already configured; leaving gateway.controlUi.allowedOrigins unchanged." 
+ return 0 + fi + + docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \ + config set gateway.controlUi.allowedOrigins "$allowed_origin_json" --strict-json >/dev/null + echo "Set gateway.controlUi.allowedOrigins to $allowed_origin_json for non-loopback bind." +} + contains_disallowed_chars() { local value="$1" [[ "$value" == *$'\n'* || "$value" == *$'\r'* || "$value" == *$'\t'* ]] @@ -97,7 +169,11 @@ export OPENCLAW_EXTRA_MOUNTS="$EXTRA_MOUNTS" export OPENCLAW_HOME_VOLUME="$HOME_VOLUME_NAME" if [[ -z "${OPENCLAW_GATEWAY_TOKEN:-}" ]]; then - if command -v openssl >/dev/null 2>&1; then + EXISTING_CONFIG_TOKEN="$(read_config_gateway_token || true)" + if [[ -n "$EXISTING_CONFIG_TOKEN" ]]; then + OPENCLAW_GATEWAY_TOKEN="$EXISTING_CONFIG_TOKEN" + echo "Reusing gateway token from $OPENCLAW_CONFIG_DIR/openclaw.json" + elif command -v openssl >/dev/null 2>&1; then OPENCLAW_GATEWAY_TOKEN="$(openssl rand -hex 32)" else OPENCLAW_GATEWAY_TOKEN="$(python3 - <<'PY' @@ -247,12 +323,20 @@ upsert_env "$ENV_FILE" \ OPENCLAW_HOME_VOLUME \ OPENCLAW_DOCKER_APT_PACKAGES -echo "==> Building Docker image: $IMAGE_NAME" -docker build \ - --build-arg "OPENCLAW_DOCKER_APT_PACKAGES=${OPENCLAW_DOCKER_APT_PACKAGES}" \ - -t "$IMAGE_NAME" \ - -f "$ROOT_DIR/Dockerfile" \ - "$ROOT_DIR" +if [[ "$IMAGE_NAME" == "openclaw:local" ]]; then + echo "==> Building Docker image: $IMAGE_NAME" + docker build \ + --build-arg "OPENCLAW_DOCKER_APT_PACKAGES=${OPENCLAW_DOCKER_APT_PACKAGES}" \ + -t "$IMAGE_NAME" \ + -f "$ROOT_DIR/Dockerfile" \ + "$ROOT_DIR" +else + echo "==> Pulling Docker image: $IMAGE_NAME" + if ! docker pull "$IMAGE_NAME"; then + echo "ERROR: Failed to pull image $IMAGE_NAME. Please check the image name and your access permissions." 
>&2 + exit 1 + fi +fi echo "" echo "==> Onboarding (interactive)" @@ -265,6 +349,10 @@ echo " - Install Gateway daemon: No" echo "" docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli onboard --no-install-daemon +echo "" +echo "==> Control UI origin allowlist" +ensure_control_ui_allowed_origins + echo "" echo "==> Provider setup (optional)" echo "WhatsApp (QR):" diff --git a/docs/channels/discord.md b/docs/channels/discord.md index 31913842e4d..58483ef22b6 100644 --- a/docs/channels/discord.md +++ b/docs/channels/discord.md @@ -376,6 +376,12 @@ Example: If DM policy is not open, unknown users are blocked (or prompted for pairing in `pairing` mode). + Multi-account precedence: + + - `channels.discord.accounts.default.allowFrom` applies only to the `default` account. + - Named accounts inherit `channels.discord.allowFrom` when their own `allowFrom` is unset. + - Named accounts do not inherit `channels.discord.accounts.default.allowFrom`. + DM target format for delivery: - `user:` @@ -665,9 +671,10 @@ Default slash command settings: - `session.threadBindings.*` sets global defaults. - `channels.discord.threadBindings.*` overrides Discord behavior. - `spawnSubagentSessions` must be true to auto-create/bind threads for `sessions_spawn({ thread: true })`. + - `spawnAcpSessions` must be true to auto-create/bind threads for ACP (`/acp spawn ... --thread ...` or `sessions_spawn({ runtime: "acp", thread: true })`). - If thread bindings are disabled for an account, `/focus` and related thread binding operations are unavailable. - See [Sub-agents](/tools/subagents) and [Configuration Reference](/gateway/configuration-reference). + See [Sub-agents](/tools/subagents), [ACP Agents](/tools/acp-agents), and [Configuration Reference](/gateway/configuration-reference). 
diff --git a/docs/channels/googlechat.md b/docs/channels/googlechat.md index 13729257fe7..8281d0fb0d2 100644 --- a/docs/channels/googlechat.md +++ b/docs/channels/googlechat.md @@ -166,6 +166,7 @@ Use these identifiers for delivery and allowlists: googlechat: { enabled: true, serviceAccountFile: "/path/to/service-account.json", + // or serviceAccountRef: { source: "file", provider: "filemain", id: "/channels/googlechat/serviceAccount" } audienceType: "app-url", audience: "https://gateway.example.com/googlechat", webhookPath: "/googlechat", @@ -194,12 +195,15 @@ Use these identifiers for delivery and allowlists: Notes: - Service account credentials can also be passed inline with `serviceAccount` (JSON string). +- `serviceAccountRef` is also supported (env/file SecretRef), including per-account refs under `channels.googlechat.accounts..serviceAccountRef`. - Default webhook path is `/googlechat` if `webhookPath` isn’t set. - `dangerouslyAllowNameMatching` re-enables mutable email principal matching for allowlists (break-glass compatibility mode). - Reactions are available via the `reactions` tool and `channels action` when `actions.reactions` is enabled. - `typingIndicator` supports `none`, `message` (default), and `reaction` (reaction requires user OAuth). - Attachments are downloaded through the Chat API and stored in the media pipeline (size capped by `mediaMaxMb`). +Secrets reference details: [Secrets Management](/gateway/secrets). 
+ ## Troubleshooting ### 405 Method Not Allowed diff --git a/docs/channels/grammy.md b/docs/channels/grammy.md deleted file mode 100644 index 25c197116f6..00000000000 --- a/docs/channels/grammy.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -summary: "Telegram Bot API integration via grammY with setup notes" -read_when: - - Working on Telegram or grammY pathways -title: grammY ---- - -# grammY Integration (Telegram Bot API) - -# Why grammY - -- TS-first Bot API client with built-in long-poll + webhook helpers, middleware, error handling, rate limiter. -- Cleaner media helpers than hand-rolling fetch + FormData; supports all Bot API methods. -- Extensible: proxy support via custom fetch, session middleware (optional), type-safe context. - -# What we shipped - -- **Single client path:** fetch-based implementation removed; grammY is now the sole Telegram client (send + gateway) with the grammY throttler enabled by default. -- **Gateway:** `monitorTelegramProvider` builds a grammY `Bot`, wires mention/allowlist gating, media download via `getFile`/`download`, and delivers replies with `sendMessage/sendPhoto/sendVideo/sendAudio/sendDocument`. Supports long-poll or webhook via `webhookCallback`. -- **Proxy:** optional `channels.telegram.proxy` uses `undici.ProxyAgent` through grammY’s `client.baseFetch`. -- **Webhook support:** `webhook-set.ts` wraps `setWebhook/deleteWebhook`; `webhook.ts` hosts the callback with health + graceful shutdown. Gateway enables webhook mode when `channels.telegram.webhookUrl` + `channels.telegram.webhookSecret` are set (otherwise it long-polls). -- **Sessions:** direct chats collapse into the agent main session (`agent::`); groups use `agent::telegram:group:`; replies route back to the same channel. 
-- **Config knobs:** `channels.telegram.botToken`, `channels.telegram.dmPolicy`, `channels.telegram.groups` (allowlist + mention defaults), `channels.telegram.allowFrom`, `channels.telegram.groupAllowFrom`, `channels.telegram.groupPolicy`, `channels.telegram.mediaMaxMb`, `channels.telegram.linkPreview`, `channels.telegram.proxy`, `channels.telegram.webhookSecret`, `channels.telegram.webhookUrl`, `channels.telegram.webhookHost`. -- **Live stream preview:** `channels.telegram.streaming` (`off | partial | block | progress`) sends a temporary message and updates it with `editMessageText`. This is separate from channel block streaming. -- **Tests:** grammy mocks cover DM + group mention gating and outbound send; more media/webhook fixtures still welcome. - -Open questions - -- Optional grammY plugins (throttler) if we hit Bot API 429s. -- Add more structured media tests (stickers, voice notes). -- Make webhook listen port configurable (currently fixed to 8787 unless wired through the gateway). diff --git a/docs/channels/groups.md b/docs/channels/groups.md index 8b8af64b94c..3f9df076454 100644 --- a/docs/channels/groups.md +++ b/docs/channels/groups.md @@ -184,6 +184,7 @@ Notes: - `groupPolicy` is separate from mention-gating (which requires @mentions). - WhatsApp/Telegram/Signal/iMessage/Microsoft Teams/Zalo: use `groupAllowFrom` (fallback: explicit `allowFrom`). +- DM pairing approvals (`*-allowFrom` store entries) apply to DM access only; group sender authorization stays explicit to group allowlists. - Discord: allowlist uses `channels.discord.guilds..channels`. - Slack: allowlist uses `channels.slack.channels`. - Matrix: allowlist uses `channels.matrix.groups` (room IDs, aliases, or names). Use `channels.matrix.groupAllowFrom` to restrict senders; per-room `users` allowlists are also supported. 
diff --git a/docs/channels/index.md b/docs/channels/index.md index f5ae8761852..ff827d20f45 100644 --- a/docs/channels/index.md +++ b/docs/channels/index.md @@ -43,6 +43,5 @@ Text is supported everywhere; media and reactions vary by channel. stores more state on disk. - Group behavior varies by channel; see [Groups](/channels/groups). - DM pairing and allowlists are enforced for safety; see [Security](/gateway/security). -- Telegram internals: [grammY notes](/channels/grammy). - Troubleshooting: [Channel troubleshooting](/channels/troubleshooting). - Model providers are documented separately; see [Model Providers](/providers/models). diff --git a/docs/channels/pairing.md b/docs/channels/pairing.md index 4b575eb87c7..d402de16662 100644 --- a/docs/channels/pairing.md +++ b/docs/channels/pairing.md @@ -43,7 +43,14 @@ Supported channels: `telegram`, `whatsapp`, `signal`, `imessage`, `discord`, `sl Stored under `~/.openclaw/credentials/`: - Pending requests: `<channel>-pairing.json` -- Approved allowlist store: `<channel>-allowFrom.json` +- Approved allowlist store: + - Default account: `<channel>-allowFrom.json` + - Non-default account: `<channel>-<accountId>-allowFrom.json` + +Account scoping behavior: + +- Non-default accounts read/write only their scoped allowlist file. +- Default account uses the channel-scoped unscoped allowlist file. Treat these as sensitive (they gate access to your assistant). diff --git a/docs/channels/slack.md b/docs/channels/slack.md index 869df30ad99..22b7f816e37 100644 --- a/docs/channels/slack.md +++ b/docs/channels/slack.md @@ -152,6 +152,12 @@ For actions/directory reads, user token can be preferred when configured. For wr - `dm.groupEnabled` (group DMs default false) - `dm.groupChannels` (optional MPIM allowlist) + Multi-account precedence: + + - `channels.slack.accounts.default.allowFrom` applies only to the `default` account. + - Named accounts inherit `channels.slack.allowFrom` when their own `allowFrom` is unset. 
+ - Named accounts do not inherit `channels.slack.accounts.default.allowFrom`. + Pairing in DMs uses `openclaw pairing approve slack `. diff --git a/docs/channels/telegram.md b/docs/channels/telegram.md index 6a454bd8dcf..880941edd9c 100644 --- a/docs/channels/telegram.md +++ b/docs/channels/telegram.md @@ -109,13 +109,15 @@ Token resolution order is account-aware. In practice, config values win over env `channels.telegram.dmPolicy` controls direct message access: - `pairing` (default) - - `allowlist` + - `allowlist` (requires at least one sender ID in `allowFrom`) - `open` (requires `allowFrom` to include `"*"`) - `disabled` `channels.telegram.allowFrom` accepts numeric Telegram user IDs. `telegram:` / `tg:` prefixes are accepted and normalized. + `dmPolicy: "allowlist"` with empty `allowFrom` blocks all DMs and is rejected by config validation. The onboarding wizard accepts `@username` input and resolves it to numeric IDs. If you upgraded and your config contains `@username` allowlist entries, run `openclaw doctor --fix` to resolve them (best-effort; requires a Telegram bot token). + If you previously relied on pairing-store allowlist files, `openclaw doctor --fix` can recover entries into `channels.telegram.allowFrom` in allowlist flows (for example when `dmPolicy: "allowlist"` has no explicit IDs yet). ### Finding your Telegram user ID @@ -136,10 +138,12 @@ curl "https://api.telegram.org/bot/getUpdates" - There are two independent controls: + Two controls apply together: 1. **Which groups are allowed** (`channels.telegram.groups`) - - no `groups` config: all groups allowed + - no `groups` config: + - with `groupPolicy: "open"`: any group can pass group-ID checks + - with `groupPolicy: "allowlist"` (default): groups are blocked until you add `groups` entries (or `"*"`) - `groups` configured: acts as allowlist (explicit IDs or `"*"`) 2. 
**Which senders are allowed in groups** (`channels.telegram.groupPolicy`) @@ -148,8 +152,11 @@ curl "https://api.telegram.org/bot/getUpdates" - `disabled` `groupAllowFrom` is used for group sender filtering. If not set, Telegram falls back to `allowFrom`. - `groupAllowFrom` entries must be numeric Telegram user IDs. - Runtime note: if `channels.telegram` is completely missing, runtime falls back to `groupPolicy="allowlist"` for group policy evaluation (even if `channels.defaults.groupPolicy` is set). + `groupAllowFrom` entries should be numeric Telegram user IDs (`telegram:` / `tg:` prefixes are normalized). + Non-numeric entries are ignored for sender authorization. + Security boundary (`2026.2.25+`): group sender auth does **not** inherit DM pairing-store approvals. + Pairing stays DM-only. For groups, set `groupAllowFrom` or per-group/per-topic `allowFrom`. + Runtime note: if `channels.telegram` is completely missing, runtime defaults to fail-closed `groupPolicy="allowlist"` unless `channels.defaults.groupPolicy` is explicitly set. Example: allow any member in one specific group: @@ -383,17 +390,19 @@ curl "https://api.telegram.org/bot/getUpdates" - `react` (`chatId`, `messageId`, `emoji`) - `deleteMessage` (`chatId`, `messageId`) - `editMessage` (`chatId`, `messageId`, `content`) + - `createForumTopic` (`chatId`, `name`, optional `iconColor`, `iconCustomEmojiId`) - Channel message actions expose ergonomic aliases (`send`, `react`, `delete`, `edit`, `sticker`, `sticker-search`). + Channel message actions expose ergonomic aliases (`send`, `react`, `delete`, `edit`, `sticker`, `sticker-search`, `topic-create`). 
Gating controls: - `channels.telegram.actions.sendMessage` - - `channels.telegram.actions.editMessage` - `channels.telegram.actions.deleteMessage` - `channels.telegram.actions.reactions` - `channels.telegram.actions.sticker` (default: disabled) + Note: `edit` and `topic-create` are currently enabled by default and do not have separate `channels.telegram.actions.*` toggles. + Reaction removal semantics: [/tools/reactions](/tools/reactions) @@ -553,6 +562,7 @@ curl "https://api.telegram.org/bot/getUpdates" Notes: - `own` means user reactions to bot-sent messages only (best-effort via sent-message cache). + - Reaction events still respect Telegram access controls (`dmPolicy`, `allowFrom`, `groupPolicy`, `groupAllowFrom`); unauthorized senders are dropped. - Telegram does not provide thread IDs in reaction updates. - non-forum groups route to group chat session - forum groups route to the group general-topic session (`:topic:1`), not the exact originating topic @@ -609,6 +619,7 @@ curl "https://api.telegram.org/bot/getUpdates" - set `channels.telegram.webhookSecret` (required when webhook URL is set) - optional `channels.telegram.webhookPath` (default `/telegram-webhook`) - optional `channels.telegram.webhookHost` (default `127.0.0.1`) + - optional `channels.telegram.webhookPort` (default `8787`) Default local listener for webhook mode binds to `127.0.0.1:8787`. @@ -626,7 +637,7 @@ curl "https://api.telegram.org/bot/getUpdates" - DM history controls: - `channels.telegram.dmHistoryLimit` - `channels.telegram.dms[""].historyLimit` - - outbound Telegram API retries are configurable via `channels.telegram.retry`. + - `channels.telegram.retry` config applies to Telegram send helpers (CLI/tools/actions) for recoverable outbound API errors. CLI send target can be numeric chat ID or username: @@ -715,9 +726,14 @@ Primary reference: - `channels.telegram.botToken`: bot token (BotFather). - `channels.telegram.tokenFile`: read token from file path. 
- `channels.telegram.dmPolicy`: `pairing | allowlist | open | disabled` (default: pairing). -- `channels.telegram.allowFrom`: DM allowlist (numeric Telegram user IDs). `open` requires `"*"`. `openclaw doctor --fix` can resolve legacy `@username` entries to IDs. +- `channels.telegram.allowFrom`: DM allowlist (numeric Telegram user IDs). `allowlist` requires at least one sender ID. `open` requires `"*"`. `openclaw doctor --fix` can resolve legacy `@username` entries to IDs and can recover allowlist entries from pairing-store files in allowlist migration flows. +- `channels.telegram.defaultTo`: default Telegram target used by CLI `--deliver` when no explicit `--reply-to` is provided. - `channels.telegram.groupPolicy`: `open | allowlist | disabled` (default: allowlist). -- `channels.telegram.groupAllowFrom`: group sender allowlist (numeric Telegram user IDs). `openclaw doctor --fix` can resolve legacy `@username` entries to IDs. +- `channels.telegram.groupAllowFrom`: group sender allowlist (numeric Telegram user IDs). `openclaw doctor --fix` can resolve legacy `@username` entries to IDs. Non-numeric entries are ignored at auth time. Group auth does not use DM pairing-store fallback (`2026.2.25+`). +- Multi-account precedence: + - `channels.telegram.accounts.default.allowFrom` and `channels.telegram.accounts.default.groupAllowFrom` apply only to the `default` account. + - Named accounts inherit `channels.telegram.allowFrom` and `channels.telegram.groupAllowFrom` when account-level values are unset. + - Named accounts do not inherit `channels.telegram.accounts.default.allowFrom` / `groupAllowFrom`. - `channels.telegram.groups`: per-group defaults + allowlist (use `"*"` for global defaults). - `channels.telegram.groups..groupPolicy`: per-group override for groupPolicy (`open | allowlist | disabled`). - `channels.telegram.groups..requireMention`: mention gating default. 
@@ -730,13 +746,14 @@ Primary reference: - `channels.telegram.groups..topics..requireMention`: per-topic mention gating override. - `channels.telegram.capabilities.inlineButtons`: `off | dm | group | all | allowlist` (default: allowlist). - `channels.telegram.accounts..capabilities.inlineButtons`: per-account override. +- `channels.telegram.commands.nativeSkills`: enable/disable Telegram native skills commands. - `channels.telegram.replyToMode`: `off | first | all` (default: `off`). - `channels.telegram.textChunkLimit`: outbound chunk size (chars). - `channels.telegram.chunkMode`: `length` (default) or `newline` to split on blank lines (paragraph boundaries) before length chunking. - `channels.telegram.linkPreview`: toggle link previews for outbound messages (default: true). -- `channels.telegram.streaming`: `off | partial | block | progress` (live stream preview; default: `off`; `progress` maps to `partial`). -- `channels.telegram.mediaMaxMb`: inbound/outbound media cap (MB). -- `channels.telegram.retry`: retry policy for outbound Telegram API calls (attempts, minDelayMs, maxDelayMs, jitter). +- `channels.telegram.streaming`: `off | partial | block | progress` (live stream preview; default: `off`; `progress` maps to `partial`; `block` is legacy preview mode compatibility). +- `channels.telegram.mediaMaxMb`: inbound Telegram media download/processing cap (MB). +- `channels.telegram.retry`: retry policy for Telegram send helpers (CLI/tools/actions) on recoverable outbound API errors (attempts, minDelayMs, maxDelayMs, jitter). - `channels.telegram.network.autoSelectFamily`: override Node autoSelectFamily (true=enable, false=disable). Defaults to enabled on Node 22+, with WSL2 defaulting to disabled. - `channels.telegram.network.dnsResultOrder`: override DNS result order (`ipv4first` or `verbatim`). Defaults to `ipv4first` on Node 22+. - `channels.telegram.proxy`: proxy URL for Bot API calls (SOCKS/HTTP). 
@@ -744,6 +761,7 @@ Primary reference: - `channels.telegram.webhookSecret`: webhook secret (required when webhookUrl is set). - `channels.telegram.webhookPath`: local webhook path (default `/telegram-webhook`). - `channels.telegram.webhookHost`: local webhook bind host (default `127.0.0.1`). +- `channels.telegram.webhookPort`: local webhook bind port (default `8787`). - `channels.telegram.actions.reactions`: gate Telegram tool reactions. - `channels.telegram.actions.sendMessage`: gate Telegram tool message sends. - `channels.telegram.actions.deleteMessage`: gate Telegram tool message deletes. @@ -757,7 +775,7 @@ Telegram-specific high-signal fields: - startup/auth: `enabled`, `botToken`, `tokenFile`, `accounts.*` - access control: `dmPolicy`, `allowFrom`, `groupPolicy`, `groupAllowFrom`, `groups`, `groups.*.topics.*` -- command/menu: `commands.native`, `customCommands` +- command/menu: `commands.native`, `commands.nativeSkills`, `customCommands` - threading/replies: `replyToMode` - streaming: `streaming` (preview), `blockStreaming` - formatting/delivery: `textChunkLimit`, `chunkMode`, `linkPreview`, `responsePrefix` diff --git a/docs/cli/acp.md b/docs/cli/acp.md index 1b1981395e4..3367173ace0 100644 --- a/docs/cli/acp.md +++ b/docs/cli/acp.md @@ -8,7 +8,7 @@ title: "acp" # acp -Run the ACP (Agent Client Protocol) bridge that talks to a OpenClaw Gateway. +Run the [Agent Client Protocol (ACP)](https://agentclientprotocol.com/) bridge that talks to an OpenClaw Gateway. This command speaks ACP over stdio for IDEs and forwards prompts to the Gateway over WebSocket. It keeps ACP sessions mapped to Gateway session keys. 
diff --git a/docs/cli/agents.md b/docs/cli/agents.md index 39679265f14..5bdc8a68bf2 100644 --- a/docs/cli/agents.md +++ b/docs/cli/agents.md @@ -1,5 +1,5 @@ --- -summary: "CLI reference for `openclaw agents` (list/add/delete/set identity)" +summary: "CLI reference for `openclaw agents` (list/add/delete/bindings/bind/unbind/set identity)" read_when: - You want multiple isolated agents (workspaces + routing + auth) title: "agents" @@ -19,11 +19,59 @@ Related: ```bash openclaw agents list openclaw agents add work --workspace ~/.openclaw/workspace-work +openclaw agents bindings +openclaw agents bind --agent work --bind telegram:ops +openclaw agents unbind --agent work --bind telegram:ops openclaw agents set-identity --workspace ~/.openclaw/workspace --from-identity openclaw agents set-identity --agent main --avatar avatars/openclaw.png openclaw agents delete work ``` +## Routing bindings + +Use routing bindings to pin inbound channel traffic to a specific agent. + +List bindings: + +```bash +openclaw agents bindings +openclaw agents bindings --agent work +openclaw agents bindings --json +``` + +Add bindings: + +```bash +openclaw agents bind --agent work --bind telegram:ops --bind discord:guild-a +``` + +If you omit `accountId` (`--bind `), OpenClaw resolves it from channel defaults and plugin setup hooks when available. + +### Binding scope behavior + +- A binding without `accountId` matches the channel default account only. +- `accountId: "*"` is the channel-wide fallback (all accounts) and is less specific than an explicit account binding. +- If the same agent already has a matching channel binding without `accountId`, and you later bind with an explicit or resolved `accountId`, OpenClaw upgrades that existing binding in place instead of adding a duplicate. 
+ +Example: + +```bash +# initial channel-only binding +openclaw agents bind --agent work --bind telegram + +# later upgrade to account-scoped binding +openclaw agents bind --agent work --bind telegram:ops +``` + +After the upgrade, routing for that binding is scoped to `telegram:ops`. If you also want default-account routing, add it explicitly (for example `--bind telegram:default`). + +Remove bindings: + +```bash +openclaw agents unbind --agent work --bind telegram:ops +openclaw agents unbind --agent work --all +``` + ## Identity files Each agent workspace can include an `IDENTITY.md` at the workspace root: diff --git a/docs/cli/channels.md b/docs/cli/channels.md index 4213efb3eb7..23e0b2cfd4b 100644 --- a/docs/cli/channels.md +++ b/docs/cli/channels.md @@ -35,6 +35,26 @@ openclaw channels remove --channel telegram --delete Tip: `openclaw channels add --help` shows per-channel flags (token, app token, signal-cli paths, etc). +When you run `openclaw channels add` without flags, the interactive wizard can prompt: + +- account ids per selected channel +- optional display names for those accounts +- `Bind configured channel accounts to agents now?` + +If you confirm bind now, the wizard asks which agent should own each configured channel account and writes account-scoped routing bindings. + +You can also manage the same routing rules later with `openclaw agents bindings`, `openclaw agents bind`, and `openclaw agents unbind` (see [agents](/cli/agents)). + +When you add a non-default account to a channel that is still using single-account top-level settings (no `channels..accounts` entries yet), OpenClaw moves account-scoped single-account top-level values into `channels..accounts.default`, then writes the new account. This preserves the original account behavior while moving to the multi-account shape. + +Routing behavior stays consistent: + +- Existing channel-only bindings (no `accountId`) continue to match the default account. 
+- `channels add` does not auto-create or rewrite bindings in non-interactive mode. +- Interactive setup can optionally add account-scoped bindings. + +If your config was already in a mixed state (named accounts present, missing `default`, and top-level single-account values still set), run `openclaw doctor --fix` to move account-scoped values into `accounts.default`. + ## Login / logout (interactive) ```bash diff --git a/docs/cli/index.md b/docs/cli/index.md index 32eb31b5eb3..bb09b062210 100644 --- a/docs/cli/index.md +++ b/docs/cli/index.md @@ -52,6 +52,7 @@ This page describes the current CLI behavior. If commands change, update this do - [`plugins`](/cli/plugins) (plugin commands) - [`channels`](/cli/channels) - [`security`](/cli/security) +- [`secrets`](/cli/secrets) - [`skills`](/cli/skills) - [`daemon`](/cli/daemon) (legacy alias for gateway service commands) - [`clawbot`](/cli/clawbot) (legacy alias namespace) @@ -104,6 +105,9 @@ openclaw [--dev] [--profile ] dashboard security audit + secrets + reload + migrate reset uninstall update @@ -263,6 +267,13 @@ Note: plugins can add additional top-level commands (for example `openclaw voice - `openclaw security audit --deep` — best-effort live Gateway probe. - `openclaw security audit --fix` — tighten safe defaults and chmod state/config. +## Secrets + +- `openclaw secrets reload` — re-resolve refs and atomically swap the runtime snapshot. +- `openclaw secrets audit` — scan for plaintext residues, unresolved refs, and precedence drift. +- `openclaw secrets configure` — interactive helper for provider setup + SecretRef mapping + preflight/apply. +- `openclaw secrets apply --from ` — apply a previously generated plan (`--dry-run` supported). + ## Plugins Manage extensions and their config: @@ -317,7 +328,8 @@ Interactive wizard to set up gateway, workspace, and skills. 
Options: - `--workspace ` -- `--reset` (reset config + credentials + sessions + workspace before wizard) +- `--reset` (reset config + credentials + sessions before wizard) +- `--reset-scope ` (default `config+creds+sessions`; use `full` to also remove workspace) - `--non-interactive` - `--mode ` - `--flow ` (manual is an alias for advanced) @@ -326,6 +338,7 @@ Options: - `--token ` (non-interactive; used with `--auth-choice token`) - `--token-profile-id ` (non-interactive; default: `:manual`) - `--token-expires-in ` (non-interactive; e.g. `365d`, `12h`) +- `--secret-input-mode ` (default `plaintext`; use `ref` to store provider default env refs instead of plaintext keys) - `--anthropic-api-key ` - `--openai-api-key ` - `--mistral-api-key ` @@ -400,6 +413,8 @@ Subcommands: - Tip: `channels status` prints warnings with suggested fixes when it can detect common misconfigurations (then points you to `openclaw doctor`). - `channels logs`: show recent channel logs from the gateway log file. - `channels add`: wizard-style setup when no flags are passed; flags switch to non-interactive mode. + - When adding a non-default account to a channel still using single-account top-level config, OpenClaw moves account-scoped values into `channels..accounts.default` before writing the new account. + - Non-interactive `channels add` does not auto-create/upgrade bindings; channel-only bindings continue to match the default account. - `channels remove`: disable by default; pass `--delete` to remove config entries without prompts. - `channels login`: interactive channel login (WhatsApp Web only). - `channels logout`: log out of a channel session (if supported). @@ -574,7 +589,37 @@ Options: - `--non-interactive` - `--json` -Binding specs use `channel[:accountId]`. When `accountId` is omitted for WhatsApp, the default account id is used. +Binding specs use `channel[:accountId]`. 
When `accountId` is omitted, OpenClaw may resolve account scope via channel defaults/plugin hooks; otherwise it is a channel binding without explicit account scope. + +#### `agents bindings` + +List routing bindings. + +Options: + +- `--agent ` +- `--json` + +#### `agents bind` + +Add routing bindings for an agent. + +Options: + +- `--agent ` +- `--bind ` (repeatable) +- `--json` + +#### `agents unbind` + +Remove routing bindings for an agent. + +Options: + +- `--agent ` +- `--bind ` (repeatable) +- `--all` +- `--json` #### `agents delete ` diff --git a/docs/cli/onboard.md b/docs/cli/onboard.md index 83aeaeaf3be..7485499d1ea 100644 --- a/docs/cli/onboard.md +++ b/docs/cli/onboard.md @@ -34,11 +34,39 @@ openclaw onboard --non-interactive \ --custom-base-url "https://llm.example.com/v1" \ --custom-model-id "foo-large" \ --custom-api-key "$CUSTOM_API_KEY" \ + --secret-input-mode plaintext \ --custom-compatibility openai ``` `--custom-api-key` is optional in non-interactive mode. If omitted, onboarding checks `CUSTOM_API_KEY`. +Store provider keys as refs instead of plaintext: + +```bash +openclaw onboard --non-interactive \ + --auth-choice openai-api-key \ + --secret-input-mode ref \ + --accept-risk +``` + +With `--secret-input-mode ref`, onboarding writes env-backed refs instead of plaintext key values. +For auth-profile backed providers this writes `keyRef` entries; for custom providers this writes `models.providers..apiKey` as an env ref (for example `{ source: "env", provider: "default", id: "CUSTOM_API_KEY" }`). + +Non-interactive `ref` mode contract: + +- Set the provider env var in the onboarding process environment (for example `OPENAI_API_KEY`). +- Do not pass inline key flags (for example `--openai-api-key`) unless that env var is also set. +- If an inline key flag is passed without the required env var, onboarding fails fast with guidance. + +Interactive onboarding behavior with reference mode: + +- Choose **Use secret reference** when prompted. 
+- Then choose either: + - Environment variable + - Configured secret provider (`file` or `exec`) +- Onboarding performs a fast preflight validation before saving the ref. + - If validation fails, onboarding shows the error and lets you retry. + Non-interactive Z.AI endpoint choices: Note: `--auth-choice zai-api-key` now auto-detects the best Z.AI endpoint for your key (prefers the general API with `zai/glm-5`). diff --git a/docs/cli/secrets.md b/docs/cli/secrets.md new file mode 100644 index 00000000000..66e1c0e4769 --- /dev/null +++ b/docs/cli/secrets.md @@ -0,0 +1,163 @@ +--- +summary: "CLI reference for `openclaw secrets` (reload, audit, configure, apply)" +read_when: + - Re-resolving secret refs at runtime + - Auditing plaintext residues and unresolved refs + - Configuring SecretRefs and applying one-way scrub changes +title: "secrets" +--- + +# `openclaw secrets` + +Use `openclaw secrets` to migrate credentials from plaintext to SecretRefs and keep the active secrets runtime healthy. + +Command roles: + +- `reload`: gateway RPC (`secrets.reload`) that re-resolves refs and swaps runtime snapshot only on full success (no config writes). +- `audit`: read-only scan of config + auth stores + legacy residues (`.env`, `auth.json`) for plaintext, unresolved refs, and precedence drift. +- `configure`: interactive planner for provider setup + target mapping + preflight (TTY required). +- `apply`: execute a saved plan (`--dry-run` for validation only), then scrub migrated plaintext residues. + +Recommended operator loop: + +```bash +openclaw secrets audit --check +openclaw secrets configure +openclaw secrets apply --from /tmp/openclaw-secrets-plan.json --dry-run +openclaw secrets apply --from /tmp/openclaw-secrets-plan.json +openclaw secrets audit --check +openclaw secrets reload +``` + +Exit code note for CI/gates: + +- `audit --check` returns `1` on findings, `2` when refs are unresolved. 
+ +Related: + +- Secrets guide: [Secrets Management](/gateway/secrets) +- Security guide: [Security](/gateway/security) + +## Reload runtime snapshot + +Re-resolve secret refs and atomically swap runtime snapshot. + +```bash +openclaw secrets reload +openclaw secrets reload --json +``` + +Notes: + +- Uses gateway RPC method `secrets.reload`. +- If resolution fails, gateway keeps last-known-good snapshot and returns an error (no partial activation). +- JSON response includes `warningCount`. + +## Audit + +Scan OpenClaw state for: + +- plaintext secret storage +- unresolved refs +- precedence drift (`auth-profiles` shadowing config refs) +- legacy residues (`auth.json`, OAuth out-of-scope notes) + +```bash +openclaw secrets audit +openclaw secrets audit --check +openclaw secrets audit --json +``` + +Exit behavior: + +- `--check` exits non-zero on findings. +- unresolved refs exit with a higher-priority non-zero code. + +Report shape highlights: + +- `status`: `clean | findings | unresolved` +- `summary`: `plaintextCount`, `unresolvedRefCount`, `shadowedRefCount`, `legacyResidueCount` +- finding codes: + - `PLAINTEXT_FOUND` + - `REF_UNRESOLVED` + - `REF_SHADOWED` + - `LEGACY_RESIDUE` + +## Configure (interactive helper) + +Build provider + SecretRef changes interactively, run preflight, and optionally apply: + +```bash +openclaw secrets configure +openclaw secrets configure --plan-out /tmp/openclaw-secrets-plan.json +openclaw secrets configure --apply --yes +openclaw secrets configure --providers-only +openclaw secrets configure --skip-provider-setup +openclaw secrets configure --json +``` + +Flow: + +- Provider setup first (`add/edit/remove` for `secrets.providers` aliases). +- Credential mapping second (select fields and assign `{source, provider, id}` refs). +- Preflight and optional apply last. + +Flags: + +- `--providers-only`: configure `secrets.providers` only, skip credential mapping. 
+- `--skip-provider-setup`: skip provider setup and map credentials to existing providers. + +Notes: + +- Requires an interactive TTY. +- You cannot combine `--providers-only` with `--skip-provider-setup`. +- `configure` targets secret-bearing fields in `openclaw.json`. +- Include all secret-bearing fields you intend to migrate (for example both `models.providers.*.apiKey` and `skills.entries.*.apiKey`) so audit can reach a clean state. +- It performs preflight resolution before apply. +- Generated plans default to scrub options (`scrubEnv`, `scrubAuthProfilesForProviderTargets`, `scrubLegacyAuthJson` all enabled). +- Apply path is one-way for migrated plaintext values. +- Without `--apply`, CLI still prompts `Apply this plan now?` after preflight. +- With `--apply` (and no `--yes`), CLI prompts an extra irreversible-migration confirmation. + +Exec provider safety note: + +- Homebrew installs often expose symlinked binaries under `/opt/homebrew/bin/*`. +- Set `allowSymlinkCommand: true` only when needed for trusted package-manager paths, and pair it with `trustedDirs` (for example `["/opt/homebrew"]`). + +## Apply a saved plan + +Apply or preflight a plan generated previously: + +```bash +openclaw secrets apply --from /tmp/openclaw-secrets-plan.json +openclaw secrets apply --from /tmp/openclaw-secrets-plan.json --dry-run +openclaw secrets apply --from /tmp/openclaw-secrets-plan.json --json +``` + +Plan contract details (allowed target paths, validation rules, and failure semantics): + +- [Secrets Apply Plan Contract](/gateway/secrets-plan-contract) + +What `apply` may update: + +- `openclaw.json` (SecretRef targets + provider upserts/deletes) +- `auth-profiles.json` (provider-target scrubbing) +- legacy `auth.json` residues +- `~/.openclaw/.env` known secret keys whose values were migrated + +## Why no rollback backups + +`secrets apply` intentionally does not write rollback backups containing old plaintext values. 
+ +Safety comes from strict preflight + atomic-ish apply with best-effort in-memory restore on failure. + +## Example + +```bash +# Audit first, then configure, then confirm clean: +openclaw secrets audit --check +openclaw secrets configure +openclaw secrets audit --check +``` + +If `audit --check` still reports plaintext findings after a partial migration, verify you also migrated skill keys (`skills.entries.*.apiKey`) and any other reported target paths. diff --git a/docs/cli/security.md b/docs/cli/security.md index fe8af41ec25..cc705b31a30 100644 --- a/docs/cli/security.md +++ b/docs/cli/security.md @@ -29,7 +29,7 @@ It also emits `security.trust_model.multi_user_heuristic` when config suggests l For intentional shared-user setups, the audit guidance is to sandbox all sessions, keep filesystem access workspace-scoped, and keep personal/private identities or credentials off that runtime. It also warns when small models (`<=300B`) are used without sandboxing and with web/browser tools enabled. For webhook ingress, it warns when `hooks.defaultSessionKey` is unset, when request `sessionKey` overrides are enabled, and when overrides are enabled without `hooks.allowedSessionKeyPrefixes`. -It also warns when sandbox Docker settings are configured while sandbox mode is off, when `gateway.nodes.denyCommands` uses ineffective pattern-like/unknown entries, when `gateway.nodes.allowCommands` explicitly enables dangerous node commands, when global `tools.profile="minimal"` is overridden by agent tool profiles, when open groups expose runtime/filesystem tools without sandbox/workspace guards, and when installed extension plugin tools may be reachable under permissive tool policy. 
+It also warns when sandbox Docker settings are configured while sandbox mode is off, when `gateway.nodes.denyCommands` uses ineffective pattern-like/unknown entries (exact node command-name matching only, not shell-text filtering), when `gateway.nodes.allowCommands` explicitly enables dangerous node commands, when global `tools.profile="minimal"` is overridden by agent tool profiles, when open groups expose runtime/filesystem tools without sandbox/workspace guards, and when installed extension plugin tools may be reachable under permissive tool policy. It also flags `gateway.allowRealIpFallback=true` (header-spoofing risk if proxies are misconfigured) and `discovery.mdns.mode="full"` (metadata leakage via mDNS TXT records). It also warns when sandbox browser uses Docker `bridge` network without `sandbox.browser.cdpSourceRange`. It also flags dangerous sandbox Docker network modes (including `host` and `container:*` namespace joins). diff --git a/docs/concepts/architecture.md b/docs/concepts/architecture.md index 75addf3fa57..a36c93313c6 100644 --- a/docs/concepts/architecture.md +++ b/docs/concepts/architecture.md @@ -98,6 +98,9 @@ sequenceDiagram - **Local** connects (loopback or the gateway host’s own tailnet address) can be auto‑approved to keep same‑host UX smooth. - All connects must sign the `connect.challenge` nonce. +- Signature payload `v3` also binds `platform` + `deviceFamily`; the gateway + pins paired metadata on reconnect and requires repair pairing for metadata + changes. - **Non‑local** connects still require explicit approval. - Gateway auth (`gateway.auth.*`) still applies to **all** connections, local or remote. diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md index 6210f592482..fccd0b84249 100644 --- a/docs/concepts/model-providers.md +++ b/docs/concepts/model-providers.md @@ -70,6 +70,8 @@ OpenClaw ships with the pi‑ai catalog. 
These providers require **no** - Auth: OAuth (ChatGPT) - Example model: `openai-codex/gpt-5.3-codex` - CLI: `openclaw onboard --auth-choice openai-codex` or `openclaw models auth login --provider openai-codex` +- Default transport is `auto` (WebSocket-first, SSE fallback) +- Override per model via `agents.defaults.models["openai-codex/"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`) ```json5 { @@ -102,6 +104,7 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - Providers: `google-vertex`, `google-antigravity`, `google-gemini-cli` - Auth: Vertex uses gcloud ADC; Antigravity/Gemini CLI use their respective auth flows +- Caution: Antigravity and Gemini CLI OAuth in OpenClaw are unofficial integrations. Some users have reported Google account restrictions after using third-party clients. Review Google terms and use a non-critical account if you choose to proceed. - Antigravity OAuth is shipped as a bundled plugin (`google-antigravity-auth`, disabled by default). - Enable: `openclaw plugins enable google-antigravity-auth` - Login: `openclaw models auth login --provider google-antigravity --set-default` diff --git a/docs/concepts/models.md b/docs/concepts/models.md index ee8f06ecb3d..b4317273d5c 100644 --- a/docs/concepts/models.md +++ b/docs/concepts/models.md @@ -207,3 +207,9 @@ mode, pass `--yes` to accept defaults. Custom providers in `models.providers` are written into `models.json` under the agent directory (default `~/.openclaw/agents//models.json`). This file is merged by default unless `models.mode` is set to `replace`. + +Merge mode precedence for matching provider IDs: + +- Non-empty `apiKey`/`baseUrl` already present in the agent `models.json` win. +- Empty or missing agent `apiKey`/`baseUrl` fall back to config `models.providers`. +- Other provider fields are refreshed from config and normalized catalog data. 
diff --git a/docs/concepts/multi-agent.md b/docs/concepts/multi-agent.md index 069fcfb6367..842531cc2a6 100644 --- a/docs/concepts/multi-agent.md +++ b/docs/concepts/multi-agent.md @@ -185,6 +185,12 @@ Bindings are **deterministic** and **most-specific wins**: If multiple bindings match in the same tier, the first one in config order wins. If a binding sets multiple match fields (for example `peer` + `guildId`), all specified fields are required (`AND` semantics). +Important account-scope detail: + +- A binding that omits `accountId` matches the default account only. +- Use `accountId: "*"` for a channel-wide fallback across all accounts. +- If you later add the same binding for the same agent with an explicit account id, OpenClaw upgrades the existing channel-only binding to account-scoped instead of duplicating it. + ## Multiple accounts / phone numbers Channels that support **multiple accounts** (e.g. WhatsApp) use `accountId` to identify diff --git a/docs/concepts/oauth.md b/docs/concepts/oauth.md index 586406cf6b1..741867f188f 100644 --- a/docs/concepts/oauth.md +++ b/docs/concepts/oauth.md @@ -40,8 +40,9 @@ To reduce that, OpenClaw treats `auth-profiles.json` as a **token sink**: Secrets are stored **per-agent**: -- Auth profiles (OAuth + API keys): `~/.openclaw/agents//agent/auth-profiles.json` -- Runtime cache (managed automatically; don’t edit): `~/.openclaw/agents//agent/auth.json` +- Auth profiles (OAuth + API keys + optional value-level refs): `~/.openclaw/agents//agent/auth-profiles.json` +- Legacy compatibility file: `~/.openclaw/agents//agent/auth.json` + (static `api_key` entries are scrubbed when discovered) Legacy import-only file (still supported, but not the main store): @@ -49,6 +50,8 @@ Legacy import-only file (still supported, but not the main store): All of the above also respect `$OPENCLAW_STATE_DIR` (state dir override). 
Full reference: [/gateway/configuration](/gateway/configuration#auth-storage-oauth--api-keys) +For static secret refs and runtime snapshot activation behavior, see [Secrets Management](/gateway/secrets). + ## Anthropic setup-token (subscription auth) Run `claude setup-token` on any machine, then paste it into OpenClaw: diff --git a/docs/docs.json b/docs/docs.json index 4c83f3058bd..761f30f5157 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -137,7 +137,7 @@ }, { "source": "/providers/grammy", - "destination": "/channels/grammy" + "destination": "/channels/telegram" }, { "source": "/providers/imessage", @@ -365,7 +365,11 @@ }, { "source": "/grammy", - "destination": "/channels/grammy" + "destination": "/channels/telegram" + }, + { + "source": "/channels/grammy", + "destination": "/channels/telegram" }, { "source": "/group-messages", @@ -1002,7 +1006,12 @@ }, { "group": "Agent coordination", - "pages": ["tools/agent-send", "tools/subagents", "tools/multi-agent-sandbox-tools"] + "pages": [ + "tools/agent-send", + "tools/subagents", + "tools/acp-agents", + "tools/multi-agent-sandbox-tools" + ] }, { "group": "Skills", @@ -1135,6 +1144,8 @@ "gateway/configuration-reference", "gateway/configuration-examples", "gateway/authentication", + "gateway/secrets", + "gateway/secrets-plan-contract", "gateway/trusted-proxy-auth", "gateway/health", "gateway/heartbeat", @@ -1230,6 +1241,7 @@ "cli/qr", "cli/reset", "cli/sandbox", + "cli/secrets", "cli/security", "cli/sessions", "cli/setup", @@ -1263,12 +1275,7 @@ }, { "group": "Technical reference", - "pages": [ - "reference/wizard", - "reference/token-use", - "reference/prompt-caching", - "channels/grammy" - ] + "pages": ["reference/wizard", "reference/token-use", "reference/prompt-caching"] }, { "group": "Concept internals", diff --git a/docs/experiments/plans/acp-thread-bound-agents.md b/docs/experiments/plans/acp-thread-bound-agents.md new file mode 100644 index 00000000000..3ca509c9492 --- /dev/null +++ 
b/docs/experiments/plans/acp-thread-bound-agents.md @@ -0,0 +1,800 @@ +--- +summary: "Integrate ACP coding agents via a first-class ACP control plane in core and plugin-backed runtimes (acpx first)" +owner: "onutc" +status: "draft" +last_updated: "2026-02-25" +title: "ACP Thread Bound Agents" +--- + +# ACP Thread Bound Agents + +## Overview + +This plan defines how OpenClaw should support ACP coding agents in thread-capable channels (Discord first) with production-level lifecycle and recovery. + +Related document: + +- [Unified Runtime Streaming Refactor Plan](/experiments/plans/acp-unified-streaming-refactor) + +Target user experience: + +- a user spawns or focuses an ACP session into a thread +- user messages in that thread route to the bound ACP session +- agent output streams back to the same thread persona +- session can be persistent or one shot with explicit cleanup controls + +## Decision summary + +Long term recommendation is a hybrid architecture: + +- OpenClaw core owns ACP control plane concerns + - session identity and metadata + - thread binding and routing decisions + - delivery invariants and duplicate suppression + - lifecycle cleanup and recovery semantics +- ACP runtime backend is pluggable + - first backend is an acpx-backed plugin service + - runtime does ACP transport, queueing, cancel, reconnect + +OpenClaw should not reimplement ACP transport internals in core. +OpenClaw should not rely on a pure plugin-only interception path for routing. + +## North-star architecture (holy grail) + +Treat ACP as a first-class control plane in OpenClaw, with pluggable runtime adapters. 
+ +Non-negotiable invariants: + +- every ACP thread binding references a valid ACP session record +- every ACP session has explicit lifecycle state (`creating`, `idle`, `running`, `cancelling`, `closed`, `error`) +- every ACP run has explicit run state (`queued`, `running`, `completed`, `failed`, `cancelled`) +- spawn, bind, and initial enqueue are atomic +- command retries are idempotent (no duplicate runs or duplicate Discord outputs) +- bound-thread channel output is a projection of ACP run events, never ad-hoc side effects + +Long-term ownership model: + +- `AcpSessionManager` is the single ACP writer and orchestrator +- manager lives in gateway process first; can be moved to a dedicated sidecar later behind the same interface +- per ACP session key, manager owns one in-memory actor (serialized command execution) +- adapters (`acpx`, future backends) are transport/runtime implementations only + +Long-term persistence model: + +- move ACP control-plane state to a dedicated SQLite store (WAL mode) under OpenClaw state dir +- keep `SessionEntry.acp` as compatibility projection during migration, not source-of-truth +- store ACP events append-only to support replay, crash recovery, and deterministic delivery + +### Delivery strategy (bridge to holy-grail) + +- short-term bridge + - keep current thread binding mechanics and existing ACP config surface + - fix metadata-gap bugs and route ACP turns through a single core ACP branch + - add idempotency keys and fail-closed routing checks immediately +- long-term cutover + - move ACP source-of-truth to control-plane DB + actors + - make bound-thread delivery purely event-projection based + - remove legacy fallback behavior that depends on opportunistic session-entry metadata + +## Why not pure plugin only + +Current plugin hooks are not sufficient for end to end ACP session routing without core changes. 
+ +- inbound routing from thread binding resolves to a session key in core dispatch first +- message hooks are fire-and-forget and cannot short-circuit the main reply path +- plugin commands are good for control operations but not for replacing core per-turn dispatch flow + +Result: + +- ACP runtime can be pluginized +- ACP routing branch must exist in core + +## Existing foundation to reuse + +Already implemented and should remain canonical: + +- thread binding target supports `subagent` and `acp` +- inbound thread routing override resolves by binding before normal dispatch +- outbound thread identity via webhook in reply delivery +- `/focus` and `/unfocus` flow with ACP target compatibility +- persistent binding store with restore on startup +- unbind lifecycle on thread archive, thread delete, unfocus, session reset, and session delete + +This plan extends that foundation rather than replacing it. + +## Architecture + +### Boundary model + +Core (must be in OpenClaw core): + +- ACP session-mode dispatch branch in the reply pipeline +- delivery arbitration to avoid parent plus thread duplication +- ACP control-plane persistence (with `SessionEntry.acp` compatibility projection during migration) +- lifecycle unbind and runtime detach semantics tied to session reset/delete + +Plugin backend (acpx implementation): + +- ACP runtime worker supervision +- acpx process invocation and event parsing +- ACP command handlers (`/acp ...`) and operator UX +- backend-specific config defaults and diagnostics + +### Runtime ownership model + +- one gateway process owns ACP orchestration state +- ACP execution runs in supervised child processes via acpx backend +- process strategy is long lived per active ACP session key, not per message + +This avoids startup cost on every prompt and keeps cancel and reconnect semantics reliable. 
+ +### Core runtime contract + +Add a core ACP runtime contract so routing code does not depend on CLI details and can switch backends without changing dispatch logic: + +```ts +export type AcpRuntimePromptMode = "prompt" | "steer"; + +export type AcpRuntimeHandle = { + sessionKey: string; + backend: string; + runtimeSessionName: string; +}; + +export type AcpRuntimeEvent = + | { type: "text_delta"; stream: "output" | "thought"; text: string } + | { type: "tool_call"; name: string; argumentsText: string } + | { type: "done"; usage?: Record<string, unknown> } + | { type: "error"; code: string; message: string; retryable?: boolean }; + +export interface AcpRuntime { + ensureSession(input: { + sessionKey: string; + agent: string; + mode: "persistent" | "oneshot"; + cwd?: string; + env?: Record<string, string>; + idempotencyKey: string; + }): Promise<AcpRuntimeHandle>; + + submit(input: { + handle: AcpRuntimeHandle; + text: string; + mode: AcpRuntimePromptMode; + idempotencyKey: string; + }): Promise<{ runtimeRunId: string }>; + + stream(input: { + handle: AcpRuntimeHandle; + runtimeRunId: string; + onEvent: (event: AcpRuntimeEvent) => Promise<void> | void; + signal?: AbortSignal; + }): Promise<void>; + + cancel(input: { + handle: AcpRuntimeHandle; + runtimeRunId?: string; + reason?: string; + idempotencyKey: string; + }): Promise<void>; + + close(input: { handle: AcpRuntimeHandle; reason: string; idempotencyKey: string }): Promise<void>; + + health?(): Promise<{ ok: boolean; details?: string }>; +} +``` + +Implementation detail: + +- first backend: `AcpxRuntime` shipped as a plugin service +- core resolves runtime via registry and fails with explicit operator error when no ACP runtime backend is available + +### Control-plane data model and persistence + +Long-term source-of-truth is a dedicated ACP SQLite database (WAL mode), for transactional updates and crash-safe recovery: + +- `acp_sessions` + - `session_key` (pk), `backend`, `agent`, `mode`, `cwd`, `state`, `created_at`, `updated_at`, `last_error` +- `acp_runs` + - `run_id` (pk), 
`session_key` (fk), `state`, `requester_message_id`, `idempotency_key`, `started_at`, `ended_at`, `error_code`, `error_message` +- `acp_bindings` + - `binding_key` (pk), `thread_id`, `channel_id`, `account_id`, `session_key` (fk), `expires_at`, `bound_at` +- `acp_events` + - `event_id` (pk), `run_id` (fk), `seq`, `kind`, `payload_json`, `created_at` +- `acp_delivery_checkpoint` + - `run_id` (pk/fk), `last_event_seq`, `last_discord_message_id`, `updated_at` +- `acp_idempotency` + - `scope`, `idempotency_key`, `result_json`, `created_at`, unique `(scope, idempotency_key)` + +```ts +export type AcpSessionMeta = { + backend: string; + agent: string; + runtimeSessionName: string; + mode: "persistent" | "oneshot"; + cwd?: string; + state: "idle" | "running" | "error"; + lastActivityAt: number; + lastError?: string; +}; +``` + +Storage rules: + +- keep `SessionEntry.acp` as a compatibility projection during migration +- process ids and sockets stay in memory only +- durable lifecycle and run status live in ACP DB, not generic session JSON +- if runtime owner dies, gateway rehydrates from ACP DB and resumes from checkpoints + +### Routing and delivery + +Inbound: + +- keep current thread binding lookup as first routing step +- if bound target is ACP session, route to ACP runtime branch instead of `getReplyFromConfig` +- explicit `/acp steer` command uses `mode: "steer"` + +Outbound: + +- ACP event stream is normalized to OpenClaw reply chunks +- delivery target is resolved through existing bound destination path +- when a bound thread is active for that session turn, parent channel completion is suppressed + +Streaming policy: + +- stream partial output with coalescing window +- configurable min interval and max chunk bytes to stay under Discord rate limits +- final message always emitted on completion or failure + +### State machines and transaction boundaries + +Session state machine: + +- `creating -> idle -> running -> idle` +- `running -> cancelling -> idle | error` 
+- `idle -> closed` +- `error -> idle | closed` + +Run state machine: + +- `queued -> running -> completed` +- `running -> failed | cancelled` +- `queued -> cancelled` + +Required transaction boundaries: + +- spawn transaction + - create ACP session row + - create/update ACP thread binding row + - enqueue initial run row +- close transaction + - mark session closed + - delete/expire binding rows + - write final close event +- cancel transaction + - mark target run cancelling/cancelled with idempotency key + +No partial success is allowed across these boundaries. + +### Per-session actor model + +`AcpSessionManager` runs one actor per ACP session key: + +- actor mailbox serializes `submit`, `cancel`, `close`, and `stream` side effects +- actor owns runtime handle hydration and runtime adapter process lifecycle for that session +- actor writes run events in-order (`seq`) before any Discord delivery +- actor updates delivery checkpoints after successful outbound send + +This removes cross-turn races and prevents duplicate or out-of-order thread output. 
+ +### Idempotency and delivery projection + +All external ACP actions must carry idempotency keys: + +- spawn idempotency key +- prompt/steer idempotency key +- cancel idempotency key +- close idempotency key + +Delivery rules: + +- Discord messages are derived from `acp_events` plus `acp_delivery_checkpoint` +- retries resume from checkpoint without re-sending already delivered chunks +- final reply emission is exactly-once per run from projection logic + +### Recovery and self-healing + +On gateway start: + +- load non-terminal ACP sessions (`creating`, `idle`, `running`, `cancelling`, `error`) +- recreate actors lazily on first inbound event or eagerly under configured cap +- reconcile any `running` runs missing heartbeats and mark `failed` or recover via adapter + +On inbound Discord thread message: + +- if binding exists but ACP session is missing, fail closed with explicit stale-binding message +- optionally auto-unbind stale binding after operator-safe validation +- never silently route stale ACP bindings to normal LLM path + +### Lifecycle and safety + +Supported operations: + +- cancel current run: `/acp cancel` +- unbind thread: `/unfocus` +- close ACP session: `/acp close` +- auto close idle sessions by effective TTL + +TTL policy: + +- effective TTL is minimum of + - global/session TTL + - Discord thread binding TTL + - ACP runtime owner TTL + +Safety controls: + +- allowlist ACP agents by name +- restrict workspace roots for ACP sessions +- env allowlist passthrough +- max concurrent ACP sessions per account and globally +- bounded restart backoff for runtime crashes + +## Config surface + +Core keys: + +- `acp.enabled` +- `acp.dispatch.enabled` (independent ACP routing kill switch) +- `acp.backend` (default `acpx`) +- `acp.defaultAgent` +- `acp.allowedAgents[]` +- `acp.maxConcurrentSessions` +- `acp.stream.coalesceIdleMs` +- `acp.stream.maxChunkChars` +- `acp.runtime.ttlMinutes` +- `acp.controlPlane.store` (`sqlite` default) +- 
`acp.controlPlane.storePath` +- `acp.controlPlane.recovery.eagerActors` +- `acp.controlPlane.recovery.reconcileRunningAfterMs` +- `acp.controlPlane.checkpoint.flushEveryEvents` +- `acp.controlPlane.checkpoint.flushEveryMs` +- `acp.idempotency.ttlHours` +- `channels.discord.threadBindings.spawnAcpSessions` + +Plugin/backend keys (acpx plugin section): + +- backend command/path overrides +- backend env allowlist +- backend per-agent presets +- backend startup/stop timeouts +- backend max inflight runs per session + +## Implementation specification + +### Control-plane modules (new) + +Add dedicated ACP control-plane modules in core: + +- `src/acp/control-plane/manager.ts` + - owns ACP actors, lifecycle transitions, command serialization +- `src/acp/control-plane/store.ts` + - SQLite schema management, transactions, query helpers +- `src/acp/control-plane/events.ts` + - typed ACP event definitions and serialization +- `src/acp/control-plane/checkpoint.ts` + - durable delivery checkpoints and replay cursors +- `src/acp/control-plane/idempotency.ts` + - idempotency key reservation and response replay +- `src/acp/control-plane/recovery.ts` + - boot-time reconciliation and actor rehydrate plan + +Compatibility bridge modules: + +- `src/acp/runtime/session-meta.ts` + - remains temporarily for projection into `SessionEntry.acp` + - must stop being source-of-truth after migration cutover + +### Required invariants (must enforce in code) + +- ACP session creation and thread bind are atomic (single transaction) +- there is at most one active run per ACP session actor at a time +- event `seq` is strictly increasing per run +- delivery checkpoint never advances past last committed event +- idempotency replay returns previous success payload for duplicate command keys +- stale/missing ACP metadata cannot route into normal non-ACP reply path + +### Core touchpoints + +Core files to change: + +- `src/auto-reply/reply/dispatch-from-config.ts` + - ACP branch calls 
`AcpSessionManager.submit` and event-projection delivery + - remove direct ACP fallback that bypasses control-plane invariants +- `src/auto-reply/reply/inbound-context.ts` (or nearest normalized context boundary) + - expose normalized routing keys and idempotency seeds for ACP control plane +- `src/config/sessions/types.ts` + - keep `SessionEntry.acp` as projection-only compatibility field +- `src/gateway/server-methods/sessions.ts` + - reset/delete/archive must call ACP manager close/unbind transaction path +- `src/infra/outbound/bound-delivery-router.ts` + - enforce fail-closed destination behavior for ACP bound session turns +- `src/discord/monitor/thread-bindings.ts` + - add ACP stale-binding validation helpers wired to control-plane lookups +- `src/auto-reply/reply/commands-acp.ts` + - route spawn/cancel/close/steer through ACP manager APIs +- `src/agents/acp-spawn.ts` + - stop ad-hoc metadata writes; call ACP manager spawn transaction +- `src/plugin-sdk/**` and plugin runtime bridge + - expose ACP backend registration and health semantics cleanly + +Core files explicitly not replaced: + +- `src/discord/monitor/message-handler.preflight.ts` + - keep thread binding override behavior as the canonical session-key resolver + +### ACP runtime registry API + +Add a core registry module: + +- `src/acp/runtime/registry.ts` + +Required API: + +```ts +export type AcpRuntimeBackend = { + id: string; + runtime: AcpRuntime; + healthy?: () => boolean; +}; + +export function registerAcpRuntimeBackend(backend: AcpRuntimeBackend): void; +export function unregisterAcpRuntimeBackend(id: string): void; +export function getAcpRuntimeBackend(id?: string): AcpRuntimeBackend | null; +export function requireAcpRuntimeBackend(id?: string): AcpRuntimeBackend; +``` + +Behavior: + +- `requireAcpRuntimeBackend` throws a typed ACP backend missing error when unavailable +- plugin service registers backend on `start` and unregisters on `stop` +- runtime lookups are read-only and process-local 
+ +### acpx runtime plugin contract (implementation detail) + +For the first production backend (`extensions/acpx`), OpenClaw and acpx are +connected with a strict command contract: + +- backend id: `acpx` +- plugin service id: `acpx-runtime` +- runtime handle encoding: `runtimeSessionName = acpx:v1:<encoded-payload>` +- encoded payload fields: + - `name` (acpx named session; uses OpenClaw `sessionKey`) + - `agent` (acpx agent command) + - `cwd` (session workspace root) + - `mode` (`persistent | oneshot`) + +Command mapping: + +- ensure session: + - `acpx --format json --json-strict --cwd <cwd> sessions ensure --name <name>` +- prompt turn: + - `acpx --format json --json-strict --cwd <cwd> prompt --session <name> --file -` +- cancel: + - `acpx --format json --json-strict --cwd <cwd> cancel --session <name>` +- close: + - `acpx --format json --json-strict --cwd <cwd> sessions close <name>` + +Streaming: + +- OpenClaw consumes ndjson events from `acpx --format json --json-strict` +- `text` => `text_delta/output` +- `thought` => `text_delta/thought` +- `tool_call` => `tool_call` +- `done` => `done` +- `error` => `error` + +### Session schema patch + +Patch `SessionEntry` in `src/config/sessions/types.ts`: + +```ts +type SessionAcpMeta = { + backend: string; + agent: string; + runtimeSessionName: string; + mode: "persistent" | "oneshot"; + cwd?: string; + state: "idle" | "running" | "error"; + lastActivityAt: number; + lastError?: string; +}; +``` + +Persisted field: + +- `SessionEntry.acp?: SessionAcpMeta` + +Migration rules: + +- phase A: dual-write (`acp` projection + ACP SQLite source-of-truth) +- phase B: read-primary from ACP SQLite, fallback-read from legacy `SessionEntry.acp` +- phase C: migration command backfills missing ACP rows from valid legacy entries +- phase D: remove fallback-read and keep projection optional for UX only +- legacy fields (`cliSessionIds`, `claudeCliSessionId`) remain untouched + +### Error contract + +Add stable ACP error codes and user-facing messages: + +- `ACP_BACKEND_MISSING` + - message: `ACP 
runtime backend is not configured. Install and enable the acpx runtime plugin.` +- `ACP_BACKEND_UNAVAILABLE` + - message: `ACP runtime backend is currently unavailable. Try again in a moment.` +- `ACP_SESSION_INIT_FAILED` + - message: `Could not initialize ACP session runtime.` +- `ACP_TURN_FAILED` + - message: `ACP turn failed before completion.` + +Rules: + +- return actionable user-safe message in-thread +- log detailed backend/system error only in runtime logs +- never silently fall back to normal LLM path when ACP routing was explicitly selected + +### Duplicate delivery arbitration + +Single routing rule for ACP bound turns: + +- if an active thread binding exists for the target ACP session and requester context, deliver only to that bound thread +- do not also send to parent channel for the same turn +- if bound destination selection is ambiguous, fail closed with explicit error (no implicit parent fallback) +- if no active binding exists, use normal session destination behavior + +### Observability and operational readiness + +Required metrics: + +- ACP spawn success/failure count by backend and error code +- ACP run latency percentiles (queue wait, runtime turn time, delivery projection time) +- ACP actor restart count and restart reason +- stale-binding detection count +- idempotency replay hit rate +- Discord delivery retry and rate-limit counters + +Required logs: + +- structured logs keyed by `sessionKey`, `runId`, `backend`, `threadId`, `idempotencyKey` +- explicit state transition logs for session and run state machines +- adapter command logs with redaction-safe arguments and exit summary + +Required diagnostics: + +- `/acp sessions` includes state, active run, last error, and binding status +- `/acp doctor` (or equivalent) validates backend registration, store health, and stale bindings + +### Config precedence and effective values + +ACP enablement precedence: + +- account override: `channels.discord.accounts..threadBindings.spawnAcpSessions` +- 
channel override: `channels.discord.threadBindings.spawnAcpSessions` +- global ACP gate: `acp.enabled` +- dispatch gate: `acp.dispatch.enabled` +- backend availability: registered backend for `acp.backend` + +Auto-enable behavior: + +- when ACP is configured (`acp.enabled=true`, `acp.dispatch.enabled=true`, or + `acp.backend=acpx`), plugin auto-enable marks `plugins.entries.acpx.enabled=true` + unless denylisted or explicitly disabled + +TTL effective value: + +- `min(session ttl, discord thread binding ttl, acp runtime ttl)` + +### Test map + +Unit tests: + +- `src/acp/runtime/registry.test.ts` (new) +- `src/auto-reply/reply/dispatch-from-config.acp.test.ts` (new) +- `src/infra/outbound/bound-delivery-router.test.ts` (extend ACP fail-closed cases) +- `src/config/sessions/types.test.ts` or nearest session-store tests (ACP metadata persistence) + +Integration tests: + +- `src/discord/monitor/reply-delivery.test.ts` (bound ACP delivery target behavior) +- `src/discord/monitor/message-handler.preflight*.test.ts` (bound ACP session-key routing continuity) +- acpx plugin runtime tests in backend package (service register/start/stop + event normalization) + +Gateway e2e tests: + +- `src/gateway/server.sessions.gateway-server-sessions-a.e2e.test.ts` (extend ACP reset/delete lifecycle coverage) +- ACP thread turn roundtrip e2e for spawn, message, stream, cancel, unfocus, restart recovery + +### Rollout guard + +Add independent ACP dispatch kill switch: + +- `acp.dispatch.enabled` default `false` for first release +- when disabled: + - ACP spawn/focus control commands may still bind sessions + - ACP dispatch path does not activate + - user receives explicit message that ACP dispatch is disabled by policy +- after canary validation, default can be flipped to `true` in a later release + +## Command and UX plan + +### New commands + +- `/acp spawn [--mode persistent|oneshot] [--thread auto|here|off]` +- `/acp cancel [session]` +- `/acp steer ` +- `/acp close [session]` +- 
`/acp sessions` + +### Existing command compatibility + +- `/focus ` continues to support ACP targets +- `/unfocus` keeps current semantics +- `/session ttl` remains the top level TTL override + +## Phased rollout + +### Phase 0 ADR and schema freeze + +- ship ADR for ACP control-plane ownership and adapter boundaries +- freeze DB schema (`acp_sessions`, `acp_runs`, `acp_bindings`, `acp_events`, `acp_delivery_checkpoint`, `acp_idempotency`) +- define stable ACP error codes, event contract, and state-transition guards + +### Phase 1 Control-plane foundation in core + +- implement `AcpSessionManager` and per-session actor runtime +- implement ACP SQLite store and transaction helpers +- implement idempotency store and replay helpers +- implement event append + delivery checkpoint modules +- wire spawn/cancel/close APIs to manager with transactional guarantees + +### Phase 2 Core routing and lifecycle integration + +- route thread-bound ACP turns from dispatch pipeline into ACP manager +- enforce fail-closed routing when ACP binding/session invariants fail +- integrate reset/delete/archive/unfocus lifecycle with ACP close/unbind transactions +- add stale-binding detection and optional auto-unbind policy + +### Phase 3 acpx backend adapter/plugin + +- implement `acpx` adapter against runtime contract (`ensureSession`, `submit`, `stream`, `cancel`, `close`) +- add backend health checks and startup/teardown registration +- normalize acpx ndjson events into ACP runtime events +- enforce backend timeouts, process supervision, and restart/backoff policy + +### Phase 4 Delivery projection and channel UX (Discord first) + +- implement event-driven channel projection with checkpoint resume (Discord first) +- coalesce streaming chunks with rate-limit aware flush policy +- guarantee exactly-once final completion message per run +- ship `/acp spawn`, `/acp cancel`, `/acp steer`, `/acp close`, `/acp sessions` + +### Phase 5 Migration and cutover + +- introduce dual-write to 
`SessionEntry.acp` projection plus ACP SQLite source-of-truth +- add migration utility for legacy ACP metadata rows +- flip read path to ACP SQLite primary +- remove legacy fallback routing that depends on missing `SessionEntry.acp` + +### Phase 6 Hardening, SLOs, and scale limits + +- enforce concurrency limits (global/account/session), queue policies, and timeout budgets +- add full telemetry, dashboards, and alert thresholds +- chaos-test crash recovery and duplicate-delivery suppression +- publish runbook for backend outage, DB corruption, and stale-binding remediation + +### Full implementation checklist + +- core control-plane modules and tests +- DB migrations and rollback plan +- ACP manager API integration across dispatch and commands +- adapter registration interface in plugin runtime bridge +- acpx adapter implementation and tests +- thread-capable channel delivery projection logic with checkpoint replay (Discord first) +- lifecycle hooks for reset/delete/archive/unfocus +- stale-binding detector and operator-facing diagnostics +- config validation and precedence tests for all new ACP keys +- operational docs and troubleshooting runbook + +## Test plan + +Unit tests: + +- ACP DB transaction boundaries (spawn/bind/enqueue atomicity, cancel, close) +- ACP state-machine transition guards for sessions and runs +- idempotency reservation/replay semantics across all ACP commands +- per-session actor serialization and queue ordering +- acpx event parser and chunk coalescer +- runtime supervisor restart and backoff policy +- config precedence and effective TTL calculation +- core ACP routing branch selection and fail-closed behavior when backend/session is invalid + +Integration tests: + +- fake ACP adapter process for deterministic streaming and cancel behavior +- ACP manager + dispatch integration with transactional persistence +- thread-bound inbound routing to ACP session key +- thread-bound outbound delivery suppresses parent channel duplication +- 
checkpoint replay recovers after delivery failure and resumes from last event +- plugin service registration and teardown of ACP runtime backend + +Gateway e2e tests: + +- spawn ACP with thread, exchange multi-turn prompts, unfocus +- gateway restart with persisted ACP DB and bindings, then continue same session +- concurrent ACP sessions in multiple threads have no cross-talk +- duplicate command retries (same idempotency key) do not create duplicate runs or replies +- stale-binding scenario yields explicit error and optional auto-clean behavior + +## Risks and mitigations + +- Duplicate deliveries during transition + - Mitigation: single destination resolver and idempotent event checkpoint +- Runtime process churn under load + - Mitigation: long lived per session owners + concurrency caps + backoff +- Plugin absent or misconfigured + - Mitigation: explicit operator-facing error and fail-closed ACP routing (no implicit fallback to normal session path) +- Config confusion between subagent and ACP gates + - Mitigation: explicit ACP keys and command feedback that includes effective policy source +- Control-plane store corruption or migration bugs + - Mitigation: WAL mode, backup/restore hooks, migration smoke tests, and read-only fallback diagnostics +- Actor deadlocks or mailbox starvation + - Mitigation: watchdog timers, actor health probes, and bounded mailbox depth with rejection telemetry + +## Acceptance checklist + +- ACP session spawn can create or bind a thread in a supported channel adapter (currently Discord) +- all thread messages route to bound ACP session only +- ACP outputs appear in the same thread identity with streaming or batches +- no duplicate output in parent channel for bound turns +- spawn+bind+initial enqueue are atomic in persistent store +- ACP command retries are idempotent and do not duplicate runs or outputs +- cancel, close, unfocus, archive, reset, and delete perform deterministic cleanup +- crash restart preserves mapping and resumes 
multi-turn continuity
+- concurrent thread-bound ACP sessions work independently
+- ACP backend missing state produces clear actionable error
+- stale bindings are detected and surfaced explicitly (with optional safe auto-clean)
+- control-plane metrics and diagnostics are available for operators
+- new unit, integration, and e2e coverage passes
+
+## Addendum: targeted refactors for current implementation (status)
+
+These are non-blocking follow-ups to keep the ACP path maintainable after the current feature set lands.
+
+### 1) Centralize ACP dispatch policy evaluation (completed)
+
+- implemented via shared ACP policy helpers in `src/acp/policy.ts`
+- dispatch, ACP command lifecycle handlers, and ACP spawn path now consume shared policy logic
+
+### 2) Split ACP command handler by subcommand domain (completed)
+
+- `src/auto-reply/reply/commands-acp.ts` is now a thin router
+- subcommand behavior is split into:
+  - `src/auto-reply/reply/commands-acp/lifecycle.ts`
+  - `src/auto-reply/reply/commands-acp/runtime-options.ts`
+  - `src/auto-reply/reply/commands-acp/diagnostics.ts`
+  - shared helpers in `src/auto-reply/reply/commands-acp/shared.ts`
+
+### 3) Split ACP session manager by responsibility (completed)
+
+- manager is split into:
+  - `src/acp/control-plane/manager.ts` (public facade + singleton)
+  - `src/acp/control-plane/manager.core.ts` (manager implementation)
+  - `src/acp/control-plane/manager.types.ts` (manager types/deps)
+  - `src/acp/control-plane/manager.utils.ts` (normalization + helper functions)
+
+### 4) Optional acpx runtime adapter cleanup
+
+- `extensions/acpx/src/runtime.ts` can be split into:
+  - process execution/supervision
+  - ndjson event parsing/normalization
+  - runtime API surface (`submit`, `cancel`, `close`, etc.) 
+- improves testability and makes backend behavior easier to audit diff --git a/docs/experiments/plans/acp-unified-streaming-refactor.md b/docs/experiments/plans/acp-unified-streaming-refactor.md new file mode 100644 index 00000000000..3834fb9f8d8 --- /dev/null +++ b/docs/experiments/plans/acp-unified-streaming-refactor.md @@ -0,0 +1,96 @@ +--- +summary: "Holy grail refactor plan for one unified runtime streaming pipeline across main, subagent, and ACP" +owner: "onutc" +status: "draft" +last_updated: "2026-02-25" +title: "Unified Runtime Streaming Refactor Plan" +--- + +# Unified Runtime Streaming Refactor Plan + +## Objective + +Deliver one shared streaming pipeline for `main`, `subagent`, and `acp` so all runtimes get identical coalescing, chunking, delivery ordering, and crash recovery behavior. + +## Why this exists + +- Current behavior is split across multiple runtime-specific shaping paths. +- Formatting/coalescing bugs can be fixed in one path but remain in others. +- Delivery consistency, duplicate suppression, and recovery semantics are harder to reason about. + +## Target architecture + +Single pipeline, runtime-specific adapters: + +1. Runtime adapters emit canonical events only. +2. Shared stream assembler coalesces and finalizes text/tool/status events. +3. Shared channel projector applies channel-specific chunking/formatting once. +4. Shared delivery ledger enforces idempotent send/replay semantics. +5. Outbound channel adapter executes sends and records delivery checkpoints. + +Canonical event contract: + +- `turn_started` +- `text_delta` +- `block_final` +- `tool_started` +- `tool_finished` +- `status` +- `turn_completed` +- `turn_failed` +- `turn_cancelled` + +## Workstreams + +### 1) Canonical streaming contract + +- Define strict event schema + validation in core. +- Add adapter contract tests to guarantee each runtime emits compatible events. +- Reject malformed runtime events early and surface structured diagnostics. 
+ +### 2) Shared stream processor + +- Replace runtime-specific coalescer/projector logic with one processor. +- Processor owns text delta buffering, idle flush, max-chunk splitting, and completion flush. +- Move ACP/main/subagent config resolution into one helper to prevent drift. + +### 3) Shared channel projection + +- Keep channel adapters dumb: accept finalized blocks and send. +- Move Discord-specific chunking quirks to channel projector only. +- Keep pipeline channel-agnostic before projection. + +### 4) Delivery ledger + replay + +- Add per-turn/per-chunk delivery IDs. +- Record checkpoints before and after physical send. +- On restart, replay pending chunks idempotently and avoid duplicates. + +### 5) Migration and cutover + +- Phase 1: shadow mode (new pipeline computes output but old path sends; compare). +- Phase 2: runtime-by-runtime cutover (`acp`, then `subagent`, then `main` or reverse by risk). +- Phase 3: delete legacy runtime-specific streaming code. + +## Non-goals + +- No changes to ACP policy/permissions model in this refactor. +- No channel-specific feature expansion outside projection compatibility fixes. +- No transport/backend redesign (acpx plugin contract remains as-is unless needed for event parity). + +## Risks and mitigations + +- Risk: behavioral regressions in existing main/subagent paths. + Mitigation: shadow mode diffing + adapter contract tests + channel e2e tests. +- Risk: duplicate sends during crash recovery. + Mitigation: durable delivery IDs + idempotent replay in delivery adapter. +- Risk: runtime adapters diverge again. + Mitigation: required shared contract test suite for all adapters. + +## Acceptance criteria + +- All runtimes pass shared streaming contract tests. +- Discord ACP/main/subagent produce equivalent spacing/chunking behavior for tiny deltas. +- Crash/restart replay sends no duplicate chunk for the same delivery ID. +- Legacy ACP projector/coalescer path is removed. 
+- Streaming config resolution is shared and runtime-independent. diff --git a/docs/gateway/authentication.md b/docs/gateway/authentication.md index 8dd18f8416d..448789c9a6c 100644 --- a/docs/gateway/authentication.md +++ b/docs/gateway/authentication.md @@ -14,6 +14,7 @@ use the long‑lived token created by `claude setup-token`. See [/concepts/oauth](/concepts/oauth) for the full OAuth flow and storage layout. +For SecretRef-based auth (`env`/`file`/`exec` providers), see [Secrets Management](/gateway/secrets). ## Recommended Anthropic setup (API key) @@ -85,6 +86,11 @@ openclaw models auth paste-token --provider anthropic openclaw models auth paste-token --provider openrouter ``` +Auth profile refs are also supported for static credentials: + +- `api_key` credentials can use `keyRef: { source, provider, id }` +- `token` credentials can use `tokenRef: { source, provider, id }` + Automation-friendly check (exit `1` when expired/missing, `2` when expiring): ```bash diff --git a/docs/gateway/configuration-examples.md b/docs/gateway/configuration-examples.md index d3838bbdae6..0639dc36e92 100644 --- a/docs/gateway/configuration-examples.md +++ b/docs/gateway/configuration-examples.md @@ -273,6 +273,7 @@ Save to `~/.openclaw/openclaw.json` and you can DM the bot from that number. every: "30m", model: "anthropic/claude-sonnet-4-5", target: "last", + directPolicy: "allow", // allow (default) | block to: "+15555550123", prompt: "HEARTBEAT", ackMaxChars: 300, @@ -627,4 +628,4 @@ Only enable direct mutable name/email/nick matching with each channel's `dangero - If you set `dmPolicy: "open"`, the matching `allowFrom` list must include `"*"`. - Provider IDs differ (phone numbers, user IDs, channel IDs). Use the provider docs to confirm the format. - Optional sections to add later: `web`, `browser`, `ui`, `discovery`, `canvasHost`, `talk`, `signal`, `imessage`. -- See [Providers](/channels/whatsapp) and [Troubleshooting](/gateway/troubleshooting) for deeper setup notes. 
+- See [Providers](/providers) and [Troubleshooting](/gateway/troubleshooting) for deeper setup notes. diff --git a/docs/gateway/configuration-reference.md b/docs/gateway/configuration-reference.md index 01ad82b6098..3feb7462d3f 100644 --- a/docs/gateway/configuration-reference.md +++ b/docs/gateway/configuration-reference.md @@ -65,6 +65,30 @@ Use `channels.modelByChannel` to pin specific channel IDs to a model. Values acc } ``` +### Channel defaults and heartbeat + +Use `channels.defaults` for shared group-policy and heartbeat behavior across providers: + +```json5 +{ + channels: { + defaults: { + groupPolicy: "allowlist", // open | allowlist | disabled + heartbeat: { + showOk: false, + showAlerts: true, + useIndicator: true, + }, + }, + }, +} +``` + +- `channels.defaults.groupPolicy`: fallback group policy when a provider-level `groupPolicy` is unset. +- `channels.defaults.heartbeat.showOk`: include healthy channel statuses in heartbeat output. +- `channels.defaults.heartbeat.showAlerts`: include degraded/error statuses in heartbeat output. +- `channels.defaults.heartbeat.useIndicator`: render compact indicator-style heartbeat output. + ### WhatsApp WhatsApp runs through the gateway's web channel (Baileys Web). It starts automatically when a linked session exists. @@ -321,6 +345,7 @@ WhatsApp runs through the gateway's web channel (Baileys Web). It starts automat ``` - Service account JSON: inline (`serviceAccount`) or file-based (`serviceAccountFile`). +- Service account SecretRef is also supported (`serviceAccountRef`). - Env fallbacks: `GOOGLE_CHAT_SERVICE_ACCOUNT` or `GOOGLE_CHAT_SERVICE_ACCOUNT_FILE`. - Use `spaces/` or `users/` for delivery targets. - `channels.googlechat.dangerouslyAllowNameMatching` re-enables mutable email principal matching (break-glass compatibility mode). @@ -421,12 +446,20 @@ Mattermost ships as a plugin: `openclaw plugins install @openclaw/mattermost`. 
Chat modes: `oncall` (respond on @-mention, default), `onmessage` (every message), `onchar` (messages starting with trigger prefix). +- `channels.mattermost.configWrites`: allow or deny Mattermost-initiated config writes. +- `channels.mattermost.requireMention`: require `@mention` before replying in channels. + ### Signal ```json5 { channels: { signal: { + enabled: true, + account: "+15555550123", // optional account binding + dmPolicy: "pairing", + allowFrom: ["+15551234567", "uuid:123e4567-e89b-12d3-a456-426614174000"], + configWrites: true, reactionNotifications: "own", // off | own | all | allowlist reactionAllowlist: ["+15551234567", "uuid:123e4567-e89b-12d3-a456-426614174000"], historyLimit: 50, @@ -437,6 +470,29 @@ Chat modes: `oncall` (respond on @-mention, default), `onmessage` (every message **Reaction notification modes:** `off`, `own` (default), `all`, `allowlist` (from `reactionAllowlist`). +- `channels.signal.account`: pin channel startup to a specific Signal account identity. +- `channels.signal.configWrites`: allow or deny Signal-initiated config writes. + +### BlueBubbles + +BlueBubbles is the recommended iMessage path (plugin-backed, configured under `channels.bluebubbles`). + +```json5 +{ + channels: { + bluebubbles: { + enabled: true, + dmPolicy: "pairing", + // serverUrl, password, webhookPath, group controls, and advanced actions: + // see /channels/bluebubbles + }, + }, +} +``` + +- Core key paths covered here: `channels.bluebubbles`, `channels.bluebubbles.dmPolicy`. +- Full BlueBubbles channel configuration is documented in [BlueBubbles](/channels/bluebubbles). + ### iMessage OpenClaw spawns `imsg rpc` (JSON-RPC over stdio). No daemon or port required. @@ -468,6 +524,7 @@ OpenClaw spawns `imsg rpc` (JSON-RPC over stdio). No daemon or port required. - `cliPath` can point to an SSH wrapper; set `remoteHost` (`host` or `user@host`) for SCP attachment fetching. 
- `attachmentRoots` and `remoteAttachmentRoots` restrict inbound attachment paths (default: `/Users/*/Library/Messages/Attachments`). - SCP uses strict host-key checking, so ensure the relay host key already exists in `~/.ssh/known_hosts`. +- `channels.imessage.configWrites`: allow or deny iMessage-initiated config writes. @@ -478,6 +535,52 @@ exec ssh -T gateway-host imsg "$@" +### Microsoft Teams + +Microsoft Teams is extension-backed and configured under `channels.msteams`. + +```json5 +{ + channels: { + msteams: { + enabled: true, + configWrites: true, + // appId, appPassword, tenantId, webhook, team/channel policies: + // see /channels/msteams + }, + }, +} +``` + +- Core key paths covered here: `channels.msteams`, `channels.msteams.configWrites`. +- Full Teams config (credentials, webhook, DM/group policy, per-team/per-channel overrides) is documented in [Microsoft Teams](/channels/msteams). + +### IRC + +IRC is extension-backed and configured under `channels.irc`. + +```json5 +{ + channels: { + irc: { + enabled: true, + dmPolicy: "pairing", + configWrites: true, + nickserv: { + enabled: true, + service: "NickServ", + password: "${IRC_NICKSERV_PASSWORD}", + register: false, + registerEmail: "bot@example.com", + }, + }, + }, +} +``` + +- Core key paths covered here: `channels.irc`, `channels.irc.dmPolicy`, `channels.irc.configWrites`, `channels.irc.nickserv.*`. +- Full IRC channel configuration (host/port/TLS/channels/allowlists/mention gating) is documented in [IRC](/channels/irc). + ### Multi-account (all channels) Run multiple accounts per channel (each with its own `accountId`): @@ -505,6 +608,14 @@ Run multiple accounts per channel (each with its own `accountId`): - Env tokens only apply to the **default** account. - Base channel settings apply to all accounts unless overridden per account. - Use `bindings[].match.accountId` to route each account to a different agent. 
+- If you add a non-default account via `openclaw channels add` (or channel onboarding) while still on a single-account top-level channel config, OpenClaw moves account-scoped top-level single-account values into `channels..accounts.default` first so the original account keeps working. +- Existing channel-only bindings (no `accountId`) keep matching the default account; account-scoped bindings remain optional. +- `openclaw doctor --fix` also repairs mixed shapes by moving account-scoped top-level single-account values into `accounts.default` when named accounts exist but `default` is missing. + +### Other extension channels + +Many extension channels are configured as `channels.` and documented in their dedicated channel pages (for example Feishu, Matrix, LINE, Nostr, Zalo, Nextcloud Talk, Synology Chat, and Twitch). +See the full channel index: [Channels](/channels). ### Group chat mention gating @@ -800,6 +911,7 @@ Periodic heartbeat runs. includeReasoning: false, session: "main", to: "+15555550123", + directPolicy: "allow", // allow (default) | block target: "none", // default: none | options: last | whatsapp | telegram | discord | ... prompt: "Read HEARTBEAT.md if it exists...", ackMaxChars: 300, @@ -812,7 +924,7 @@ Periodic heartbeat runs. - `every`: duration string (ms/s/m/h). Default: `30m`. - `suppressToolErrorWarnings`: when true, suppresses tool error warning payloads during heartbeat runs. -- Heartbeats never deliver to direct/DM chat targets when the destination can be classified as direct (for example `user:`, Telegram user chat IDs, or WhatsApp direct numbers/JIDs); those runs still execute, but outbound delivery is skipped. +- `directPolicy`: direct/DM delivery policy. `allow` (default) permits direct-target delivery. `block` suppresses direct-target delivery and emits `reason=dm-blocked`. - Per-agent: set `agents.list[].heartbeat`. When any agent defines `heartbeat`, **only those agents** run heartbeats. 
- Heartbeats run full agent turns — shorter intervals burn more tokens. @@ -1250,6 +1362,7 @@ See [Multi-Agent Sandbox & Tools](/tools/multi-agent-sandbox-tools) for preceden }, resetTriggers: ["/new", "/reset"], store: "~/.openclaw/agents/{agentId}/sessions/sessions.json", + parentForkMaxTokens: 100000, // skip parent-thread fork above this token count (0 disables) maintenance: { mode: "warn", // warn | enforce pruneAfter: "30d", @@ -1283,6 +1396,9 @@ See [Multi-Agent Sandbox & Tools](/tools/multi-agent-sandbox-tools) for preceden - **`identityLinks`**: map canonical ids to provider-prefixed peers for cross-channel session sharing. - **`reset`**: primary reset policy. `daily` resets at `atHour` local time; `idle` resets after `idleMinutes`. When both configured, whichever expires first wins. - **`resetByType`**: per-type overrides (`direct`, `group`, `thread`). Legacy `dm` accepted as alias for `direct`. +- **`parentForkMaxTokens`**: max parent-session `totalTokens` allowed when creating a forked thread session (default `100000`). + - If parent `totalTokens` is above this value, OpenClaw starts a fresh thread session instead of inheriting parent transcript history. + - Set `0` to disable this guard and always allow parent forking. - **`mainKey`**: legacy field. Runtime now always uses `"main"` for the main direct-chat bucket. - **`sendPolicy`**: match by `channel`, `chatType` (`direct|group|channel`, with legacy `dm` alias), `keyPrefix`, or `rawKeyPrefix`. First deny wins. - **`maintenance`**: session-store cleanup + retention controls. @@ -1736,6 +1852,29 @@ OpenClaw uses the pi-coding-agent model catalog. Add custom providers via `model - Use `authHeader: true` + `headers` for custom auth needs. - Override agent config root with `OPENCLAW_AGENT_DIR` (or `PI_CODING_AGENT_DIR`). +- Merge precedence for matching provider IDs: + - Non-empty agent `models.json` `apiKey`/`baseUrl` win. 
+ - Empty or missing agent `apiKey`/`baseUrl` fall back to `models.providers` in config. + - Use `models.mode: "replace"` when you want config to fully rewrite `models.json`. + +### Provider field details + +- `models.mode`: provider catalog behavior (`merge` or `replace`). +- `models.providers`: custom provider map keyed by provider id. +- `models.providers.*.api`: request adapter (`openai-completions`, `openai-responses`, `anthropic-messages`, `google-generative-ai`, etc). +- `models.providers.*.apiKey`: provider credential (prefer SecretRef/env substitution). +- `models.providers.*.auth`: auth strategy (`api-key`, `token`, `oauth`, `aws-sdk`). +- `models.providers.*.authHeader`: force credential transport in the `Authorization` header when required. +- `models.providers.*.baseUrl`: upstream API base URL. +- `models.providers.*.headers`: extra static headers for proxy/tenant routing. +- `models.providers.*.models`: explicit provider model catalog entries. +- `models.bedrockDiscovery`: Bedrock auto-discovery settings root. +- `models.bedrockDiscovery.enabled`: turn discovery polling on/off. +- `models.bedrockDiscovery.region`: AWS region for discovery. +- `models.bedrockDiscovery.providerFilter`: optional provider-id filter for targeted discovery. +- `models.bedrockDiscovery.refreshInterval`: polling interval for discovery refresh. +- `models.bedrockDiscovery.defaultContextWindow`: fallback context window for discovered models. +- `models.bedrockDiscovery.defaultMaxTokens`: fallback max output tokens for discovered models. ### Provider examples @@ -1974,7 +2113,7 @@ See [Local Models](/gateway/local-models). TL;DR: run MiniMax M2.1 via LM Studio }, entries: { "nano-banana-pro": { - apiKey: "GEMINI_KEY_HERE", + apiKey: { source: "env", provider: "default", id: "GEMINI_API_KEY" }, // or plaintext string env: { GEMINI_API_KEY: "GEMINI_KEY_HERE" }, }, peekaboo: { enabled: true }, @@ -1986,7 +2125,7 @@ See [Local Models](/gateway/local-models). 
TL;DR: run MiniMax M2.1 via LM Studio - `allowBundled`: optional allowlist for bundled skills only (managed/workspace skills unaffected). - `entries..enabled: false` disables a skill even if bundled/installed. -- `entries..apiKey`: convenience for skills declaring a primary env var. +- `entries..apiKey`: convenience for skills declaring a primary env var (plaintext string or SecretRef object). --- @@ -2014,6 +2153,13 @@ See [Local Models](/gateway/local-models). TL;DR: run MiniMax M2.1 via LM Studio - Loaded from `~/.openclaw/extensions`, `/.openclaw/extensions`, plus `plugins.load.paths`. - **Config changes require a gateway restart.** - `allow`: optional allowlist (only listed plugins load). `deny` wins. +- `plugins.entries..apiKey`: plugin-level API key convenience field (when supported by the plugin). +- `plugins.entries..env`: plugin-scoped env var map. +- `plugins.entries..config`: plugin-defined config object (validated by plugin schema). +- `plugins.slots.memory`: pick the active memory plugin id, or `"none"` to disable memory plugins. +- `plugins.installs`: CLI-managed install metadata used by `openclaw plugins update`. + - Includes `source`, `spec`, `sourcePath`, `installPath`, `version`, `resolvedName`, `resolvedVersion`, `resolvedSpec`, `integrity`, `shasum`, `resolvedAt`, `installedAt`. + - Treat `plugins.installs.*` as managed state; prefer CLI commands over manual edits. See [Plugins](/tools/plugin). @@ -2136,16 +2282,18 @@ See [Plugins](/tools/plugin). - `port`: single multiplexed port for WS + HTTP. Precedence: `--port` > `OPENCLAW_GATEWAY_PORT` > `gateway.port` > `18789`. - `bind`: `auto`, `loopback` (default), `lan` (`0.0.0.0`), `tailnet` (Tailscale IP only), or `custom`. - **Auth**: required by default. Non-loopback binds require a shared token/password. Onboarding wizard generates a token by default. -- `auth.mode: "none"`: explicit no-auth mode. 
Use only for trusted local loopback setups; this is intentionally not offered by onboarding prompts. -- `auth.mode: "trusted-proxy"`: delegate auth to an identity-aware reverse proxy and trust identity headers from `gateway.trustedProxies` (see [Trusted Proxy Auth](/gateway/trusted-proxy-auth)). -- `auth.allowTailscale`: when `true`, Tailscale Serve identity headers can satisfy Control UI/WebSocket auth (verified via `tailscale whois`); HTTP API endpoints still require token/password auth. This tokenless flow assumes the gateway host is trusted. Defaults to `true` when `tailscale.mode = "serve"`. -- `auth.rateLimit`: optional failed-auth limiter. Applies per client IP and per auth scope (shared-secret and device-token are tracked independently). Blocked attempts return `429` + `Retry-After`. - - `auth.rateLimit.exemptLoopback` defaults to `true`; set `false` when you intentionally want localhost traffic rate-limited too (for test setups or strict proxy deployments). +- `gateway.auth.mode: "none"`: explicit no-auth mode. Use only for trusted local loopback setups; this is intentionally not offered by onboarding prompts. +- `gateway.auth.mode: "trusted-proxy"`: delegate auth to an identity-aware reverse proxy and trust identity headers from `gateway.trustedProxies` (see [Trusted Proxy Auth](/gateway/trusted-proxy-auth)). +- `gateway.auth.allowTailscale`: when `true`, Tailscale Serve identity headers can satisfy Control UI/WebSocket auth (verified via `tailscale whois`); HTTP API endpoints still require token/password auth. This tokenless flow assumes the gateway host is trusted. Defaults to `true` when `tailscale.mode = "serve"`. +- `gateway.auth.rateLimit`: optional failed-auth limiter. Applies per client IP and per auth scope (shared-secret and device-token are tracked independently). Blocked attempts return `429` + `Retry-After`. 
+ - `gateway.auth.rateLimit.exemptLoopback` defaults to `true`; set `false` when you intentionally want localhost traffic rate-limited too (for test setups or strict proxy deployments). +- Browser-origin WS auth attempts are always throttled with loopback exemption disabled (defense-in-depth against browser-based localhost brute force). - `tailscale.mode`: `serve` (tailnet only, loopback bind) or `funnel` (public, requires auth). -- `controlUi.allowedOrigins`: explicit browser-origin allowlist for Control UI/WebChat WebSocket connects. Required when Control UI is reachable on non-loopback binds. +- `controlUi.allowedOrigins`: explicit browser-origin allowlist for Gateway WebSocket connects. Required when browser clients are expected from non-loopback origins. - `controlUi.dangerouslyAllowHostHeaderOriginFallback`: dangerous mode that enables Host-header origin fallback for deployments that intentionally rely on Host-header origin policy. - `remote.transport`: `ssh` (default) or `direct` (ws/wss). For `direct`, `remote.url` must be `ws://` or `wss://`. -- `gateway.remote.token` is for remote CLI calls only; does not enable local gateway auth. +- `gateway.remote.token` / `.password` are remote-client credential fields. They do not configure gateway auth by themselves. +- Local gateway call paths can use `gateway.remote.*` as fallback when `gateway.auth.*` is unset. - `trustedProxies`: reverse proxy IPs that terminate TLS. Only list proxies you control. - `allowRealIpFallback`: when `true`, the gateway accepts `X-Real-IP` if `X-Forwarded-For` is missing. Default `false` for fail-closed behavior. - `gateway.tools.deny`: extra tool names blocked for HTTP `POST /tools/invoke` (extends default deny list). @@ -2371,6 +2519,73 @@ Reference env vars in any config string with `${VAR_NAME}`: --- +## Secrets + +Secret refs are additive: plaintext values still work. 
+ +### `SecretRef` + +Use one object shape: + +```json5 +{ source: "env" | "file" | "exec", provider: "default", id: "..." } +``` + +Validation: + +- `provider` pattern: `^[a-z][a-z0-9_-]{0,63}$` +- `source: "env"` id pattern: `^[A-Z][A-Z0-9_]{0,127}$` +- `source: "file"` id: absolute JSON pointer (for example `"/providers/openai/apiKey"`) +- `source: "exec"` id pattern: `^[A-Za-z0-9][A-Za-z0-9._:/-]{0,255}$` + +### Supported fields in config + +- `models.providers..apiKey` +- `skills.entries..apiKey` +- `channels.googlechat.serviceAccount` +- `channels.googlechat.serviceAccountRef` +- `channels.googlechat.accounts..serviceAccount` +- `channels.googlechat.accounts..serviceAccountRef` + +### Secret providers config + +```json5 +{ + secrets: { + providers: { + default: { source: "env" }, // optional explicit env provider + filemain: { + source: "file", + path: "~/.openclaw/secrets.json", + mode: "json", + timeoutMs: 5000, + }, + vault: { + source: "exec", + command: "/usr/local/bin/openclaw-vault-resolver", + passEnv: ["PATH", "VAULT_ADDR"], + }, + }, + defaults: { + env: "default", + file: "filemain", + exec: "vault", + }, + }, +} +``` + +Notes: + +- `file` provider supports `mode: "json"` and `mode: "singleValue"` (`id` must be `"value"` in singleValue mode). +- `exec` provider requires an absolute `command` path and uses protocol payloads on stdin/stdout. +- By default, symlink command paths are rejected. Set `allowSymlinkCommand: true` to allow symlink paths while validating the resolved target path. +- If `trustedDirs` is configured, the trusted-dir check applies to the resolved target path. +- `exec` child environment is minimal by default; pass required variables explicitly with `passEnv`. +- Secret refs are resolved at activation time into an in-memory snapshot, then request paths read the snapshot only. 
+ +--- + ## Auth storage ```json5 @@ -2388,8 +2603,11 @@ Reference env vars in any config string with `${VAR_NAME}`: ``` - Per-agent auth profiles stored at `/auth-profiles.json`. +- Auth profiles support value-level refs (`keyRef` for `api_key`, `tokenRef` for `token`). +- Static runtime credentials come from in-memory resolved snapshots; legacy static `auth.json` entries are scrubbed when discovered. - Legacy OAuth imports from `~/.openclaw/credentials/oauth.json`. - See [OAuth](/concepts/oauth). +- Secrets runtime behavior and `audit/configure/apply` tooling: [Secrets Management](/gateway/secrets). --- @@ -2514,7 +2732,7 @@ See [Cron Jobs](/automation/cron-jobs). ## Media model template variables -Template placeholders expanded in `tools.media.*.models[].args`: +Template placeholders expanded in `tools.media.models[].args`: | Variable | Description | | ------------------ | ------------------------------------------------- | diff --git a/docs/gateway/configuration.md b/docs/gateway/configuration.md index 3f7403d4647..46756dbc01a 100644 --- a/docs/gateway/configuration.md +++ b/docs/gateway/configuration.md @@ -239,7 +239,8 @@ When validation fails: ``` - `every`: duration string (`30m`, `2h`). Set `0m` to disable. - - `target`: `last` | `whatsapp` | `telegram` | `discord` | `none` (DM-style `user:` heartbeat delivery is blocked) + - `target`: `last` | `whatsapp` | `telegram` | `discord` | `none` + - `directPolicy`: `allow` (default) or `block` for DM-style heartbeat targets - See [Heartbeat](/gateway/heartbeat) for the full guide. 
@@ -491,6 +492,42 @@ Rules: + + For fields that support SecretRef objects, you can use: + +```json5 +{ + models: { + providers: { + openai: { apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" } }, + }, + }, + skills: { + entries: { + "nano-banana-pro": { + apiKey: { + source: "file", + provider: "filemain", + id: "/skills/entries/nano-banana-pro/apiKey", + }, + }, + }, + }, + channels: { + googlechat: { + serviceAccountRef: { + source: "exec", + provider: "vault", + id: "channels/googlechat/serviceAccount", + }, + }, + }, +} +``` + +SecretRef details (including `secrets.providers` for `env`/`file`/`exec`) are in [Secrets Management](/gateway/secrets). + + See [Environment](/help/environment) for full precedence and sources. ## Full reference diff --git a/docs/gateway/doctor.md b/docs/gateway/doctor.md index 4647cb8b411..4ecc10b4c66 100644 --- a/docs/gateway/doctor.md +++ b/docs/gateway/doctor.md @@ -121,6 +121,7 @@ Current migrations: - `routing.agentToAgent` → `tools.agentToAgent` - `routing.transcribeAudio` → `tools.media.audio.models` - `bindings[].match.accountID` → `bindings[].match.accountId` +- For channels with named `accounts` but missing `accounts.default`, move account-scoped top-level single-account channel values into `channels..accounts.default` when present - `identity` → `agents.list[].identity` - `agent.*` → `agents.defaults` + `tools.*` (tools/elevated/exec/sandbox/subagents) - `agent.model`/`allowedModels`/`modelAliases`/`modelFallbacks`/`imageModelFallbacks` diff --git a/docs/gateway/heartbeat.md b/docs/gateway/heartbeat.md index cf7ea489c40..a4f4aa64ea9 100644 --- a/docs/gateway/heartbeat.md +++ b/docs/gateway/heartbeat.md @@ -32,6 +32,7 @@ Example config: heartbeat: { every: "30m", target: "last", // explicit delivery to last contact (default is "none") + directPolicy: "allow", // default: allow direct/DM targets; set "block" to suppress // activeHours: { start: "08:00", end: "24:00" }, // includeReasoning: true, // optional: 
send separate `Reasoning:` message too }, @@ -215,7 +216,9 @@ Use `accountId` to target a specific account on multi-account channels like Tele - `last`: deliver to the last used external channel. - explicit channel: `whatsapp` / `telegram` / `discord` / `googlechat` / `slack` / `msteams` / `signal` / `imessage`. - `none` (default): run the heartbeat but **do not deliver** externally. -- Direct/DM heartbeat destinations are blocked when target parsing identifies a direct chat (for example `user:`, Telegram user chat IDs, or WhatsApp direct numbers/JIDs). +- `directPolicy`: controls direct/DM delivery behavior: + - `allow` (default): allow direct/DM heartbeat delivery. + - `block`: suppress direct/DM delivery (`reason=dm-blocked`). - `to`: optional recipient override (channel-specific id, e.g. E.164 for WhatsApp or a Telegram chat id). For Telegram topics/threads, use `:topic:`. - `accountId`: optional account id for multi-account channels. When `target: "last"`, the account id applies to the resolved last channel if it supports accounts; otherwise it is ignored. If the account id does not match a configured account for the resolved channel, delivery is skipped. - `prompt`: overrides the default prompt body (not merged). @@ -236,7 +239,7 @@ Use `accountId` to target a specific account on multi-account channels like Tele - `session` only affects the run context; delivery is controlled by `target` and `to`. - To deliver to a specific channel/recipient, set `target` + `to`. With `target: "last"`, delivery uses the last external channel for that session. -- Heartbeat deliveries never send to direct/DM targets when the destination is identified as direct; those runs still execute, but outbound delivery is skipped. +- Heartbeat deliveries allow direct/DM targets by default. Set `directPolicy: "block"` to suppress direct-target sends while still running the heartbeat turn. - If the main queue is busy, the heartbeat is skipped and retried later. 
- If `target` resolves to no external destination, the run still happens but no outbound message is sent. diff --git a/docs/gateway/index.md b/docs/gateway/index.md index c1e06d63457..f64de55f32a 100644 --- a/docs/gateway/index.md +++ b/docs/gateway/index.md @@ -16,6 +16,12 @@ Use this page for day-1 startup and day-2 operations of the Gateway service. Task-oriented setup guide + full configuration reference. + + SecretRef contract, runtime snapshot behavior, and migrate/reload operations. + + + Exact `secrets apply` target/path rules and ref-only auth-profile behavior. + ## 5-minute local startup @@ -94,6 +100,7 @@ openclaw gateway status --json openclaw gateway install openclaw gateway restart openclaw gateway stop +openclaw secrets reload openclaw logs --follow openclaw doctor ``` diff --git a/docs/gateway/protocol.md b/docs/gateway/protocol.md index 85a69aca679..e80263ab443 100644 --- a/docs/gateway/protocol.md +++ b/docs/gateway/protocol.md @@ -215,6 +215,10 @@ The Gateway treats these as **claims** and enforces server-side allowlists. Control UI can omit it **only** when `gateway.controlUi.dangerouslyDisableDeviceAuth` is enabled for break-glass use. - All connections must sign the server-provided `connect.challenge` nonce. +- Preferred signature payload is `v3`, which binds `platform` and `deviceFamily` + in addition to device/client/role/scopes/token/nonce fields. +- Legacy `v2` signatures remain accepted for compatibility, but paired-device + metadata pinning still controls command policy on reconnect. ## TLS + pinning diff --git a/docs/gateway/remote.md b/docs/gateway/remote.md index 52b6e095390..68170fe2b88 100644 --- a/docs/gateway/remote.md +++ b/docs/gateway/remote.md @@ -107,8 +107,8 @@ Gateway call/probe credential resolution now follows one shared contract: - Explicit credentials (`--token`, `--password`, or tool `gatewayToken`) always win. 
- Local mode defaults: - - token: `OPENCLAW_GATEWAY_TOKEN` -> `gateway.auth.token` - - password: `OPENCLAW_GATEWAY_PASSWORD` -> `gateway.auth.password` + - token: `OPENCLAW_GATEWAY_TOKEN` -> `gateway.auth.token` -> `gateway.remote.token` + - password: `OPENCLAW_GATEWAY_PASSWORD` -> `gateway.auth.password` -> `gateway.remote.password` - Remote mode defaults: - token: `gateway.remote.token` -> `OPENCLAW_GATEWAY_TOKEN` -> `gateway.auth.token` - password: `OPENCLAW_GATEWAY_PASSWORD` -> `gateway.remote.password` -> `gateway.auth.password` @@ -134,7 +134,8 @@ Short version: **keep the Gateway loopback-only** unless you’re sure you need - **Loopback + SSH/Tailscale Serve** is the safest default (no public exposure). - **Non-loopback binds** (`lan`/`tailnet`/`custom`, or `auto` when loopback is unavailable) must use auth tokens/passwords. -- `gateway.remote.token` is **only** for remote CLI calls — it does **not** enable local auth. +- `gateway.remote.token` / `.password` are client credential sources. They do **not** configure server auth by themselves. +- Local call paths can use `gateway.remote.*` as fallback when `gateway.auth.*` is unset. - `gateway.remote.tlsFingerprint` pins the remote TLS cert when using `wss://`. 
- **Tailscale Serve** can authenticate Control UI/WebSocket traffic via identity headers when `gateway.auth.allowTailscale: true`; HTTP API endpoints still diff --git a/docs/gateway/secrets-plan-contract.md b/docs/gateway/secrets-plan-contract.md new file mode 100644 index 00000000000..d503d6cac82 --- /dev/null +++ b/docs/gateway/secrets-plan-contract.md @@ -0,0 +1,94 @@ +--- +summary: "Contract for `secrets apply` plans: allowed target paths, validation, and ref-only auth-profile behavior" +read_when: + - Generating or reviewing `openclaw secrets apply` plan files + - Debugging `Invalid plan target path` errors + - Understanding how `keyRef` and `tokenRef` influence implicit provider discovery +title: "Secrets Apply Plan Contract" +--- + +# Secrets apply plan contract + +This page defines the strict contract enforced by `openclaw secrets apply`. + +If a target does not match these rules, apply fails before mutating config. + +## Plan file shape + +`openclaw secrets apply --from ` expects a `targets` array of plan targets: + +```json5 +{ + version: 1, + protocolVersion: 1, + targets: [ + { + type: "models.providers.apiKey", + path: "models.providers.openai.apiKey", + pathSegments: ["models", "providers", "openai", "apiKey"], + providerId: "openai", + ref: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + ], +} +``` + +## Allowed target types and paths + +| `target.type` | Allowed `target.path` shape | Optional id match rule | +| ------------------------------------ | --------------------------------------------------------- | --------------------------------------------------- | +| `models.providers.apiKey` | `models.providers..apiKey` | `providerId` must match `` when present | +| `skills.entries.apiKey` | `skills.entries..apiKey` | n/a | +| `channels.googlechat.serviceAccount` | `channels.googlechat.serviceAccount` | `accountId` must be empty/omitted | +| `channels.googlechat.serviceAccount` | `channels.googlechat.accounts..serviceAccount` | 
`accountId` must match `<accountId>` when present |
+
+## Path validation rules
+
+Each target is validated with all of the following:
+
+- `type` must be one of the allowed target types above.
+- `path` must be a non-empty dot path.
+- `pathSegments` can be omitted. If provided, it must normalize to exactly the same path as `path`.
+- Forbidden segments are rejected: `__proto__`, `prototype`, `constructor`.
+- The normalized path must match one of the allowed path shapes for the target type.
+- If `providerId` / `accountId` is set, it must match the id encoded in the path.
+
+## Failure behavior
+
+If a target fails validation, apply exits with an error like:
+
+```text
+Invalid plan target path for models.providers.apiKey: models.providers.openai.baseUrl
+```
+
+No partial mutation is committed for that invalid target path.
+
+## Ref-only auth profiles and implicit providers
+
+Implicit provider discovery also considers auth profiles that store refs instead of plaintext credentials:
+
+- `type: "api_key"` profiles can use `keyRef` (for example env-backed refs).
+- `type: "token"` profiles can use `tokenRef`.
+
+Behavior:
+
+- For API-key providers (for example `volcengine`, `byteplus`), ref-only profiles can still activate implicit provider entries.
+- For `github-copilot`, if the profile has no plaintext token, discovery will try `tokenRef` env resolution before token exchange.
+
+## Operator checks
+
+```bash
+# Validate plan without writes
+openclaw secrets apply --from /tmp/openclaw-secrets-plan.json --dry-run
+
+# Then apply for real
+openclaw secrets apply --from /tmp/openclaw-secrets-plan.json
+```
+
+If apply fails with an invalid target path message, regenerate the plan with `openclaw secrets configure` or fix the target path to one of the allowed shapes above.
+ +## Related docs + +- [Secrets Management](/gateway/secrets) +- [CLI `secrets`](/cli/secrets) +- [Configuration Reference](/gateway/configuration-reference) diff --git a/docs/gateway/secrets.md b/docs/gateway/secrets.md new file mode 100644 index 00000000000..9fdec280d61 --- /dev/null +++ b/docs/gateway/secrets.md @@ -0,0 +1,386 @@ +--- +summary: "Secrets management: SecretRef contract, runtime snapshot behavior, and safe one-way scrubbing" +read_when: + - Configuring SecretRefs for providers, auth profiles, skills, or Google Chat + - Operating secrets reload/audit/configure/apply safely in production + - Understanding fail-fast and last-known-good behavior +title: "Secrets Management" +--- + +# Secrets management + +OpenClaw supports additive secret references so credentials do not need to be stored as plaintext in config files. + +Plaintext still works. Secret refs are optional. + +## Goals and runtime model + +Secrets are resolved into an in-memory runtime snapshot. + +- Resolution is eager during activation, not lazy on request paths. +- Startup fails fast if any referenced credential cannot be resolved. +- Reload uses atomic swap: full success or keep last-known-good. +- Runtime requests read from the active in-memory snapshot. + +This keeps secret-provider outages off the hot request path. + +## Onboarding reference preflight + +When onboarding runs in interactive mode and you choose secret reference storage, OpenClaw performs a fast preflight check before saving: + +- Env refs: validates env var name and confirms a non-empty value is visible during onboarding. +- Provider refs (`file` or `exec`): validates the selected provider, resolves the provided `id`, and checks value type. + +If validation fails, onboarding shows the error and lets you retry. + +## SecretRef contract + +Use one object shape everywhere: + +```json5 +{ source: "env" | "file" | "exec", provider: "default", id: "..." 
} +``` + +### `source: "env"` + +```json5 +{ source: "env", provider: "default", id: "OPENAI_API_KEY" } +``` + +Validation: + +- `provider` must match `^[a-z][a-z0-9_-]{0,63}$` +- `id` must match `^[A-Z][A-Z0-9_]{0,127}$` + +### `source: "file"` + +```json5 +{ source: "file", provider: "filemain", id: "/providers/openai/apiKey" } +``` + +Validation: + +- `provider` must match `^[a-z][a-z0-9_-]{0,63}$` +- `id` must be an absolute JSON pointer (`/...`) +- RFC6901 escaping in segments: `~` => `~0`, `/` => `~1` + +### `source: "exec"` + +```json5 +{ source: "exec", provider: "vault", id: "providers/openai/apiKey" } +``` + +Validation: + +- `provider` must match `^[a-z][a-z0-9_-]{0,63}$` +- `id` must match `^[A-Za-z0-9][A-Za-z0-9._:/-]{0,255}$` + +## Provider config + +Define providers under `secrets.providers`: + +```json5 +{ + secrets: { + providers: { + default: { source: "env" }, + filemain: { + source: "file", + path: "~/.openclaw/secrets.json", + mode: "json", // or "singleValue" + }, + vault: { + source: "exec", + command: "/usr/local/bin/openclaw-vault-resolver", + args: ["--profile", "prod"], + passEnv: ["PATH", "VAULT_ADDR"], + jsonOnly: true, + }, + }, + defaults: { + env: "default", + file: "filemain", + exec: "vault", + }, + resolution: { + maxProviderConcurrency: 4, + maxRefsPerProvider: 512, + maxBatchBytes: 262144, + }, + }, +} +``` + +### Env provider + +- Optional allowlist via `allowlist`. +- Missing/empty env values fail resolution. + +### File provider + +- Reads local file from `path`. +- `mode: "json"` expects JSON object payload and resolves `id` as pointer. +- `mode: "singleValue"` expects ref id `"value"` and returns file contents. +- Path must pass ownership/permission checks. + +### Exec provider + +- Runs configured absolute binary path, no shell. +- By default, `command` must point to a regular file (not a symlink). +- Set `allowSymlinkCommand: true` to allow symlink command paths (for example Homebrew shims). 
OpenClaw validates the resolved target path. +- Enable `allowSymlinkCommand` only when required for trusted package-manager paths, and pair it with `trustedDirs` (for example `["/opt/homebrew"]`). +- When `trustedDirs` is set, checks apply to the resolved target path. +- Supports timeout, no-output timeout, output byte limits, env allowlist, and trusted dirs. +- Request payload (stdin): + +```json +{ "protocolVersion": 1, "provider": "vault", "ids": ["providers/openai/apiKey"] } +``` + +- Response payload (stdout): + +```json +{ "protocolVersion": 1, "values": { "providers/openai/apiKey": "sk-..." } } +``` + +Optional per-id errors: + +```json +{ + "protocolVersion": 1, + "values": {}, + "errors": { "providers/openai/apiKey": { "message": "not found" } } +} +``` + +## Exec integration examples + +### 1Password CLI + +```json5 +{ + secrets: { + providers: { + onepassword_openai: { + source: "exec", + command: "/opt/homebrew/bin/op", + allowSymlinkCommand: true, // required for Homebrew symlinked binaries + trustedDirs: ["/opt/homebrew"], + args: ["read", "op://Personal/OpenClaw QA API Key/password"], + passEnv: ["HOME"], + jsonOnly: false, + }, + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + models: [{ id: "gpt-5", name: "gpt-5" }], + apiKey: { source: "exec", provider: "onepassword_openai", id: "value" }, + }, + }, + }, +} +``` + +### HashiCorp Vault CLI + +```json5 +{ + secrets: { + providers: { + vault_openai: { + source: "exec", + command: "/opt/homebrew/bin/vault", + allowSymlinkCommand: true, // required for Homebrew symlinked binaries + trustedDirs: ["/opt/homebrew"], + args: ["kv", "get", "-field=OPENAI_API_KEY", "secret/openclaw"], + passEnv: ["VAULT_ADDR", "VAULT_TOKEN"], + jsonOnly: false, + }, + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + models: [{ id: "gpt-5", name: "gpt-5" }], + apiKey: { source: "exec", provider: "vault_openai", id: "value" }, + }, + }, + }, +} 
+``` + +### `sops` + +```json5 +{ + secrets: { + providers: { + sops_openai: { + source: "exec", + command: "/opt/homebrew/bin/sops", + allowSymlinkCommand: true, // required for Homebrew symlinked binaries + trustedDirs: ["/opt/homebrew"], + args: ["-d", "--extract", '["providers"]["openai"]["apiKey"]', "/path/to/secrets.enc.json"], + passEnv: ["SOPS_AGE_KEY_FILE"], + jsonOnly: false, + }, + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + models: [{ id: "gpt-5", name: "gpt-5" }], + apiKey: { source: "exec", provider: "sops_openai", id: "value" }, + }, + }, + }, +} +``` + +## In-scope fields (v1) + +### `~/.openclaw/openclaw.json` + +- `models.providers..apiKey` +- `skills.entries..apiKey` +- `channels.googlechat.serviceAccount` +- `channels.googlechat.serviceAccountRef` +- `channels.googlechat.accounts..serviceAccount` +- `channels.googlechat.accounts..serviceAccountRef` + +### `~/.openclaw/agents//agent/auth-profiles.json` + +- `profiles..keyRef` for `type: "api_key"` +- `profiles..tokenRef` for `type: "token"` + +OAuth credential storage changes are out of scope. + +## Required behavior and precedence + +- Field without ref: unchanged. +- Field with ref: required at activation time. +- If plaintext and ref both exist, ref wins at runtime and plaintext is ignored. + +Warning code: + +- `SECRETS_REF_OVERRIDES_PLAINTEXT` + +## Activation triggers + +Secret activation is attempted on: + +- Startup (preflight plus final activation) +- Config reload hot-apply path +- Config reload restart-check path +- Manual reload via `secrets.reload` + +Activation contract: + +- Success swaps the snapshot atomically. +- Startup failure aborts gateway startup. +- Runtime reload failure keeps last-known-good snapshot. + +## Degraded and recovered operator signals + +When reload-time activation fails after a healthy state, OpenClaw enters degraded secrets state. 
+ +One-shot system event and log codes: + +- `SECRETS_RELOADER_DEGRADED` +- `SECRETS_RELOADER_RECOVERED` + +Behavior: + +- Degraded: runtime keeps last-known-good snapshot. +- Recovered: emitted once after a successful activation. +- Repeated failures while already degraded log warnings but do not spam events. +- Startup fail-fast does not emit degraded events because no runtime snapshot exists yet. + +## Audit and configure workflow + +Use this default operator flow: + +```bash +openclaw secrets audit --check +openclaw secrets configure +openclaw secrets audit --check +``` + +Migration completeness: + +- Include `skills.entries..apiKey` targets when those skills use API keys. +- If `audit --check` still reports plaintext findings after a partial migration, migrate the remaining reported paths and rerun audit. + +### `secrets audit` + +Findings include: + +- plaintext values at rest (`openclaw.json`, `auth-profiles.json`, `.env`) +- unresolved refs +- precedence shadowing (`auth-profiles` taking priority over config refs) +- legacy residues (`auth.json`, OAuth out-of-scope reminders) + +### `secrets configure` + +Interactive helper that: + +- configures `secrets.providers` first (`env`/`file`/`exec`, add/edit/remove) +- lets you select secret-bearing fields in `openclaw.json` +- captures SecretRef details (`source`, `provider`, `id`) +- runs preflight resolution +- can apply immediately + +Helpful modes: + +- `openclaw secrets configure --providers-only` +- `openclaw secrets configure --skip-provider-setup` + +`configure` apply defaults to: + +- scrub matching static creds from `auth-profiles.json` for targeted providers +- scrub legacy static `api_key` entries from `auth.json` +- scrub matching known secret lines from `/.env` + +### `secrets apply` + +Apply a saved plan: + +```bash +openclaw secrets apply --from /tmp/openclaw-secrets-plan.json +openclaw secrets apply --from /tmp/openclaw-secrets-plan.json --dry-run +``` + +For strict target/path contract details 
and exact rejection rules, see: + +- [Secrets Apply Plan Contract](/gateway/secrets-plan-contract) + +## One-way safety policy + +OpenClaw intentionally does **not** write rollback backups that contain pre-migration plaintext secret values. + +Safety model: + +- preflight must succeed before write mode +- runtime activation is validated before commit +- apply updates files using atomic file replacement and best-effort in-memory restore on failure + +## `auth.json` compatibility notes + +For static credentials, OpenClaw runtime no longer depends on plaintext `auth.json`. + +- Runtime credential source is the resolved in-memory snapshot. +- Legacy `auth.json` static `api_key` entries are scrubbed when discovered. +- OAuth-related legacy compatibility behavior remains separate. + +## Related docs + +- CLI commands: [secrets](/cli/secrets) +- Plan contract details: [Secrets Apply Plan Contract](/gateway/secrets-plan-contract) +- Auth setup: [Authentication](/gateway/authentication) +- Security posture: [Security](/gateway/security) +- Environment precedence: [Environment Variables](/help/environment) diff --git a/docs/gateway/security/index.md b/docs/gateway/security/index.md index 3824d1d283e..55e2a076766 100644 --- a/docs/gateway/security/index.md +++ b/docs/gateway/security/index.md @@ -188,7 +188,7 @@ If more than one person can DM your bot: - **Browser control exposure** (remote nodes, relay ports, remote CDP endpoints). - **Local disk hygiene** (permissions, symlinks, config includes, “synced folder” paths). - **Plugins** (extensions exist without an explicit allowlist). -- **Policy drift/misconfig** (sandbox docker settings configured but sandbox mode off; ineffective `gateway.nodes.denyCommands` patterns; dangerous `gateway.nodes.allowCommands` entries; global `tools.profile="minimal"` overridden by per-agent profiles; extension plugin tools reachable under permissive tool policy). 
+- **Policy drift/misconfig** (sandbox docker settings configured but sandbox mode off; ineffective `gateway.nodes.denyCommands` patterns because matching is exact command-name only (for example `system.run`) and does not inspect shell text; dangerous `gateway.nodes.allowCommands` entries; global `tools.profile="minimal"` overridden by per-agent profiles; extension plugin tools reachable under permissive tool policy). - **Runtime expectation drift** (for example `tools.exec.host="sandbox"` while sandbox mode is off, which runs directly on the gateway host). - **Model hygiene** (warn when configured models look legacy; not a hard block). @@ -202,8 +202,11 @@ Use this when auditing access or deciding what to back up: - **Telegram bot token**: config/env or `channels.telegram.tokenFile` - **Discord bot token**: config/env (token file not yet supported) - **Slack tokens**: config/env (`channels.slack.*`) -- **Pairing allowlists**: `~/.openclaw/credentials/-allowFrom.json` +- **Pairing allowlists**: + - `~/.openclaw/credentials/-allowFrom.json` (default account) + - `~/.openclaw/credentials/--allowFrom.json` (non-default accounts) - **Model auth profiles**: `~/.openclaw/agents//agent/auth-profiles.json` +- **File-backed secrets payload (optional)**: `~/.openclaw/secrets.json` - **Legacy OAuth import**: `~/.openclaw/credentials/oauth.json` ## Security Audit Checklist @@ -488,7 +491,7 @@ If you run multiple accounts on the same channel, use `per-account-channel-peer` OpenClaw has two separate “who can trigger me?” layers: - **DM allowlist** (`allowFrom` / `channels.discord.allowFrom` / `channels.slack.allowFrom`; legacy: `channels.discord.dm.allowFrom`, `channels.slack.dm.allowFrom`): who is allowed to talk to the bot in direct messages. - - When `dmPolicy="pairing"`, approvals are written to `~/.openclaw/credentials/-allowFrom.json` (merged with config allowlists). 
+ - When `dmPolicy="pairing"`, approvals are written to the account-scoped pairing allowlist store under `~/.openclaw/credentials/` (`<channel>-allowFrom.json` for default account, `<channel>-<accountId>-allowFrom.json` for non-default accounts), merged with config allowlists.
- **Group allowlist** (channel-specific): which groups/channels/guilds the bot will accept messages from at all.
  - Common patterns:
    - `channels.whatsapp.groups`, `channels.telegram.groups`, `channels.imessage.groups`: per-group defaults like `requireMention`; when set, it also acts as a group allowlist (include `"*"` to keep allow-all behavior).
@@ -683,8 +686,10 @@ Set a token so **all** WS clients must authenticate:

Doctor can generate one for you: `openclaw doctor --generate-gateway-token`.

-Note: `gateway.remote.token` is **only** for remote CLI calls; it does not
-protect local WS access.
+Note: `gateway.remote.token` / `.password` are client credential sources. They
+do **not** protect local WS access by themselves.
+Local call paths can use `gateway.remote.*` as fallback when `gateway.auth.*`
+is unset.
Optional: pin remote TLS with `gateway.remote.tlsFingerprint` when using `wss://`. Local device pairing:
@@ -758,7 +763,9 @@ Assume anything under `~/.openclaw/` (or `$OPENCLAW_STATE_DIR/`) may contain sec

- `openclaw.json`: config may include tokens (gateway, remote gateway), provider settings, and allowlists.
- `credentials/**`: channel credentials (example: WhatsApp creds), pairing allowlists, legacy OAuth imports.
-- `agents/<agentId>/agent/auth-profiles.json`: API keys + OAuth tokens (imported from legacy `credentials/oauth.json`).
+- `agents/<agentId>/agent/auth-profiles.json`: API keys, token profiles, OAuth tokens, and optional `keyRef`/`tokenRef`.
+- `secrets.json` (optional): file-backed secret payload used by `file` SecretRef providers (`secrets.providers`).
+- `agents/<agentId>/agent/auth.json`: legacy compatibility file. Static `api_key` entries are scrubbed when discovered.
- `agents//sessions/**`: session transcripts (`*.jsonl`) + routing metadata (`sessions.json`) that can contain private messages and tool output. - `extensions/**`: installed plugins (plus their `node_modules/`). - `sandboxes/**`: tool sandbox workspaces; can accumulate copies of files you read/write inside the sandbox. @@ -1057,7 +1064,7 @@ If your AI does something bad: 1. Rotate Gateway auth (`gateway.auth.token` / `OPENCLAW_GATEWAY_PASSWORD`) and restart. 2. Rotate remote client secrets (`gateway.remote.token` / `.password`) on any machine that can call the Gateway. -3. Rotate provider/API credentials (WhatsApp creds, Slack/Discord tokens, model/API keys in `auth-profiles.json`). +3. Rotate provider/API credentials (WhatsApp creds, Slack/Discord tokens, model/API keys in `auth-profiles.json`, and encrypted secrets payload values when used). ### Audit diff --git a/docs/gateway/troubleshooting.md b/docs/gateway/troubleshooting.md index 23483076102..45963f15579 100644 --- a/docs/gateway/troubleshooting.md +++ b/docs/gateway/troubleshooting.md @@ -174,7 +174,7 @@ Common signatures: - `cron: timer tick failed` → scheduler tick failed; check file/log/runtime errors. - `heartbeat skipped` with `reason=quiet-hours` → outside active hours window. - `heartbeat: unknown accountId` → invalid account id for heartbeat delivery target. -- `heartbeat skipped` with `reason=dm-blocked` → heartbeat target resolved to a DM-style `user:` destination (blocked by design). +- `heartbeat skipped` with `reason=dm-blocked` → heartbeat target resolved to a DM-style destination while `agents.defaults.heartbeat.directPolicy` (or per-agent override) is set to `block`. 
Related: diff --git a/docs/help/environment.md b/docs/help/environment.md index 7e969c816a5..d261faeaa07 100644 --- a/docs/help/environment.md +++ b/docs/help/environment.md @@ -74,6 +74,15 @@ You can reference env vars directly in config string values using `${VAR_NAME}` See [Configuration: Env var substitution](/gateway/configuration#env-var-substitution-in-config) for full details. +## Secret refs vs `${ENV}` strings + +OpenClaw supports two env-driven patterns: + +- `${VAR}` string substitution in config values. +- SecretRef objects (`{ source: "env", provider: "default", id: "VAR" }`) for fields that support secrets references. + +Both resolve from process env at activation time. SecretRef details are documented in [Secrets Management](/gateway/secrets). + ## Path-related env vars | Variable | Purpose | diff --git a/docs/help/faq.md b/docs/help/faq.md index b5c5fa8f24a..cd12c790f53 100644 --- a/docs/help/faq.md +++ b/docs/help/faq.md @@ -1291,16 +1291,17 @@ Related: [Agent workspace](/concepts/agent-workspace), [Memory](/concepts/memory Everything lives under `$OPENCLAW_STATE_DIR` (default: `~/.openclaw`): -| Path | Purpose | -| --------------------------------------------------------------- | ------------------------------------------------------------ | -| `$OPENCLAW_STATE_DIR/openclaw.json` | Main config (JSON5) | -| `$OPENCLAW_STATE_DIR/credentials/oauth.json` | Legacy OAuth import (copied into auth profiles on first use) | -| `$OPENCLAW_STATE_DIR/agents//agent/auth-profiles.json` | Auth profiles (OAuth + API keys) | -| `$OPENCLAW_STATE_DIR/agents//agent/auth.json` | Runtime auth cache (managed automatically) | -| `$OPENCLAW_STATE_DIR/credentials/` | Provider state (e.g. 
`whatsapp//creds.json`) | -| `$OPENCLAW_STATE_DIR/agents/` | Per-agent state (agentDir + sessions) | -| `$OPENCLAW_STATE_DIR/agents//sessions/` | Conversation history & state (per agent) | -| `$OPENCLAW_STATE_DIR/agents//sessions/sessions.json` | Session metadata (per agent) | +| Path | Purpose | +| --------------------------------------------------------------- | ------------------------------------------------------------------ | +| `$OPENCLAW_STATE_DIR/openclaw.json` | Main config (JSON5) | +| `$OPENCLAW_STATE_DIR/credentials/oauth.json` | Legacy OAuth import (copied into auth profiles on first use) | +| `$OPENCLAW_STATE_DIR/agents//agent/auth-profiles.json` | Auth profiles (OAuth, API keys, and optional `keyRef`/`tokenRef`) | +| `$OPENCLAW_STATE_DIR/secrets.json` | Optional file-backed secret payload for `file` SecretRef providers | +| `$OPENCLAW_STATE_DIR/agents//agent/auth.json` | Legacy compatibility file (static `api_key` entries scrubbed) | +| `$OPENCLAW_STATE_DIR/credentials/` | Provider state (e.g. `whatsapp//creds.json`) | +| `$OPENCLAW_STATE_DIR/agents/` | Per-agent state (agentDir + sessions) | +| `$OPENCLAW_STATE_DIR/agents//sessions/` | Conversation history & state (per agent) | +| `$OPENCLAW_STATE_DIR/agents//sessions/sessions.json` | Session metadata (per agent) | Legacy single-agent path: `~/.openclaw/agent/*` (migrated by `openclaw doctor`). @@ -1338,7 +1339,7 @@ Put your **agent workspace** in a **private** git repo and back it up somewhere private (for example GitHub private). This captures memory + AGENTS/SOUL/USER files, and lets you restore the assistant's "mind" later. -Do **not** commit anything under `~/.openclaw` (credentials, sessions, tokens). +Do **not** commit anything under `~/.openclaw` (credentials, sessions, tokens, or encrypted secrets payloads). If you need a full restore, back up both the workspace and the state directory separately (see the migration question above). @@ -1404,7 +1405,8 @@ Non-loopback binds **require auth**. 
Configure `gateway.auth.mode` + `gateway.au Notes: -- `gateway.remote.token` is for **remote CLI calls** only; it does not enable local gateway auth. +- `gateway.remote.token` / `.password` do **not** enable local gateway auth by themselves. +- Local call paths can use `gateway.remote.*` as fallback when `gateway.auth.*` is unset. - The Control UI authenticates via `connect.params.auth.token` (stored in app/UI settings). Avoid putting tokens in URLs. ### Why do I need a token on localhost now diff --git a/docs/help/testing.md b/docs/help/testing.md index 7932a1f244f..01bb80abb47 100644 --- a/docs/help/testing.md +++ b/docs/help/testing.md @@ -336,6 +336,11 @@ These run `pnpm test:live` inside the repo Docker image, mounting your local con - Gateway networking (two containers, WS auth + health): `pnpm test:docker:gateway-network` (script: `scripts/e2e/gateway-network-docker.sh`) - Plugins (custom extension load + registry smoke): `pnpm test:docker:plugins` (script: `scripts/e2e/plugins-docker.sh`) +Manual ACP plain-language thread smoke (not CI): + +- `bun scripts/dev/discord-acp-plain-language-smoke.ts --channel ...` +- Keep this script for regression/debug workflows. It may be needed again for ACP thread routing validation, so do not delete it. 
+ Useful env vars: - `OPENCLAW_CONFIG_DIR=...` (default: `~/.openclaw`) mounted to `/home/node/.openclaw` diff --git a/docs/install/docker.md b/docs/install/docker.md index decd1d779ee..42cefd4be01 100644 --- a/docs/install/docker.md +++ b/docs/install/docker.md @@ -26,6 +26,7 @@ Sandboxing details: [Sandboxing](/gateway/sandboxing) ## Requirements - Docker Desktop (or Docker Engine) + Docker Compose v2 +- At least 2 GB RAM for image build (`pnpm install` may be OOM-killed on 1 GB hosts with exit 137) - Enough disk for images + logs ## Containerized Gateway (Docker Compose) diff --git a/docs/install/gcp.md b/docs/install/gcp.md index b0ec51a75dd..2c6bdd8ac1f 100644 --- a/docs/install/gcp.md +++ b/docs/install/gcp.md @@ -114,10 +114,11 @@ gcloud services enable compute.googleapis.com **Machine types:** -| Type | Specs | Cost | Notes | -| -------- | ------------------------ | ------------------ | ------------------ | -| e2-small | 2 vCPU, 2GB RAM | ~$12/mo | Recommended | -| e2-micro | 2 vCPU (shared), 1GB RAM | Free tier eligible | May OOM under load | +| Type | Specs | Cost | Notes | +| --------- | ------------------------ | ------------------ | -------------------------------------------- | +| e2-medium | 2 vCPU, 4GB RAM | ~$25/mo | Most reliable for local Docker builds | +| e2-small | 2 vCPU, 2GB RAM | ~$12/mo | Minimum recommended for Docker build | +| e2-micro | 2 vCPU (shared), 1GB RAM | Free tier eligible | Often fails with Docker build OOM (exit 137) | **CLI:** @@ -350,6 +351,16 @@ docker compose build docker compose up -d openclaw-gateway ``` +If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. 
+ +When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing: + +```bash +docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins '["http://127.0.0.1:18789"]' --strict-json +``` + +If you changed the gateway port, replace `18789` with your configured port. + Verify binaries: ```bash @@ -394,7 +405,20 @@ Open in your browser: `http://127.0.0.1:18789/` -Paste your gateway token. +Fetch a fresh tokenized dashboard link: + +```bash +docker compose run --rm openclaw-cli dashboard --no-open +``` + +Paste the token from that URL. + +If Control UI shows `unauthorized` or `disconnected (1008): pairing required`, approve the browser device: + +```bash +docker compose run --rm openclaw-cli devices list +docker compose run --rm openclaw-cli devices approve +``` --- @@ -449,7 +473,7 @@ Ensure your account has the required IAM permissions (Compute OS Login or Comput **Out of memory (OOM)** -If using e2-micro and hitting OOM, upgrade to e2-small or e2-medium: +If Docker build fails with `Killed` and `exit code 137`, the VM was OOM-killed. Upgrade to e2-small (minimum) or e2-medium (recommended for reliable local builds): ```bash # Stop the VM first diff --git a/docs/install/podman.md b/docs/install/podman.md index 3b56c9ce25e..707fdd3a106 100644 --- a/docs/install/podman.md +++ b/docs/install/podman.md @@ -85,6 +85,7 @@ To add quadlet **after** an initial setup that did not use it, re-run: `./setup- - **Token:** Stored in `~openclaw/.openclaw/.env` as `OPENCLAW_GATEWAY_TOKEN`. `setup-podman.sh` and `run-openclaw-podman.sh` generate it if missing (uses `openssl`, `python3`, or `od`). - **Optional:** In that `.env` you can set provider keys (e.g. `GROQ_API_KEY`, `OLLAMA_API_KEY`) and other OpenClaw env vars. - **Host ports:** By default the script maps `18789` (gateway) and `18790` (bridge). 
Override the **host** port mapping with `OPENCLAW_PODMAN_GATEWAY_HOST_PORT` and `OPENCLAW_PODMAN_BRIDGE_HOST_PORT` when launching. +- **Gateway bind:** By default, `run-openclaw-podman.sh` starts the gateway with `--bind loopback` for safe local access. To expose on LAN, set `OPENCLAW_GATEWAY_BIND=lan` and configure `gateway.controlUi.allowedOrigins` (or explicitly enable host-header fallback) in `openclaw.json`. - **Paths:** Host config and workspace default to `~openclaw/.openclaw` and `~openclaw/.openclaw/workspace`. Override the host paths used by the launch script with `OPENCLAW_CONFIG_DIR` and `OPENCLAW_WORKSPACE_DIR`. ## Useful commands diff --git a/docs/pi.md b/docs/pi.md index 944224da19c..2689b480963 100644 --- a/docs/pi.md +++ b/docs/pi.md @@ -232,6 +232,10 @@ await session.prompt(effectivePrompt, { images: imageResult.images }); The SDK handles the full agent loop: sending to LLM, executing tool calls, streaming responses. +Image injection is prompt-local: OpenClaw loads image refs from the current prompt and +passes them via `images` for that turn only. It does not re-scan older history turns +to re-inject image payloads. + ## Tool Architecture ### Tool Pipeline diff --git a/docs/platforms/mac/release.md b/docs/platforms/mac/release.md index 978e79ff480..57e68f53f05 100644 --- a/docs/platforms/mac/release.md +++ b/docs/platforms/mac/release.md @@ -34,17 +34,17 @@ Notes: # From repo root; set release IDs so Sparkle feed is enabled. # APP_BUILD must be numeric + monotonic for Sparkle compare. 
BUNDLE_ID=ai.openclaw.mac \ -APP_VERSION=2026.2.25 \ +APP_VERSION=2026.2.26 \ APP_BUILD="$(git rev-list --count HEAD)" \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-app.sh # Zip for distribution (includes resource forks for Sparkle delta support) -ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.2.25.zip +ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.2.26.zip # Optional: also build a styled DMG for humans (drag to /Applications) -scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.2.25.dmg +scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.2.26.dmg # Recommended: build + notarize/staple zip + DMG # First, create a keychain profile once: @@ -52,14 +52,14 @@ scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.2.25.dmg # --apple-id "" --team-id "" --password "" NOTARIZE=1 NOTARYTOOL_PROFILE=openclaw-notary \ BUNDLE_ID=ai.openclaw.mac \ -APP_VERSION=2026.2.25 \ +APP_VERSION=2026.2.26 \ APP_BUILD="$(git rev-list --count HEAD)" \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-dist.sh # Optional: ship dSYM alongside the release -ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.2.25.dSYM.zip +ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.2.26.dSYM.zip ``` ## Appcast entry @@ -67,7 +67,7 @@ ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenCl Use the release note generator so Sparkle renders formatted HTML notes: ```bash -SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.2.25.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml +SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.2.26.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml ``` Generates HTML release 
notes from `CHANGELOG.md` (via [`scripts/changelog-to-html.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/changelog-to-html.sh)) and embeds them in the appcast entry. @@ -75,7 +75,7 @@ Commit the updated `appcast.xml` alongside the release assets (zip + dSYM) when ## Publish & verify -- Upload `OpenClaw-2026.2.25.zip` (and `OpenClaw-2026.2.25.dSYM.zip`) to the GitHub release for tag `v2026.2.25`. +- Upload `OpenClaw-2026.2.26.zip` (and `OpenClaw-2026.2.26.dSYM.zip`) to the GitHub release for tag `v2026.2.26`. - Ensure the raw appcast URL matches the baked feed: `https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml`. - Sanity checks: - `curl -I https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml` returns 200. diff --git a/docs/providers/openai.md b/docs/providers/openai.md index 54e3d29e454..1a47081a9a6 100644 --- a/docs/providers/openai.md +++ b/docs/providers/openai.md @@ -56,6 +56,33 @@ openclaw models auth login --provider openai-codex } ``` +### Codex transport default + +OpenClaw uses `pi-ai` for model streaming. For `openai-codex/*` models you can set +`agents.defaults.models..params.transport` to select transport: + +- Default is `"auto"` (WebSocket-first, then SSE fallback). +- `"sse"`: force SSE +- `"websocket"`: force WebSocket +- `"auto"`: try WebSocket, then fall back to SSE + +```json5 +{ + agents: { + defaults: { + model: { primary: "openai-codex/gpt-5.3-codex" }, + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "auto", + }, + }, + }, + }, + }, +} +``` + ## Notes - Model refs always use `provider/model` (see [/concepts/models](/concepts/models)). 
diff --git a/docs/reference/session-management-compaction.md b/docs/reference/session-management-compaction.md index aff09a303e8..d258eeb6722 100644 --- a/docs/reference/session-management-compaction.md +++ b/docs/reference/session-management-compaction.md @@ -128,6 +128,7 @@ Rules of thumb: - **Reset** (`/new`, `/reset`) creates a new `sessionId` for that `sessionKey`. - **Daily reset** (default 4:00 AM local time on the gateway host) creates a new `sessionId` on the next message after the reset boundary. - **Idle expiry** (`session.reset.idleMinutes` or legacy `session.idleMinutes`) creates a new `sessionId` when a message arrives after the idle window. When daily + idle are both configured, whichever expires first wins. +- **Thread parent fork guard** (`session.parentForkMaxTokens`, default `100000`) skips parent transcript forking when the parent session is already too large; the new thread starts fresh. Set `0` to disable. Implementation detail: the decision happens in `initSessionState()` in `src/auto-reply/reply/session.ts`. diff --git a/docs/reference/wizard.md b/docs/reference/wizard.md index 1bd83a0bc28..4f85e7e866d 100644 --- a/docs/reference/wizard.md +++ b/docs/reference/wizard.md @@ -20,6 +20,8 @@ For a high-level overview, see [Onboarding Wizard](/start/wizard). - If `~/.openclaw/openclaw.json` exists, choose **Keep / Modify / Reset**. - Re-running the wizard does **not** wipe anything unless you explicitly choose **Reset** (or pass `--reset`). + - CLI `--reset` defaults to `config+creds+sessions`; use `--reset-scope full` + to also remove workspace. - If the config is invalid or contains legacy keys, the wizard stops and asks you to run `openclaw doctor` before continuing. - Reset uses `trash` (never `rm`) and offers scopes: @@ -34,7 +36,7 @@ For a high-level overview, see [Onboarding Wizard](/start/wizard). - **OpenAI Code (Codex) subscription (Codex CLI)**: if `~/.codex/auth.json` exists, the wizard can reuse it. 
- **OpenAI Code (Codex) subscription (OAuth)**: browser flow; paste the `code#state`. - Sets `agents.defaults.model` to `openai-codex/gpt-5.2` when model is unset or `openai/*`. - - **OpenAI API key**: uses `OPENAI_API_KEY` if present or prompts for a key, then saves it to `~/.openclaw/.env` so launchd can read it. + - **OpenAI API key**: uses `OPENAI_API_KEY` if present or prompts for a key, then stores it in auth profiles. - **xAI (Grok) API key**: prompts for `XAI_API_KEY` and configures xAI as a model provider. - **OpenCode Zen (multi-model proxy)**: prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`, get it at https://opencode.ai/auth). - **API key**: stores the key for you. @@ -52,6 +54,7 @@ For a high-level overview, see [Onboarding Wizard](/start/wizard). - **Skip**: no auth configured yet. - Pick a default model from detected options (or enter provider/model manually). - Wizard runs a model check and warns if the configured model is unknown or missing auth. + - API key storage mode defaults to plaintext auth-profile values. Use `--secret-input-mode ref` to store env-backed refs instead (for example `keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }`). - OAuth credentials live in `~/.openclaw/credentials/oauth.json`; auth profiles live in `~/.openclaw/agents//agent/auth-profiles.json` (API keys + OAuth). 
- More detail: [/concepts/oauth](/concepts/oauth) diff --git a/docs/start/hubs.md b/docs/start/hubs.md index 082ebc4b741..e02741716df 100644 --- a/docs/start/hubs.md +++ b/docs/start/hubs.md @@ -73,7 +73,6 @@ Use these hubs to discover every page, including deep dives and reference docs t - [Model providers hub](/providers/models) - [WhatsApp](/channels/whatsapp) - [Telegram](/channels/telegram) -- [Telegram (grammY notes)](/channels/grammy) - [Slack](/channels/slack) - [Discord](/channels/discord) - [Mattermost](/channels/mattermost) (plugin) diff --git a/docs/start/onboarding.md b/docs/start/onboarding.md index ab9289b8a11..dfa058af545 100644 --- a/docs/start/onboarding.md +++ b/docs/start/onboarding.md @@ -29,6 +29,12 @@ For a general overview of onboarding paths, see [Onboarding Overview](/start/onb + +Security trust model: + +- By default, OpenClaw is a personal agent: one trusted operator boundary. +- Shared/multi-user setups require lock-down (split trust boundaries, keep tool access minimal, and follow [Security](/gateway/security)). + @@ -37,17 +43,19 @@ For a general overview of onboarding paths, see [Onboarding Overview](/start/onb Where does the **Gateway** run? -- **This Mac (Local only):** onboarding can run OAuth flows and write credentials +- **This Mac (Local only):** onboarding can configure auth and write credentials locally. -- **Remote (over SSH/Tailnet):** onboarding does **not** run OAuth locally; +- **Remote (over SSH/Tailnet):** onboarding does **not** configure local auth; credentials must exist on the gateway host. - **Configure later:** skip setup and leave the app unconfigured. **Gateway auth tip:** + - The wizard now generates a **token** even for loopback, so local WS clients must authenticate. - If you disable auth, any local process can connect; use that only on fully trusted machines. - Use a **token** for multi‑machine access or non‑loopback binds. 
+ diff --git a/docs/start/openclaw.md b/docs/start/openclaw.md index 058f2fa67fe..671efe420c7 100644 --- a/docs/start/openclaw.md +++ b/docs/start/openclaw.md @@ -164,7 +164,7 @@ Set `agents.defaults.heartbeat.every: "0m"` to disable. - If `HEARTBEAT.md` exists but is effectively empty (only blank lines and markdown headers like `# Heading`), OpenClaw skips the heartbeat run to save API calls. - If the file is missing, the heartbeat still runs and the model decides what to do. - If the agent replies with `HEARTBEAT_OK` (optionally with short padding; see `agents.defaults.heartbeat.ackMaxChars`), OpenClaw suppresses outbound delivery for that heartbeat. -- Heartbeat delivery to DM-style `user:` targets is blocked; those runs still execute but skip outbound delivery. +- By default, heartbeat delivery to DM-style `user:` targets is allowed. Set `agents.defaults.heartbeat.directPolicy: "block"` to suppress direct-target delivery while keeping heartbeat runs active. - Heartbeats run full agent turns — shorter intervals burn more tokens. ```json5 diff --git a/docs/start/setup.md b/docs/start/setup.md index ee50e02afd4..d1fbb7edf7e 100644 --- a/docs/start/setup.md +++ b/docs/start/setup.md @@ -130,8 +130,11 @@ Use this when debugging auth or deciding what to back up: - **Telegram bot token**: config/env or `channels.telegram.tokenFile` - **Discord bot token**: config/env (token file not yet supported) - **Slack tokens**: config/env (`channels.slack.*`) -- **Pairing allowlists**: `~/.openclaw/credentials/-allowFrom.json` +- **Pairing allowlists**: + - `~/.openclaw/credentials/-allowFrom.json` (default account) + - `~/.openclaw/credentials/--allowFrom.json` (non-default accounts) - **Model auth profiles**: `~/.openclaw/agents//agent/auth-profiles.json` +- **File-backed secrets payload (optional)**: `~/.openclaw/secrets.json` - **Legacy OAuth import**: `~/.openclaw/credentials/oauth.json` More detail: [Security](/gateway/security#credential-storage-map). 
diff --git a/docs/start/wizard-cli-automation.md b/docs/start/wizard-cli-automation.md index 5a8d3e9ac0e..14f4a9d5d32 100644 --- a/docs/start/wizard-cli-automation.md +++ b/docs/start/wizard-cli-automation.md @@ -22,6 +22,7 @@ openclaw onboard --non-interactive \ --mode local \ --auth-choice apiKey \ --anthropic-api-key "$ANTHROPIC_API_KEY" \ + --secret-input-mode plaintext \ --gateway-port 18789 \ --gateway-bind loopback \ --install-daemon \ @@ -31,6 +32,22 @@ openclaw onboard --non-interactive \ Add `--json` for a machine-readable summary. +Use `--secret-input-mode ref` to store env-backed refs in auth profiles instead of plaintext values. +Interactive selection between env refs and configured provider refs (`file` or `exec`) is available in the onboarding wizard flow. + +In non-interactive `ref` mode, provider env vars must be set in the process environment. +Passing inline key flags without the matching env var now fails fast. + +Example: + +```bash +openclaw onboard --non-interactive \ + --mode local \ + --auth-choice openai-api-key \ + --secret-input-mode ref \ + --accept-risk +``` + ## Provider-specific examples @@ -132,6 +149,24 @@ Add `--json` for a machine-readable summary. `--custom-api-key` is optional. If omitted, onboarding checks `CUSTOM_API_KEY`. + Ref-mode variant: + + ```bash + export CUSTOM_API_KEY="your-key" + openclaw onboard --non-interactive \ + --mode local \ + --auth-choice custom-api-key \ + --custom-base-url "https://llm.example.com/v1" \ + --custom-model-id "foo-large" \ + --secret-input-mode ref \ + --custom-provider-id "my-custom" \ + --custom-compatibility anthropic \ + --gateway-port 18789 \ + --gateway-bind loopback + ``` + + In this mode, onboarding stores `apiKey` as `{ source: "env", provider: "default", id: "CUSTOM_API_KEY" }`. 
+ diff --git a/docs/start/wizard-cli-reference.md b/docs/start/wizard-cli-reference.md index 96fd1d87afc..5019956a05c 100644 --- a/docs/start/wizard-cli-reference.md +++ b/docs/start/wizard-cli-reference.md @@ -33,6 +33,7 @@ It does not install or modify anything on the remote host. - If `~/.openclaw/openclaw.json` exists, choose Keep, Modify, or Reset. - Re-running the wizard does not wipe anything unless you explicitly choose Reset (or pass `--reset`). + - CLI `--reset` defaults to `config+creds+sessions`; use `--reset-scope full` to also remove workspace. - If config is invalid or contains legacy keys, the wizard stops and asks you to run `openclaw doctor` before continuing. - Reset uses `trash` and offers scopes: - Config only @@ -139,8 +140,7 @@ What you set: - Uses `OPENAI_API_KEY` if present or prompts for a key, then saves it to - `~/.openclaw/.env` so launchd can read it. + Uses `OPENAI_API_KEY` if present or prompts for a key, then stores the credential in auth profiles. Sets `agents.defaults.model` to `openai/gpt-5.1-codex` when model is unset, `openai/*`, or `openai-codex/*`. @@ -178,6 +178,10 @@ What you set: Works with OpenAI-compatible and Anthropic-compatible endpoints. + Interactive onboarding supports the same API key storage choices as other provider API key flows: + - **Paste API key now** (plaintext) + - **Use secret reference** (env ref or configured provider ref, with preflight validation) + Non-interactive flags: - `--auth-choice custom-api-key` - `--custom-base-url` @@ -202,6 +206,24 @@ Credential and profile paths: - OAuth credentials: `~/.openclaw/credentials/oauth.json` - Auth profiles (API keys + OAuth): `~/.openclaw/agents//agent/auth-profiles.json` +API key storage mode: + +- Default onboarding behavior persists API keys as plaintext values in auth profiles. +- `--secret-input-mode ref` enables reference mode instead of plaintext key storage. 
+ In interactive onboarding, you can choose either: + - environment variable ref (for example `keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }`) + - configured provider ref (`file` or `exec`) with provider alias + id +- Interactive reference mode runs a fast preflight validation before saving. + - Env refs: validates variable name + non-empty value in the current onboarding environment. + - Provider refs: validates provider config and resolves the requested id. + - If preflight fails, onboarding shows the error and lets you retry. +- In non-interactive mode, `--secret-input-mode ref` is env-backed only. + - Set the provider env var in the onboarding process environment. + - Inline key flags (for example `--openai-api-key`) require that env var to be set; otherwise onboarding fails fast. + - For custom providers, non-interactive `ref` mode stores `models.providers..apiKey` as `{ source: "env", provider: "default", id: "CUSTOM_API_KEY" }`. + - In that custom-provider case, `--custom-api-key` requires `CUSTOM_API_KEY` to be set; otherwise onboarding fails fast. +- Existing plaintext setups continue to work unchanged. + Headless and server tip: complete OAuth on a machine with a browser, then copy `~/.openclaw/credentials/oauth.json` (or `$OPENCLAW_STATE_DIR/credentials/oauth.json`) diff --git a/docs/start/wizard.md b/docs/start/wizard.md index d653574f488..ecf059c3b89 100644 --- a/docs/start/wizard.md +++ b/docs/start/wizard.md @@ -65,6 +65,9 @@ The wizard starts with **QuickStart** (defaults) vs **Advanced** (full control). 1. **Model/Auth** — Anthropic API key (recommended), OpenAI, or Custom Provider (OpenAI-compatible, Anthropic-compatible, or Unknown auto-detect). Pick a default model. + For non-interactive runs, `--secret-input-mode ref` stores env-backed refs in auth profiles instead of plaintext API key values. + In non-interactive `ref` mode, the provider env var must be set; passing inline key flags without that env var fails fast. 
+ In interactive runs, choosing secret reference mode lets you point at either an environment variable or a configured provider ref (`file` or `exec`), with a fast preflight validation before saving. 2. **Workspace** — Location for agent files (default `~/.openclaw/workspace`). Seeds bootstrap files. 3. **Gateway** — Port, bind address, auth mode, Tailscale exposure. 4. **Channels** — WhatsApp, Telegram, Discord, Google Chat, Mattermost, Signal, BlueBubbles, or iMessage. @@ -74,6 +77,7 @@ The wizard starts with **QuickStart** (defaults) vs **Advanced** (full control). Re-running the wizard does **not** wipe anything unless you explicitly choose **Reset** (or pass `--reset`). +CLI `--reset` defaults to config, credentials, and sessions; use `--reset-scope full` to include workspace. If the config is invalid or contains legacy keys, the wizard asks you to run `openclaw doctor` first. diff --git a/docs/tools/acp-agents.md b/docs/tools/acp-agents.md new file mode 100644 index 00000000000..0b1ec4510c3 --- /dev/null +++ b/docs/tools/acp-agents.md @@ -0,0 +1,346 @@ +--- +summary: "Use ACP runtime sessions for Pi, Claude Code, Codex, OpenCode, Gemini CLI, and other harness agents" +read_when: + - Running coding harnesses through ACP + - Setting up thread-bound ACP sessions on thread-capable channels + - Troubleshooting ACP backend and plugin wiring + - Operating /acp commands from chat +title: "ACP Agents" +--- + +# ACP agents + +[Agent Client Protocol (ACP)](https://agentclientprotocol.com/) sessions let OpenClaw run external coding harnesses (for example Pi, Claude Code, Codex, OpenCode, and Gemini CLI) through an ACP backend plugin. + +If you ask OpenClaw in plain language to "run this in Codex" or "start Claude Code in a thread", OpenClaw should route that request to the ACP runtime (not the native sub-agent runtime). + +## Fast operator flow + +Use this when you want a practical `/acp` runbook: + +1. 
Spawn a session: + - `/acp spawn codex --mode persistent --thread auto` +2. Work in the bound thread (or target that session key explicitly). +3. Check runtime state: + - `/acp status` +4. Tune runtime options as needed: + - `/acp model ` + - `/acp permissions ` + - `/acp timeout ` +5. Nudge an active session without replacing context: + - `/acp steer tighten logging and continue` +6. Stop work: + - `/acp cancel` (stop current turn), or + - `/acp close` (close session + remove bindings) + +## Quick start for humans + +Examples of natural requests: + +- "Start a persistent Codex session in a thread here and keep it focused." +- "Run this as a one-shot Claude Code ACP session and summarize the result." +- "Use Gemini CLI for this task in a thread, then keep follow-ups in that same thread." + +What OpenClaw should do: + +1. Pick `runtime: "acp"`. +2. Resolve the requested harness target (`agentId`, for example `codex`). +3. If thread binding is requested and the current channel supports it, bind the ACP session to the thread. +4. Route follow-up thread messages to that same ACP session until unfocused/closed/expired. + +## ACP versus sub-agents + +Use ACP when you want an external harness runtime. Use sub-agents when you want OpenClaw-native delegated runs. + +| Area | ACP session | Sub-agent run | +| ------------- | ------------------------------------- | ---------------------------------- | +| Runtime | ACP backend plugin (for example acpx) | OpenClaw native sub-agent runtime | +| Session key | `agent::acp:` | `agent::subagent:` | +| Main commands | `/acp ...` | `/subagents ...` | +| Spawn tool | `sessions_spawn` with `runtime:"acp"` | `sessions_spawn` (default runtime) | + +See also [Sub-agents](/tools/subagents). + +## Thread-bound sessions (channel-agnostic) + +When thread bindings are enabled for a channel adapter, ACP sessions can be bound to threads: + +- OpenClaw binds a thread to a target ACP session. 
+- Follow-up messages in that thread route to the bound ACP session. +- ACP output is delivered back to the same thread. +- Unfocus/close/archive/TTL expiry removes the binding. + +Thread binding support is adapter-specific. If the active channel adapter does not support thread bindings, OpenClaw returns a clear unsupported/unavailable message. + +Required feature flags for thread-bound ACP: + +- `acp.enabled=true` +- `acp.dispatch.enabled=true` +- Channel-adapter ACP thread-spawn flag enabled (adapter-specific) + - Discord: `channels.discord.threadBindings.spawnAcpSessions=true` + +### Thread supporting channels + +- Any channel adapter that exposes session/thread binding capability. +- Current built-in support: Discord. +- Plugin channels can add support through the same binding interface. + +## Start ACP sessions (interfaces) + +### From `sessions_spawn` + +Use `runtime: "acp"` to start an ACP session from an agent turn or tool call. + +```json +{ + "task": "Open the repo and summarize failing tests", + "runtime": "acp", + "agentId": "codex", + "thread": true, + "mode": "session" +} +``` + +Notes: + +- `runtime` defaults to `subagent`, so set `runtime: "acp"` explicitly for ACP sessions. +- If `agentId` is omitted, OpenClaw uses `acp.defaultAgent` when configured. +- `mode: "session"` requires `thread: true` to keep a persistent bound conversation. + +Interface details: + +- `task` (required): initial prompt sent to the ACP session. +- `runtime` (required for ACP): must be `"acp"`. +- `agentId` (optional): ACP target harness id. Falls back to `acp.defaultAgent` if set. +- `thread` (optional, default `false`): request thread binding flow where supported. +- `mode` (optional): `run` (one-shot) or `session` (persistent). 
+ - default is `run` + - if `thread: true` and mode omitted, OpenClaw may default to persistent behavior per runtime path + - `mode: "session"` requires `thread: true` +- `cwd` (optional): requested runtime working directory (validated by backend/runtime policy). +- `label` (optional): operator-facing label used in session/banner text. + +### From `/acp` command + +Use `/acp spawn` for explicit operator control from chat when needed. + +```text +/acp spawn codex --mode persistent --thread auto +/acp spawn codex --mode oneshot --thread off +/acp spawn codex --thread here +``` + +Key flags: + +- `--mode persistent|oneshot` +- `--thread auto|here|off` +- `--cwd ` +- `--label ` + +See [Slash Commands](/tools/slash-commands). + +## Session target resolution + +Most `/acp` actions accept an optional session target (`session-key`, `session-id`, or `session-label`). + +Resolution order: + +1. Explicit target argument (or `--session` for `/acp steer`) + - tries key + - then UUID-shaped session id + - then label +2. Current thread binding (if this conversation/thread is bound to an ACP session) +3. Current requester session fallback + +If no target resolves, OpenClaw returns a clear error (`Unable to resolve session target: ...`). + +## Spawn thread modes + +`/acp spawn` supports `--thread auto|here|off`. + +| Mode | Behavior | +| ------ | --------------------------------------------------------------------------------------------------- | +| `auto` | In an active thread: bind that thread. Outside a thread: create/bind a child thread when supported. | +| `here` | Require current active thread; fail if not in one. | +| `off` | No binding. Session starts unbound. | + +Notes: + +- On non-thread binding surfaces, default behavior is effectively `off`. +- Thread-bound spawn requires channel policy support (for Discord: `channels.discord.threadBindings.spawnAcpSessions=true`). 
+ +## ACP controls + +Available command family: + +- `/acp spawn` +- `/acp cancel` +- `/acp steer` +- `/acp close` +- `/acp status` +- `/acp set-mode` +- `/acp set` +- `/acp cwd` +- `/acp permissions` +- `/acp timeout` +- `/acp model` +- `/acp reset-options` +- `/acp sessions` +- `/acp doctor` +- `/acp install` + +`/acp status` shows the effective runtime options and, when available, both runtime-level and backend-level session identifiers. + +Some controls depend on backend capabilities. If a backend does not support a control, OpenClaw returns a clear unsupported-control error. + +## ACP command cookbook + +| Command | What it does | Example | +| -------------------- | --------------------------------------------------------- | -------------------------------------------------------------- | +| `/acp spawn` | Create ACP session; optional thread bind. | `/acp spawn codex --mode persistent --thread auto --cwd /repo` | +| `/acp cancel` | Cancel in-flight turn for target session. | `/acp cancel agent:codex:acp:` | +| `/acp steer` | Send steer instruction to running session. | `/acp steer --session support inbox prioritize failing tests` | +| `/acp close` | Close session and unbind thread targets. | `/acp close` | +| `/acp status` | Show backend, mode, state, runtime options, capabilities. | `/acp status` | +| `/acp set-mode` | Set runtime mode for target session. | `/acp set-mode plan` | +| `/acp set` | Generic runtime config option write. | `/acp set model openai/gpt-5.2` | +| `/acp cwd` | Set runtime working directory override. | `/acp cwd /Users/user/Projects/repo` | +| `/acp permissions` | Set approval policy profile. | `/acp permissions strict` | +| `/acp timeout` | Set runtime timeout (seconds). | `/acp timeout 120` | +| `/acp model` | Set runtime model override. | `/acp model anthropic/claude-opus-4-5` | +| `/acp reset-options` | Remove session runtime option overrides. | `/acp reset-options` | +| `/acp sessions` | List recent ACP sessions from store. 
| `/acp sessions` | +| `/acp doctor` | Backend health, capabilities, actionable fixes. | `/acp doctor` | +| `/acp install` | Print deterministic install and enable steps. | `/acp install` | + +## Runtime options mapping + +`/acp` has convenience commands and a generic setter. + +Equivalent operations: + +- `/acp model ` maps to runtime config key `model`. +- `/acp permissions ` maps to runtime config key `approval_policy`. +- `/acp timeout ` maps to runtime config key `timeout`. +- `/acp cwd ` updates runtime cwd override directly. +- `/acp set ` is the generic path. + - Special case: `key=cwd` uses the cwd override path. +- `/acp reset-options` clears all runtime overrides for target session. + +## acpx harness support (current) + +Current acpx built-in harness aliases: + +- `pi` +- `claude` +- `codex` +- `opencode` +- `gemini` + +When OpenClaw uses the acpx backend, prefer these values for `agentId` unless your acpx config defines custom agent aliases. + +Direct acpx CLI usage can also target arbitrary adapters via `--agent `, but that raw escape hatch is an acpx CLI feature (not the normal OpenClaw `agentId` path). + +## Required config + +Core ACP baseline: + +```json5 +{ + acp: { + enabled: true, + dispatch: { enabled: true }, + backend: "acpx", + defaultAgent: "codex", + allowedAgents: ["pi", "claude", "codex", "opencode", "gemini"], + maxConcurrentSessions: 8, + stream: { + coalesceIdleMs: 300, + maxChunkChars: 1200, + }, + runtime: { + ttlMinutes: 120, + }, + }, +} +``` + +Thread binding config is channel-adapter specific. 
Example for Discord: + +```json5 +{ + session: { + threadBindings: { + enabled: true, + ttlHours: 24, + }, + }, + channels: { + discord: { + threadBindings: { + enabled: true, + spawnAcpSessions: true, + }, + }, + }, +} +``` + +If thread-bound ACP spawn does not work, verify the adapter feature flag first: + +- Discord: `channels.discord.threadBindings.spawnAcpSessions=true` + +See [Configuration Reference](/gateway/configuration-reference). + +## Plugin setup for acpx backend + +Install and enable plugin: + +```bash +openclaw plugins install @openclaw/acpx +openclaw config set plugins.entries.acpx.enabled true +``` + +Local workspace install during development: + +```bash +openclaw plugins install ./extensions/acpx +``` + +Then verify backend health: + +```text +/acp doctor +``` + +### Pinned acpx install strategy (current behavior) + +`@openclaw/acpx` now enforces a strict plugin-local pinning model: + +1. The extension pins an exact acpx dependency in `extensions/acpx/package.json`. +2. Runtime command is fixed to the plugin-local binary (`extensions/acpx/node_modules/.bin/acpx`), not global `PATH`. +3. Plugin config does not expose `command` or `commandArgs`, so runtime command drift is blocked. +4. Startup registers the ACP backend immediately as not-ready. +5. A background ensure job verifies `acpx --version` against the pinned version. +6. If missing/mismatched, it runs plugin-local install (`npm install --omit=dev --no-save acpx@`) and re-verifies before healthy. + +Notes: + +- OpenClaw startup stays non-blocking while acpx ensure runs. +- If network/install fails, backend remains unavailable and `/acp doctor` reports an actionable fix. + +See [Plugins](/tools/plugin). 
+
+## Troubleshooting
+
+| Symptom | Likely cause | Fix |
+| --- | --- | --- |
+| `ACP runtime backend is not configured` | Backend plugin missing or disabled. | Install and enable backend plugin, then run `/acp doctor`. |
+| `ACP is disabled by policy (acp.enabled=false)` | ACP globally disabled. | Set `acp.enabled=true`. |
+| `ACP dispatch is disabled by policy (acp.dispatch.enabled=false)` | Dispatch from normal thread messages disabled. | Set `acp.dispatch.enabled=true`. |
+| `ACP agent "<agentId>" is not allowed by policy` | Agent not in allowlist. | Use allowed `agentId` or update `acp.allowedAgents`. |
+| `Unable to resolve session target: ...` | Bad key/id/label token. | Run `/acp sessions`, copy exact key/label, retry. |
+| `--thread here requires running /acp spawn inside an active ... thread` | `--thread here` used outside a thread context. | Move to target thread or use `--thread auto`/`off`. |
+| `Only <owner> can rebind this thread.` | Another user owns thread binding. | Rebind as owner or use a different thread. |
+| `Thread bindings are unavailable for <channel>.` | Adapter lacks thread binding capability. | Use `--thread off` or move to supported adapter/channel. |
+| Missing ACP metadata for bound session | Stale/deleted ACP session metadata. | Recreate with `/acp spawn`, then rebind/focus thread. 
| diff --git a/docs/tools/index.md b/docs/tools/index.md index 269b6856d03..fa35a63cb7b 100644 --- a/docs/tools/index.md +++ b/docs/tools/index.md @@ -464,7 +464,7 @@ Core parameters: - `sessions_list`: `kinds?`, `limit?`, `activeMinutes?`, `messageLimit?` (0 = none) - `sessions_history`: `sessionKey` (or `sessionId`), `limit?`, `includeTools?` - `sessions_send`: `sessionKey` (or `sessionId`), `message`, `timeoutSeconds?` (0 = fire-and-forget) -- `sessions_spawn`: `task`, `label?`, `agentId?`, `model?`, `thinking?`, `runTimeoutSeconds?`, `thread?`, `mode?`, `cleanup?` +- `sessions_spawn`: `task`, `label?`, `runtime?`, `agentId?`, `model?`, `thinking?`, `cwd?`, `runTimeoutSeconds?`, `thread?`, `mode?`, `cleanup?` - `session_status`: `sessionKey?` (default current; accepts `sessionId`), `model?` (`default` clears override) Notes: @@ -474,6 +474,7 @@ Notes: - Session targeting is controlled by `tools.sessions.visibility` (default `tree`: current session + spawned subagent sessions). If you run a shared agent for multiple users, consider setting `tools.sessions.visibility: "self"` to prevent cross-session browsing. - `sessions_send` waits for final completion when `timeoutSeconds > 0`. - Delivery/announce happens after completion and is best-effort; `status: "ok"` confirms the agent run finished, not that the announce was delivered. +- `sessions_spawn` supports `runtime: "subagent" | "acp"` (`subagent` default). For ACP runtime behavior, see [ACP Agents](/tools/acp-agents). - `sessions_spawn` starts a sub-agent run and posts an announce reply back to the requester chat. - Supports one-shot mode (`mode: "run"`) and persistent thread-bound mode (`mode: "session"` with `thread: true`). - If `thread: true` and `mode` is omitted, mode defaults to `session`. 
diff --git a/docs/tools/plugin.md b/docs/tools/plugin.md index 9250501f2d9..3dc575088eb 100644 --- a/docs/tools/plugin.md +++ b/docs/tools/plugin.md @@ -452,6 +452,29 @@ Notes: - `meta.preferOver` lists channel ids to skip auto-enable when both are configured. - `meta.detailLabel` and `meta.systemImage` let UIs show richer channel labels/icons. +### Channel onboarding hooks + +Channel plugins can define optional onboarding hooks on `plugin.onboarding`: + +- `configure(ctx)` is the baseline setup flow. +- `configureInteractive(ctx)` can fully own interactive setup for both configured and unconfigured states. +- `configureWhenConfigured(ctx)` can override behavior only for already configured channels. + +Hook precedence in the wizard: + +1. `configureInteractive` (if present) +2. `configureWhenConfigured` (only when channel status is already configured) +3. fallback to `configure` + +Context details: + +- `configureInteractive` and `configureWhenConfigured` receive: + - `configured` (`true` or `false`) + - `label` (user-facing channel name used by prompts) + - plus the shared config/runtime/prompter/options fields +- Returning `"skip"` leaves selection and account tracking unchanged. +- Returning `{ cfg, accountId? }` applies config updates and records account selection. + ### Write a new messaging channel (step‑by‑step) Use this when you want a **new chat surface** (a "messaging channel"), not a model provider. 
diff --git a/docs/tools/skills-config.md b/docs/tools/skills-config.md index d4d666ec198..589d464bb13 100644 --- a/docs/tools/skills-config.md +++ b/docs/tools/skills-config.md @@ -26,7 +26,7 @@ All skills-related configuration lives under `skills` in `~/.openclaw/openclaw.j entries: { "nano-banana-pro": { enabled: true, - apiKey: "GEMINI_KEY_HERE", + apiKey: { source: "env", provider: "default", id: "GEMINI_API_KEY" }, // or plaintext string env: { GEMINI_API_KEY: "GEMINI_KEY_HERE", }, @@ -56,6 +56,7 @@ Per-skill fields: - `enabled`: set `false` to disable a skill even if it’s bundled/installed. - `env`: environment variables injected for the agent run (only if not already set). - `apiKey`: optional convenience for skills that declare a primary env var. + Supports plaintext string or SecretRef object (`{ source, provider, id }`). ## Notes diff --git a/docs/tools/skills.md b/docs/tools/skills.md index 1e5fa2c5048..de3fe807ed2 100644 --- a/docs/tools/skills.md +++ b/docs/tools/skills.md @@ -195,7 +195,7 @@ Bundled/managed skills can be toggled and supplied with env values: entries: { "nano-banana-pro": { enabled: true, - apiKey: "GEMINI_KEY_HERE", + apiKey: { source: "env", provider: "default", id: "GEMINI_API_KEY" }, // or plaintext string env: { GEMINI_API_KEY: "GEMINI_KEY_HERE", }, @@ -221,6 +221,7 @@ Rules: - `enabled: false` disables the skill even if it’s bundled/installed. - `env`: injected **only if** the variable isn’t already set in the process. - `apiKey`: convenience for skills that declare `metadata.openclaw.primaryEnv`. + Supports plaintext string or SecretRef object (`{ source, provider, id }`). - `config`: optional bag for custom per-skill fields; custom keys must live here. - `allowBundled`: optional allowlist for **bundled** skills only. If set, only bundled skills in the list are eligible (managed/workspace skills unaffected). 
diff --git a/docs/tools/slash-commands.md b/docs/tools/slash-commands.md index 86dd32a83c8..4d045d4ee71 100644 --- a/docs/tools/slash-commands.md +++ b/docs/tools/slash-commands.md @@ -80,6 +80,7 @@ Text + native (when enabled): - `/whoami` (show your sender id; alias: `/id`) - `/session ttl ` (manage session-level settings, such as TTL) - `/subagents list|kill|log|info|send|steer|spawn` (inspect, control, or spawn sub-agent runs for the current session) +- `/acp spawn|cancel|steer|close|status|set-mode|set|cwd|permissions|timeout|model|reset-options|doctor|install|sessions` (inspect and control ACP runtime sessions) - `/agents` (list thread-bound agents for this session) - `/focus ` (Discord: bind this thread, or a new thread, to a session/subagent target) - `/unfocus` (Discord: remove the current thread binding) @@ -125,6 +126,7 @@ Notes: - `/restart` is enabled by default; set `commands.restart: false` to disable it. - Discord-only native command: `/vc join|leave|status` controls voice channels (requires `channels.discord.voice` and native commands; not available as text). - Discord thread-binding commands (`/focus`, `/unfocus`, `/agents`, `/session ttl`) require effective thread bindings to be enabled (`session.threadBindings.enabled` and/or `channels.discord.threadBindings.enabled`). +- ACP command reference and runtime behavior: [ACP Agents](/tools/acp-agents). - `/verbose` is meant for debugging and extra visibility; keep it **off** in normal use. - Tool failure summaries are still shown when relevant, but detailed failure text is only included when `/verbose` is `on` or `full`. - `/reasoning` (and `/verbose`) are risky in group settings: they may reveal internal reasoning or tool output you did not intend to expose. Prefer leaving them off, especially in group chats. 
diff --git a/docs/tools/subagents.md b/docs/tools/subagents.md index 9542858c840..8d066a94e7f 100644 --- a/docs/tools/subagents.md +++ b/docs/tools/subagents.md @@ -51,6 +51,7 @@ These commands work on channels that support persistent thread bindings. See **T - `--model` and `--thinking` override defaults for that specific run. - Use `info`/`log` to inspect details and output after completion. - `/subagents spawn` is one-shot mode (`mode: "run"`). For persistent thread-bound sessions, use `sessions_spawn` with `thread: true` and `mode: "session"`. +- For ACP harness sessions (Codex, Claude Code, Gemini CLI), use `sessions_spawn` with `runtime: "acp"` and see [ACP Agents](/tools/acp-agents). Primary goals: diff --git a/extensions/acpx/index.ts b/extensions/acpx/index.ts new file mode 100644 index 00000000000..5f57e396f80 --- /dev/null +++ b/extensions/acpx/index.ts @@ -0,0 +1,19 @@ +import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; +import { createAcpxPluginConfigSchema } from "./src/config.js"; +import { createAcpxRuntimeService } from "./src/service.js"; + +const plugin = { + id: "acpx", + name: "ACPX Runtime", + description: "ACP runtime backend powered by the acpx CLI.", + configSchema: createAcpxPluginConfigSchema(), + register(api: OpenClawPluginApi) { + api.registerService( + createAcpxRuntimeService({ + pluginConfig: api.pluginConfig, + }), + ); + }, +}; + +export default plugin; diff --git a/extensions/acpx/openclaw.plugin.json b/extensions/acpx/openclaw.plugin.json new file mode 100644 index 00000000000..61790e6ca05 --- /dev/null +++ b/extensions/acpx/openclaw.plugin.json @@ -0,0 +1,55 @@ +{ + "id": "acpx", + "name": "ACPX Runtime", + "description": "ACP runtime backend powered by a pinned plugin-local acpx CLI.", + "skills": ["./skills"], + "configSchema": { + "type": "object", + "additionalProperties": false, + "properties": { + "cwd": { + "type": "string" + }, + "permissionMode": { + "type": "string", + "enum": ["approve-all", 
"approve-reads", "deny-all"] + }, + "nonInteractivePermissions": { + "type": "string", + "enum": ["deny", "fail"] + }, + "timeoutSeconds": { + "type": "number", + "minimum": 0.001 + }, + "queueOwnerTtlSeconds": { + "type": "number", + "minimum": 0 + } + } + }, + "uiHints": { + "cwd": { + "label": "Default Working Directory", + "help": "Default cwd for ACP session operations when not set per session." + }, + "permissionMode": { + "label": "Permission Mode", + "help": "Default acpx permission policy for runtime prompts." + }, + "nonInteractivePermissions": { + "label": "Non-Interactive Permission Policy", + "help": "acpx policy when interactive permission prompts are unavailable." + }, + "timeoutSeconds": { + "label": "Prompt Timeout Seconds", + "help": "Optional acpx timeout for each runtime turn.", + "advanced": true + }, + "queueOwnerTtlSeconds": { + "label": "Queue Owner TTL Seconds", + "help": "Idle queue-owner TTL for acpx prompt turns. Keep this short in OpenClaw to avoid delayed completion after each turn.", + "advanced": true + } + } +} diff --git a/extensions/acpx/package.json b/extensions/acpx/package.json new file mode 100644 index 00000000000..503748e3798 --- /dev/null +++ b/extensions/acpx/package.json @@ -0,0 +1,14 @@ +{ + "name": "@openclaw/acpx", + "version": "2026.2.26", + "description": "OpenClaw ACP runtime backend via acpx", + "type": "module", + "dependencies": { + "acpx": "^0.1.13" + }, + "openclaw": { + "extensions": [ + "./index.ts" + ] + } +} diff --git a/extensions/acpx/skills/acp-router/SKILL.md b/extensions/acpx/skills/acp-router/SKILL.md new file mode 100644 index 00000000000..c80978fa8ae --- /dev/null +++ b/extensions/acpx/skills/acp-router/SKILL.md @@ -0,0 +1,209 @@ +--- +name: acp-router +description: Route plain-language requests for Pi, Claude Code, Codex, OpenCode, Gemini CLI, or ACP harness work into either OpenClaw ACP runtime sessions or direct acpx-driven sessions ("telephone game" flow). 
+user-invocable: false +--- + +# ACP Harness Router + +When user intent is "run this in Pi/Claude Code/Codex/OpenCode/Gemini (ACP harness)", do not use subagent runtime or PTY scraping. Route through ACP-aware flows. + +## Intent detection + +Trigger this skill when the user asks OpenClaw to: + +- run something in Pi / Claude Code / Codex / OpenCode / Gemini +- continue existing harness work +- relay instructions to an external coding harness +- keep an external harness conversation in a thread-like conversation + +## Mode selection + +Choose one of these paths: + +1. OpenClaw ACP runtime path (default): use `sessions_spawn` / ACP runtime tools. +2. Direct `acpx` path (telephone game): use `acpx` CLI through `exec` to drive the harness session directly. + +Use direct `acpx` when one of these is true: + +- user explicitly asks for direct `acpx` driving +- ACP runtime/plugin path is unavailable or unhealthy +- the task is "just relay prompts to harness" and no OpenClaw ACP lifecycle features are needed + +Do not use: + +- `subagents` runtime for harness control +- `/acp` command delegation as a requirement for the user +- PTY scraping of pi/claude/codex/opencode/gemini CLIs when `acpx` is available + +## AgentId mapping + +Use these defaults when user names a harness directly: + +- "pi" -> `agentId: "pi"` +- "claude" or "claude code" -> `agentId: "claude"` +- "codex" -> `agentId: "codex"` +- "opencode" -> `agentId: "opencode"` +- "gemini" or "gemini cli" -> `agentId: "gemini"` + +These defaults match current acpx built-in aliases. + +If policy rejects the chosen id, report the policy error clearly and ask for the allowed ACP agent id. + +## OpenClaw ACP runtime path + +Required behavior: + +1. Use `sessions_spawn` with: + - `runtime: "acp"` + - `thread: true` + - `mode: "session"` (unless user explicitly wants one-shot) +2. Put requested work in `task` so the ACP session gets it immediately. +3. Set `agentId` explicitly unless ACP default agent is known. +4. 
Do not ask user to run slash commands or CLI when this path works directly. + +Example: + +User: "spawn a test codex session in thread and tell it to say hi" + +Call: + +```json +{ + "task": "Say hi.", + "runtime": "acp", + "agentId": "codex", + "thread": true, + "mode": "session" +} +``` + +## Thread spawn recovery policy + +When the user asks to start a coding harness in a thread (for example "start a codex/claude/pi thread"), treat that as an ACP runtime request and try to satisfy it end-to-end. + +Required behavior when ACP backend is unavailable: + +1. Do not immediately ask the user to pick an alternate path. +2. First attempt automatic local repair: + - ensure plugin-local pinned acpx is installed in `extensions/acpx` + - verify `${ACPX_CMD} --version` +3. After reinstall/repair, restart the gateway and explicitly offer to run that restart for the user. +4. Retry ACP thread spawn once after repair. +5. Only if repair+retry fails, report the concrete error and then offer fallback options. + +When offering fallback, keep ACP first: + +- Option 1: retry ACP spawn after showing exact failing step +- Option 2: direct acpx telephone-game flow + +Do not default to subagent runtime for these requests. + +## ACPX install and version policy (direct acpx path) + +For this repo, direct `acpx` calls must follow the same pinned policy as the `@openclaw/acpx` extension. + +1. Prefer plugin-local binary, not global PATH: + - `./extensions/acpx/node_modules/.bin/acpx` +2. Resolve pinned version from extension dependency: + - `node -e "console.log(require('./extensions/acpx/package.json').dependencies.acpx)"` +3. If binary is missing or version mismatched, install plugin-local pinned version: + - `cd extensions/acpx && npm install --omit=dev --no-save acpx@` +4. Verify before use: + - `./extensions/acpx/node_modules/.bin/acpx --version` +5. If install/repair changed ACPX artifacts, restart the gateway and offer to run the restart. +6. 
Do not run `npm install -g acpx` unless the user explicitly asks for global install. + +Set and reuse: + +```bash +ACPX_CMD="./extensions/acpx/node_modules/.bin/acpx" +``` + +## Direct acpx path ("telephone game") + +Use this path to drive harness sessions without `/acp` or subagent runtime. + +### Rules + +1. Use `exec` commands that call `${ACPX_CMD}`. +2. Reuse a stable session name per conversation so follow-up prompts stay in the same harness context. +3. Prefer `--format quiet` for clean assistant text to relay back to user. +4. Use `exec` (one-shot) only when the user wants one-shot behavior. +5. Keep working directory explicit (`--cwd`) when task scope depends on repo context. + +### Session naming + +Use a deterministic name, for example: + +- `oc--` + +Where `conversationId` is thread id when available, otherwise channel/conversation id. + +### Command templates + +Persistent session (create if missing, then prompt): + +```bash +${ACPX_CMD} codex sessions show oc-codex- \ + || ${ACPX_CMD} codex sessions new --name oc-codex- + +${ACPX_CMD} codex -s oc-codex- --cwd --format quiet "" +``` + +One-shot: + +```bash +${ACPX_CMD} codex exec --cwd --format quiet "" +``` + +Cancel in-flight turn: + +```bash +${ACPX_CMD} codex cancel -s oc-codex- +``` + +Close session: + +```bash +${ACPX_CMD} codex sessions close oc-codex- +``` + +### Harness aliases in acpx + +- `pi` +- `claude` +- `codex` +- `opencode` +- `gemini` + +### Built-in adapter commands in acpx + +Defaults are: + +- `pi -> npx pi-acp` +- `claude -> npx -y @zed-industries/claude-agent-acp` +- `codex -> npx @zed-industries/codex-acp` +- `opencode -> npx -y opencode-ai acp` +- `gemini -> gemini` + +If `~/.acpx/config.json` overrides `agents`, those overrides replace defaults. 
+
+### Failure handling
+
+- `acpx: command not found`:
+  - for thread-spawn ACP requests, install plugin-local pinned acpx in `extensions/acpx` immediately
+  - restart gateway after install and offer to run the restart automatically
+  - then retry once
+  - do not ask for install permission first unless policy explicitly requires it
+  - do not install global `acpx` unless explicitly requested
+- adapter command missing (for example `claude-agent-acp` not found):
+  - for thread-spawn ACP requests, first restore built-in defaults by removing broken `~/.acpx/config.json` agent overrides
+  - then retry once before offering fallback
+  - if user wants binary-based overrides, install exactly the configured adapter binary
+- `NO_SESSION`: run `${ACPX_CMD} sessions new --name <name>` then retry prompt.
+- queue busy: either wait for completion (default) or use `--no-wait` when async behavior is explicitly desired.
+
+### Output relay
+
+When relaying to user, return the final assistant text output from `acpx` command result. Avoid relaying raw local tool noise unless user asked for verbose logs. 
diff --git a/extensions/acpx/src/config.test.ts b/extensions/acpx/src/config.test.ts new file mode 100644 index 00000000000..efd6d5c7e73 --- /dev/null +++ b/extensions/acpx/src/config.test.ts @@ -0,0 +1,53 @@ +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { + ACPX_BUNDLED_BIN, + createAcpxPluginConfigSchema, + resolveAcpxPluginConfig, +} from "./config.js"; + +describe("acpx plugin config parsing", () => { + it("resolves a strict plugin-local acpx command", () => { + const resolved = resolveAcpxPluginConfig({ + rawConfig: { + cwd: "/tmp/workspace", + }, + workspaceDir: "/tmp/workspace", + }); + + expect(resolved.command).toBe(ACPX_BUNDLED_BIN); + expect(resolved.cwd).toBe(path.resolve("/tmp/workspace")); + }); + + it("rejects command overrides", () => { + expect(() => + resolveAcpxPluginConfig({ + rawConfig: { + command: "acpx-custom", + }, + workspaceDir: "/tmp/workspace", + }), + ).toThrow("unknown config key: command"); + }); + + it("rejects commandArgs overrides", () => { + expect(() => + resolveAcpxPluginConfig({ + rawConfig: { + commandArgs: ["--foo"], + }, + workspaceDir: "/tmp/workspace", + }), + ).toThrow("unknown config key: commandArgs"); + }); + + it("schema rejects empty cwd", () => { + const schema = createAcpxPluginConfigSchema(); + if (!schema.safeParse) { + throw new Error("acpx config schema missing safeParse"); + } + const parsed = schema.safeParse({ cwd: " " }); + + expect(parsed.success).toBe(false); + }); +}); diff --git a/extensions/acpx/src/config.ts b/extensions/acpx/src/config.ts new file mode 100644 index 00000000000..bf5d0e0993e --- /dev/null +++ b/extensions/acpx/src/config.ts @@ -0,0 +1,196 @@ +import path from "node:path"; +import { fileURLToPath } from "node:url"; +import type { OpenClawPluginConfigSchema } from "openclaw/plugin-sdk"; + +export const ACPX_PERMISSION_MODES = ["approve-all", "approve-reads", "deny-all"] as const; +export type AcpxPermissionMode = (typeof 
ACPX_PERMISSION_MODES)[number]; + +export const ACPX_NON_INTERACTIVE_POLICIES = ["deny", "fail"] as const; +export type AcpxNonInteractivePermissionPolicy = (typeof ACPX_NON_INTERACTIVE_POLICIES)[number]; + +export const ACPX_PINNED_VERSION = "0.1.13"; +const ACPX_BIN_NAME = process.platform === "win32" ? "acpx.cmd" : "acpx"; +export const ACPX_PLUGIN_ROOT = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); +export const ACPX_BUNDLED_BIN = path.join(ACPX_PLUGIN_ROOT, "node_modules", ".bin", ACPX_BIN_NAME); +export const ACPX_LOCAL_INSTALL_COMMAND = `npm install --omit=dev --no-save acpx@${ACPX_PINNED_VERSION}`; + +export type AcpxPluginConfig = { + cwd?: string; + permissionMode?: AcpxPermissionMode; + nonInteractivePermissions?: AcpxNonInteractivePermissionPolicy; + timeoutSeconds?: number; + queueOwnerTtlSeconds?: number; +}; + +export type ResolvedAcpxPluginConfig = { + command: string; + cwd: string; + permissionMode: AcpxPermissionMode; + nonInteractivePermissions: AcpxNonInteractivePermissionPolicy; + timeoutSeconds?: number; + queueOwnerTtlSeconds: number; +}; + +const DEFAULT_PERMISSION_MODE: AcpxPermissionMode = "approve-reads"; +const DEFAULT_NON_INTERACTIVE_POLICY: AcpxNonInteractivePermissionPolicy = "fail"; +const DEFAULT_QUEUE_OWNER_TTL_SECONDS = 0.1; + +type ParseResult = + | { ok: true; value: AcpxPluginConfig | undefined } + | { ok: false; message: string }; + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null && !Array.isArray(value); +} + +function isPermissionMode(value: string): value is AcpxPermissionMode { + return ACPX_PERMISSION_MODES.includes(value as AcpxPermissionMode); +} + +function isNonInteractivePermissionPolicy( + value: string, +): value is AcpxNonInteractivePermissionPolicy { + return ACPX_NON_INTERACTIVE_POLICIES.includes(value as AcpxNonInteractivePermissionPolicy); +} + +function parseAcpxPluginConfig(value: unknown): ParseResult { + if (value === 
undefined) { + return { ok: true, value: undefined }; + } + if (!isRecord(value)) { + return { ok: false, message: "expected config object" }; + } + const allowedKeys = new Set([ + "cwd", + "permissionMode", + "nonInteractivePermissions", + "timeoutSeconds", + "queueOwnerTtlSeconds", + ]); + for (const key of Object.keys(value)) { + if (!allowedKeys.has(key)) { + return { ok: false, message: `unknown config key: ${key}` }; + } + } + + const cwd = value.cwd; + if (cwd !== undefined && (typeof cwd !== "string" || cwd.trim() === "")) { + return { ok: false, message: "cwd must be a non-empty string" }; + } + + const permissionMode = value.permissionMode; + if ( + permissionMode !== undefined && + (typeof permissionMode !== "string" || !isPermissionMode(permissionMode)) + ) { + return { + ok: false, + message: `permissionMode must be one of: ${ACPX_PERMISSION_MODES.join(", ")}`, + }; + } + + const nonInteractivePermissions = value.nonInteractivePermissions; + if ( + nonInteractivePermissions !== undefined && + (typeof nonInteractivePermissions !== "string" || + !isNonInteractivePermissionPolicy(nonInteractivePermissions)) + ) { + return { + ok: false, + message: `nonInteractivePermissions must be one of: ${ACPX_NON_INTERACTIVE_POLICIES.join(", ")}`, + }; + } + + const timeoutSeconds = value.timeoutSeconds; + if ( + timeoutSeconds !== undefined && + (typeof timeoutSeconds !== "number" || !Number.isFinite(timeoutSeconds) || timeoutSeconds <= 0) + ) { + return { ok: false, message: "timeoutSeconds must be a positive number" }; + } + + const queueOwnerTtlSeconds = value.queueOwnerTtlSeconds; + if ( + queueOwnerTtlSeconds !== undefined && + (typeof queueOwnerTtlSeconds !== "number" || + !Number.isFinite(queueOwnerTtlSeconds) || + queueOwnerTtlSeconds < 0) + ) { + return { ok: false, message: "queueOwnerTtlSeconds must be a non-negative number" }; + } + + return { + ok: true, + value: { + cwd: typeof cwd === "string" ? 
cwd.trim() : undefined, + permissionMode: typeof permissionMode === "string" ? permissionMode : undefined, + nonInteractivePermissions: + typeof nonInteractivePermissions === "string" ? nonInteractivePermissions : undefined, + timeoutSeconds: typeof timeoutSeconds === "number" ? timeoutSeconds : undefined, + queueOwnerTtlSeconds: + typeof queueOwnerTtlSeconds === "number" ? queueOwnerTtlSeconds : undefined, + }, + }; +} + +export function createAcpxPluginConfigSchema(): OpenClawPluginConfigSchema { + return { + safeParse(value: unknown): + | { success: true; data?: unknown } + | { + success: false; + error: { issues: Array<{ path: Array; message: string }> }; + } { + const parsed = parseAcpxPluginConfig(value); + if (parsed.ok) { + return { success: true, data: parsed.value }; + } + return { + success: false, + error: { + issues: [{ path: [], message: parsed.message }], + }, + }; + }, + jsonSchema: { + type: "object", + additionalProperties: false, + properties: { + cwd: { type: "string" }, + permissionMode: { + type: "string", + enum: [...ACPX_PERMISSION_MODES], + }, + nonInteractivePermissions: { + type: "string", + enum: [...ACPX_NON_INTERACTIVE_POLICIES], + }, + timeoutSeconds: { type: "number", minimum: 0.001 }, + queueOwnerTtlSeconds: { type: "number", minimum: 0 }, + }, + }, + }; +} + +export function resolveAcpxPluginConfig(params: { + rawConfig: unknown; + workspaceDir?: string; +}): ResolvedAcpxPluginConfig { + const parsed = parseAcpxPluginConfig(params.rawConfig); + if (!parsed.ok) { + throw new Error(parsed.message); + } + const normalized = parsed.value ?? {}; + const fallbackCwd = params.workspaceDir?.trim() || process.cwd(); + const cwd = path.resolve(normalized.cwd?.trim() || fallbackCwd); + + return { + command: ACPX_BUNDLED_BIN, + cwd, + permissionMode: normalized.permissionMode ?? DEFAULT_PERMISSION_MODE, + nonInteractivePermissions: + normalized.nonInteractivePermissions ?? 
DEFAULT_NON_INTERACTIVE_POLICY, + timeoutSeconds: normalized.timeoutSeconds, + queueOwnerTtlSeconds: normalized.queueOwnerTtlSeconds ?? DEFAULT_QUEUE_OWNER_TTL_SECONDS, + }; +} diff --git a/extensions/acpx/src/ensure.test.ts b/extensions/acpx/src/ensure.test.ts new file mode 100644 index 00000000000..0b36c3def36 --- /dev/null +++ b/extensions/acpx/src/ensure.test.ts @@ -0,0 +1,125 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { ACPX_LOCAL_INSTALL_COMMAND, ACPX_PINNED_VERSION } from "./config.js"; + +const { resolveSpawnFailureMock, spawnAndCollectMock } = vi.hoisted(() => ({ + resolveSpawnFailureMock: vi.fn(() => null), + spawnAndCollectMock: vi.fn(), +})); + +vi.mock("./runtime-internals/process.js", () => ({ + resolveSpawnFailure: resolveSpawnFailureMock, + spawnAndCollect: spawnAndCollectMock, +})); + +import { checkPinnedAcpxVersion, ensurePinnedAcpx } from "./ensure.js"; + +describe("acpx ensure", () => { + beforeEach(() => { + resolveSpawnFailureMock.mockReset(); + resolveSpawnFailureMock.mockReturnValue(null); + spawnAndCollectMock.mockReset(); + }); + + it("accepts the pinned acpx version", async () => { + spawnAndCollectMock.mockResolvedValueOnce({ + stdout: `acpx ${ACPX_PINNED_VERSION}\n`, + stderr: "", + code: 0, + error: null, + }); + + const result = await checkPinnedAcpxVersion({ + command: "/plugin/node_modules/.bin/acpx", + cwd: "/plugin", + expectedVersion: ACPX_PINNED_VERSION, + }); + + expect(result).toEqual({ + ok: true, + version: ACPX_PINNED_VERSION, + expectedVersion: ACPX_PINNED_VERSION, + }); + }); + + it("reports version mismatch", async () => { + spawnAndCollectMock.mockResolvedValueOnce({ + stdout: "acpx 0.0.9\n", + stderr: "", + code: 0, + error: null, + }); + + const result = await checkPinnedAcpxVersion({ + command: "/plugin/node_modules/.bin/acpx", + cwd: "/plugin", + expectedVersion: ACPX_PINNED_VERSION, + }); + + expect(result).toMatchObject({ + ok: false, + reason: "version-mismatch", + 
expectedVersion: ACPX_PINNED_VERSION, + installedVersion: "0.0.9", + installCommand: ACPX_LOCAL_INSTALL_COMMAND, + }); + }); + + it("installs and verifies pinned acpx when precheck fails", async () => { + spawnAndCollectMock + .mockResolvedValueOnce({ + stdout: "acpx 0.0.9\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: "added 1 package\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: `acpx ${ACPX_PINNED_VERSION}\n`, + stderr: "", + code: 0, + error: null, + }); + + await ensurePinnedAcpx({ + command: "/plugin/node_modules/.bin/acpx", + pluginRoot: "/plugin", + expectedVersion: ACPX_PINNED_VERSION, + }); + + expect(spawnAndCollectMock).toHaveBeenCalledTimes(3); + expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ + command: "npm", + args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], + cwd: "/plugin", + }); + }); + + it("fails with actionable error when npm install fails", async () => { + spawnAndCollectMock + .mockResolvedValueOnce({ + stdout: "acpx 0.0.9\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: "", + stderr: "network down", + code: 1, + error: null, + }); + + await expect( + ensurePinnedAcpx({ + command: "/plugin/node_modules/.bin/acpx", + pluginRoot: "/plugin", + expectedVersion: ACPX_PINNED_VERSION, + }), + ).rejects.toThrow("failed to install plugin-local acpx"); + }); +}); diff --git a/extensions/acpx/src/ensure.ts b/extensions/acpx/src/ensure.ts new file mode 100644 index 00000000000..6bb015587ae --- /dev/null +++ b/extensions/acpx/src/ensure.ts @@ -0,0 +1,169 @@ +import type { PluginLogger } from "openclaw/plugin-sdk"; +import { ACPX_LOCAL_INSTALL_COMMAND, ACPX_PINNED_VERSION, ACPX_PLUGIN_ROOT } from "./config.js"; +import { resolveSpawnFailure, spawnAndCollect } from "./runtime-internals/process.js"; + +const SEMVER_PATTERN = /\b\d+\.\d+\.\d+(?:-[0-9A-Za-z.-]+)?\b/; + +export type 
AcpxVersionCheckResult = + | { + ok: true; + version: string; + expectedVersion: string; + } + | { + ok: false; + reason: "missing-command" | "missing-version" | "version-mismatch" | "execution-failed"; + message: string; + expectedVersion: string; + installCommand: string; + installedVersion?: string; + }; + +function extractVersion(stdout: string, stderr: string): string | null { + const combined = `${stdout}\n${stderr}`; + const match = combined.match(SEMVER_PATTERN); + return match?.[0] ?? null; +} + +export async function checkPinnedAcpxVersion(params: { + command: string; + cwd?: string; + expectedVersion?: string; +}): Promise<AcpxVersionCheckResult> { + const expectedVersion = params.expectedVersion ?? ACPX_PINNED_VERSION; + const cwd = params.cwd ?? ACPX_PLUGIN_ROOT; + const result = await spawnAndCollect({ + command: params.command, + args: ["--version"], + cwd, + }); + + if (result.error) { + const spawnFailure = resolveSpawnFailure(result.error, cwd); + if (spawnFailure === "missing-command") { + return { + ok: false, + reason: "missing-command", + message: `acpx command not found at ${params.command}`, + expectedVersion, + installCommand: ACPX_LOCAL_INSTALL_COMMAND, + }; + } + return { + ok: false, + reason: "execution-failed", + message: result.error.message, + expectedVersion, + installCommand: ACPX_LOCAL_INSTALL_COMMAND, + }; + } + + if ((result.code ?? 0) !== 0) { + const stderr = result.stderr.trim(); + return { + ok: false, + reason: "execution-failed", + message: stderr || `acpx --version failed with code ${result.code ?? 
"unknown"}`, + expectedVersion, + installCommand: ACPX_LOCAL_INSTALL_COMMAND, + }; + } + + const installedVersion = extractVersion(result.stdout, result.stderr); + if (!installedVersion) { + return { + ok: false, + reason: "missing-version", + message: "acpx --version output did not include a parseable version", + expectedVersion, + installCommand: ACPX_LOCAL_INSTALL_COMMAND, + }; + } + + if (installedVersion !== expectedVersion) { + return { + ok: false, + reason: "version-mismatch", + message: `acpx version mismatch: found ${installedVersion}, expected ${expectedVersion}`, + expectedVersion, + installCommand: ACPX_LOCAL_INSTALL_COMMAND, + installedVersion, + }; + } + + return { + ok: true, + version: installedVersion, + expectedVersion, + }; +} + +let pendingEnsure: Promise<void> | null = null; + +export async function ensurePinnedAcpx(params: { + command: string; + logger?: PluginLogger; + pluginRoot?: string; + expectedVersion?: string; +}): Promise<void> { + if (pendingEnsure) { + return await pendingEnsure; + } + + pendingEnsure = (async () => { + const pluginRoot = params.pluginRoot ?? ACPX_PLUGIN_ROOT; + const expectedVersion = params.expectedVersion ?? ACPX_PINNED_VERSION; + + const precheck = await checkPinnedAcpxVersion({ + command: params.command, + cwd: pluginRoot, + expectedVersion, + }); + if (precheck.ok) { + return; + } + + params.logger?.warn( + `acpx local binary unavailable or mismatched (${precheck.message}); running plugin-local install`, + ); + + const install = await spawnAndCollect({ + command: "npm", + args: ["install", "--omit=dev", "--no-save", `acpx@${expectedVersion}`], + cwd: pluginRoot, + }); + + if (install.error) { + const spawnFailure = resolveSpawnFailure(install.error, pluginRoot); + if (spawnFailure === "missing-command") { + throw new Error("npm is required to install plugin-local acpx but was not found on PATH"); + } + throw new Error(`failed to install plugin-local acpx: ${install.error.message}`); + } + + if ((install.code ?? 
0) !== 0) { + const stderr = install.stderr.trim(); + const stdout = install.stdout.trim(); + const detail = stderr || stdout || `npm exited with code ${install.code ?? "unknown"}`; + throw new Error(`failed to install plugin-local acpx: ${detail}`); + } + + const postcheck = await checkPinnedAcpxVersion({ + command: params.command, + cwd: pluginRoot, + expectedVersion, + }); + + if (!postcheck.ok) { + throw new Error(`plugin-local acpx verification failed after install: ${postcheck.message}`); + } + + params.logger?.info(`acpx plugin-local binary ready (version ${postcheck.version})`); + })(); + + try { + await pendingEnsure; + } finally { + pendingEnsure = null; + } +} diff --git a/extensions/acpx/src/runtime-internals/events.ts b/extensions/acpx/src/runtime-internals/events.ts new file mode 100644 index 00000000000..074787b3fdf --- /dev/null +++ b/extensions/acpx/src/runtime-internals/events.ts @@ -0,0 +1,140 @@ +import type { AcpRuntimeEvent } from "openclaw/plugin-sdk"; +import { + asOptionalBoolean, + asOptionalString, + asString, + asTrimmedString, + type AcpxErrorEvent, + type AcpxJsonObject, + isRecord, +} from "./shared.js"; + +export function toAcpxErrorEvent(value: unknown): AcpxErrorEvent | null { + if (!isRecord(value)) { + return null; + } + if (asTrimmedString(value.type) !== "error") { + return null; + } + return { + message: asTrimmedString(value.message) || "acpx reported an error", + code: asOptionalString(value.code), + retryable: asOptionalBoolean(value.retryable), + }; +} + +export function parseJsonLines(value: string): AcpxJsonObject[] { + const events: AcpxJsonObject[] = []; + for (const line of value.split(/\r?\n/)) { + const trimmed = line.trim(); + if (!trimmed) { + continue; + } + try { + const parsed = JSON.parse(trimmed) as unknown; + if (isRecord(parsed)) { + events.push(parsed); + } + } catch { + // Ignore malformed lines; callers handle missing typed events via exit code. 
+ } + } + return events; +} + +export function parsePromptEventLine(line: string): AcpRuntimeEvent | null { + const trimmed = line.trim(); + if (!trimmed) { + return null; + } + let parsed: unknown; + try { + parsed = JSON.parse(trimmed); + } catch { + return { + type: "status", + text: trimmed, + }; + } + + if (!isRecord(parsed)) { + return null; + } + + const type = asTrimmedString(parsed.type); + switch (type) { + case "text": { + const content = asString(parsed.content); + if (content == null || content.length === 0) { + return null; + } + return { + type: "text_delta", + text: content, + stream: "output", + }; + } + case "thought": { + const content = asString(parsed.content); + if (content == null || content.length === 0) { + return null; + } + return { + type: "text_delta", + text: content, + stream: "thought", + }; + } + case "tool_call": { + const title = asTrimmedString(parsed.title) || asTrimmedString(parsed.toolCallId) || "tool"; + const status = asTrimmedString(parsed.status); + return { + type: "tool_call", + text: status ? `${title} (${status})` : title, + }; + } + case "client_operation": { + const method = asTrimmedString(parsed.method) || "operation"; + const status = asTrimmedString(parsed.status); + const summary = asTrimmedString(parsed.summary); + const text = [method, status, summary].filter(Boolean).join(" "); + if (!text) { + return null; + } + return { type: "status", text }; + } + case "plan": { + const entries = Array.isArray(parsed.entries) ? 
parsed.entries : []; + const first = entries.find((entry) => isRecord(entry)) as Record<string, unknown> | undefined; + const content = asTrimmedString(first?.content); + if (!content) { + return null; + } + return { type: "status", text: `plan: ${content}` }; + } + case "update": { + const update = asTrimmedString(parsed.update); + if (!update) { + return null; + } + return { type: "status", text: update }; + } + case "done": { + return { + type: "done", + stopReason: asOptionalString(parsed.stopReason), + }; + } + case "error": { + const message = asTrimmedString(parsed.message) || "acpx runtime error"; + return { + type: "error", + message, + code: asOptionalString(parsed.code), + retryable: asOptionalBoolean(parsed.retryable), + }; + } + default: + return null; + } +} diff --git a/extensions/acpx/src/runtime-internals/process.ts b/extensions/acpx/src/runtime-internals/process.ts new file mode 100644 index 00000000000..752b48835ec --- /dev/null +++ b/extensions/acpx/src/runtime-internals/process.ts @@ -0,0 +1,137 @@ +import { spawn, type ChildProcessWithoutNullStreams } from "node:child_process"; +import { existsSync } from "node:fs"; +import path from "node:path"; + +export type SpawnExit = { + code: number | null; + signal: NodeJS.Signals | null; + error: Error | null; +}; + +type ResolvedSpawnCommand = { + command: string; + args: string[]; + shell?: boolean; +}; + +function resolveSpawnCommand(params: { command: string; args: string[] }): ResolvedSpawnCommand { + if (process.platform !== "win32") { + return { command: params.command, args: params.args }; + } + + const extension = path.extname(params.command).toLowerCase(); + if (extension === ".js" || extension === ".cjs" || extension === ".mjs") { + return { + command: process.execPath, + args: [params.command, ...params.args], + }; + } + + if (extension === ".cmd" || extension === ".bat") { + return { + command: params.command, + args: params.args, + shell: true, + }; + } + + return { + command: params.command, + args: 
params.args, + }; +} + +export function spawnWithResolvedCommand(params: { + command: string; + args: string[]; + cwd: string; +}): ChildProcessWithoutNullStreams { + const resolved = resolveSpawnCommand({ + command: params.command, + args: params.args, + }); + + return spawn(resolved.command, resolved.args, { + cwd: params.cwd, + env: process.env, + stdio: ["pipe", "pipe", "pipe"], + shell: resolved.shell, + }); +} + +export async function waitForExit(child: ChildProcessWithoutNullStreams): Promise<SpawnExit> { + return await new Promise<SpawnExit>((resolve) => { + let settled = false; + const finish = (result: SpawnExit) => { + if (settled) { + return; + } + settled = true; + resolve(result); + }; + + child.once("error", (err) => { + finish({ code: null, signal: null, error: err }); + }); + + child.once("close", (code, signal) => { + finish({ code, signal, error: null }); + }); + }); +} + +export async function spawnAndCollect(params: { + command: string; + args: string[]; + cwd: string; +}): Promise<{ + stdout: string; + stderr: string; + code: number | null; + error: Error | null; +}> { + const child = spawnWithResolvedCommand(params); + child.stdin.end(); + + let stdout = ""; + let stderr = ""; + child.stdout.on("data", (chunk) => { + stdout += String(chunk); + }); + child.stderr.on("data", (chunk) => { + stderr += String(chunk); + }); + + const exit = await waitForExit(child); + return { + stdout, + stderr, + code: exit.code, + error: exit.error, + }; +} + +export function resolveSpawnFailure( + err: unknown, + cwd: string, +): "missing-command" | "missing-cwd" | null { + if (!err || typeof err !== "object") { + return null; + } + const code = (err as NodeJS.ErrnoException).code; + if (code !== "ENOENT") { + return null; + } + return directoryExists(cwd) ? 
"missing-command" : "missing-cwd"; +} + +function directoryExists(cwd: string): boolean { + if (!cwd) { + return false; + } + try { + return existsSync(cwd); + } catch { + return false; + } +} diff --git a/extensions/acpx/src/runtime-internals/shared.ts b/extensions/acpx/src/runtime-internals/shared.ts new file mode 100644 index 00000000000..2f9b48025e6 --- /dev/null +++ b/extensions/acpx/src/runtime-internals/shared.ts @@ -0,0 +1,56 @@ +import type { ResolvedAcpxPluginConfig } from "../config.js"; + +export type AcpxHandleState = { + name: string; + agent: string; + cwd: string; + mode: "persistent" | "oneshot"; + acpxRecordId?: string; + backendSessionId?: string; + agentSessionId?: string; +}; + +export type AcpxJsonObject = Record<string, unknown>; + +export type AcpxErrorEvent = { + message: string; + code?: string; + retryable?: boolean; +}; + +export function isRecord(value: unknown): value is Record<string, unknown> { + return typeof value === "object" && value !== null && !Array.isArray(value); +} + +export function asTrimmedString(value: unknown): string { + return typeof value === "string" ? value.trim() : ""; +} + +export function asString(value: unknown): string | undefined { + return typeof value === "string" ? value : undefined; +} + +export function asOptionalString(value: unknown): string | undefined { + const text = asTrimmedString(value); + return text || undefined; +} + +export function asOptionalBoolean(value: unknown): boolean | undefined { + return typeof value === "boolean" ? value : undefined; +} + +export function deriveAgentFromSessionKey(sessionKey: string, fallbackAgent: string): string { + const match = sessionKey.match(/^agent:([^:]+):/i); + const candidate = match?.[1] ? 
asTrimmedString(match[1]) : ""; + return candidate || fallbackAgent; +} + +export function buildPermissionArgs(mode: ResolvedAcpxPluginConfig["permissionMode"]): string[] { + if (mode === "approve-all") { + return ["--approve-all"]; + } + if (mode === "deny-all") { + return ["--deny-all"]; + } + return ["--approve-reads"]; +} diff --git a/extensions/acpx/src/runtime.test.ts b/extensions/acpx/src/runtime.test.ts new file mode 100644 index 00000000000..d5e4fd275c7 --- /dev/null +++ b/extensions/acpx/src/runtime.test.ts @@ -0,0 +1,619 @@ +import fs from "node:fs"; +import { chmod, mkdtemp, readFile, rm, writeFile } from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, describe, expect, it } from "vitest"; +import { runAcpRuntimeAdapterContract } from "../../../src/acp/runtime/adapter-contract.testkit.js"; +import { ACPX_PINNED_VERSION, type ResolvedAcpxPluginConfig } from "./config.js"; +import { AcpxRuntime, decodeAcpxRuntimeHandleState } from "./runtime.js"; + +const NOOP_LOGGER = { + info: (_message: string) => {}, + warn: (_message: string) => {}, + error: (_message: string) => {}, + debug: (_message: string) => {}, +}; + +const MOCK_CLI_SCRIPT = String.raw`#!/usr/bin/env node +const fs = require("node:fs"); + +const args = process.argv.slice(2); +const logPath = process.env.MOCK_ACPX_LOG; +const writeLog = (entry) => { + if (!logPath) return; + fs.appendFileSync(logPath, JSON.stringify(entry) + "\n"); +}; + +if (args.includes("--version")) { + process.stdout.write("mock-acpx ${ACPX_PINNED_VERSION}\\n"); + process.exit(0); +} + +if (args.includes("--help")) { + process.stdout.write("mock-acpx help\\n"); + process.exit(0); +} + +const commandIndex = args.findIndex( + (arg) => + arg === "prompt" || + arg === "cancel" || + arg === "sessions" || + arg === "set-mode" || + arg === "set" || + arg === "status", +); +const command = commandIndex >= 0 ? args[commandIndex] : ""; +const agent = commandIndex > 0 ? 
args[commandIndex - 1] : "unknown"; + +const readFlag = (flag) => { + const idx = args.indexOf(flag); + if (idx < 0) return ""; + return String(args[idx + 1] || ""); +}; + +const sessionFromOption = readFlag("--session"); +const ensureName = readFlag("--name"); +const closeName = command === "sessions" && args[commandIndex + 1] === "close" ? String(args[commandIndex + 2] || "") : ""; +const setModeValue = command === "set-mode" ? String(args[commandIndex + 1] || "") : ""; +const setKey = command === "set" ? String(args[commandIndex + 1] || "") : ""; +const setValue = command === "set" ? String(args[commandIndex + 2] || "") : ""; + +if (command === "sessions" && args[commandIndex + 1] === "ensure") { + writeLog({ kind: "ensure", agent, args, sessionName: ensureName }); + process.stdout.write(JSON.stringify({ + type: "session_ensured", + acpxRecordId: "rec-" + ensureName, + acpxSessionId: "sid-" + ensureName, + agentSessionId: "inner-" + ensureName, + name: ensureName, + created: true, + }) + "\n"); + process.exit(0); +} + +if (command === "cancel") { + writeLog({ kind: "cancel", agent, args, sessionName: sessionFromOption }); + process.stdout.write(JSON.stringify({ + acpxSessionId: "sid-" + sessionFromOption, + cancelled: true, + }) + "\n"); + process.exit(0); +} + +if (command === "set-mode") { + writeLog({ kind: "set-mode", agent, args, sessionName: sessionFromOption, mode: setModeValue }); + process.stdout.write(JSON.stringify({ + type: "mode_set", + acpxSessionId: "sid-" + sessionFromOption, + mode: setModeValue, + }) + "\n"); + process.exit(0); +} + +if (command === "set") { + writeLog({ + kind: "set", + agent, + args, + sessionName: sessionFromOption, + key: setKey, + value: setValue, + }); + process.stdout.write(JSON.stringify({ + type: "config_set", + acpxSessionId: "sid-" + sessionFromOption, + key: setKey, + value: setValue, + }) + "\n"); + process.exit(0); +} + +if (command === "status") { + writeLog({ kind: "status", agent, args, sessionName: 
sessionFromOption }); + process.stdout.write(JSON.stringify({ + acpxRecordId: sessionFromOption ? "rec-" + sessionFromOption : null, + acpxSessionId: sessionFromOption ? "sid-" + sessionFromOption : null, + agentSessionId: sessionFromOption ? "inner-" + sessionFromOption : null, + status: sessionFromOption ? "alive" : "no-session", + pid: 4242, + uptime: 120, + }) + "\n"); + process.exit(0); +} + +if (command === "sessions" && args[commandIndex + 1] === "close") { + writeLog({ kind: "close", agent, args, sessionName: closeName }); + process.stdout.write(JSON.stringify({ + type: "session_closed", + acpxRecordId: "rec-" + closeName, + acpxSessionId: "sid-" + closeName, + name: closeName, + }) + "\n"); + process.exit(0); +} + +if (command === "prompt") { + const stdinText = fs.readFileSync(0, "utf8"); + writeLog({ kind: "prompt", agent, args, sessionName: sessionFromOption, stdinText }); + const acpxSessionId = "sid-" + sessionFromOption; + + if (stdinText.includes("trigger-error")) { + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 0, + stream: "prompt", + type: "error", + code: "RUNTIME", + message: "mock failure", + }) + "\n"); + process.exit(1); + } + + if (stdinText.includes("split-spacing")) { + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 0, + stream: "prompt", + type: "text", + content: "alpha", + }) + "\n"); + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 1, + stream: "prompt", + type: "text", + content: " beta", + }) + "\n"); + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 2, + stream: "prompt", + type: "text", + content: " gamma", + }) + "\n"); + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 3, + stream: "prompt", + type: "done", + stopReason: "end_turn", + }) + "\n"); + 
process.exit(0); + } + + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 0, + stream: "prompt", + type: "thought", + content: "thinking", + }) + "\n"); + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 1, + stream: "prompt", + type: "tool_call", + title: "run-tests", + status: "in_progress", + }) + "\n"); + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 2, + stream: "prompt", + type: "text", + content: "echo:" + stdinText.trim(), + }) + "\n"); + process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId, + requestId: "req-1", + seq: 3, + stream: "prompt", + type: "done", + stopReason: "end_turn", + }) + "\n"); + process.exit(0); +} + +writeLog({ kind: "unknown", args }); +process.stdout.write(JSON.stringify({ + eventVersion: 1, + acpxSessionId: "unknown", + seq: 0, + stream: "control", + type: "error", + code: "USAGE", + message: "unknown command", +}) + "\n"); +process.exit(2); +`; + +const tempDirs: string[] = []; + +async function createMockRuntime(params?: { + permissionMode?: ResolvedAcpxPluginConfig["permissionMode"]; + queueOwnerTtlSeconds?: number; +}): Promise<{ + runtime: AcpxRuntime; + logPath: string; + config: ResolvedAcpxPluginConfig; +}> { + const dir = await mkdtemp(path.join(os.tmpdir(), "openclaw-acpx-runtime-test-")); + tempDirs.push(dir); + const scriptPath = path.join(dir, "mock-acpx.cjs"); + const logPath = path.join(dir, "calls.log"); + await writeFile(scriptPath, MOCK_CLI_SCRIPT, "utf8"); + await chmod(scriptPath, 0o755); + process.env.MOCK_ACPX_LOG = logPath; + + const config: ResolvedAcpxPluginConfig = { + command: scriptPath, + cwd: dir, + permissionMode: params?.permissionMode ?? "approve-all", + nonInteractivePermissions: "fail", + queueOwnerTtlSeconds: params?.queueOwnerTtlSeconds ?? 
0.1, + }; + + return { + runtime: new AcpxRuntime(config, { + queueOwnerTtlSeconds: params?.queueOwnerTtlSeconds, + logger: NOOP_LOGGER, + }), + logPath, + config, + }; +} + +async function readLogEntries(logPath: string): Promise<Array<Record<string, unknown>>> { + if (!fs.existsSync(logPath)) { + return []; + } + const raw = await readFile(logPath, "utf8"); + return raw + .split(/\r?\n/) + .map((line) => line.trim()) + .filter(Boolean) + .map((line) => JSON.parse(line) as Record<string, unknown>); +} + +afterEach(async () => { + delete process.env.MOCK_ACPX_LOG; + while (tempDirs.length > 0) { + const dir = tempDirs.pop(); + if (!dir) { + continue; + } + await rm(dir, { + recursive: true, + force: true, + maxRetries: 10, + retryDelay: 10, + }); + } +}); + +describe("AcpxRuntime", () => { + it("passes the shared ACP adapter contract suite", async () => { + const fixture = await createMockRuntime(); + await runAcpRuntimeAdapterContract({ + createRuntime: async () => fixture.runtime, + agentId: "codex", + successPrompt: "contract-pass", + errorPrompt: "trigger-error", + assertSuccessEvents: (events) => { + expect(events.some((event) => event.type === "done")).toBe(true); + }, + assertErrorOutcome: ({ events, thrown }) => { + expect(events.some((event) => event.type === "error") || Boolean(thrown)).toBe(true); + }, + }); + + const logs = await readLogEntries(fixture.logPath); + expect(logs.some((entry) => entry.kind === "ensure")).toBe(true); + expect(logs.some((entry) => entry.kind === "status")).toBe(true); + expect(logs.some((entry) => entry.kind === "set-mode")).toBe(true); + expect(logs.some((entry) => entry.kind === "set")).toBe(true); + expect(logs.some((entry) => entry.kind === "cancel")).toBe(true); + expect(logs.some((entry) => entry.kind === "close")).toBe(true); + }); + + it("ensures sessions and streams prompt events", async () => { + const { runtime, logPath } = await createMockRuntime({ queueOwnerTtlSeconds: 180 }); + + const handle = await runtime.ensureSession({ + sessionKey: 
"agent:codex:acp:123", + agent: "codex", + mode: "persistent", + }); + expect(handle.backend).toBe("acpx"); + expect(handle.acpxRecordId).toBe("rec-agent:codex:acp:123"); + expect(handle.agentSessionId).toBe("inner-agent:codex:acp:123"); + expect(handle.backendSessionId).toBe("sid-agent:codex:acp:123"); + const decoded = decodeAcpxRuntimeHandleState(handle.runtimeSessionName); + expect(decoded?.acpxRecordId).toBe("rec-agent:codex:acp:123"); + expect(decoded?.agentSessionId).toBe("inner-agent:codex:acp:123"); + expect(decoded?.backendSessionId).toBe("sid-agent:codex:acp:123"); + + const events = []; + for await (const event of runtime.runTurn({ + handle, + text: "hello world", + mode: "prompt", + requestId: "req-test", + })) { + events.push(event); + } + + expect(events).toContainEqual({ + type: "text_delta", + text: "thinking", + stream: "thought", + }); + expect(events).toContainEqual({ + type: "tool_call", + text: "run-tests (in_progress)", + }); + expect(events).toContainEqual({ + type: "text_delta", + text: "echo:hello world", + stream: "output", + }); + expect(events).toContainEqual({ + type: "done", + stopReason: "end_turn", + }); + + const logs = await readLogEntries(logPath); + const ensure = logs.find((entry) => entry.kind === "ensure"); + const prompt = logs.find((entry) => entry.kind === "prompt"); + expect(ensure).toBeDefined(); + expect(prompt).toBeDefined(); + expect(Array.isArray(prompt?.args)).toBe(true); + const promptArgs = (prompt?.args as string[]) ?? 
[]; + expect(promptArgs).toContain("--ttl"); + expect(promptArgs).toContain("180"); + expect(promptArgs).toContain("--approve-all"); + }); + + it("passes a queue-owner TTL by default to avoid long idle stalls", async () => { + const { runtime, logPath } = await createMockRuntime(); + const handle = await runtime.ensureSession({ + sessionKey: "agent:codex:acp:ttl-default", + agent: "codex", + mode: "persistent", + }); + + for await (const _event of runtime.runTurn({ + handle, + text: "ttl-default", + mode: "prompt", + requestId: "req-ttl-default", + })) { + // drain + } + + const logs = await readLogEntries(logPath); + const prompt = logs.find((entry) => entry.kind === "prompt"); + expect(prompt).toBeDefined(); + const promptArgs = (prompt?.args as string[]) ?? []; + const ttlFlagIndex = promptArgs.indexOf("--ttl"); + expect(ttlFlagIndex).toBeGreaterThanOrEqual(0); + expect(promptArgs[ttlFlagIndex + 1]).toBe("0.1"); + }); + + it("preserves leading spaces across streamed text deltas", async () => { + const { runtime } = await createMockRuntime(); + const handle = await runtime.ensureSession({ + sessionKey: "agent:codex:acp:space", + agent: "codex", + mode: "persistent", + }); + + const textDeltas: string[] = []; + for await (const event of runtime.runTurn({ + handle, + text: "split-spacing", + mode: "prompt", + requestId: "req-space", + })) { + if (event.type === "text_delta" && event.stream === "output") { + textDeltas.push(event.text); + } + } + + expect(textDeltas).toEqual(["alpha", " beta", " gamma"]); + expect(textDeltas.join("")).toBe("alpha beta gamma"); + }); + + it("maps acpx error events into ACP runtime error events", async () => { + const { runtime } = await createMockRuntime(); + const handle = await runtime.ensureSession({ + sessionKey: "agent:codex:acp:456", + agent: "codex", + mode: "persistent", + }); + + const events = []; + for await (const event of runtime.runTurn({ + handle, + text: "trigger-error", + mode: "prompt", + requestId: "req-err", + })) 
{ + events.push(event); + } + + expect(events).toContainEqual({ + type: "error", + message: "mock failure", + code: "RUNTIME", + retryable: undefined, + }); + }); + + it("supports cancel and close using encoded runtime handle state", async () => { + const { runtime, logPath, config } = await createMockRuntime(); + const handle = await runtime.ensureSession({ + sessionKey: "agent:claude:acp:789", + agent: "claude", + mode: "persistent", + }); + + const decoded = decodeAcpxRuntimeHandleState(handle.runtimeSessionName); + expect(decoded?.name).toBe("agent:claude:acp:789"); + + const secondRuntime = new AcpxRuntime(config, { logger: NOOP_LOGGER }); + + await secondRuntime.cancel({ handle, reason: "test" }); + await secondRuntime.close({ handle, reason: "test" }); + + const logs = await readLogEntries(logPath); + const cancel = logs.find((entry) => entry.kind === "cancel"); + const close = logs.find((entry) => entry.kind === "close"); + expect(cancel?.sessionName).toBe("agent:claude:acp:789"); + expect(close?.sessionName).toBe("agent:claude:acp:789"); + }); + + it("exposes control capabilities and runs set-mode/set/status commands", async () => { + const { runtime, logPath } = await createMockRuntime(); + const handle = await runtime.ensureSession({ + sessionKey: "agent:codex:acp:controls", + agent: "codex", + mode: "persistent", + }); + + const capabilities = runtime.getCapabilities(); + expect(capabilities.controls).toContain("session/set_mode"); + expect(capabilities.controls).toContain("session/set_config_option"); + expect(capabilities.controls).toContain("session/status"); + + await runtime.setMode({ + handle, + mode: "plan", + }); + await runtime.setConfigOption({ + handle, + key: "model", + value: "openai-codex/gpt-5.3-codex", + }); + const status = await runtime.getStatus({ handle }); + const ensuredSessionName = "agent:codex:acp:controls"; + + expect(status.summary).toContain("status=alive"); + expect(status.acpxRecordId).toBe("rec-" + ensuredSessionName); + 
expect(status.backendSessionId).toBe("sid-" + ensuredSessionName); + expect(status.agentSessionId).toBe("inner-" + ensuredSessionName); + expect(status.details?.acpxRecordId).toBe("rec-" + ensuredSessionName); + expect(status.details?.status).toBe("alive"); + expect(status.details?.pid).toBe(4242); + + const logs = await readLogEntries(logPath); + expect(logs.find((entry) => entry.kind === "set-mode")?.mode).toBe("plan"); + expect(logs.find((entry) => entry.kind === "set")?.key).toBe("model"); + expect(logs.find((entry) => entry.kind === "status")).toBeDefined(); + }); + + it("skips prompt execution when runTurn starts with an already-aborted signal", async () => { + const { runtime, logPath } = await createMockRuntime(); + const handle = await runtime.ensureSession({ + sessionKey: "agent:codex:acp:aborted", + agent: "codex", + mode: "persistent", + }); + const controller = new AbortController(); + controller.abort(); + + const events = []; + for await (const event of runtime.runTurn({ + handle, + text: "should-not-run", + mode: "prompt", + requestId: "req-aborted", + signal: controller.signal, + })) { + events.push(event); + } + + const logs = await readLogEntries(logPath); + expect(events).toEqual([]); + expect(logs.some((entry) => entry.kind === "prompt")).toBe(false); + }); + + it("does not mark backend unhealthy when a per-session cwd is missing", async () => { + const { runtime } = await createMockRuntime(); + const missingCwd = path.join(os.tmpdir(), "openclaw-acpx-runtime-test-missing-cwd"); + + await runtime.probeAvailability(); + expect(runtime.isHealthy()).toBe(true); + + await expect( + runtime.ensureSession({ + sessionKey: "agent:codex:acp:missing-cwd", + agent: "codex", + mode: "persistent", + cwd: missingCwd, + }), + ).rejects.toMatchObject({ + code: "ACP_SESSION_INIT_FAILED", + message: expect.stringContaining("working directory does not exist"), + }); + expect(runtime.isHealthy()).toBe(true); + }); + + it("marks runtime unhealthy when command is 
missing", async () => { + const runtime = new AcpxRuntime( + { + command: "/definitely/missing/acpx", + cwd: process.cwd(), + permissionMode: "approve-reads", + nonInteractivePermissions: "fail", + queueOwnerTtlSeconds: 0.1, + }, + { logger: NOOP_LOGGER }, + ); + + await runtime.probeAvailability(); + expect(runtime.isHealthy()).toBe(false); + }); + + it("marks runtime healthy when command is available", async () => { + const { runtime } = await createMockRuntime(); + await runtime.probeAvailability(); + expect(runtime.isHealthy()).toBe(true); + }); + + it("returns doctor report for missing command", async () => { + const runtime = new AcpxRuntime( + { + command: "/definitely/missing/acpx", + cwd: process.cwd(), + permissionMode: "approve-reads", + nonInteractivePermissions: "fail", + queueOwnerTtlSeconds: 0.1, + }, + { logger: NOOP_LOGGER }, + ); + + const report = await runtime.doctor(); + expect(report.ok).toBe(false); + expect(report.code).toBe("ACP_BACKEND_UNAVAILABLE"); + expect(report.installCommand).toContain("acpx"); + }); +}); diff --git a/extensions/acpx/src/runtime.ts b/extensions/acpx/src/runtime.ts new file mode 100644 index 00000000000..a5273c7e0f2 --- /dev/null +++ b/extensions/acpx/src/runtime.ts @@ -0,0 +1,578 @@ +import { createInterface } from "node:readline"; +import type { + AcpRuntimeCapabilities, + AcpRuntimeDoctorReport, + AcpRuntime, + AcpRuntimeEnsureInput, + AcpRuntimeErrorCode, + AcpRuntimeEvent, + AcpRuntimeHandle, + AcpRuntimeStatus, + AcpRuntimeTurnInput, + PluginLogger, +} from "openclaw/plugin-sdk"; +import { AcpRuntimeError } from "openclaw/plugin-sdk"; +import { + ACPX_LOCAL_INSTALL_COMMAND, + ACPX_PINNED_VERSION, + type ResolvedAcpxPluginConfig, +} from "./config.js"; +import { checkPinnedAcpxVersion } from "./ensure.js"; +import { + parseJsonLines, + parsePromptEventLine, + toAcpxErrorEvent, +} from "./runtime-internals/events.js"; +import { + resolveSpawnFailure, + spawnAndCollect, + spawnWithResolvedCommand, + waitForExit, +} 
from "./runtime-internals/process.js"; +import { + asOptionalString, + asTrimmedString, + buildPermissionArgs, + deriveAgentFromSessionKey, + isRecord, + type AcpxHandleState, + type AcpxJsonObject, +} from "./runtime-internals/shared.js"; + +export const ACPX_BACKEND_ID = "acpx"; + +const ACPX_RUNTIME_HANDLE_PREFIX = "acpx:v1:"; +const DEFAULT_AGENT_FALLBACK = "codex"; +const ACPX_CAPABILITIES: AcpRuntimeCapabilities = { + controls: ["session/set_mode", "session/set_config_option", "session/status"], +}; + +export function encodeAcpxRuntimeHandleState(state: AcpxHandleState): string { + const payload = Buffer.from(JSON.stringify(state), "utf8").toString("base64url"); + return `${ACPX_RUNTIME_HANDLE_PREFIX}${payload}`; +} + +export function decodeAcpxRuntimeHandleState(runtimeSessionName: string): AcpxHandleState | null { + const trimmed = runtimeSessionName.trim(); + if (!trimmed.startsWith(ACPX_RUNTIME_HANDLE_PREFIX)) { + return null; + } + const encoded = trimmed.slice(ACPX_RUNTIME_HANDLE_PREFIX.length); + if (!encoded) { + return null; + } + try { + const raw = Buffer.from(encoded, "base64url").toString("utf8"); + const parsed = JSON.parse(raw) as unknown; + if (!isRecord(parsed)) { + return null; + } + const name = asTrimmedString(parsed.name); + const agent = asTrimmedString(parsed.agent); + const cwd = asTrimmedString(parsed.cwd); + const mode = asTrimmedString(parsed.mode); + const acpxRecordId = asOptionalString(parsed.acpxRecordId); + const backendSessionId = asOptionalString(parsed.backendSessionId); + const agentSessionId = asOptionalString(parsed.agentSessionId); + if (!name || !agent || !cwd) { + return null; + } + if (mode !== "persistent" && mode !== "oneshot") { + return null; + } + return { + name, + agent, + cwd, + mode, + ...(acpxRecordId ? { acpxRecordId } : {}), + ...(backendSessionId ? { backendSessionId } : {}), + ...(agentSessionId ? 
{ agentSessionId } : {}), + }; + } catch { + return null; + } +} + +export class AcpxRuntime implements AcpRuntime { + private healthy = false; + private readonly logger?: PluginLogger; + private readonly queueOwnerTtlSeconds: number; + + constructor( + private readonly config: ResolvedAcpxPluginConfig, + opts?: { + logger?: PluginLogger; + queueOwnerTtlSeconds?: number; + }, + ) { + this.logger = opts?.logger; + const requestedQueueOwnerTtlSeconds = opts?.queueOwnerTtlSeconds; + this.queueOwnerTtlSeconds = + typeof requestedQueueOwnerTtlSeconds === "number" && + Number.isFinite(requestedQueueOwnerTtlSeconds) && + requestedQueueOwnerTtlSeconds >= 0 + ? requestedQueueOwnerTtlSeconds + : this.config.queueOwnerTtlSeconds; + } + + isHealthy(): boolean { + return this.healthy; + } + + async probeAvailability(): Promise { + const versionCheck = await checkPinnedAcpxVersion({ + command: this.config.command, + cwd: this.config.cwd, + expectedVersion: ACPX_PINNED_VERSION, + }); + if (!versionCheck.ok) { + this.healthy = false; + return; + } + + try { + const result = await spawnAndCollect({ + command: this.config.command, + args: ["--help"], + cwd: this.config.cwd, + }); + this.healthy = result.error == null && (result.code ?? 
0) === 0; + } catch { + this.healthy = false; + } + } + + async ensureSession(input: AcpRuntimeEnsureInput): Promise { + const sessionName = asTrimmedString(input.sessionKey); + if (!sessionName) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + const agent = asTrimmedString(input.agent); + if (!agent) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP agent id is required."); + } + const cwd = asTrimmedString(input.cwd) || this.config.cwd; + const mode = input.mode; + + const events = await this.runControlCommand({ + args: this.buildControlArgs({ + cwd, + command: [agent, "sessions", "ensure", "--name", sessionName], + }), + cwd, + fallbackCode: "ACP_SESSION_INIT_FAILED", + }); + const ensuredEvent = events.find( + (event) => + asOptionalString(event.agentSessionId) || + asOptionalString(event.acpxSessionId) || + asOptionalString(event.acpxRecordId), + ); + const acpxRecordId = ensuredEvent ? asOptionalString(ensuredEvent.acpxRecordId) : undefined; + const agentSessionId = ensuredEvent ? asOptionalString(ensuredEvent.agentSessionId) : undefined; + const backendSessionId = ensuredEvent + ? asOptionalString(ensuredEvent.acpxSessionId) + : undefined; + + return { + sessionKey: input.sessionKey, + backend: ACPX_BACKEND_ID, + runtimeSessionName: encodeAcpxRuntimeHandleState({ + name: sessionName, + agent, + cwd, + mode, + ...(acpxRecordId ? { acpxRecordId } : {}), + ...(backendSessionId ? { backendSessionId } : {}), + ...(agentSessionId ? { agentSessionId } : {}), + }), + cwd, + ...(acpxRecordId ? { acpxRecordId } : {}), + ...(backendSessionId ? { backendSessionId } : {}), + ...(agentSessionId ? 
{ agentSessionId } : {}), + }; + } + + async *runTurn(input: AcpRuntimeTurnInput): AsyncIterable { + const state = this.resolveHandleState(input.handle); + const args = this.buildPromptArgs({ + agent: state.agent, + sessionName: state.name, + cwd: state.cwd, + }); + + const cancelOnAbort = async () => { + await this.cancel({ + handle: input.handle, + reason: "abort-signal", + }).catch((err) => { + this.logger?.warn?.(`acpx runtime abort-cancel failed: ${String(err)}`); + }); + }; + const onAbort = () => { + void cancelOnAbort(); + }; + + if (input.signal?.aborted) { + await cancelOnAbort(); + return; + } + if (input.signal) { + input.signal.addEventListener("abort", onAbort, { once: true }); + } + const child = spawnWithResolvedCommand({ + command: this.config.command, + args, + cwd: state.cwd, + }); + child.stdin.on("error", () => { + // Ignore EPIPE when the child exits before stdin flush completes. + }); + + child.stdin.end(input.text); + + let stderr = ""; + child.stderr.on("data", (chunk) => { + stderr += String(chunk); + }); + + let sawDone = false; + let sawError = false; + const lines = createInterface({ input: child.stdout }); + try { + for await (const line of lines) { + const parsed = parsePromptEventLine(line); + if (!parsed) { + continue; + } + if (parsed.type === "done") { + sawDone = true; + } + if (parsed.type === "error") { + sawError = true; + } + yield parsed; + } + + const exit = await waitForExit(child); + if (exit.error) { + const spawnFailure = resolveSpawnFailure(exit.error, state.cwd); + if (spawnFailure === "missing-command") { + this.healthy = false; + throw new AcpRuntimeError( + "ACP_BACKEND_UNAVAILABLE", + `acpx command not found: ${this.config.command}`, + { cause: exit.error }, + ); + } + if (spawnFailure === "missing-cwd") { + throw new AcpRuntimeError( + "ACP_TURN_FAILED", + `ACP runtime working directory does not exist: ${state.cwd}`, + { cause: exit.error }, + ); + } + throw new AcpRuntimeError("ACP_TURN_FAILED", 
exit.error.message, { cause: exit.error }); + } + + if ((exit.code ?? 0) !== 0 && !sawError) { + yield { + type: "error", + message: stderr.trim() || `acpx exited with code ${exit.code ?? "unknown"}`, + }; + return; + } + + if (!sawDone && !sawError) { + yield { type: "done" }; + } + } finally { + lines.close(); + if (input.signal) { + input.signal.removeEventListener("abort", onAbort); + } + } + } + + getCapabilities(): AcpRuntimeCapabilities { + return ACPX_CAPABILITIES; + } + + async getStatus(input: { handle: AcpRuntimeHandle }): Promise { + const state = this.resolveHandleState(input.handle); + const events = await this.runControlCommand({ + args: this.buildControlArgs({ + cwd: state.cwd, + command: [state.agent, "status", "--session", state.name], + }), + cwd: state.cwd, + fallbackCode: "ACP_TURN_FAILED", + ignoreNoSession: true, + }); + const detail = events.find((event) => !toAcpxErrorEvent(event)) ?? events[0]; + if (!detail) { + return { + summary: "acpx status unavailable", + }; + } + const status = asTrimmedString(detail.status) || "unknown"; + const acpxRecordId = asOptionalString(detail.acpxRecordId); + const acpxSessionId = asOptionalString(detail.acpxSessionId); + const agentSessionId = asOptionalString(detail.agentSessionId); + const pid = typeof detail.pid === "number" && Number.isFinite(detail.pid) ? detail.pid : null; + const summary = [ + `status=${status}`, + acpxRecordId ? `acpxRecordId=${acpxRecordId}` : null, + acpxSessionId ? `acpxSessionId=${acpxSessionId}` : null, + pid != null ? `pid=${pid}` : null, + ] + .filter(Boolean) + .join(" "); + return { + summary, + ...(acpxRecordId ? { acpxRecordId } : {}), + ...(acpxSessionId ? { backendSessionId: acpxSessionId } : {}), + ...(agentSessionId ? 
{ agentSessionId } : {}), + details: detail, + }; + } + + async setMode(input: { handle: AcpRuntimeHandle; mode: string }): Promise { + const state = this.resolveHandleState(input.handle); + const mode = asTrimmedString(input.mode); + if (!mode) { + throw new AcpRuntimeError("ACP_TURN_FAILED", "ACP runtime mode is required."); + } + await this.runControlCommand({ + args: this.buildControlArgs({ + cwd: state.cwd, + command: [state.agent, "set-mode", mode, "--session", state.name], + }), + cwd: state.cwd, + fallbackCode: "ACP_TURN_FAILED", + }); + } + + async setConfigOption(input: { + handle: AcpRuntimeHandle; + key: string; + value: string; + }): Promise { + const state = this.resolveHandleState(input.handle); + const key = asTrimmedString(input.key); + const value = asTrimmedString(input.value); + if (!key || !value) { + throw new AcpRuntimeError("ACP_TURN_FAILED", "ACP config option key/value are required."); + } + await this.runControlCommand({ + args: this.buildControlArgs({ + cwd: state.cwd, + command: [state.agent, "set", key, value, "--session", state.name], + }), + cwd: state.cwd, + fallbackCode: "ACP_TURN_FAILED", + }); + } + + async doctor(): Promise { + const versionCheck = await checkPinnedAcpxVersion({ + command: this.config.command, + cwd: this.config.cwd, + expectedVersion: ACPX_PINNED_VERSION, + }); + if (!versionCheck.ok) { + this.healthy = false; + const details = [ + `expected=${versionCheck.expectedVersion}`, + versionCheck.installedVersion ? 
`installed=${versionCheck.installedVersion}` : null, + ].filter((detail): detail is string => Boolean(detail)); + return { + ok: false, + code: "ACP_BACKEND_UNAVAILABLE", + message: versionCheck.message, + installCommand: versionCheck.installCommand, + details, + }; + } + + try { + const result = await spawnAndCollect({ + command: this.config.command, + args: ["--help"], + cwd: this.config.cwd, + }); + if (result.error) { + const spawnFailure = resolveSpawnFailure(result.error, this.config.cwd); + if (spawnFailure === "missing-command") { + this.healthy = false; + return { + ok: false, + code: "ACP_BACKEND_UNAVAILABLE", + message: `acpx command not found: ${this.config.command}`, + installCommand: ACPX_LOCAL_INSTALL_COMMAND, + }; + } + if (spawnFailure === "missing-cwd") { + this.healthy = false; + return { + ok: false, + code: "ACP_BACKEND_UNAVAILABLE", + message: `ACP runtime working directory does not exist: ${this.config.cwd}`, + }; + } + this.healthy = false; + return { + ok: false, + code: "ACP_BACKEND_UNAVAILABLE", + message: result.error.message, + details: [String(result.error)], + }; + } + if ((result.code ?? 0) !== 0) { + this.healthy = false; + return { + ok: false, + code: "ACP_BACKEND_UNAVAILABLE", + message: result.stderr.trim() || `acpx exited with code ${result.code ?? "unknown"}`, + }; + } + this.healthy = true; + return { + ok: true, + message: `acpx command available (${this.config.command}, version ${versionCheck.version})`, + }; + } catch (error) { + this.healthy = false; + return { + ok: false, + code: "ACP_BACKEND_UNAVAILABLE", + message: error instanceof Error ? 
error.message : String(error), + }; + } + } + + async cancel(input: { handle: AcpRuntimeHandle; reason?: string }): Promise { + const state = this.resolveHandleState(input.handle); + await this.runControlCommand({ + args: this.buildControlArgs({ + cwd: state.cwd, + command: [state.agent, "cancel", "--session", state.name], + }), + cwd: state.cwd, + fallbackCode: "ACP_TURN_FAILED", + ignoreNoSession: true, + }); + } + + async close(input: { handle: AcpRuntimeHandle; reason: string }): Promise { + const state = this.resolveHandleState(input.handle); + await this.runControlCommand({ + args: this.buildControlArgs({ + cwd: state.cwd, + command: [state.agent, "sessions", "close", state.name], + }), + cwd: state.cwd, + fallbackCode: "ACP_TURN_FAILED", + ignoreNoSession: true, + }); + } + + private resolveHandleState(handle: AcpRuntimeHandle): AcpxHandleState { + const decoded = decodeAcpxRuntimeHandleState(handle.runtimeSessionName); + if (decoded) { + return decoded; + } + + const legacyName = asTrimmedString(handle.runtimeSessionName); + if (!legacyName) { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + "Invalid acpx runtime handle: runtimeSessionName is missing.", + ); + } + + return { + name: legacyName, + agent: deriveAgentFromSessionKey(handle.sessionKey, DEFAULT_AGENT_FALLBACK), + cwd: this.config.cwd, + mode: "persistent", + }; + } + + private buildControlArgs(params: { cwd: string; command: string[] }): string[] { + return ["--format", "json", "--json-strict", "--cwd", params.cwd, ...params.command]; + } + + private buildPromptArgs(params: { agent: string; sessionName: string; cwd: string }): string[] { + const args = [ + "--format", + "json", + "--json-strict", + "--cwd", + params.cwd, + ...buildPermissionArgs(this.config.permissionMode), + "--non-interactive-permissions", + this.config.nonInteractivePermissions, + ]; + if (this.config.timeoutSeconds) { + args.push("--timeout", String(this.config.timeoutSeconds)); + } + args.push("--ttl", 
String(this.queueOwnerTtlSeconds)); + args.push(params.agent, "prompt", "--session", params.sessionName, "--file", "-"); + return args; + } + + private async runControlCommand(params: { + args: string[]; + cwd: string; + fallbackCode: AcpRuntimeErrorCode; + ignoreNoSession?: boolean; + }): Promise { + const result = await spawnAndCollect({ + command: this.config.command, + args: params.args, + cwd: params.cwd, + }); + + if (result.error) { + const spawnFailure = resolveSpawnFailure(result.error, params.cwd); + if (spawnFailure === "missing-command") { + this.healthy = false; + throw new AcpRuntimeError( + "ACP_BACKEND_UNAVAILABLE", + `acpx command not found: ${this.config.command}`, + { cause: result.error }, + ); + } + if (spawnFailure === "missing-cwd") { + throw new AcpRuntimeError( + params.fallbackCode, + `ACP runtime working directory does not exist: ${params.cwd}`, + { cause: result.error }, + ); + } + throw new AcpRuntimeError(params.fallbackCode, result.error.message, { cause: result.error }); + } + + const events = parseJsonLines(result.stdout); + const errorEvent = events.map((event) => toAcpxErrorEvent(event)).find(Boolean) ?? null; + if (errorEvent) { + if (params.ignoreNoSession && errorEvent.code === "NO_SESSION") { + return events; + } + throw new AcpRuntimeError( + params.fallbackCode, + errorEvent.code ? `${errorEvent.code}: ${errorEvent.message}` : errorEvent.message, + ); + } + + if ((result.code ?? 0) !== 0) { + throw new AcpRuntimeError( + params.fallbackCode, + result.stderr.trim() || `acpx exited with code ${result.code ?? 
"unknown"}`, + ); + } + return events; + } +} diff --git a/extensions/acpx/src/service.test.ts b/extensions/acpx/src/service.test.ts new file mode 100644 index 00000000000..30fc9fa7205 --- /dev/null +++ b/extensions/acpx/src/service.test.ts @@ -0,0 +1,173 @@ +import type { AcpRuntime, OpenClawPluginServiceContext } from "openclaw/plugin-sdk"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { AcpRuntimeError } from "../../../src/acp/runtime/errors.js"; +import { + __testing, + getAcpRuntimeBackend, + requireAcpRuntimeBackend, +} from "../../../src/acp/runtime/registry.js"; +import { ACPX_BUNDLED_BIN } from "./config.js"; +import { createAcpxRuntimeService } from "./service.js"; + +const { ensurePinnedAcpxSpy } = vi.hoisted(() => ({ + ensurePinnedAcpxSpy: vi.fn(async () => {}), +})); + +vi.mock("./ensure.js", () => ({ + ensurePinnedAcpx: ensurePinnedAcpxSpy, +})); + +type RuntimeStub = AcpRuntime & { + probeAvailability(): Promise; + isHealthy(): boolean; +}; + +function createRuntimeStub(healthy: boolean): { + runtime: RuntimeStub; + probeAvailabilitySpy: ReturnType; + isHealthySpy: ReturnType; +} { + const probeAvailabilitySpy = vi.fn(async () => {}); + const isHealthySpy = vi.fn(() => healthy); + return { + runtime: { + ensureSession: vi.fn(async (input) => ({ + sessionKey: input.sessionKey, + backend: "acpx", + runtimeSessionName: input.sessionKey, + })), + runTurn: vi.fn(async function* () { + yield { type: "done" as const }; + }), + cancel: vi.fn(async () => {}), + close: vi.fn(async () => {}), + async probeAvailability() { + await probeAvailabilitySpy(); + }, + isHealthy() { + return isHealthySpy(); + }, + }, + probeAvailabilitySpy, + isHealthySpy, + }; +} + +function createServiceContext( + overrides: Partial = {}, +): OpenClawPluginServiceContext { + return { + config: {}, + workspaceDir: "/tmp/workspace", + stateDir: "/tmp/state", + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, + ...overrides, + 
}; +} + +describe("createAcpxRuntimeService", () => { + beforeEach(() => { + __testing.resetAcpRuntimeBackendsForTests(); + ensurePinnedAcpxSpy.mockReset(); + ensurePinnedAcpxSpy.mockImplementation(async () => {}); + }); + + it("registers and unregisters the acpx backend", async () => { + const { runtime, probeAvailabilitySpy } = createRuntimeStub(true); + const service = createAcpxRuntimeService({ + runtimeFactory: () => runtime, + }); + const context = createServiceContext(); + + await service.start(context); + expect(getAcpRuntimeBackend("acpx")?.runtime).toBe(runtime); + + await vi.waitFor(() => { + expect(ensurePinnedAcpxSpy).toHaveBeenCalledOnce(); + expect(probeAvailabilitySpy).toHaveBeenCalledOnce(); + }); + + await service.stop?.(context); + expect(getAcpRuntimeBackend("acpx")).toBeNull(); + }); + + it("marks backend unavailable when runtime health check fails", async () => { + const { runtime } = createRuntimeStub(false); + const service = createAcpxRuntimeService({ + runtimeFactory: () => runtime, + }); + const context = createServiceContext(); + + await service.start(context); + + expect(() => requireAcpRuntimeBackend("acpx")).toThrowError(AcpRuntimeError); + try { + requireAcpRuntimeBackend("acpx"); + throw new Error("expected ACP backend lookup to fail"); + } catch (error) { + expect((error as AcpRuntimeError).code).toBe("ACP_BACKEND_UNAVAILABLE"); + } + }); + + it("passes queue-owner TTL from plugin config", async () => { + const { runtime } = createRuntimeStub(true); + const runtimeFactory = vi.fn(() => runtime); + const service = createAcpxRuntimeService({ + runtimeFactory, + pluginConfig: { + queueOwnerTtlSeconds: 0.25, + }, + }); + const context = createServiceContext(); + + await service.start(context); + + expect(runtimeFactory).toHaveBeenCalledWith( + expect.objectContaining({ + queueOwnerTtlSeconds: 0.25, + pluginConfig: expect.objectContaining({ + command: ACPX_BUNDLED_BIN, + }), + }), + ); + }); + + it("uses a short default queue-owner 
TTL", async () => { + const { runtime } = createRuntimeStub(true); + const runtimeFactory = vi.fn(() => runtime); + const service = createAcpxRuntimeService({ + runtimeFactory, + }); + const context = createServiceContext(); + + await service.start(context); + + expect(runtimeFactory).toHaveBeenCalledWith( + expect.objectContaining({ + queueOwnerTtlSeconds: 0.1, + }), + ); + }); + + it("does not block startup while acpx ensure runs", async () => { + const { runtime } = createRuntimeStub(true); + ensurePinnedAcpxSpy.mockImplementation(() => new Promise(() => {})); + const service = createAcpxRuntimeService({ + runtimeFactory: () => runtime, + }); + const context = createServiceContext(); + + const startResult = await Promise.race([ + Promise.resolve(service.start(context)).then(() => "started"), + new Promise((resolve) => setTimeout(() => resolve("timed_out"), 100)), + ]); + + expect(startResult).toBe("started"); + expect(getAcpRuntimeBackend("acpx")?.runtime).toBe(runtime); + }); +}); diff --git a/extensions/acpx/src/service.ts b/extensions/acpx/src/service.ts new file mode 100644 index 00000000000..65768d00ce8 --- /dev/null +++ b/extensions/acpx/src/service.ts @@ -0,0 +1,102 @@ +import type { + AcpRuntime, + OpenClawPluginService, + OpenClawPluginServiceContext, + PluginLogger, +} from "openclaw/plugin-sdk"; +import { registerAcpRuntimeBackend, unregisterAcpRuntimeBackend } from "openclaw/plugin-sdk"; +import { + ACPX_PINNED_VERSION, + resolveAcpxPluginConfig, + type ResolvedAcpxPluginConfig, +} from "./config.js"; +import { ensurePinnedAcpx } from "./ensure.js"; +import { ACPX_BACKEND_ID, AcpxRuntime } from "./runtime.js"; + +type AcpxRuntimeLike = AcpRuntime & { + probeAvailability(): Promise; + isHealthy(): boolean; +}; + +type AcpxRuntimeFactoryParams = { + pluginConfig: ResolvedAcpxPluginConfig; + queueOwnerTtlSeconds: number; + logger?: PluginLogger; +}; + +type CreateAcpxRuntimeServiceParams = { + pluginConfig?: unknown; + runtimeFactory?: (params: 
AcpxRuntimeFactoryParams) => AcpxRuntimeLike; +}; + +function createDefaultRuntime(params: AcpxRuntimeFactoryParams): AcpxRuntimeLike { + return new AcpxRuntime(params.pluginConfig, { + logger: params.logger, + queueOwnerTtlSeconds: params.queueOwnerTtlSeconds, + }); +} + +export function createAcpxRuntimeService( + params: CreateAcpxRuntimeServiceParams = {}, +): OpenClawPluginService { + let runtime: AcpxRuntimeLike | null = null; + let lifecycleRevision = 0; + + return { + id: "acpx-runtime", + async start(ctx: OpenClawPluginServiceContext): Promise { + const pluginConfig = resolveAcpxPluginConfig({ + rawConfig: params.pluginConfig, + workspaceDir: ctx.workspaceDir, + }); + const runtimeFactory = params.runtimeFactory ?? createDefaultRuntime; + runtime = runtimeFactory({ + pluginConfig, + queueOwnerTtlSeconds: pluginConfig.queueOwnerTtlSeconds, + logger: ctx.logger, + }); + + registerAcpRuntimeBackend({ + id: ACPX_BACKEND_ID, + runtime, + healthy: () => runtime?.isHealthy() ?? false, + }); + ctx.logger.info( + `acpx runtime backend registered (command: ${pluginConfig.command}, pinned: ${ACPX_PINNED_VERSION})`, + ); + + lifecycleRevision += 1; + const currentRevision = lifecycleRevision; + void (async () => { + try { + await ensurePinnedAcpx({ + command: pluginConfig.command, + logger: ctx.logger, + expectedVersion: ACPX_PINNED_VERSION, + }); + if (currentRevision !== lifecycleRevision) { + return; + } + await runtime?.probeAvailability(); + if (runtime?.isHealthy()) { + ctx.logger.info("acpx runtime backend ready"); + } else { + ctx.logger.warn("acpx runtime backend probe failed after local install"); + } + } catch (err) { + if (currentRevision !== lifecycleRevision) { + return; + } + ctx.logger.warn( + `acpx runtime setup failed: ${err instanceof Error ? 
err.message : String(err)}`, + ); + } + })(); + }, + async stop(_ctx: OpenClawPluginServiceContext): Promise { + lifecycleRevision += 1; + unregisterAcpRuntimeBackend(ACPX_BACKEND_ID); + runtime = null; + }, + }; +} diff --git a/extensions/bluebubbles/package.json b/extensions/bluebubbles/package.json index 8f752f59350..f6f193e29ff 100644 --- a/extensions/bluebubbles/package.json +++ b/extensions/bluebubbles/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/bluebubbles", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw BlueBubbles channel plugin", "type": "module", "openclaw": { diff --git a/extensions/bluebubbles/src/attachments.test.ts b/extensions/bluebubbles/src/attachments.test.ts index d6b12d311f8..da431c7325f 100644 --- a/extensions/bluebubbles/src/attachments.test.ts +++ b/extensions/bluebubbles/src/attachments.test.ts @@ -294,7 +294,7 @@ describe("downloadBlueBubblesAttachment", () => { expect(fetchMediaArgs.ssrfPolicy).toEqual({ allowPrivateNetwork: true }); }); - it("does not pass ssrfPolicy when allowPrivateNetwork is not set", async () => { + it("auto-allowlists serverUrl hostname when allowPrivateNetwork is not set", async () => { const mockBuffer = new Uint8Array([1]); mockFetch.mockResolvedValueOnce({ ok: true, @@ -309,7 +309,25 @@ describe("downloadBlueBubblesAttachment", () => { }); const fetchMediaArgs = fetchRemoteMediaMock.mock.calls[0][0] as Record; - expect(fetchMediaArgs.ssrfPolicy).toBeUndefined(); + expect(fetchMediaArgs.ssrfPolicy).toEqual({ allowedHostnames: ["localhost"] }); + }); + + it("auto-allowlists private IP serverUrl hostname when allowPrivateNetwork is not set", async () => { + const mockBuffer = new Uint8Array([1]); + mockFetch.mockResolvedValueOnce({ + ok: true, + headers: new Headers(), + arrayBuffer: () => Promise.resolve(mockBuffer.buffer), + }); + + const attachment: BlueBubblesAttachment = { guid: "att-private-ip" }; + await downloadBlueBubblesAttachment(attachment, { + serverUrl: 
"http://192.168.1.5:1234", + password: "test", + }); + + const fetchMediaArgs = fetchRemoteMediaMock.mock.calls[0][0] as Record; + expect(fetchMediaArgs.ssrfPolicy).toEqual({ allowedHostnames: ["192.168.1.5"] }); }); }); diff --git a/extensions/bluebubbles/src/attachments.ts b/extensions/bluebubbles/src/attachments.ts index 6ccb043845f..ca7ce69a89c 100644 --- a/extensions/bluebubbles/src/attachments.ts +++ b/extensions/bluebubbles/src/attachments.ts @@ -62,6 +62,15 @@ function resolveAccount(params: BlueBubblesAttachmentOpts) { return resolveBlueBubblesServerAccount(params); } +function safeExtractHostname(url: string): string | undefined { + try { + const hostname = new URL(url).hostname.trim(); + return hostname || undefined; + } catch { + return undefined; + } +} + type MediaFetchErrorCode = "max_bytes" | "http_error" | "fetch_failed"; function readMediaFetchErrorCode(error: unknown): MediaFetchErrorCode | undefined { @@ -89,12 +98,17 @@ export async function downloadBlueBubblesAttachment( password, }); const maxBytes = typeof opts.maxBytes === "number" ? opts.maxBytes : DEFAULT_ATTACHMENT_MAX_BYTES; + const trustedHostname = safeExtractHostname(baseUrl); try { const fetched = await getBlueBubblesRuntime().channel.media.fetchRemoteMedia({ url, filePathHint: attachment.transferName ?? attachment.guid ?? "attachment", maxBytes, - ssrfPolicy: allowPrivateNetwork ? { allowPrivateNetwork: true } : undefined, + ssrfPolicy: allowPrivateNetwork + ? { allowPrivateNetwork: true } + : trustedHostname + ? 
{ allowedHostnames: [trustedHostname] } + : undefined, fetchImpl: async (input, init) => await blueBubblesFetchWithTimeout( resolveRequestUrl(input), diff --git a/extensions/bluebubbles/src/monitor-processing.ts b/extensions/bluebubbles/src/monitor-processing.ts index 67fb50a78c6..486864fa4c3 100644 --- a/extensions/bluebubbles/src/monitor-processing.ts +++ b/extensions/bluebubbles/src/monitor-processing.ts @@ -1,14 +1,16 @@ import type { OpenClawConfig } from "openclaw/plugin-sdk"; import { + DM_GROUP_ACCESS_REASON, + createScopedPairingAccess, createReplyPrefixOptions, evictOldHistoryKeys, logAckFailure, logInboundDrop, logTypingFailure, + readStoreAllowFromForDmPolicy, recordPendingHistoryEntryIfEnabled, resolveAckReaction, - resolveDmGroupAccessDecision, - resolveEffectiveAllowFromLists, + resolveDmGroupAccessWithLists, resolveControlCommandGate, stripMarkdown, type HistoryEntry, @@ -420,6 +422,11 @@ export async function processMessage( target: WebhookTarget, ): Promise { const { account, config, runtime, core, statusSink } = target; + const pairing = createScopedPairingAccess({ + core, + channel: "bluebubbles", + accountId: account.accountId, + }); const privateApiEnabled = isBlueBubblesPrivateApiEnabled(account.accountId); const groupFlag = resolveGroupFlagFromChatGuid(message.chatGuid); @@ -501,27 +508,20 @@ export async function processMessage( const dmPolicy = account.config.dmPolicy ?? "pairing"; const groupPolicy = account.config.groupPolicy ?? "allowlist"; - const storeAllowFrom = await core.channel.pairing - .readAllowFromStore("bluebubbles") - .catch(() => []); - const { effectiveAllowFrom, effectiveGroupAllowFrom } = resolveEffectiveAllowFromLists({ - allowFrom: account.config.allowFrom, - groupAllowFrom: account.config.groupAllowFrom, - storeAllowFrom, + const configuredAllowFrom = (account.config.allowFrom ?? 
[]).map((entry) => String(entry)); + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "bluebubbles", + accountId: account.accountId, dmPolicy, + readStore: pairing.readStoreForDmPolicy, }); - const groupAllowEntry = formatGroupAllowlistEntry({ - chatGuid: message.chatGuid, - chatId: message.chatId ?? undefined, - chatIdentifier: message.chatIdentifier ?? undefined, - }); - const groupName = message.chatName?.trim() || undefined; - const accessDecision = resolveDmGroupAccessDecision({ + const accessDecision = resolveDmGroupAccessWithLists({ isGroup, dmPolicy, groupPolicy, - effectiveAllowFrom, - effectiveGroupAllowFrom, + allowFrom: configuredAllowFrom, + groupAllowFrom: account.config.groupAllowFrom, + storeAllowFrom, isSenderAllowed: (allowFrom) => isAllowedBlueBubblesSender({ allowFrom, @@ -531,10 +531,18 @@ export async function processMessage( chatIdentifier: message.chatIdentifier ?? undefined, }), }); + const effectiveAllowFrom = accessDecision.effectiveAllowFrom; + const effectiveGroupAllowFrom = accessDecision.effectiveGroupAllowFrom; + const groupAllowEntry = formatGroupAllowlistEntry({ + chatGuid: message.chatGuid, + chatId: message.chatId ?? undefined, + chatIdentifier: message.chatIdentifier ?? 
undefined, + }); + const groupName = message.chatName?.trim() || undefined; if (accessDecision.decision !== "allow") { if (isGroup) { - if (accessDecision.reason === "groupPolicy=disabled") { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_DISABLED) { logVerbose(core, runtime, "Blocked BlueBubbles group message (groupPolicy=disabled)"); logGroupAllowlistHint({ runtime, @@ -545,7 +553,7 @@ export async function processMessage( }); return; } - if (accessDecision.reason === "groupPolicy=allowlist (empty allowlist)") { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_EMPTY_ALLOWLIST) { logVerbose(core, runtime, "Blocked BlueBubbles group message (no allowlist)"); logGroupAllowlistHint({ runtime, @@ -556,7 +564,7 @@ export async function processMessage( }); return; } - if (accessDecision.reason === "groupPolicy=allowlist (not allowlisted)") { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_NOT_ALLOWLISTED) { logVerbose( core, runtime, @@ -579,15 +587,14 @@ export async function processMessage( return; } - if (accessDecision.reason === "dmPolicy=disabled") { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.DM_POLICY_DISABLED) { logVerbose(core, runtime, `Blocked BlueBubbles DM from ${message.senderId}`); logVerbose(core, runtime, `drop: dmPolicy disabled sender=${message.senderId}`); return; } if (accessDecision.decision === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: "bluebubbles", + const { code, created } = await pairing.upsertPairingRequest({ id: message.senderId, meta: { name: message.senderName }, }); @@ -666,10 +673,11 @@ export async function processMessage( // Command gating (parity with iMessage/WhatsApp) const useAccessGroups = config.commands?.useAccessGroups !== false; const hasControlCmd = core.channel.text.hasControlCommand(messageText, config); + const commandDmAllowFrom = isGroup ? 
configuredAllowFrom : effectiveAllowFrom; const ownerAllowedForCommands = - effectiveAllowFrom.length > 0 + commandDmAllowFrom.length > 0 ? isAllowedBlueBubblesSender({ - allowFrom: effectiveAllowFrom, + allowFrom: commandDmAllowFrom, sender: message.senderId, chatId: message.chatId ?? undefined, chatGuid: message.chatGuid ?? undefined, @@ -686,17 +694,16 @@ export async function processMessage( chatIdentifier: message.chatIdentifier ?? undefined, }) : false; - const dmAuthorized = dmPolicy === "open" || ownerAllowedForCommands; const commandGate = resolveControlCommandGate({ useAccessGroups, authorizers: [ - { configured: effectiveAllowFrom.length > 0, allowed: ownerAllowedForCommands }, + { configured: commandDmAllowFrom.length > 0, allowed: ownerAllowedForCommands }, { configured: effectiveGroupAllowFrom.length > 0, allowed: groupAllowedForCommands }, ], allowTextCommands: true, hasControlCommand: hasControlCmd, }); - const commandAuthorized = isGroup ? commandGate.commandAuthorized : dmAuthorized; + const commandAuthorized = commandGate.commandAuthorized; // Block control commands from unauthorized senders in groups if (isGroup && commandGate.shouldBlock) { @@ -1380,27 +1387,30 @@ export async function processReaction( target: WebhookTarget, ): Promise { const { account, config, runtime, core } = target; + const pairing = createScopedPairingAccess({ + core, + channel: "bluebubbles", + accountId: account.accountId, + }); if (reaction.fromMe) { return; } const dmPolicy = account.config.dmPolicy ?? "pairing"; const groupPolicy = account.config.groupPolicy ?? 
"allowlist"; - const storeAllowFrom = await core.channel.pairing - .readAllowFromStore("bluebubbles") - .catch(() => []); - const { effectiveAllowFrom, effectiveGroupAllowFrom } = resolveEffectiveAllowFromLists({ - allowFrom: account.config.allowFrom, - groupAllowFrom: account.config.groupAllowFrom, - storeAllowFrom, + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "bluebubbles", + accountId: account.accountId, dmPolicy, + readStore: pairing.readStoreForDmPolicy, }); - const accessDecision = resolveDmGroupAccessDecision({ + const accessDecision = resolveDmGroupAccessWithLists({ isGroup: reaction.isGroup, dmPolicy, groupPolicy, - effectiveAllowFrom, - effectiveGroupAllowFrom, + allowFrom: account.config.allowFrom, + groupAllowFrom: account.config.groupAllowFrom, + storeAllowFrom, isSenderAllowed: (allowFrom) => isAllowedBlueBubblesSender({ allowFrom, diff --git a/extensions/bluebubbles/src/monitor.test.ts b/extensions/bluebubbles/src/monitor.test.ts index 496d6c36278..43777f648ad 100644 --- a/extensions/bluebubbles/src/monitor.test.ts +++ b/extensions/bluebubbles/src/monitor.test.ts @@ -162,6 +162,24 @@ function createMockRuntime(): PluginRuntime { vi.fn() as unknown as PluginRuntime["channel"]["reply"]["resolveHumanDelayConfig"], dispatchReplyFromConfig: vi.fn() as unknown as PluginRuntime["channel"]["reply"]["dispatchReplyFromConfig"], + withReplyDispatcher: vi.fn( + async ({ + dispatcher, + run, + onSettled, + }: Parameters[0]) => { + try { + return await run(); + } finally { + dispatcher.markComplete(); + try { + await dispatcher.waitForIdle(); + } finally { + await onSettled?.(); + } + } + }, + ) as unknown as PluginRuntime["channel"]["reply"]["withReplyDispatcher"], finalizeInboundContext: vi.fn( (ctx: Record) => ctx, ) as unknown as PluginRuntime["channel"]["reply"]["finalizeInboundContext"], @@ -2287,6 +2305,51 @@ describe("BlueBubbles webhook monitor", () => { 
expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); }); + + it("does not auto-authorize DM control commands in open mode without allowlists", async () => { + mockHasControlCommand.mockReturnValue(true); + + const account = createMockAccount({ + dmPolicy: "open", + allowFrom: [], + }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const payload = { + type: "new-message", + data: { + text: "/status", + handle: { address: "+15559999999" }, + isGroup: false, + isFromMe: false, + guid: "msg-dm-open-unauthorized", + date: Date.now(), + }, + }; + + const req = createMockRequest("POST", "/bluebubbles-webhook", payload); + const res = createMockResponse(); + + await handleBlueBubblesWebhookRequest(req, res); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + const latestDispatch = + mockDispatchReplyWithBufferedBlockDispatcher.mock.calls[ + mockDispatchReplyWithBufferedBlockDispatcher.mock.calls.length - 1 + ]?.[0]; + expect(latestDispatch?.ctx?.CommandAuthorized).toBe(false); + }); }); describe("typing/read receipt toggles", () => { diff --git a/extensions/copilot-proxy/package.json b/extensions/copilot-proxy/package.json index 8dd561f27f3..cc3bad01a8f 100644 --- a/extensions/copilot-proxy/package.json +++ b/extensions/copilot-proxy/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/copilot-proxy", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw Copilot Proxy provider plugin", "type": "module", diff --git a/extensions/diagnostics-otel/package.json b/extensions/diagnostics-otel/package.json index 32c5ad8275d..700f444f05e 100644 --- a/extensions/diagnostics-otel/package.json +++ b/extensions/diagnostics-otel/package.json @@ -1,6 +1,6 
@@ { "name": "@openclaw/diagnostics-otel", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw diagnostics OpenTelemetry exporter", "type": "module", "dependencies": { diff --git a/extensions/discord/package.json b/extensions/discord/package.json index 2553b1c0814..2f2be908ce2 100644 --- a/extensions/discord/package.json +++ b/extensions/discord/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/discord", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Discord channel plugin", "type": "module", "openclaw": { diff --git a/extensions/feishu/package.json b/extensions/feishu/package.json index afacb5432eb..b0e7b21ef78 100644 --- a/extensions/feishu/package.json +++ b/extensions/feishu/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/feishu", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Feishu/Lark channel plugin (community maintained by @m1heng)", "type": "module", "dependencies": { diff --git a/extensions/feishu/src/bitable.ts b/extensions/feishu/src/bitable.ts index 3fe46409766..5e0575bba06 100644 --- a/extensions/feishu/src/bitable.ts +++ b/extensions/feishu/src/bitable.ts @@ -1,7 +1,8 @@ +import type * as Lark from "@larksuiteoapi/node-sdk"; import { Type } from "@sinclair/typebox"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; -import { createFeishuClient } from "./client.js"; -import type { FeishuConfig } from "./types.js"; +import { listEnabledFeishuAccounts } from "./accounts.js"; +import { createFeishuToolClient } from "./tool-account.js"; // ============ Helpers ============ @@ -64,10 +65,7 @@ function parseBitableUrl(url: string): { token: string; tableId?: string; isWiki } /** Get app_token from wiki node_token */ -async function getAppTokenFromWiki( - client: ReturnType, - nodeToken: string, -): Promise { +async function getAppTokenFromWiki(client: Lark.Client, nodeToken: string): Promise { const res = await client.wiki.space.getNode({ params: { token: 
nodeToken }, }); @@ -87,7 +85,7 @@ async function getAppTokenFromWiki( } /** Get bitable metadata from URL (handles both /base/ and /wiki/ URLs) */ -async function getBitableMeta(client: ReturnType, url: string) { +async function getBitableMeta(client: Lark.Client, url: string) { const parsed = parseBitableUrl(url); if (!parsed) { throw new Error("Invalid URL format. Expected /base/XXX or /wiki/XXX URL"); @@ -134,11 +132,7 @@ async function getBitableMeta(client: ReturnType, url }; } -async function listFields( - client: ReturnType, - appToken: string, - tableId: string, -) { +async function listFields(client: Lark.Client, appToken: string, tableId: string) { const res = await client.bitable.appTableField.list({ path: { app_token: appToken, table_id: tableId }, }); @@ -161,7 +155,7 @@ async function listFields( } async function listRecords( - client: ReturnType, + client: Lark.Client, appToken: string, tableId: string, pageSize?: number, @@ -186,12 +180,7 @@ async function listRecords( }; } -async function getRecord( - client: ReturnType, - appToken: string, - tableId: string, - recordId: string, -) { +async function getRecord(client: Lark.Client, appToken: string, tableId: string, recordId: string) { const res = await client.bitable.appTableRecord.get({ path: { app_token: appToken, table_id: tableId, record_id: recordId }, }); @@ -205,7 +194,7 @@ async function getRecord( } async function createRecord( - client: ReturnType, + client: Lark.Client, appToken: string, tableId: string, fields: Record, @@ -235,7 +224,7 @@ const DEFAULT_CLEANUP_FIELD_TYPES = new Set([3, 5, 17]); // SingleSelect, DateTi /** Clean up default placeholder rows and fields in a newly created Bitable table */ async function cleanupNewBitable( - client: ReturnType, + client: Lark.Client, appToken: string, tableId: string, tableName: string, @@ -334,7 +323,7 @@ async function cleanupNewBitable( } async function createApp( - client: ReturnType, + client: Lark.Client, name: string, folderToken?: 
string, logger?: CleanupLogger, @@ -389,7 +378,7 @@ async function createApp( } async function createField( - client: ReturnType, + client: Lark.Client, appToken: string, tableId: string, fieldName: string, @@ -417,7 +406,7 @@ async function createField( } async function updateRecord( - client: ReturnType, + client: Lark.Client, appToken: string, tableId: string, recordId: string, @@ -532,208 +521,193 @@ const UpdateRecordSchema = Type.Object({ // ============ Tool Registration ============ export function registerFeishuBitableTools(api: OpenClawPluginApi) { - const feishuCfg = api.config?.channels?.feishu as FeishuConfig | undefined; - if (!feishuCfg?.appId || !feishuCfg?.appSecret) { - api.logger.debug?.("feishu_bitable: Feishu credentials not configured, skipping bitable tools"); + if (!api.config) { + api.logger.debug?.("feishu_bitable: No config available, skipping bitable tools"); return; } - const getClient = () => createFeishuClient(feishuCfg); + const accounts = listEnabledFeishuAccounts(api.config); + if (accounts.length === 0) { + api.logger.debug?.("feishu_bitable: No Feishu accounts configured, skipping bitable tools"); + return; + } - // Tool 0: feishu_bitable_get_meta (helper to parse URLs) - api.registerTool( - { - name: "feishu_bitable_get_meta", - label: "Feishu Bitable Get Meta", - description: - "Parse a Bitable URL and get app_token, table_id, and table list. Use this first when given a /wiki/ or /base/ URL.", - parameters: GetMetaSchema, - async execute(_toolCallId, params) { - const { url } = params as { url: string }; - try { - const result = await getBitableMeta(getClient(), url); - return json(result); - } catch (err) { - return json({ error: err instanceof Error ? 
err.message : String(err) }); - } - }, - }, - { name: "feishu_bitable_get_meta" }, - ); + type AccountAwareParams = { accountId?: string }; - // Tool 1: feishu_bitable_list_fields - api.registerTool( - { - name: "feishu_bitable_list_fields", - label: "Feishu Bitable List Fields", - description: "List all fields (columns) in a Bitable table with their types and properties", - parameters: ListFieldsSchema, - async execute(_toolCallId, params) { - const { app_token, table_id } = params as { app_token: string; table_id: string }; - try { - const result = await listFields(getClient(), app_token, table_id); - return json(result); - } catch (err) { - return json({ error: err instanceof Error ? err.message : String(err) }); - } - }, - }, - { name: "feishu_bitable_list_fields" }, - ); + const getClient = (params: AccountAwareParams | undefined, defaultAccountId?: string) => + createFeishuToolClient({ api, executeParams: params, defaultAccountId }); - // Tool 2: feishu_bitable_list_records - api.registerTool( - { - name: "feishu_bitable_list_records", - label: "Feishu Bitable List Records", - description: "List records (rows) from a Bitable table with pagination support", - parameters: ListRecordsSchema, - async execute(_toolCallId, params) { - const { app_token, table_id, page_size, page_token } = params as { - app_token: string; - table_id: string; - page_size?: number; - page_token?: string; - }; - try { - const result = await listRecords(getClient(), app_token, table_id, page_size, page_token); - return json(result); - } catch (err) { - return json({ error: err instanceof Error ? 
err.message : String(err) }); - } - }, - }, - { name: "feishu_bitable_list_records" }, - ); + const registerBitableTool = (params: { + name: string; + label: string; + description: string; + parameters: unknown; + execute: (args: { params: TParams; defaultAccountId?: string }) => Promise; + }) => { + api.registerTool( + (ctx) => ({ + name: params.name, + label: params.label, + description: params.description, + parameters: params.parameters, + async execute(_toolCallId, rawParams) { + try { + return json( + await params.execute({ + params: rawParams as TParams, + defaultAccountId: ctx.agentAccountId, + }), + ); + } catch (err) { + return json({ error: err instanceof Error ? err.message : String(err) }); + } + }, + }), + { name: params.name }, + ); + }; - // Tool 3: feishu_bitable_get_record - api.registerTool( - { - name: "feishu_bitable_get_record", - label: "Feishu Bitable Get Record", - description: "Get a single record by ID from a Bitable table", - parameters: GetRecordSchema, - async execute(_toolCallId, params) { - const { app_token, table_id, record_id } = params as { - app_token: string; - table_id: string; - record_id: string; - }; - try { - const result = await getRecord(getClient(), app_token, table_id, record_id); - return json(result); - } catch (err) { - return json({ error: err instanceof Error ? err.message : String(err) }); - } - }, + registerBitableTool<{ url: string; accountId?: string }>({ + name: "feishu_bitable_get_meta", + label: "Feishu Bitable Get Meta", + description: + "Parse a Bitable URL and get app_token, table_id, and table list. 
Use this first when given a /wiki/ or /base/ URL.", + parameters: GetMetaSchema, + async execute({ params, defaultAccountId }) { + return getBitableMeta(getClient(params, defaultAccountId), params.url); }, - { name: "feishu_bitable_get_record" }, - ); + }); - // Tool 4: feishu_bitable_create_record - api.registerTool( - { - name: "feishu_bitable_create_record", - label: "Feishu Bitable Create Record", - description: "Create a new record (row) in a Bitable table", - parameters: CreateRecordSchema, - async execute(_toolCallId, params) { - const { app_token, table_id, fields } = params as { - app_token: string; - table_id: string; - fields: Record; - }; - try { - const result = await createRecord(getClient(), app_token, table_id, fields); - return json(result); - } catch (err) { - return json({ error: err instanceof Error ? err.message : String(err) }); - } - }, + registerBitableTool<{ app_token: string; table_id: string; accountId?: string }>({ + name: "feishu_bitable_list_fields", + label: "Feishu Bitable List Fields", + description: "List all fields (columns) in a Bitable table with their types and properties", + parameters: ListFieldsSchema, + async execute({ params, defaultAccountId }) { + return listFields(getClient(params, defaultAccountId), params.app_token, params.table_id); }, - { name: "feishu_bitable_create_record" }, - ); + }); - // Tool 5: feishu_bitable_update_record - api.registerTool( - { - name: "feishu_bitable_update_record", - label: "Feishu Bitable Update Record", - description: "Update an existing record (row) in a Bitable table", - parameters: UpdateRecordSchema, - async execute(_toolCallId, params) { - const { app_token, table_id, record_id, fields } = params as { - app_token: string; - table_id: string; - record_id: string; - fields: Record; - }; - try { - const result = await updateRecord(getClient(), app_token, table_id, record_id, fields); - return json(result); - } catch (err) { - return json({ error: err instanceof Error ? 
err.message : String(err) }); - } - }, + registerBitableTool<{ + app_token: string; + table_id: string; + page_size?: number; + page_token?: string; + accountId?: string; + }>({ + name: "feishu_bitable_list_records", + label: "Feishu Bitable List Records", + description: "List records (rows) from a Bitable table with pagination support", + parameters: ListRecordsSchema, + async execute({ params, defaultAccountId }) { + return listRecords( + getClient(params, defaultAccountId), + params.app_token, + params.table_id, + params.page_size, + params.page_token, + ); }, - { name: "feishu_bitable_update_record" }, - ); + }); - // Tool 6: feishu_bitable_create_app - api.registerTool( - { - name: "feishu_bitable_create_app", - label: "Feishu Bitable Create App", - description: "Create a new Bitable (multidimensional table) application", - parameters: CreateAppSchema, - async execute(_toolCallId, params) { - const { name, folder_token } = params as { name: string; folder_token?: string }; - try { - const result = await createApp(getClient(), name, folder_token, { - debug: (msg) => api.logger.debug?.(msg), - warn: (msg) => api.logger.warn?.(msg), - }); - return json(result); - } catch (err) { - return json({ error: err instanceof Error ? 
err.message : String(err) }); - } - }, + registerBitableTool<{ + app_token: string; + table_id: string; + record_id: string; + accountId?: string; + }>({ + name: "feishu_bitable_get_record", + label: "Feishu Bitable Get Record", + description: "Get a single record by ID from a Bitable table", + parameters: GetRecordSchema, + async execute({ params, defaultAccountId }) { + return getRecord( + getClient(params, defaultAccountId), + params.app_token, + params.table_id, + params.record_id, + ); }, - { name: "feishu_bitable_create_app" }, - ); + }); - // Tool 7: feishu_bitable_create_field - api.registerTool( - { - name: "feishu_bitable_create_field", - label: "Feishu Bitable Create Field", - description: "Create a new field (column) in a Bitable table", - parameters: CreateFieldSchema, - async execute(_toolCallId, params) { - const { app_token, table_id, field_name, field_type, property } = params as { - app_token: string; - table_id: string; - field_name: string; - field_type: number; - property?: Record; - }; - try { - const result = await createField( - getClient(), - app_token, - table_id, - field_name, - field_type, - property, - ); - return json(result); - } catch (err) { - return json({ error: err instanceof Error ? 
err.message : String(err) }); - } - }, + registerBitableTool<{ + app_token: string; + table_id: string; + fields: Record; + accountId?: string; + }>({ + name: "feishu_bitable_create_record", + label: "Feishu Bitable Create Record", + description: "Create a new record (row) in a Bitable table", + parameters: CreateRecordSchema, + async execute({ params, defaultAccountId }) { + return createRecord( + getClient(params, defaultAccountId), + params.app_token, + params.table_id, + params.fields, + ); }, - { name: "feishu_bitable_create_field" }, - ); + }); + + registerBitableTool<{ + app_token: string; + table_id: string; + record_id: string; + fields: Record; + accountId?: string; + }>({ + name: "feishu_bitable_update_record", + label: "Feishu Bitable Update Record", + description: "Update an existing record (row) in a Bitable table", + parameters: UpdateRecordSchema, + async execute({ params, defaultAccountId }) { + return updateRecord( + getClient(params, defaultAccountId), + params.app_token, + params.table_id, + params.record_id, + params.fields, + ); + }, + }); + + registerBitableTool<{ name: string; folder_token?: string; accountId?: string }>({ + name: "feishu_bitable_create_app", + label: "Feishu Bitable Create App", + description: "Create a new Bitable (multidimensional table) application", + parameters: CreateAppSchema, + async execute({ params, defaultAccountId }) { + return createApp(getClient(params, defaultAccountId), params.name, params.folder_token, { + debug: (msg) => api.logger.debug?.(msg), + warn: (msg) => api.logger.warn?.(msg), + }); + }, + }); + + registerBitableTool<{ + app_token: string; + table_id: string; + field_name: string; + field_type: number; + property?: Record; + accountId?: string; + }>({ + name: "feishu_bitable_create_field", + label: "Feishu Bitable Create Field", + description: "Create a new field (column) in a Bitable table", + parameters: CreateFieldSchema, + async execute({ params, defaultAccountId }) { + return createField( + 
getClient(params, defaultAccountId), + params.app_token, + params.table_id, + params.field_name, + params.field_type, + params.property, + ); + }, + }); api.logger.info?.("feishu_bitable: Registered bitable tools"); } diff --git a/extensions/feishu/src/bot.test.ts b/extensions/feishu/src/bot.test.ts index 40f03a4f993..ca0792f2e82 100644 --- a/extensions/feishu/src/bot.test.ts +++ b/extensions/feishu/src/bot.test.ts @@ -1,7 +1,7 @@ import type { ClawdbotConfig, PluginRuntime, RuntimeEnv } from "openclaw/plugin-sdk"; import { beforeEach, describe, expect, it, vi } from "vitest"; import type { FeishuMessageEvent } from "./bot.js"; -import { handleFeishuMessage } from "./bot.js"; +import { buildFeishuAgentBody, handleFeishuMessage } from "./bot.js"; import { setFeishuRuntime } from "./runtime.js"; const { @@ -9,6 +9,7 @@ const { mockSendMessageFeishu, mockGetMessageFeishu, mockDownloadMessageResourceFeishu, + mockCreateFeishuClient, } = vi.hoisted(() => ({ mockCreateFeishuReplyDispatcher: vi.fn(() => ({ dispatcher: vi.fn(), @@ -22,6 +23,7 @@ const { contentType: "video/mp4", fileName: "clip.mp4", }), + mockCreateFeishuClient: vi.fn(), })); vi.mock("./reply-dispatcher.js", () => ({ @@ -37,6 +39,10 @@ vi.mock("./media.js", () => ({ downloadMessageResourceFeishu: mockDownloadMessageResourceFeishu, })); +vi.mock("./client.js", () => ({ + createFeishuClient: mockCreateFeishuClient, +})); + function createRuntimeEnv(): RuntimeEnv { return { log: vi.fn(), @@ -55,11 +61,53 @@ async function dispatchMessage(params: { cfg: ClawdbotConfig; event: FeishuMessa }); } +describe("buildFeishuAgentBody", () => { + it("builds message id, speaker, quoted content, mentions, and permission notice in order", () => { + const body = buildFeishuAgentBody({ + ctx: { + content: "hello world", + senderName: "Sender Name", + senderOpenId: "ou-sender", + messageId: "msg-42", + mentionTargets: [{ openId: "ou-target", name: "Target User", key: "@_user_1" }], + }, + quotedContent: "previous message", + 
permissionErrorForAgent: { + code: 99991672, + message: "permission denied", + grantUrl: "https://open.feishu.cn/app/cli_test", + }, + }); + + expect(body).toBe( + '[message_id: msg-42]\nSender Name: [Replying to: "previous message"]\n\nhello world\n\n[System: Your reply will automatically @mention: Target User. Do not write @xxx yourself.]\n\n[System: The bot encountered a Feishu API permission error. Please inform the user about this issue and provide the permission grant URL for the admin to authorize. Permission grant URL: https://open.feishu.cn/app/cli_test]', + ); + }); +}); + describe("handleFeishuMessage command authorization", () => { const mockFinalizeInboundContext = vi.fn((ctx: unknown) => ctx); const mockDispatchReplyFromConfig = vi .fn() .mockResolvedValue({ queuedFinal: false, counts: { final: 1 } }); + const mockWithReplyDispatcher = vi.fn( + async ({ + dispatcher, + run, + onSettled, + }: Parameters[0]) => { + try { + return await run(); + } finally { + dispatcher.markComplete(); + try { + await dispatcher.waitForIdle(); + } finally { + await onSettled?.(); + } + } + }, + ); const mockResolveCommandAuthorizedFromAuthorizers = vi.fn(() => false); const mockShouldComputeCommandAuthorized = vi.fn(() => true); const mockReadAllowFromStore = vi.fn().mockResolvedValue([]); @@ -72,6 +120,13 @@ describe("handleFeishuMessage command authorization", () => { beforeEach(() => { vi.clearAllMocks(); + mockCreateFeishuClient.mockReturnValue({ + contact: { + user: { + get: vi.fn().mockResolvedValue({ data: { user: { name: "Sender" } } }), + }, + }, + }); setFeishuRuntime({ system: { enqueueSystemEvent: vi.fn(), @@ -90,6 +145,7 @@ describe("handleFeishuMessage command authorization", () => { formatAgentEnvelope: vi.fn((params: { body: string }) => params.body), finalizeInboundContext: mockFinalizeInboundContext, dispatchReplyFromConfig: mockDispatchReplyFromConfig, + withReplyDispatcher: mockWithReplyDispatcher, }, commands: { shouldComputeCommandAuthorized: 
mockShouldComputeCommandAuthorized, @@ -183,7 +239,10 @@ describe("handleFeishuMessage command authorization", () => { await dispatchMessage({ cfg, event }); - expect(mockReadAllowFromStore).toHaveBeenCalledWith("feishu"); + expect(mockReadAllowFromStore).toHaveBeenCalledWith({ + channel: "feishu", + accountId: "default", + }); expect(mockResolveCommandAuthorizedFromAuthorizers).not.toHaveBeenCalled(); expect(mockFinalizeInboundContext).toHaveBeenCalledTimes(1); expect(mockDispatchReplyFromConfig).toHaveBeenCalledTimes(1); @@ -222,6 +281,7 @@ describe("handleFeishuMessage command authorization", () => { expect(mockUpsertPairingRequest).toHaveBeenCalledWith({ channel: "feishu", + accountId: "default", id: "ou-unapproved", meta: { name: undefined }, }); @@ -382,4 +442,102 @@ describe("handleFeishuMessage command authorization", () => { "clip.mp4", ); }); + + it("includes message_id in BodyForAgent on its own line", async () => { + mockShouldComputeCommandAuthorized.mockReturnValue(false); + + const cfg: ClawdbotConfig = { + channels: { + feishu: { + dmPolicy: "open", + }, + }, + } as ClawdbotConfig; + + const event: FeishuMessageEvent = { + sender: { + sender_id: { + open_id: "ou-msgid", + }, + }, + message: { + message_id: "msg-message-id-line", + chat_id: "oc-dm", + chat_type: "p2p", + message_type: "text", + content: JSON.stringify({ text: "hello" }), + }, + }; + + await dispatchMessage({ cfg, event }); + + expect(mockFinalizeInboundContext).toHaveBeenCalledWith( + expect.objectContaining({ + BodyForAgent: "[message_id: msg-message-id-line]\nou-msgid: hello", + }), + ); + }); + + it("dispatches once and appends permission notice to the main agent body", async () => { + mockShouldComputeCommandAuthorized.mockReturnValue(false); + mockCreateFeishuClient.mockReturnValue({ + contact: { + user: { + get: vi.fn().mockRejectedValue({ + response: { + data: { + code: 99991672, + msg: "permission denied https://open.feishu.cn/app/cli_test", + }, + }, + }), + }, + }, + }); + 
+ const cfg: ClawdbotConfig = { + channels: { + feishu: { + appId: "cli_test", + appSecret: "sec_test", + groups: { + "oc-group": { + requireMention: false, + }, + }, + }, + }, + } as ClawdbotConfig; + + const event: FeishuMessageEvent = { + sender: { + sender_id: { + open_id: "ou-perm", + }, + }, + message: { + message_id: "msg-perm-1", + chat_id: "oc-group", + chat_type: "group", + message_type: "text", + content: JSON.stringify({ text: "hello group" }), + }, + }; + + await dispatchMessage({ cfg, event }); + + expect(mockDispatchReplyFromConfig).toHaveBeenCalledTimes(1); + expect(mockFinalizeInboundContext).toHaveBeenCalledWith( + expect.objectContaining({ + BodyForAgent: expect.stringContaining( + "Permission grant URL: https://open.feishu.cn/app/cli_test", + ), + }), + ); + expect(mockFinalizeInboundContext).toHaveBeenCalledWith( + expect.objectContaining({ + BodyForAgent: expect.stringContaining("ou-perm: hello group"), + }), + ); + }); }); diff --git a/extensions/feishu/src/bot.ts b/extensions/feishu/src/bot.ts index f18658e62b5..61c65973762 100644 --- a/extensions/feishu/src/bot.ts +++ b/extensions/feishu/src/bot.ts @@ -3,6 +3,7 @@ import { buildAgentMediaPayload, buildPendingHistoryContextFromMap, clearHistoryEntriesIfEnabled, + createScopedPairingAccess, DEFAULT_GROUP_HISTORY_LIMIT, type HistoryEntry, recordPendingHistoryEntryIfEnabled, @@ -496,6 +497,40 @@ export function parseFeishuMessageEvent( return ctx; } +export function buildFeishuAgentBody(params: { + ctx: Pick< + FeishuMessageContext, + "content" | "senderName" | "senderOpenId" | "mentionTargets" | "messageId" + >; + quotedContent?: string; + permissionErrorForAgent?: PermissionError; +}): string { + const { ctx, quotedContent, permissionErrorForAgent } = params; + let messageBody = ctx.content; + if (quotedContent) { + messageBody = `[Replying to: "${quotedContent}"]\n\n${ctx.content}`; + } + + // DMs already have per-sender sessions, but this label still improves attribution. 
+ const speaker = ctx.senderName ?? ctx.senderOpenId; + messageBody = `${speaker}: ${messageBody}`; + + if (ctx.mentionTargets && ctx.mentionTargets.length > 0) { + const targetNames = ctx.mentionTargets.map((t) => t.name).join(", "); + messageBody += `\n\n[System: Your reply will automatically @mention: ${targetNames}. Do not write @xxx yourself.]`; + } + + // Keep message_id on its own line so shared message-id hint stripping can parse it reliably. + messageBody = `[message_id: ${ctx.messageId}]\n${messageBody}`; + + if (permissionErrorForAgent) { + const grantUrl = permissionErrorForAgent.grantUrl ?? ""; + messageBody += `\n\n[System: The bot encountered a Feishu API permission error. Please inform the user about this issue and provide the permission grant URL for the admin to authorize. Permission grant URL: ${grantUrl}]`; + } + + return messageBody; +} + export async function handleFeishuMessage(params: { cfg: ClawdbotConfig; event: FeishuMessageEvent; @@ -641,6 +676,11 @@ export async function handleFeishuMessage(params: { try { const core = getFeishuRuntime(); + const pairing = createScopedPairingAccess({ + core, + channel: "feishu", + accountId: account.accountId, + }); const shouldComputeCommandAuthorized = core.channel.commands.shouldComputeCommandAuthorized( ctx.content, cfg, @@ -649,7 +689,7 @@ export async function handleFeishuMessage(params: { !isGroup && dmPolicy !== "allowlist" && (dmPolicy !== "open" || shouldComputeCommandAuthorized) - ? await core.channel.pairing.readAllowFromStore("feishu").catch(() => []) + ? 
await pairing.readAllowFromStore().catch(() => []) : []; const effectiveDmAllowFrom = [...configAllowFrom, ...storeAllowFrom]; const dmAllowed = resolveFeishuAllowlistMatch({ @@ -661,8 +701,7 @@ export async function handleFeishuMessage(params: { if (!isGroup && dmPolicy !== "open" && !dmAllowed) { if (dmPolicy === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: "feishu", + const { code, created } = await pairing.upsertPairingRequest({ id: ctx.senderOpenId, meta: { name: ctx.senderName }, }); @@ -823,85 +862,15 @@ export async function handleFeishuMessage(params: { } const envelopeOptions = core.channel.reply.resolveEnvelopeFormatOptions(cfg); - - // Build message body with quoted content if available - let messageBody = ctx.content; - if (quotedContent) { - messageBody = `[Replying to: "${quotedContent}"]\n\n${ctx.content}`; - } - - // Include a readable speaker label so the model can attribute instructions. - // (DMs already have per-sender sessions, but the prefix is still useful for clarity.) - const speaker = ctx.senderName ?? ctx.senderOpenId; - messageBody = `${speaker}: ${messageBody}`; - - // If there are mention targets, inform the agent that replies will auto-mention them - if (ctx.mentionTargets && ctx.mentionTargets.length > 0) { - const targetNames = ctx.mentionTargets.map((t) => t.name).join(", "); - messageBody += `\n\n[System: Your reply will automatically @mention: ${targetNames}. Do not write @xxx yourself.]`; - } - + const messageBody = buildFeishuAgentBody({ + ctx, + quotedContent, + permissionErrorForAgent, + }); const envelopeFrom = isGroup ? `${ctx.chatId}:${ctx.senderOpenId}` : ctx.senderOpenId; - - // If there's a permission error, dispatch a separate notification first if (permissionErrorForAgent) { - const grantUrl = permissionErrorForAgent.grantUrl ?? ""; - const permissionNotifyBody = `[System: The bot encountered a Feishu API permission error. 
Please inform the user about this issue and provide the permission grant URL for the admin to authorize. Permission grant URL: ${grantUrl}]`; - - const permissionBody = core.channel.reply.formatAgentEnvelope({ - channel: "Feishu", - from: envelopeFrom, - timestamp: new Date(), - envelope: envelopeOptions, - body: permissionNotifyBody, - }); - - const permissionCtx = core.channel.reply.finalizeInboundContext({ - Body: permissionBody, - BodyForAgent: permissionNotifyBody, - RawBody: permissionNotifyBody, - CommandBody: permissionNotifyBody, - From: feishuFrom, - To: feishuTo, - SessionKey: route.sessionKey, - AccountId: route.accountId, - ChatType: isGroup ? "group" : "direct", - GroupSubject: isGroup ? ctx.chatId : undefined, - SenderName: "system", - SenderId: "system", - Provider: "feishu" as const, - Surface: "feishu" as const, - MessageSid: `${ctx.messageId}:permission-error`, - Timestamp: Date.now(), - WasMentioned: false, - CommandAuthorized: commandAuthorized, - OriginatingChannel: "feishu" as const, - OriginatingTo: feishuTo, - }); - - const { - dispatcher: permDispatcher, - replyOptions: permReplyOptions, - markDispatchIdle: markPermIdle, - } = createFeishuReplyDispatcher({ - cfg, - agentId: route.agentId, - runtime: runtime as RuntimeEnv, - chatId: ctx.chatId, - replyToMessageId: ctx.messageId, - accountId: account.accountId, - }); - - log(`feishu[${account.accountId}]: dispatching permission error notification to agent`); - - await core.channel.reply.dispatchReplyFromConfig({ - ctx: permissionCtx, - cfg, - dispatcher: permDispatcher, - replyOptions: permReplyOptions, - }); - - markPermIdle(); + // Keep the notice in a single dispatch to avoid duplicate replies (#27372). 
+ log(`feishu[${account.accountId}]: appending permission error notice to message body`); } const body = core.channel.reply.formatAgentEnvelope({ @@ -944,7 +913,7 @@ export async function handleFeishuMessage(params: { const ctxPayload = core.channel.reply.finalizeInboundContext({ Body: combinedBody, - BodyForAgent: ctx.content, + BodyForAgent: messageBody, InboundHistory: inboundHistory, RawBody: ctx.content, CommandBody: ctx.content, @@ -979,16 +948,20 @@ export async function handleFeishuMessage(params: { }); log(`feishu[${account.accountId}]: dispatching to agent (session=${route.sessionKey})`); - - const { queuedFinal, counts } = await core.channel.reply.dispatchReplyFromConfig({ - ctx: ctxPayload, - cfg, + const { queuedFinal, counts } = await core.channel.reply.withReplyDispatcher({ dispatcher, - replyOptions, + onSettled: () => { + markDispatchIdle(); + }, + run: () => + core.channel.reply.dispatchReplyFromConfig({ + ctx: ctxPayload, + cfg, + dispatcher, + replyOptions, + }), }); - markDispatchIdle(); - if (isGroup && historyKey && chatHistories) { clearHistoryEntriesIfEnabled({ historyMap: chatHistories, diff --git a/extensions/feishu/src/docx.account-selection.test.ts b/extensions/feishu/src/docx.account-selection.test.ts new file mode 100644 index 00000000000..6471192b6fe --- /dev/null +++ b/extensions/feishu/src/docx.account-selection.test.ts @@ -0,0 +1,76 @@ +import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; +import { describe, expect, test, vi } from "vitest"; +import { registerFeishuDocTools } from "./docx.js"; +import { createToolFactoryHarness } from "./tool-factory-test-harness.js"; + +const createFeishuClientMock = vi.fn((creds: { appId?: string } | undefined) => ({ + __appId: creds?.appId, +})); + +vi.mock("./client.js", () => { + return { + createFeishuClient: (creds: { appId?: string } | undefined) => createFeishuClientMock(creds), + }; +}); + +// Patch SDK import so tool execution can run without network concerns. 
+vi.mock("@larksuiteoapi/node-sdk", () => { + return { + default: {}, + }; +}); + +describe("feishu_doc account selection", () => { + test("uses agentAccountId context when params omit accountId", async () => { + const cfg = { + channels: { + feishu: { + enabled: true, + accounts: { + a: { appId: "app-a", appSecret: "sec-a", tools: { doc: true } }, + b: { appId: "app-b", appSecret: "sec-b", tools: { doc: true } }, + }, + }, + }, + } as OpenClawPluginApi["config"]; + + const { api, resolveTool } = createToolFactoryHarness(cfg); + registerFeishuDocTools(api); + + const docToolA = resolveTool("feishu_doc", { agentAccountId: "a" }); + const docToolB = resolveTool("feishu_doc", { agentAccountId: "b" }); + + await docToolA.execute("call-a", { action: "list_blocks", doc_token: "d" }); + await docToolB.execute("call-b", { action: "list_blocks", doc_token: "d" }); + + expect(createFeishuClientMock).toHaveBeenCalledTimes(2); + expect(createFeishuClientMock.mock.calls[0]?.[0]?.appId).toBe("app-a"); + expect(createFeishuClientMock.mock.calls[1]?.[0]?.appId).toBe("app-b"); + }); + + test("explicit accountId param overrides agentAccountId context", async () => { + const cfg = { + channels: { + feishu: { + enabled: true, + accounts: { + a: { appId: "app-a", appSecret: "sec-a", tools: { doc: true } }, + b: { appId: "app-b", appSecret: "sec-b", tools: { doc: true } }, + }, + }, + }, + } as OpenClawPluginApi["config"]; + + const { api, resolveTool } = createToolFactoryHarness(cfg); + registerFeishuDocTools(api); + + const docTool = resolveTool("feishu_doc", { agentAccountId: "b" }); + await docTool.execute("call-override", { + action: "list_blocks", + doc_token: "d", + accountId: "a", + }); + + expect(createFeishuClientMock.mock.calls.at(-1)?.[0]?.appId).toBe("app-a"); + }); +}); diff --git a/extensions/feishu/src/docx.test.ts b/extensions/feishu/src/docx.test.ts index 14f400fab08..bcf1774f086 100644 --- a/extensions/feishu/src/docx.test.ts +++ b/extensions/feishu/src/docx.test.ts 
@@ -104,6 +104,7 @@ describe("feishu_doc image fetch hardening", () => { const feishuDocTool = registerTool.mock.calls .map((call) => call[0]) + .map((tool) => (typeof tool === "function" ? tool({}) : tool)) .find((tool) => tool.name === "feishu_doc"); expect(feishuDocTool).toBeDefined(); diff --git a/extensions/feishu/src/docx.ts b/extensions/feishu/src/docx.ts index 195cc8c81e7..33cfe924d1d 100644 --- a/extensions/feishu/src/docx.ts +++ b/extensions/feishu/src/docx.ts @@ -3,10 +3,13 @@ import type * as Lark from "@larksuiteoapi/node-sdk"; import { Type } from "@sinclair/typebox"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; import { listEnabledFeishuAccounts } from "./accounts.js"; -import { createFeishuClient } from "./client.js"; import { FeishuDocSchema, type FeishuDocParams } from "./doc-schema.js"; import { getFeishuRuntime } from "./runtime.js"; -import { resolveToolsConfig } from "./tools-config.js"; +import { + createFeishuToolClient, + resolveAnyEnabledFeishuToolsConfig, + resolveFeishuToolAccount, +} from "./tool-account.js"; // ============ Helpers ============ @@ -454,53 +457,80 @@ export function registerFeishuDocTools(api: OpenClawPluginApi) { return; } - // Use first account's config for tools configuration - const firstAccount = accounts[0]; - const toolsCfg = resolveToolsConfig(firstAccount.config.tools); - const mediaMaxBytes = (firstAccount.config?.mediaMaxMb ?? 30) * 1024 * 1024; + // Register if enabled on any account; account routing is resolved per execution. 
+ const toolsCfg = resolveAnyEnabledFeishuToolsConfig(accounts); - // Helper to get client for the default account - const getClient = () => createFeishuClient(firstAccount); const registered: string[] = []; + type FeishuDocExecuteParams = FeishuDocParams & { accountId?: string }; + + const getClient = (params: { accountId?: string } | undefined, defaultAccountId?: string) => + createFeishuToolClient({ api, executeParams: params, defaultAccountId }); + + const getMediaMaxBytes = ( + params: { accountId?: string } | undefined, + defaultAccountId?: string, + ) => + (resolveFeishuToolAccount({ api, executeParams: params, defaultAccountId }).config + ?.mediaMaxMb ?? 30) * + 1024 * + 1024; // Main document tool with action-based dispatch if (toolsCfg.doc) { api.registerTool( - { - name: "feishu_doc", - label: "Feishu Doc", - description: - "Feishu document operations. Actions: read, write, append, create, list_blocks, get_block, update_block, delete_block", - parameters: FeishuDocSchema, - async execute(_toolCallId, params) { - const p = params as FeishuDocParams; - try { - const client = getClient(); - switch (p.action) { - case "read": - return json(await readDoc(client, p.doc_token)); - case "write": - return json(await writeDoc(client, p.doc_token, p.content, mediaMaxBytes)); - case "append": - return json(await appendDoc(client, p.doc_token, p.content, mediaMaxBytes)); - case "create": - return json(await createDoc(client, p.title, p.folder_token)); - case "list_blocks": - return json(await listBlocks(client, p.doc_token)); - case "get_block": - return json(await getBlock(client, p.doc_token, p.block_id)); - case "update_block": - return json(await updateBlock(client, p.doc_token, p.block_id, p.content)); - case "delete_block": - return json(await deleteBlock(client, p.doc_token, p.block_id)); - default: - // eslint-disable-next-line @typescript-eslint/no-explicit-any -- exhaustive check fallback - return json({ error: `Unknown action: ${(p as any).action}` }); + 
(ctx) => { + const defaultAccountId = ctx.agentAccountId; + return { + name: "feishu_doc", + label: "Feishu Doc", + description: + "Feishu document operations. Actions: read, write, append, create, list_blocks, get_block, update_block, delete_block", + parameters: FeishuDocSchema, + async execute(_toolCallId, params) { + const p = params as FeishuDocExecuteParams; + try { + const client = getClient(p, defaultAccountId); + switch (p.action) { + case "read": + return json(await readDoc(client, p.doc_token)); + case "write": + return json( + await writeDoc( + client, + p.doc_token, + p.content, + getMediaMaxBytes(p, defaultAccountId), + ), + ); + case "append": + return json( + await appendDoc( + client, + p.doc_token, + p.content, + getMediaMaxBytes(p, defaultAccountId), + ), + ); + case "create": + return json(await createDoc(client, p.title, p.folder_token)); + case "list_blocks": + return json(await listBlocks(client, p.doc_token)); + case "get_block": + return json(await getBlock(client, p.doc_token, p.block_id)); + case "update_block": + return json(await updateBlock(client, p.doc_token, p.block_id, p.content)); + case "delete_block": + return json(await deleteBlock(client, p.doc_token, p.block_id)); + default: { + const exhaustiveCheck: never = p; + return json({ error: `Unknown action: ${String(exhaustiveCheck)}` }); + } + } + } catch (err) { + return json({ error: err instanceof Error ? err.message : String(err) }); } - } catch (err) { - return json({ error: err instanceof Error ? 
err.message : String(err) }); - } - }, + }, + }; }, { name: "feishu_doc" }, ); @@ -510,7 +540,7 @@ export function registerFeishuDocTools(api: OpenClawPluginApi) { // Keep feishu_app_scopes as independent tool if (toolsCfg.scopes) { api.registerTool( - { + (ctx) => ({ name: "feishu_app_scopes", label: "Feishu App Scopes", description: @@ -518,13 +548,13 @@ export function registerFeishuDocTools(api: OpenClawPluginApi) { parameters: Type.Object({}), async execute() { try { - const result = await listAppScopes(getClient()); + const result = await listAppScopes(getClient(undefined, ctx.agentAccountId)); return json(result); } catch (err) { return json({ error: err instanceof Error ? err.message : String(err) }); } }, - }, + }), { name: "feishu_app_scopes" }, ); registered.push("feishu_app_scopes"); diff --git a/extensions/feishu/src/drive.ts b/extensions/feishu/src/drive.ts index beefceba35d..d4bde43aff3 100644 --- a/extensions/feishu/src/drive.ts +++ b/extensions/feishu/src/drive.ts @@ -1,9 +1,8 @@ import type * as Lark from "@larksuiteoapi/node-sdk"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; import { listEnabledFeishuAccounts } from "./accounts.js"; -import { createFeishuClient } from "./client.js"; import { FeishuDriveSchema, type FeishuDriveParams } from "./drive-schema.js"; -import { resolveToolsConfig } from "./tools-config.js"; +import { createFeishuToolClient, resolveAnyEnabledFeishuToolsConfig } from "./tool-account.js"; // ============ Helpers ============ @@ -180,45 +179,51 @@ export function registerFeishuDriveTools(api: OpenClawPluginApi) { return; } - const firstAccount = accounts[0]; - const toolsCfg = resolveToolsConfig(firstAccount.config.tools); + const toolsCfg = resolveAnyEnabledFeishuToolsConfig(accounts); if (!toolsCfg.drive) { api.logger.debug?.("feishu_drive: drive tool disabled in config"); return; } - const getClient = () => createFeishuClient(firstAccount); + type FeishuDriveExecuteParams = FeishuDriveParams & { 
accountId?: string }; api.registerTool( - { - name: "feishu_drive", - label: "Feishu Drive", - description: - "Feishu cloud storage operations. Actions: list, info, create_folder, move, delete", - parameters: FeishuDriveSchema, - async execute(_toolCallId, params) { - const p = params as FeishuDriveParams; - try { - const client = getClient(); - switch (p.action) { - case "list": - return json(await listFolder(client, p.folder_token)); - case "info": - return json(await getFileInfo(client, p.file_token)); - case "create_folder": - return json(await createFolder(client, p.name, p.folder_token)); - case "move": - return json(await moveFile(client, p.file_token, p.type, p.folder_token)); - case "delete": - return json(await deleteFile(client, p.file_token, p.type)); - default: - // eslint-disable-next-line @typescript-eslint/no-explicit-any -- exhaustive check fallback - return json({ error: `Unknown action: ${(p as any).action}` }); + (ctx) => { + const defaultAccountId = ctx.agentAccountId; + return { + name: "feishu_drive", + label: "Feishu Drive", + description: + "Feishu cloud storage operations. 
Actions: list, info, create_folder, move, delete", + parameters: FeishuDriveSchema, + async execute(_toolCallId, params) { + const p = params as FeishuDriveExecuteParams; + try { + const client = createFeishuToolClient({ + api, + executeParams: p, + defaultAccountId, + }); + switch (p.action) { + case "list": + return json(await listFolder(client, p.folder_token)); + case "info": + return json(await getFileInfo(client, p.file_token)); + case "create_folder": + return json(await createFolder(client, p.name, p.folder_token)); + case "move": + return json(await moveFile(client, p.file_token, p.type, p.folder_token)); + case "delete": + return json(await deleteFile(client, p.file_token, p.type)); + default: + // eslint-disable-next-line @typescript-eslint/no-explicit-any -- exhaustive check fallback + return json({ error: `Unknown action: ${(p as any).action}` }); + } + } catch (err) { + return json({ error: err instanceof Error ? err.message : String(err) }); } - } catch (err) { - return json({ error: err instanceof Error ? 
err.message : String(err) }); - } - }, + }, + }; }, { name: "feishu_drive" }, ); diff --git a/extensions/feishu/src/perm.ts b/extensions/feishu/src/perm.ts index f11fb9882ec..92c3bb8cdd9 100644 --- a/extensions/feishu/src/perm.ts +++ b/extensions/feishu/src/perm.ts @@ -1,9 +1,8 @@ import type * as Lark from "@larksuiteoapi/node-sdk"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; import { listEnabledFeishuAccounts } from "./accounts.js"; -import { createFeishuClient } from "./client.js"; import { FeishuPermSchema, type FeishuPermParams } from "./perm-schema.js"; -import { resolveToolsConfig } from "./tools-config.js"; +import { createFeishuToolClient, resolveAnyEnabledFeishuToolsConfig } from "./tool-account.js"; // ============ Helpers ============ @@ -129,42 +128,50 @@ export function registerFeishuPermTools(api: OpenClawPluginApi) { return; } - const firstAccount = accounts[0]; - const toolsCfg = resolveToolsConfig(firstAccount.config.tools); + const toolsCfg = resolveAnyEnabledFeishuToolsConfig(accounts); if (!toolsCfg.perm) { api.logger.debug?.("feishu_perm: perm tool disabled in config (default: false)"); return; } - const getClient = () => createFeishuClient(firstAccount); + type FeishuPermExecuteParams = FeishuPermParams & { accountId?: string }; api.registerTool( - { - name: "feishu_perm", - label: "Feishu Perm", - description: "Feishu permission management. 
Actions: list, add, remove", - parameters: FeishuPermSchema, - async execute(_toolCallId, params) { - const p = params as FeishuPermParams; - try { - const client = getClient(); - switch (p.action) { - case "list": - return json(await listMembers(client, p.token, p.type)); - case "add": - return json( - await addMember(client, p.token, p.type, p.member_type, p.member_id, p.perm), - ); - case "remove": - return json(await removeMember(client, p.token, p.type, p.member_type, p.member_id)); - default: - // eslint-disable-next-line @typescript-eslint/no-explicit-any -- exhaustive check fallback - return json({ error: `Unknown action: ${(p as any).action}` }); + (ctx) => { + const defaultAccountId = ctx.agentAccountId; + return { + name: "feishu_perm", + label: "Feishu Perm", + description: "Feishu permission management. Actions: list, add, remove", + parameters: FeishuPermSchema, + async execute(_toolCallId, params) { + const p = params as FeishuPermExecuteParams; + try { + const client = createFeishuToolClient({ + api, + executeParams: p, + defaultAccountId, + }); + switch (p.action) { + case "list": + return json(await listMembers(client, p.token, p.type)); + case "add": + return json( + await addMember(client, p.token, p.type, p.member_type, p.member_id, p.perm), + ); + case "remove": + return json( + await removeMember(client, p.token, p.type, p.member_type, p.member_id), + ); + default: + // eslint-disable-next-line @typescript-eslint/no-explicit-any -- exhaustive check fallback + return json({ error: `Unknown action: ${(p as any).action}` }); + } + } catch (err) { + return json({ error: err instanceof Error ? err.message : String(err) }); } - } catch (err) { - return json({ error: err instanceof Error ? 
err.message : String(err) }); - } - }, + }, + }; }, { name: "feishu_perm" }, ); diff --git a/extensions/feishu/src/tool-account-routing.test.ts b/extensions/feishu/src/tool-account-routing.test.ts new file mode 100644 index 00000000000..4baa667112c --- /dev/null +++ b/extensions/feishu/src/tool-account-routing.test.ts @@ -0,0 +1,111 @@ +import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; +import { beforeEach, describe, expect, test, vi } from "vitest"; +import { registerFeishuBitableTools } from "./bitable.js"; +import { registerFeishuDriveTools } from "./drive.js"; +import { registerFeishuPermTools } from "./perm.js"; +import { createToolFactoryHarness } from "./tool-factory-test-harness.js"; +import { registerFeishuWikiTools } from "./wiki.js"; + +const createFeishuClientMock = vi.fn((account: { appId?: string } | undefined) => ({ + __appId: account?.appId, +})); + +vi.mock("./client.js", () => ({ + createFeishuClient: (account: { appId?: string } | undefined) => createFeishuClientMock(account), +})); + +function createConfig(params: { + toolsA?: { + wiki?: boolean; + drive?: boolean; + perm?: boolean; + }; + toolsB?: { + wiki?: boolean; + drive?: boolean; + perm?: boolean; + }; +}): OpenClawPluginApi["config"] { + return { + channels: { + feishu: { + enabled: true, + accounts: { + a: { + appId: "app-a", + appSecret: "sec-a", + tools: params.toolsA, + }, + b: { + appId: "app-b", + appSecret: "sec-b", + tools: params.toolsB, + }, + }, + }, + }, + } as OpenClawPluginApi["config"]; +} + +describe("feishu tool account routing", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test("wiki tool registers when first account disables it and routes to agentAccountId", async () => { + const { api, resolveTool } = createToolFactoryHarness( + createConfig({ + toolsA: { wiki: false }, + toolsB: { wiki: true }, + }), + ); + registerFeishuWikiTools(api); + + const tool = resolveTool("feishu_wiki", { agentAccountId: "b" }); + await tool.execute("call", { 
action: "search" }); + + expect(createFeishuClientMock.mock.calls.at(-1)?.[0]?.appId).toBe("app-b"); + }); + + test("drive tool registers when first account disables it and routes to agentAccountId", async () => { + const { api, resolveTool } = createToolFactoryHarness( + createConfig({ + toolsA: { drive: false }, + toolsB: { drive: true }, + }), + ); + registerFeishuDriveTools(api); + + const tool = resolveTool("feishu_drive", { agentAccountId: "b" }); + await tool.execute("call", { action: "unknown_action" }); + + expect(createFeishuClientMock.mock.calls.at(-1)?.[0]?.appId).toBe("app-b"); + }); + + test("perm tool registers when only second account enables it and routes to agentAccountId", async () => { + const { api, resolveTool } = createToolFactoryHarness( + createConfig({ + toolsA: { perm: false }, + toolsB: { perm: true }, + }), + ); + registerFeishuPermTools(api); + + const tool = resolveTool("feishu_perm", { agentAccountId: "b" }); + await tool.execute("call", { action: "unknown_action" }); + + expect(createFeishuClientMock.mock.calls.at(-1)?.[0]?.appId).toBe("app-b"); + }); + + test("bitable tool routes to agentAccountId and allows explicit accountId override", async () => { + const { api, resolveTool } = createToolFactoryHarness(createConfig({})); + registerFeishuBitableTools(api); + + const tool = resolveTool("feishu_bitable_get_meta", { agentAccountId: "b" }); + await tool.execute("call-ctx", { url: "invalid-url" }); + await tool.execute("call-override", { url: "invalid-url", accountId: "a" }); + + expect(createFeishuClientMock.mock.calls[0]?.[0]?.appId).toBe("app-b"); + expect(createFeishuClientMock.mock.calls[1]?.[0]?.appId).toBe("app-a"); + }); +}); diff --git a/extensions/feishu/src/tool-account.ts b/extensions/feishu/src/tool-account.ts new file mode 100644 index 00000000000..72b5db9b777 --- /dev/null +++ b/extensions/feishu/src/tool-account.ts @@ -0,0 +1,58 @@ +import type * as Lark from "@larksuiteoapi/node-sdk"; +import type { OpenClawPluginApi 
} from "openclaw/plugin-sdk"; +import { resolveFeishuAccount } from "./accounts.js"; +import { createFeishuClient } from "./client.js"; +import { resolveToolsConfig } from "./tools-config.js"; +import type { FeishuToolsConfig, ResolvedFeishuAccount } from "./types.js"; + +type AccountAwareParams = { accountId?: string }; + +function normalizeOptionalAccountId(value: string | undefined): string | undefined { + const trimmed = value?.trim(); + return trimmed ? trimmed : undefined; +} + +export function resolveFeishuToolAccount(params: { + api: Pick; + executeParams?: AccountAwareParams; + defaultAccountId?: string; +}): ResolvedFeishuAccount { + if (!params.api.config) { + throw new Error("Feishu config unavailable"); + } + return resolveFeishuAccount({ + cfg: params.api.config, + accountId: + normalizeOptionalAccountId(params.executeParams?.accountId) ?? + normalizeOptionalAccountId(params.defaultAccountId), + }); +} + +export function createFeishuToolClient(params: { + api: Pick; + executeParams?: AccountAwareParams; + defaultAccountId?: string; +}): Lark.Client { + return createFeishuClient(resolveFeishuToolAccount(params)); +} + +export function resolveAnyEnabledFeishuToolsConfig( + accounts: ResolvedFeishuAccount[], +): Required { + const merged: Required = { + doc: false, + wiki: false, + drive: false, + perm: false, + scopes: false, + }; + for (const account of accounts) { + const cfg = resolveToolsConfig(account.config.tools); + merged.doc = merged.doc || cfg.doc; + merged.wiki = merged.wiki || cfg.wiki; + merged.drive = merged.drive || cfg.drive; + merged.perm = merged.perm || cfg.perm; + merged.scopes = merged.scopes || cfg.scopes; + } + return merged; +} diff --git a/extensions/feishu/src/tool-factory-test-harness.ts b/extensions/feishu/src/tool-factory-test-harness.ts new file mode 100644 index 00000000000..a945e063900 --- /dev/null +++ b/extensions/feishu/src/tool-factory-test-harness.ts @@ -0,0 +1,76 @@ +import type { AnyAgentTool, OpenClawPluginApi } 
from "openclaw/plugin-sdk"; + +type ToolContextLike = { + agentAccountId?: string; +}; + +type ToolFactoryLike = (ctx: ToolContextLike) => AnyAgentTool | AnyAgentTool[] | null | undefined; + +export type ToolLike = { + name: string; + execute: (toolCallId: string, params: unknown) => Promise | unknown; +}; + +type RegisteredTool = { + tool: AnyAgentTool | ToolFactoryLike; + opts?: { name?: string }; +}; + +function toToolList(value: AnyAgentTool | AnyAgentTool[] | null | undefined): AnyAgentTool[] { + if (!value) return []; + return Array.isArray(value) ? value : [value]; +} + +function asToolLike(tool: AnyAgentTool, fallbackName?: string): ToolLike { + const candidate = tool as Partial; + const name = candidate.name ?? fallbackName; + const execute = candidate.execute; + if (!name || typeof execute !== "function") { + throw new Error(`Resolved tool is missing required fields (name=${String(name)})`); + } + return { + name, + execute: (toolCallId, params) => execute(toolCallId, params), + }; +} + +export function createToolFactoryHarness(cfg: OpenClawPluginApi["config"]) { + const registered: RegisteredTool[] = []; + + const api: Pick = { + config: cfg, + logger: { + info: () => {}, + warn: () => {}, + error: () => {}, + debug: () => {}, + }, + registerTool: (tool, opts) => { + registered.push({ tool, opts }); + }, + }; + + const resolveTool = (name: string, ctx: ToolContextLike = {}): ToolLike => { + for (const entry of registered) { + if (entry.opts?.name === name && typeof entry.tool !== "function") { + return asToolLike(entry.tool, name); + } + + if (typeof entry.tool === "function") { + const builtTools = toToolList(entry.tool(ctx)); + const hit = builtTools.find((tool) => (tool as { name?: string }).name === name); + if (hit) { + return asToolLike(hit, name); + } + } else if ((entry.tool as { name?: string }).name === name) { + return asToolLike(entry.tool, name); + } + } + throw new Error(`Tool not registered: ${name}`); + }; + + return { + api: api as 
OpenClawPluginApi, + resolveTool, + }; +} diff --git a/extensions/feishu/src/wiki.ts b/extensions/feishu/src/wiki.ts index dc76bcc6d75..0c4383b0647 100644 --- a/extensions/feishu/src/wiki.ts +++ b/extensions/feishu/src/wiki.ts @@ -1,8 +1,7 @@ import type * as Lark from "@larksuiteoapi/node-sdk"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; import { listEnabledFeishuAccounts } from "./accounts.js"; -import { createFeishuClient } from "./client.js"; -import { resolveToolsConfig } from "./tools-config.js"; +import { createFeishuToolClient, resolveAnyEnabledFeishuToolsConfig } from "./tool-account.js"; import { FeishuWikiSchema, type FeishuWikiParams } from "./wiki-schema.js"; // ============ Helpers ============ @@ -168,62 +167,68 @@ export function registerFeishuWikiTools(api: OpenClawPluginApi) { return; } - const firstAccount = accounts[0]; - const toolsCfg = resolveToolsConfig(firstAccount.config.tools); + const toolsCfg = resolveAnyEnabledFeishuToolsConfig(accounts); if (!toolsCfg.wiki) { api.logger.debug?.("feishu_wiki: wiki tool disabled in config"); return; } - const getClient = () => createFeishuClient(firstAccount); + type FeishuWikiExecuteParams = FeishuWikiParams & { accountId?: string }; api.registerTool( - { - name: "feishu_wiki", - label: "Feishu Wiki", - description: - "Feishu knowledge base operations. Actions: spaces, nodes, get, create, move, rename", - parameters: FeishuWikiSchema, - async execute(_toolCallId, params) { - const p = params as FeishuWikiParams; - try { - const client = getClient(); - switch (p.action) { - case "spaces": - return json(await listSpaces(client)); - case "nodes": - return json(await listNodes(client, p.space_id, p.parent_node_token)); - case "get": - return json(await getNode(client, p.token)); - case "search": - return json({ - error: - "Search is not available. 
Use feishu_wiki with action: 'nodes' to browse or action: 'get' to lookup by token.", - }); - case "create": - return json( - await createNode(client, p.space_id, p.title, p.obj_type, p.parent_node_token), - ); - case "move": - return json( - await moveNode( - client, - p.space_id, - p.node_token, - p.target_space_id, - p.target_parent_token, - ), - ); - case "rename": - return json(await renameNode(client, p.space_id, p.node_token, p.title)); - default: - // eslint-disable-next-line @typescript-eslint/no-explicit-any -- exhaustive check fallback - return json({ error: `Unknown action: ${(p as any).action}` }); + (ctx) => { + const defaultAccountId = ctx.agentAccountId; + return { + name: "feishu_wiki", + label: "Feishu Wiki", + description: + "Feishu knowledge base operations. Actions: spaces, nodes, get, create, move, rename", + parameters: FeishuWikiSchema, + async execute(_toolCallId, params) { + const p = params as FeishuWikiExecuteParams; + try { + const client = createFeishuToolClient({ + api, + executeParams: p, + defaultAccountId, + }); + switch (p.action) { + case "spaces": + return json(await listSpaces(client)); + case "nodes": + return json(await listNodes(client, p.space_id, p.parent_node_token)); + case "get": + return json(await getNode(client, p.token)); + case "search": + return json({ + error: + "Search is not available. 
Use feishu_wiki with action: 'nodes' to browse or action: 'get' to lookup by token.", + }); + case "create": + return json( + await createNode(client, p.space_id, p.title, p.obj_type, p.parent_node_token), + ); + case "move": + return json( + await moveNode( + client, + p.space_id, + p.node_token, + p.target_space_id, + p.target_parent_token, + ), + ); + case "rename": + return json(await renameNode(client, p.space_id, p.node_token, p.title)); + default: + // eslint-disable-next-line @typescript-eslint/no-explicit-any -- exhaustive check fallback + return json({ error: `Unknown action: ${(p as any).action}` }); + } + } catch (err) { + return json({ error: err instanceof Error ? err.message : String(err) }); } - } catch (err) { - return json({ error: err instanceof Error ? err.message : String(err) }); - } - }, + }, + }; }, { name: "feishu_wiki" }, ); diff --git a/extensions/google-gemini-cli-auth/README.md b/extensions/google-gemini-cli-auth/README.md index 07dcd13c52a..bbca53ba1ce 100644 --- a/extensions/google-gemini-cli-auth/README.md +++ b/extensions/google-gemini-cli-auth/README.md @@ -2,6 +2,12 @@ OAuth provider plugin for **Gemini CLI** (Google Code Assist). +## Account safety caution + +- This plugin is an unofficial integration and is not endorsed by Google. +- Some users have reported account restrictions or suspensions after using third-party Gemini CLI and Antigravity OAuth clients. +- Use caution, review the applicable Google terms, and avoid using a mission-critical account. + ## Enable Bundled plugins are disabled by default. 
Enable this one: diff --git a/extensions/google-gemini-cli-auth/oauth.test.ts b/extensions/google-gemini-cli-auth/oauth.test.ts index 018eae78dd6..46a12a0a5ee 100644 --- a/extensions/google-gemini-cli-auth/oauth.test.ts +++ b/extensions/google-gemini-cli-auth/oauth.test.ts @@ -3,6 +3,19 @@ import { describe, expect, it, vi, beforeEach, afterEach } from "vitest"; vi.mock("openclaw/plugin-sdk", () => ({ isWSL2Sync: () => false, + fetchWithSsrFGuard: async (params: { + url: string; + init?: RequestInit; + fetchImpl?: (input: RequestInfo | URL, init?: RequestInit) => Promise; + }) => { + const fetchImpl = params.fetchImpl ?? globalThis.fetch; + const response = await fetchImpl(params.url, params.init); + return { + response, + finalUrl: params.url, + release: async () => {}, + }; + }, })); // Mock fs module before importing the module under test @@ -96,6 +109,41 @@ describe("extractGeminiCliCredentials", () => { return layout; } + function installNpmShimLayout(params: { oauth2Exists?: boolean; oauth2Content?: string }) { + const binDir = join(rootDir, "fake", "npm-bin"); + const geminiPath = join(binDir, "gemini"); + const resolvedPath = geminiPath; + const oauth2Path = join( + binDir, + "node_modules", + "@google", + "gemini-cli", + "node_modules", + "@google", + "gemini-cli-core", + "dist", + "src", + "code_assist", + "oauth2.js", + ); + process.env.PATH = binDir; + + mockExistsSync.mockImplementation((p: string) => { + const normalized = normalizePath(p); + if (normalized === normalizePath(geminiPath)) { + return true; + } + if (params.oauth2Exists && normalized === normalizePath(oauth2Path)) { + return true; + } + return false; + }); + mockRealpathSync.mockReturnValue(resolvedPath); + if (params.oauth2Content !== undefined) { + mockReadFileSync.mockReturnValue(params.oauth2Content); + } + } + beforeEach(async () => { vi.clearAllMocks(); originalPath = process.env.PATH; @@ -127,6 +175,19 @@ describe("extractGeminiCliCredentials", () => { }); }); + it("extracts 
credentials when PATH entry is an npm global shim", async () => { + installNpmShimLayout({ oauth2Exists: true, oauth2Content: FAKE_OAUTH2_CONTENT }); + + const { extractGeminiCliCredentials, clearCredentialsCache } = await import("./oauth.js"); + clearCredentialsCache(); + const result = extractGeminiCliCredentials(); + + expect(result).toEqual({ + clientId: FAKE_CLIENT_ID, + clientSecret: FAKE_CLIENT_SECRET, + }); + }); + it("returns null when oauth2.js cannot be found", async () => { installGeminiLayout({ oauth2Exists: false, readdir: [] }); @@ -160,3 +221,204 @@ describe("extractGeminiCliCredentials", () => { expect(mockReadFileSync.mock.calls.length).toBe(readCount); }); }); + +describe("loginGeminiCliOAuth", () => { + const TOKEN_URL = "https://oauth2.googleapis.com/token"; + const USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo?alt=json"; + const LOAD_PROD = "https://cloudcode-pa.googleapis.com/v1internal:loadCodeAssist"; + const LOAD_DAILY = "https://daily-cloudcode-pa.sandbox.googleapis.com/v1internal:loadCodeAssist"; + const LOAD_AUTOPUSH = + "https://autopush-cloudcode-pa.sandbox.googleapis.com/v1internal:loadCodeAssist"; + + const ENV_KEYS = [ + "OPENCLAW_GEMINI_OAUTH_CLIENT_ID", + "OPENCLAW_GEMINI_OAUTH_CLIENT_SECRET", + "GEMINI_CLI_OAUTH_CLIENT_ID", + "GEMINI_CLI_OAUTH_CLIENT_SECRET", + "GOOGLE_CLOUD_PROJECT", + "GOOGLE_CLOUD_PROJECT_ID", + ] as const; + + function getExpectedPlatform(): "WINDOWS" | "MACOS" | "LINUX" { + if (process.platform === "win32") { + return "WINDOWS"; + } + if (process.platform === "linux") { + return "LINUX"; + } + return "MACOS"; + } + + function getRequestUrl(input: string | URL | Request): string { + return typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url; + } + + function getHeaderValue(headers: HeadersInit | undefined, name: string): string | undefined { + if (!headers) { + return undefined; + } + if (headers instanceof Headers) { + return headers.get(name) ?? 
undefined; + } + if (Array.isArray(headers)) { + return headers.find(([key]) => key.toLowerCase() === name.toLowerCase())?.[1]; + } + return (headers as Record)[name]; + } + + function responseJson(body: unknown, status = 200): Response { + return new Response(JSON.stringify(body), { + status, + headers: { "Content-Type": "application/json" }, + }); + } + + let envSnapshot: Partial>; + beforeEach(() => { + envSnapshot = Object.fromEntries(ENV_KEYS.map((key) => [key, process.env[key]])); + process.env.OPENCLAW_GEMINI_OAUTH_CLIENT_ID = "test-client-id.apps.googleusercontent.com"; + process.env.OPENCLAW_GEMINI_OAUTH_CLIENT_SECRET = "GOCSPX-test-client-secret"; + delete process.env.GEMINI_CLI_OAUTH_CLIENT_ID; + delete process.env.GEMINI_CLI_OAUTH_CLIENT_SECRET; + delete process.env.GOOGLE_CLOUD_PROJECT; + delete process.env.GOOGLE_CLOUD_PROJECT_ID; + }); + + afterEach(() => { + for (const key of ENV_KEYS) { + const value = envSnapshot[key]; + if (value === undefined) { + delete process.env[key]; + } else { + process.env[key] = value; + } + } + vi.unstubAllGlobals(); + }); + + it("falls back across loadCodeAssist endpoints with aligned headers and metadata", async () => { + const requests: Array<{ url: string; init?: RequestInit }> = []; + const fetchMock = vi.fn(async (input: string | URL | Request, init?: RequestInit) => { + const url = getRequestUrl(input); + requests.push({ url, init }); + + if (url === TOKEN_URL) { + return responseJson({ + access_token: "access-token", + refresh_token: "refresh-token", + expires_in: 3600, + }); + } + if (url === USERINFO_URL) { + return responseJson({ email: "lobster@openclaw.ai" }); + } + if (url === LOAD_PROD) { + return responseJson({ error: { message: "temporary failure" } }, 503); + } + if (url === LOAD_DAILY) { + return responseJson({ + currentTier: { id: "standard-tier" }, + cloudaicompanionProject: { id: "daily-project" }, + }); + } + throw new Error(`Unexpected request: ${url}`); + }); + vi.stubGlobal("fetch", fetchMock); 
+ + let authUrl = ""; + const { loginGeminiCliOAuth } = await import("./oauth.js"); + const result = await loginGeminiCliOAuth({ + isRemote: true, + openUrl: async () => {}, + log: (msg) => { + const found = msg.match(/https:\/\/accounts\.google\.com\/o\/oauth2\/v2\/auth\?[^\s]+/); + if (found?.[0]) { + authUrl = found[0]; + } + }, + note: async () => {}, + prompt: async () => { + const state = new URL(authUrl).searchParams.get("state"); + return `${"http://localhost:8085/oauth2callback"}?code=oauth-code&state=${state}`; + }, + progress: { update: () => {}, stop: () => {} }, + }); + + expect(result.projectId).toBe("daily-project"); + const loadRequests = requests.filter((request) => + request.url.includes("v1internal:loadCodeAssist"), + ); + expect(loadRequests.map((request) => request.url)).toEqual([LOAD_PROD, LOAD_DAILY]); + + const firstHeaders = loadRequests[0]?.init?.headers; + expect(getHeaderValue(firstHeaders, "X-Goog-Api-Client")).toBe( + `gl-node/${process.versions.node}`, + ); + + const clientMetadata = getHeaderValue(firstHeaders, "Client-Metadata"); + expect(clientMetadata).toBeDefined(); + expect(JSON.parse(clientMetadata as string)).toEqual({ + ideType: "ANTIGRAVITY", + platform: getExpectedPlatform(), + pluginType: "GEMINI", + }); + + const body = JSON.parse(String(loadRequests[0]?.init?.body)); + expect(body).toEqual({ + metadata: { + ideType: "ANTIGRAVITY", + platform: getExpectedPlatform(), + pluginType: "GEMINI", + }, + }); + }); + + it("falls back to GOOGLE_CLOUD_PROJECT when all loadCodeAssist endpoints fail", async () => { + process.env.GOOGLE_CLOUD_PROJECT = "env-project"; + + const requests: string[] = []; + const fetchMock = vi.fn(async (input: string | URL | Request) => { + const url = getRequestUrl(input); + requests.push(url); + + if (url === TOKEN_URL) { + return responseJson({ + access_token: "access-token", + refresh_token: "refresh-token", + expires_in: 3600, + }); + } + if (url === USERINFO_URL) { + return responseJson({ email: 
"lobster@openclaw.ai" }); + } + if ([LOAD_PROD, LOAD_DAILY, LOAD_AUTOPUSH].includes(url)) { + return responseJson({ error: { message: "unavailable" } }, 503); + } + throw new Error(`Unexpected request: ${url}`); + }); + vi.stubGlobal("fetch", fetchMock); + + let authUrl = ""; + const { loginGeminiCliOAuth } = await import("./oauth.js"); + const result = await loginGeminiCliOAuth({ + isRemote: true, + openUrl: async () => {}, + log: (msg) => { + const found = msg.match(/https:\/\/accounts\.google\.com\/o\/oauth2\/v2\/auth\?[^\s]+/); + if (found?.[0]) { + authUrl = found[0]; + } + }, + note: async () => {}, + prompt: async () => { + const state = new URL(authUrl).searchParams.get("state"); + return `${"http://localhost:8085/oauth2callback"}?code=oauth-code&state=${state}`; + }, + progress: { update: () => {}, stop: () => {} }, + }); + + expect(result.projectId).toBe("env-project"); + expect(requests.filter((url) => url.includes("v1internal:loadCodeAssist"))).toHaveLength(3); + expect(requests.some((url) => url.includes("v1internal:onboardUser"))).toBe(false); + }); +}); diff --git a/extensions/google-gemini-cli-auth/oauth.ts b/extensions/google-gemini-cli-auth/oauth.ts index 7977ab52981..7e2280b9c9f 100644 --- a/extensions/google-gemini-cli-auth/oauth.ts +++ b/extensions/google-gemini-cli-auth/oauth.ts @@ -2,7 +2,7 @@ import { createHash, randomBytes } from "node:crypto"; import { existsSync, readFileSync, readdirSync, realpathSync } from "node:fs"; import { createServer } from "node:http"; import { delimiter, dirname, join } from "node:path"; -import { isWSL2Sync } from "openclaw/plugin-sdk"; +import { fetchWithSsrFGuard, isWSL2Sync } from "openclaw/plugin-sdk"; const CLIENT_ID_KEYS = ["OPENCLAW_GEMINI_OAUTH_CLIENT_ID", "GEMINI_CLI_OAUTH_CLIENT_ID"]; const CLIENT_SECRET_KEYS = [ @@ -13,7 +13,15 @@ const REDIRECT_URI = "http://localhost:8085/oauth2callback"; const AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth"; const TOKEN_URL = 
"https://oauth2.googleapis.com/token"; const USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo?alt=json"; -const CODE_ASSIST_ENDPOINT = "https://cloudcode-pa.googleapis.com"; +const CODE_ASSIST_ENDPOINT_PROD = "https://cloudcode-pa.googleapis.com"; +const CODE_ASSIST_ENDPOINT_DAILY = "https://daily-cloudcode-pa.sandbox.googleapis.com"; +const CODE_ASSIST_ENDPOINT_AUTOPUSH = "https://autopush-cloudcode-pa.sandbox.googleapis.com"; +const LOAD_CODE_ASSIST_ENDPOINTS = [ + CODE_ASSIST_ENDPOINT_PROD, + CODE_ASSIST_ENDPOINT_DAILY, + CODE_ASSIST_ENDPOINT_AUTOPUSH, +]; +const DEFAULT_FETCH_TIMEOUT_MS = 10_000; const SCOPES = [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/userinfo.email", @@ -71,41 +79,45 @@ export function extractGeminiCliCredentials(): { clientId: string; clientSecret: } const resolvedPath = realpathSync(geminiPath); - const geminiCliDir = dirname(dirname(resolvedPath)); - - const searchPaths = [ - join( - geminiCliDir, - "node_modules", - "@google", - "gemini-cli-core", - "dist", - "src", - "code_assist", - "oauth2.js", - ), - join( - geminiCliDir, - "node_modules", - "@google", - "gemini-cli-core", - "dist", - "code_assist", - "oauth2.js", - ), - ]; + const geminiCliDirs = resolveGeminiCliDirs(geminiPath, resolvedPath); let content: string | null = null; - for (const p of searchPaths) { - if (existsSync(p)) { - content = readFileSync(p, "utf8"); + for (const geminiCliDir of geminiCliDirs) { + const searchPaths = [ + join( + geminiCliDir, + "node_modules", + "@google", + "gemini-cli-core", + "dist", + "src", + "code_assist", + "oauth2.js", + ), + join( + geminiCliDir, + "node_modules", + "@google", + "gemini-cli-core", + "dist", + "code_assist", + "oauth2.js", + ), + ]; + + for (const p of searchPaths) { + if (existsSync(p)) { + content = readFileSync(p, "utf8"); + break; + } + } + if (content) { break; } - } - if (!content) { const found = findFile(geminiCliDir, "oauth2.js", 10); if (found) { content = 
readFileSync(found, "utf8"); + break; } } if (!content) { @@ -124,6 +136,30 @@ export function extractGeminiCliCredentials(): { clientId: string; clientSecret: return null; } +function resolveGeminiCliDirs(geminiPath: string, resolvedPath: string): string[] { + const binDir = dirname(geminiPath); + const candidates = [ + dirname(dirname(resolvedPath)), + join(dirname(resolvedPath), "node_modules", "@google", "gemini-cli"), + join(binDir, "node_modules", "@google", "gemini-cli"), + join(dirname(binDir), "node_modules", "@google", "gemini-cli"), + join(dirname(binDir), "lib", "node_modules", "@google", "gemini-cli"), + ]; + + const deduped: string[] = []; + const seen = new Set(); + for (const candidate of candidates) { + const key = + process.platform === "win32" ? candidate.replace(/\\/g, "/").toLowerCase() : candidate; + if (seen.has(key)) { + continue; + } + seen.add(key); + deduped.push(candidate); + } + return deduped; +} + function findInPath(name: string): string | null { const exts = process.platform === "win32" ? [".cmd", ".bat", ".exe", ""] : [""]; for (const dir of (process.env.PATH ?? 
"").split(delimiter)) { @@ -188,6 +224,38 @@ function generatePkce(): { verifier: string; challenge: string } { return { verifier, challenge }; } +function resolvePlatform(): "WINDOWS" | "MACOS" | "LINUX" { + if (process.platform === "win32") { + return "WINDOWS"; + } + if (process.platform === "linux") { + return "LINUX"; + } + return "MACOS"; +} + +async function fetchWithTimeout( + url: string, + init: RequestInit, + timeoutMs = DEFAULT_FETCH_TIMEOUT_MS, +): Promise { + const { response, release } = await fetchWithSsrFGuard({ + url, + init, + timeoutMs, + }); + try { + const body = await response.arrayBuffer(); + return new Response(body, { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + } finally { + await release(); + } +} + function buildAuthUrl(challenge: string, verifier: string): string { const { clientId } = resolveOAuthClientConfig(); const params = new URLSearchParams({ @@ -341,9 +409,13 @@ async function exchangeCodeForTokens( body.set("client_secret", clientSecret); } - const response = await fetch(TOKEN_URL, { + const response = await fetchWithTimeout(TOKEN_URL, { method: "POST", - headers: { "Content-Type": "application/x-www-form-urlencoded" }, + headers: { + "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8", + Accept: "*/*", + "User-Agent": "google-api-nodejs-client/9.15.1", + }, body, }); @@ -377,7 +449,7 @@ async function exchangeCodeForTokens( async function getUserEmail(accessToken: string): Promise { try { - const response = await fetch(USERINFO_URL, { + const response = await fetchWithTimeout(USERINFO_URL, { headers: { Authorization: `Bearer ${accessToken}` }, }); if (response.ok) { @@ -392,20 +464,25 @@ async function getUserEmail(accessToken: string): Promise { async function discoverProject(accessToken: string): Promise { const envProject = process.env.GOOGLE_CLOUD_PROJECT || process.env.GOOGLE_CLOUD_PROJECT_ID; + const platform = resolvePlatform(); + const metadata = { + 
ideType: "ANTIGRAVITY", + platform, + pluginType: "GEMINI", + }; const headers = { Authorization: `Bearer ${accessToken}`, "Content-Type": "application/json", "User-Agent": "google-api-nodejs-client/9.15.1", - "X-Goog-Api-Client": "gl-node/openclaw", + "X-Goog-Api-Client": `gl-node/${process.versions.node}`, + "Client-Metadata": JSON.stringify(metadata), }; const loadBody = { - cloudaicompanionProject: envProject, + ...(envProject ? { cloudaicompanionProject: envProject } : {}), metadata: { - ideType: "IDE_UNSPECIFIED", - platform: "PLATFORM_UNSPECIFIED", - pluginType: "GEMINI", - duetProject: envProject, + ...metadata, + ...(envProject ? { duetProject: envProject } : {}), }, }; @@ -414,29 +491,46 @@ async function discoverProject(accessToken: string): Promise { cloudaicompanionProject?: string | { id?: string }; allowedTiers?: Array<{ id?: string; isDefault?: boolean }>; } = {}; + let activeEndpoint = CODE_ASSIST_ENDPOINT_PROD; + let loadError: Error | undefined; + for (const endpoint of LOAD_CODE_ASSIST_ENDPOINTS) { + try { + const response = await fetchWithTimeout(`${endpoint}/v1internal:loadCodeAssist`, { + method: "POST", + headers, + body: JSON.stringify(loadBody), + }); - try { - const response = await fetch(`${CODE_ASSIST_ENDPOINT}/v1internal:loadCodeAssist`, { - method: "POST", - headers, - body: JSON.stringify(loadBody), - }); - - if (!response.ok) { - const errorPayload = await response.json().catch(() => null); - if (isVpcScAffected(errorPayload)) { - data = { currentTier: { id: TIER_STANDARD } }; - } else { - throw new Error(`loadCodeAssist failed: ${response.status} ${response.statusText}`); + if (!response.ok) { + const errorPayload = await response.json().catch(() => null); + if (isVpcScAffected(errorPayload)) { + data = { currentTier: { id: TIER_STANDARD } }; + activeEndpoint = endpoint; + loadError = undefined; + break; + } + loadError = new Error(`loadCodeAssist failed: ${response.status} ${response.statusText}`); + continue; } - } else { + data 
= (await response.json()) as typeof data; + activeEndpoint = endpoint; + loadError = undefined; + break; + } catch (err) { + loadError = err instanceof Error ? err : new Error("loadCodeAssist failed", { cause: err }); } - } catch (err) { - if (err instanceof Error) { - throw err; + } + + const hasLoadCodeAssistData = + Boolean(data.currentTier) || + Boolean(data.cloudaicompanionProject) || + Boolean(data.allowedTiers?.length); + if (!hasLoadCodeAssistData && loadError) { + if (envProject) { + return envProject; } - throw new Error("loadCodeAssist failed", { cause: err }); + throw loadError; } if (data.currentTier) { @@ -466,9 +560,7 @@ async function discoverProject(accessToken: string): Promise { const onboardBody: Record = { tierId, metadata: { - ideType: "IDE_UNSPECIFIED", - platform: "PLATFORM_UNSPECIFIED", - pluginType: "GEMINI", + ...metadata, }, }; if (tierId !== TIER_FREE && envProject) { @@ -476,7 +568,7 @@ async function discoverProject(accessToken: string): Promise { (onboardBody.metadata as Record).duetProject = envProject; } - const onboardResponse = await fetch(`${CODE_ASSIST_ENDPOINT}/v1internal:onboardUser`, { + const onboardResponse = await fetchWithTimeout(`${activeEndpoint}/v1internal:onboardUser`, { method: "POST", headers, body: JSON.stringify(onboardBody), @@ -493,7 +585,7 @@ async function discoverProject(accessToken: string): Promise { }; if (!lro.done && lro.name) { - lro = await pollOperation(lro.name, headers); + lro = await pollOperation(activeEndpoint, lro.name, headers); } const projectId = lro.response?.cloudaicompanionProject?.id; @@ -539,12 +631,13 @@ function getDefaultTier( } async function pollOperation( + endpoint: string, operationName: string, headers: Record, ): Promise<{ done?: boolean; response?: { cloudaicompanionProject?: { id?: string } } }> { for (let attempt = 0; attempt < 24; attempt += 1) { await new Promise((resolve) => setTimeout(resolve, 5000)); - const response = await 
fetch(`${CODE_ASSIST_ENDPOINT}/v1internal/${operationName}`, { + const response = await fetchWithTimeout(`${endpoint}/v1internal/${operationName}`, { headers, }); if (!response.ok) { diff --git a/extensions/google-gemini-cli-auth/package.json b/extensions/google-gemini-cli-auth/package.json index 9ec1c1af360..bbd4efd7fc7 100644 --- a/extensions/google-gemini-cli-auth/package.json +++ b/extensions/google-gemini-cli-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/google-gemini-cli-auth", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw Gemini CLI OAuth provider plugin", "type": "module", diff --git a/extensions/googlechat/package.json b/extensions/googlechat/package.json index fd43f2faa26..cfaf35b137d 100644 --- a/extensions/googlechat/package.json +++ b/extensions/googlechat/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/googlechat", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw Google Chat channel plugin", "type": "module", diff --git a/extensions/googlechat/src/channel.startup.test.ts b/extensions/googlechat/src/channel.startup.test.ts new file mode 100644 index 00000000000..8823775cfd6 --- /dev/null +++ b/extensions/googlechat/src/channel.startup.test.ts @@ -0,0 +1,102 @@ +import type { + ChannelAccountSnapshot, + ChannelGatewayContext, + OpenClawConfig, +} from "openclaw/plugin-sdk"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { createRuntimeEnv } from "../../test-utils/runtime-env.js"; +import type { ResolvedGoogleChatAccount } from "./accounts.js"; + +const hoisted = vi.hoisted(() => ({ + startGoogleChatMonitor: vi.fn(), +})); + +vi.mock("./monitor.js", async () => { + const actual = await vi.importActual("./monitor.js"); + return { + ...actual, + startGoogleChatMonitor: hoisted.startGoogleChatMonitor, + }; +}); + +import { googlechatPlugin } from "./channel.js"; + +function createStartAccountCtx(params: { + account: 
ResolvedGoogleChatAccount; + abortSignal: AbortSignal; + statusPatchSink?: (next: ChannelAccountSnapshot) => void; +}): ChannelGatewayContext { + const snapshot: ChannelAccountSnapshot = { + accountId: params.account.accountId, + configured: true, + enabled: true, + running: false, + }; + return { + accountId: params.account.accountId, + account: params.account, + cfg: {} as OpenClawConfig, + runtime: createRuntimeEnv(), + abortSignal: params.abortSignal, + log: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, + getStatus: () => snapshot, + setStatus: (next) => { + Object.assign(snapshot, next); + params.statusPatchSink?.(snapshot); + }, + }; +} + +describe("googlechatPlugin gateway.startAccount", () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + it("keeps startAccount pending until abort, then unregisters", async () => { + const unregister = vi.fn(); + hoisted.startGoogleChatMonitor.mockResolvedValue(unregister); + + const account: ResolvedGoogleChatAccount = { + accountId: "default", + enabled: true, + credentialSource: "inline", + credentials: {}, + config: { + webhookPath: "/googlechat", + webhookUrl: "https://example.com/googlechat", + audienceType: "app-url", + audience: "https://example.com/googlechat", + }, + }; + + const patches: ChannelAccountSnapshot[] = []; + const abort = new AbortController(); + const task = googlechatPlugin.gateway!.startAccount!( + createStartAccountCtx({ + account, + abortSignal: abort.signal, + statusPatchSink: (next) => patches.push({ ...next }), + }), + ); + + await new Promise((resolve) => setTimeout(resolve, 20)); + + let settled = false; + void task.then(() => { + settled = true; + }); + + await new Promise((resolve) => setTimeout(resolve, 20)); + expect(settled).toBe(false); + + expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce(); + expect(unregister).not.toHaveBeenCalled(); + + abort.abort(); + await task; + + expect(unregister).toHaveBeenCalledOnce(); + expect(patches.some((entry) => 
entry.running === true)).toBe(true); + expect(patches.some((entry) => entry.running === false)).toBe(true); + }); +}); diff --git a/extensions/googlechat/src/channel.ts b/extensions/googlechat/src/channel.ts index 52943f63049..0233cac7017 100644 --- a/extensions/googlechat/src/channel.ts +++ b/extensions/googlechat/src/channel.ts @@ -563,14 +563,20 @@ export const googlechatPlugin: ChannelPlugin = { webhookUrl: account.config.webhookUrl, statusSink: (patch) => ctx.setStatus({ accountId: account.accountId, ...patch }), }); - return () => { - unregister?.(); - ctx.setStatus({ - accountId: account.accountId, - running: false, - lastStopAt: Date.now(), - }); - }; + // Keep the promise pending until abort (webhook mode is passive). + await new Promise((resolve) => { + if (ctx.abortSignal.aborted) { + resolve(); + return; + } + ctx.abortSignal.addEventListener("abort", () => resolve(), { once: true }); + }); + unregister?.(); + ctx.setStatus({ + accountId: account.accountId, + running: false, + lastStopAt: Date.now(), + }); }, }, }; diff --git a/extensions/googlechat/src/monitor.ts b/extensions/googlechat/src/monitor.ts index c7529489695..e31905a55ce 100644 --- a/extensions/googlechat/src/monitor.ts +++ b/extensions/googlechat/src/monitor.ts @@ -2,6 +2,7 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import type { OpenClawConfig } from "openclaw/plugin-sdk"; import { GROUP_POLICY_BLOCKED_LABEL, + createScopedPairingAccess, createReplyPrefixOptions, readJsonBodyWithLimit, registerWebhookTarget, @@ -15,6 +16,7 @@ import { warnMissingProviderGroupPolicyFallbackOnce, requestBodyErrorToText, resolveMentionGatingWithBypass, + resolveDmGroupAccessWithLists, } from "openclaw/plugin-sdk"; import { type ResolvedGoogleChatAccount } from "./accounts.js"; import { @@ -395,6 +397,11 @@ async function processMessageWithPipeline(params: { mediaMaxMb: number; }): Promise { const { event, account, config, runtime, core, statusSink, mediaMaxMb } = params; + const 
pairing = createScopedPairingAccess({ + core, + channel: "googlechat", + accountId: account.accountId, + }); const space = event.space; const message = event.message; if (!space || !message) { @@ -503,14 +510,33 @@ async function processMessageWithPipeline(params: { const dmPolicy = account.config.dm?.policy ?? "pairing"; const configAllowFrom = (account.config.dm?.allowFrom ?? []).map((v) => String(v)); + const normalizedGroupUsers = groupUsers.map((v) => String(v)); + const senderGroupPolicy = + groupPolicy === "disabled" + ? "disabled" + : normalizedGroupUsers.length > 0 + ? "allowlist" + : "open"; const shouldComputeAuth = core.channel.commands.shouldComputeCommandAuthorized(rawBody, config); const storeAllowFrom = !isGroup && dmPolicy !== "allowlist" && (dmPolicy !== "open" || shouldComputeAuth) - ? await core.channel.pairing.readAllowFromStore("googlechat").catch(() => []) + ? await pairing.readAllowFromStore().catch(() => []) : []; - const effectiveAllowFrom = [...configAllowFrom, ...storeAllowFrom]; + const access = resolveDmGroupAccessWithLists({ + isGroup, + dmPolicy, + groupPolicy: senderGroupPolicy, + allowFrom: configAllowFrom, + groupAllowFrom: normalizedGroupUsers, + storeAllowFrom, + groupAllowFromFallbackToAllowFrom: false, + isSenderAllowed: (allowFrom) => + isSenderAllowed(senderId, senderEmail, allowFrom, allowNameMatching), + }); + const effectiveAllowFrom = access.effectiveAllowFrom; + const effectiveGroupAllowFrom = access.effectiveGroupAllowFrom; warnDeprecatedUsersEmailEntries(core, runtime, effectiveAllowFrom); - const commandAllowFrom = isGroup ? groupUsers.map((v) => String(v)) : effectiveAllowFrom; + const commandAllowFrom = isGroup ? 
effectiveGroupAllowFrom : effectiveAllowFrom; const useAccessGroups = config.commands?.useAccessGroups !== false; const senderAllowedForCommands = isSenderAllowed( senderId, @@ -553,47 +579,52 @@ async function processMessageWithPipeline(params: { } } + if (isGroup && access.decision !== "allow") { + logVerbose( + core, + runtime, + `drop group message (sender policy blocked, reason=${access.reason}, space=${spaceId})`, + ); + return; + } + if (!isGroup) { - if (dmPolicy === "disabled" || account.config.dm?.enabled === false) { + if (account.config.dm?.enabled === false) { logVerbose(core, runtime, `Blocked Google Chat DM from ${senderId} (dmPolicy=disabled)`); return; } - if (dmPolicy !== "open") { - const allowed = senderAllowedForCommands; - if (!allowed) { - if (dmPolicy === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: "googlechat", - id: senderId, - meta: { name: senderName || undefined, email: senderEmail }, - }); - if (created) { - logVerbose(core, runtime, `googlechat pairing request sender=${senderId}`); - try { - await sendGoogleChatMessage({ - account, - space: spaceId, - text: core.channel.pairing.buildPairingReply({ - channel: "googlechat", - idLine: `Your Google Chat user id: ${senderId}`, - code, - }), - }); - statusSink?.({ lastOutboundAt: Date.now() }); - } catch (err) { - logVerbose(core, runtime, `pairing reply failed for ${senderId}: ${String(err)}`); - } + if (access.decision !== "allow") { + if (access.decision === "pairing") { + const { code, created } = await pairing.upsertPairingRequest({ + id: senderId, + meta: { name: senderName || undefined, email: senderEmail }, + }); + if (created) { + logVerbose(core, runtime, `googlechat pairing request sender=${senderId}`); + try { + await sendGoogleChatMessage({ + account, + space: spaceId, + text: core.channel.pairing.buildPairingReply({ + channel: "googlechat", + idLine: `Your Google Chat user id: ${senderId}`, + code, + }), + }); + 
statusSink?.({ lastOutboundAt: Date.now() }); + } catch (err) { + logVerbose(core, runtime, `pairing reply failed for ${senderId}: ${String(err)}`); } - } else { - logVerbose( - core, - runtime, - `Blocked unauthorized Google Chat sender ${senderId} (dmPolicy=${dmPolicy})`, - ); } - return; + } else { + logVerbose( + core, + runtime, + `Blocked unauthorized Google Chat sender ${senderId} (dmPolicy=${dmPolicy})`, + ); } + return; } } diff --git a/extensions/imessage/package.json b/extensions/imessage/package.json index 7eeafd8b872..e0e82149419 100644 --- a/extensions/imessage/package.json +++ b/extensions/imessage/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/imessage", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw iMessage channel plugin", "type": "module", diff --git a/extensions/irc/package.json b/extensions/irc/package.json index e5937ee763b..583b2cb04c1 100644 --- a/extensions/irc/package.json +++ b/extensions/irc/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/irc", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw IRC channel plugin", "type": "module", "openclaw": { diff --git a/extensions/irc/src/inbound.policy.test.ts b/extensions/irc/src/inbound.policy.test.ts index c5b6cdfac89..c3c317c5000 100644 --- a/extensions/irc/src/inbound.policy.test.ts +++ b/extensions/irc/src/inbound.policy.test.ts @@ -7,6 +7,7 @@ describe("irc inbound policy", () => { configAllowFrom: ["owner"], configGroupAllowFrom: [], storeAllowList: ["paired-user"], + dmPolicy: "pairing", }); expect(resolved.effectiveAllowFrom).toEqual(["owner", "paired-user"]); @@ -17,6 +18,7 @@ describe("irc inbound policy", () => { configAllowFrom: ["owner"], configGroupAllowFrom: ["group-owner"], storeAllowList: ["paired-user"], + dmPolicy: "pairing", }); expect(resolved.effectiveGroupAllowFrom).toEqual(["group-owner"]); @@ -27,6 +29,7 @@ describe("irc inbound policy", () => { configAllowFrom: ["owner"], 
configGroupAllowFrom: [], storeAllowList: ["paired-user"], + dmPolicy: "pairing", }); expect(resolved.effectiveGroupAllowFrom).toEqual([]); diff --git a/extensions/irc/src/inbound.ts b/extensions/irc/src/inbound.ts index efb0b781d4a..cb21b92c361 100644 --- a/extensions/irc/src/inbound.ts +++ b/extensions/irc/src/inbound.ts @@ -1,14 +1,17 @@ import { GROUP_POLICY_BLOCKED_LABEL, + createScopedPairingAccess, createNormalizedOutboundDeliverer, createReplyPrefixOptions, formatTextWithAttachmentLinks, logInboundDrop, isDangerousNameMatchingEnabled, + readStoreAllowFromForDmPolicy, resolveControlCommandGate, resolveOutboundMediaUrls, resolveAllowlistProviderRuntimeGroupPolicy, resolveDefaultGroupPolicy, + resolveEffectiveAllowFromLists, warnMissingProviderGroupPolicyFallbackOnce, type OutboundReplyPayload, type OpenClawConfig, @@ -35,13 +38,19 @@ function resolveIrcEffectiveAllowlists(params: { configAllowFrom: string[]; configGroupAllowFrom: string[]; storeAllowList: string[]; + dmPolicy: string; }): { effectiveAllowFrom: string[]; effectiveGroupAllowFrom: string[]; } { - const effectiveAllowFrom = [...params.configAllowFrom, ...params.storeAllowList].filter(Boolean); - // Pairing-store entries are DM approvals and must not widen group sender authorization. - const effectiveGroupAllowFrom = [...params.configGroupAllowFrom].filter(Boolean); + const { effectiveAllowFrom, effectiveGroupAllowFrom } = resolveEffectiveAllowFromLists({ + allowFrom: params.configAllowFrom, + groupAllowFrom: params.configGroupAllowFrom, + storeAllowFrom: params.storeAllowList, + dmPolicy: params.dmPolicy, + // IRC intentionally requires explicit groupAllowFrom; do not fallback to allowFrom. 
+ groupAllowFromFallbackToAllowFrom: false, + }); return { effectiveAllowFrom, effectiveGroupAllowFrom }; } @@ -82,6 +91,11 @@ export async function handleIrcInbound(params: { }): Promise { const { message, account, config, runtime, connectedNick, statusSink } = params; const core = getIrcRuntime(); + const pairing = createScopedPairingAccess({ + core, + channel: CHANNEL_ID, + accountId: account.accountId, + }); const rawBody = message.text?.trim() ?? ""; if (!rawBody) { @@ -113,10 +127,12 @@ export async function handleIrcInbound(params: { const configAllowFrom = normalizeIrcAllowlist(account.config.allowFrom); const configGroupAllowFrom = normalizeIrcAllowlist(account.config.groupAllowFrom); - const storeAllowFrom = - dmPolicy === "allowlist" - ? [] - : await core.channel.pairing.readAllowFromStore(CHANNEL_ID).catch(() => []); + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: CHANNEL_ID, + accountId: account.accountId, + dmPolicy, + readStore: pairing.readStoreForDmPolicy, + }); const storeAllowList = normalizeIrcAllowlist(storeAllowFrom); const groupMatch = resolveIrcGroupMatch({ @@ -141,6 +157,7 @@ export async function handleIrcInbound(params: { configAllowFrom, configGroupAllowFrom, storeAllowList, + dmPolicy, }); const allowTextCommands = core.channel.commands.shouldHandleTextCommands({ @@ -192,8 +209,7 @@ export async function handleIrcInbound(params: { }).allowed; if (!dmAllowed) { if (dmPolicy === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: CHANNEL_ID, + const { code, created } = await pairing.upsertPairingRequest({ id: senderDisplay.toLowerCase(), meta: { name: message.senderNick || undefined }, }); diff --git a/extensions/line/package.json b/extensions/line/package.json index 402952b084c..03f640cf7af 100644 --- a/extensions/line/package.json +++ b/extensions/line/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/line", - "version": "2026.2.25", + "version": "2026.2.26", 
"private": true, "description": "OpenClaw LINE channel plugin", "type": "module", diff --git a/extensions/line/src/channel.startup.test.ts b/extensions/line/src/channel.startup.test.ts index e5b0ce333f5..812636113cb 100644 --- a/extensions/line/src/channel.startup.test.ts +++ b/extensions/line/src/channel.startup.test.ts @@ -37,6 +37,7 @@ function createStartAccountCtx(params: { token: string; secret: string; runtime: ReturnType; + abortSignal?: AbortSignal; }): ChannelGatewayContext { const snapshot: ChannelAccountSnapshot = { accountId: "default", @@ -56,7 +57,7 @@ function createStartAccountCtx(params: { }, cfg: {} as OpenClawConfig, runtime: params.runtime, - abortSignal: new AbortController().signal, + abortSignal: params.abortSignal ?? new AbortController().signal, log: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, getStatus: () => snapshot, setStatus: vi.fn(), @@ -104,14 +105,19 @@ describe("linePlugin gateway.startAccount", () => { const { runtime, monitorLineProvider } = createRuntime(); setLineRuntime(runtime); - await linePlugin.gateway!.startAccount!( + const abort = new AbortController(); + const task = linePlugin.gateway!.startAccount!( createStartAccountCtx({ token: "token", secret: "secret", runtime: createRuntimeEnv(), + abortSignal: abort.signal, }), ); + // Allow async internals (probeLineBot await) to flush + await new Promise((r) => setTimeout(r, 20)); + expect(monitorLineProvider).toHaveBeenCalledWith( expect.objectContaining({ channelAccessToken: "token", @@ -119,5 +125,8 @@ describe("linePlugin gateway.startAccount", () => { accountId: "default", }), ); + + abort.abort(); + await task; }); }); diff --git a/extensions/line/src/channel.ts b/extensions/line/src/channel.ts index a260d96c961..1c87ad8e2f3 100644 --- a/extensions/line/src/channel.ts +++ b/extensions/line/src/channel.ts @@ -651,7 +651,7 @@ export const linePlugin: ChannelPlugin = { ctx.log?.info(`[${account.accountId}] starting LINE provider${lineBotLabel}`); - 
return getLineRuntime().channel.line.monitorLineProvider({ + const monitor = await getLineRuntime().channel.line.monitorLineProvider({ channelAccessToken: token, channelSecret: secret, accountId: account.accountId, @@ -660,6 +660,8 @@ export const linePlugin: ChannelPlugin = { abortSignal: ctx.abortSignal, webhookPath: account.config.webhookPath, }); + + return monitor; }, logoutAccount: async ({ accountId, cfg }) => { const envToken = process.env.LINE_CHANNEL_ACCESS_TOKEN?.trim() ?? ""; diff --git a/extensions/llm-task/package.json b/extensions/llm-task/package.json index 9e182b90134..9252bdb7ea0 100644 --- a/extensions/llm-task/package.json +++ b/extensions/llm-task/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/llm-task", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw JSON-only LLM task plugin", "type": "module", diff --git a/extensions/lobster/package.json b/extensions/lobster/package.json index f60a1ff73a6..ffbd1fad2b8 100644 --- a/extensions/lobster/package.json +++ b/extensions/lobster/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/lobster", - "version": "2026.2.25", + "version": "2026.2.26", "description": "Lobster workflow tool plugin (typed pipelines + resumable approvals)", "type": "module", "openclaw": { diff --git a/extensions/matrix/CHANGELOG.md b/extensions/matrix/CHANGELOG.md index deffac4088a..14085e49a92 100644 --- a/extensions/matrix/CHANGELOG.md +++ b/extensions/matrix/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.26 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.25 ### Changes diff --git a/extensions/matrix/package.json b/extensions/matrix/package.json index 615cbc74855..cce28f2a65e 100644 --- a/extensions/matrix/package.json +++ b/extensions/matrix/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/matrix", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Matrix channel plugin", "type": "module", "dependencies": { diff --git a/extensions/matrix/src/matrix/monitor/access-policy.ts b/extensions/matrix/src/matrix/monitor/access-policy.ts new file mode 100644 index 00000000000..e937ba81848 --- /dev/null +++ b/extensions/matrix/src/matrix/monitor/access-policy.ts @@ -0,0 +1,127 @@ +import { + formatAllowlistMatchMeta, + issuePairingChallenge, + readStoreAllowFromForDmPolicy, + resolveDmGroupAccessWithLists, +} from "openclaw/plugin-sdk"; +import { + normalizeMatrixAllowList, + resolveMatrixAllowListMatch, + resolveMatrixAllowListMatches, +} from "./allowlist.js"; + +type MatrixDmPolicy = "open" | "pairing" | "allowlist" | "disabled"; +type MatrixGroupPolicy = "open" | "allowlist" | "disabled"; + +export async function resolveMatrixAccessState(params: { + isDirectMessage: boolean; + resolvedAccountId: string; + dmPolicy: MatrixDmPolicy; + groupPolicy: MatrixGroupPolicy; + allowFrom: string[]; + groupAllowFrom: Array; + senderId: string; + readStoreForDmPolicy: (provider: string, accountId: string) => Promise; +}) { + const storeAllowFrom = params.isDirectMessage + ? await readStoreAllowFromForDmPolicy({ + provider: "matrix", + accountId: params.resolvedAccountId, + dmPolicy: params.dmPolicy, + readStore: params.readStoreForDmPolicy, + }) + : []; + const normalizedGroupAllowFrom = normalizeMatrixAllowList(params.groupAllowFrom); + const senderGroupPolicy = + params.groupPolicy === "disabled" + ? "disabled" + : normalizedGroupAllowFrom.length > 0 + ? 
"allowlist" + : "open"; + const access = resolveDmGroupAccessWithLists({ + isGroup: !params.isDirectMessage, + dmPolicy: params.dmPolicy, + groupPolicy: senderGroupPolicy, + allowFrom: params.allowFrom, + groupAllowFrom: normalizedGroupAllowFrom, + storeAllowFrom, + groupAllowFromFallbackToAllowFrom: false, + isSenderAllowed: (allowFrom) => + resolveMatrixAllowListMatches({ + allowList: normalizeMatrixAllowList(allowFrom), + userId: params.senderId, + }), + }); + const effectiveAllowFrom = normalizeMatrixAllowList(access.effectiveAllowFrom); + const effectiveGroupAllowFrom = normalizeMatrixAllowList(access.effectiveGroupAllowFrom); + return { + access, + effectiveAllowFrom, + effectiveGroupAllowFrom, + groupAllowConfigured: effectiveGroupAllowFrom.length > 0, + }; +} + +export async function enforceMatrixDirectMessageAccess(params: { + dmEnabled: boolean; + dmPolicy: MatrixDmPolicy; + accessDecision: "allow" | "block" | "pairing"; + senderId: string; + senderName: string; + effectiveAllowFrom: string[]; + upsertPairingRequest: (input: { + id: string; + meta?: Record; + }) => Promise<{ + code: string; + created: boolean; + }>; + sendPairingReply: (text: string) => Promise; + logVerboseMessage: (message: string) => void; +}): Promise { + if (!params.dmEnabled) { + return false; + } + if (params.accessDecision === "allow") { + return true; + } + const allowMatch = resolveMatrixAllowListMatch({ + allowList: params.effectiveAllowFrom, + userId: params.senderId, + }); + const allowMatchMeta = formatAllowlistMatchMeta(allowMatch); + if (params.accessDecision === "pairing") { + await issuePairingChallenge({ + channel: "matrix", + senderId: params.senderId, + senderIdLine: `Matrix user id: ${params.senderId}`, + meta: { name: params.senderName }, + upsertPairingRequest: params.upsertPairingRequest, + buildReplyText: ({ code }) => + [ + "OpenClaw: access not configured.", + "", + `Pairing code: ${code}`, + "", + "Ask the bot owner to approve with:", + "openclaw pairing 
approve matrix ", + ].join("\n"), + sendPairingReply: params.sendPairingReply, + onCreated: () => { + params.logVerboseMessage( + `matrix pairing request sender=${params.senderId} name=${params.senderName ?? "unknown"} (${allowMatchMeta})`, + ); + }, + onReplyError: (err) => { + params.logVerboseMessage( + `matrix pairing reply failed for ${params.senderId}: ${String(err)}`, + ); + }, + }); + return false; + } + params.logVerboseMessage( + `matrix: blocked dm sender ${params.senderId} (dmPolicy=${params.dmPolicy}, ${allowMatchMeta})`, + ); + return false; +} diff --git a/extensions/matrix/src/matrix/monitor/handler.body-for-agent.test.ts b/extensions/matrix/src/matrix/monitor/handler.body-for-agent.test.ts new file mode 100644 index 00000000000..49ae7323317 --- /dev/null +++ b/extensions/matrix/src/matrix/monitor/handler.body-for-agent.test.ts @@ -0,0 +1,142 @@ +import type { MatrixClient } from "@vector-im/matrix-bot-sdk"; +import type { PluginRuntime, RuntimeEnv, RuntimeLogger } from "openclaw/plugin-sdk"; +import { describe, expect, it, vi } from "vitest"; +import { createMatrixRoomMessageHandler } from "./handler.js"; +import { EventType, type MatrixRawEvent } from "./types.js"; + +describe("createMatrixRoomMessageHandler BodyForAgent sender label", () => { + it("stores sender-labeled BodyForAgent for group thread messages", async () => { + const recordInboundSession = vi.fn().mockResolvedValue(undefined); + const formatInboundEnvelope = vi + .fn() + .mockImplementation((params: { senderLabel?: string; body: string }) => params.body); + const finalizeInboundContext = vi + .fn() + .mockImplementation((ctx: Record) => ctx); + + const core = { + channel: { + pairing: { + readAllowFromStore: vi.fn().mockResolvedValue([]), + }, + routing: { + resolveAgentRoute: vi.fn().mockReturnValue({ + agentId: "main", + accountId: undefined, + sessionKey: "agent:main:matrix:channel:!room:example.org", + mainSessionKey: "agent:main:main", + }), + }, + session: { + 
resolveStorePath: vi.fn().mockReturnValue("/tmp/openclaw-test-session.json"), + readSessionUpdatedAt: vi.fn().mockReturnValue(123), + recordInboundSession, + }, + reply: { + resolveEnvelopeFormatOptions: vi.fn().mockReturnValue({}), + formatInboundEnvelope, + formatAgentEnvelope: vi + .fn() + .mockImplementation((params: { body: string }) => params.body), + finalizeInboundContext, + resolveHumanDelayConfig: vi.fn().mockReturnValue(undefined), + createReplyDispatcherWithTyping: vi.fn().mockReturnValue({ + dispatcher: {}, + replyOptions: {}, + markDispatchIdle: vi.fn(), + }), + withReplyDispatcher: vi + .fn() + .mockResolvedValue({ queuedFinal: false, counts: { final: 0, partial: 0, tool: 0 } }), + }, + commands: { + shouldHandleTextCommands: vi.fn().mockReturnValue(true), + }, + text: { + hasControlCommand: vi.fn().mockReturnValue(false), + resolveMarkdownTableMode: vi.fn().mockReturnValue("code"), + }, + }, + system: { + enqueueSystemEvent: vi.fn(), + }, + } as unknown as PluginRuntime; + + const runtime = { + error: vi.fn(), + } as unknown as RuntimeEnv; + const logger = { + info: vi.fn(), + warn: vi.fn(), + } as unknown as RuntimeLogger; + const logVerboseMessage = vi.fn(); + + const client = { + getUserId: vi.fn().mockResolvedValue("@bot:matrix.example.org"), + } as unknown as MatrixClient; + + const handler = createMatrixRoomMessageHandler({ + client, + core, + cfg: {}, + runtime, + logger, + logVerboseMessage, + allowFrom: [], + roomsConfig: undefined, + mentionRegexes: [], + groupPolicy: "open", + replyToMode: "first", + threadReplies: "inbound", + dmEnabled: true, + dmPolicy: "open", + textLimit: 4000, + mediaMaxBytes: 5 * 1024 * 1024, + startupMs: Date.now(), + startupGraceMs: 60_000, + directTracker: { + isDirectMessage: vi.fn().mockResolvedValue(false), + }, + getRoomInfo: vi.fn().mockResolvedValue({ + name: "Dev Room", + canonicalAlias: "#dev:matrix.example.org", + altAliases: [], + }), + getMemberDisplayName: vi.fn().mockResolvedValue("Bu"), + 
accountId: undefined, + }); + + const event = { + type: EventType.RoomMessage, + event_id: "$event1", + sender: "@bu:matrix.example.org", + origin_server_ts: Date.now(), + content: { + msgtype: "m.text", + body: "show me my commits", + "m.mentions": { user_ids: ["@bot:matrix.example.org"] }, + "m.relates_to": { + rel_type: "m.thread", + event_id: "$thread-root", + }, + }, + } as unknown as MatrixRawEvent; + + await handler("!room:example.org", event); + + expect(formatInboundEnvelope).toHaveBeenCalledWith( + expect.objectContaining({ + chatType: "channel", + senderLabel: "Bu (bu)", + }), + ); + expect(recordInboundSession).toHaveBeenCalledWith( + expect.objectContaining({ + ctx: expect.objectContaining({ + ChatType: "thread", + BodyForAgent: "Bu (bu): show me my commits", + }), + }), + ); + }); +}); diff --git a/extensions/matrix/src/matrix/monitor/handler.ts b/extensions/matrix/src/matrix/monitor/handler.ts index 77e88162af3..fc441b83f9a 100644 --- a/extensions/matrix/src/matrix/monitor/handler.ts +++ b/extensions/matrix/src/matrix/monitor/handler.ts @@ -1,5 +1,7 @@ import type { LocationMessageEventContent, MatrixClient } from "@vector-im/matrix-bot-sdk"; import { + DEFAULT_ACCOUNT_ID, + createScopedPairingAccess, createReplyPrefixOptions, createTypingCallbacks, formatAllowlistMatchMeta, @@ -19,11 +21,17 @@ import { type PollStartContent, } from "../poll-types.js"; import { reactMatrixMessage, sendMessageMatrix, sendTypingMatrix } from "../send.js"; +import { enforceMatrixDirectMessageAccess, resolveMatrixAccessState } from "./access-policy.js"; import { normalizeMatrixAllowList, resolveMatrixAllowListMatch, resolveMatrixAllowListMatches, } from "./allowlist.js"; +import { + resolveMatrixBodyForAgent, + resolveMatrixInboundSenderLabel, + resolveMatrixSenderUsername, +} from "./inbound-body.js"; import { resolveMatrixLocation, type MatrixLocationPayload } from "./location.js"; import { downloadMatrixMedia } from "./media.js"; import { resolveMentions } from 
"./mentions.js"; @@ -91,6 +99,12 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam getMemberDisplayName, accountId, } = params; + const resolvedAccountId = accountId?.trim() || DEFAULT_ACCOUNT_ID; + const pairing = createScopedPairingAccess({ + core, + channel: "matrix", + accountId: resolvedAccountId, + }); return async (roomId: string, event: MatrixRawEvent) => { try { @@ -213,62 +227,42 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam } const senderName = await getMemberDisplayName(roomId, senderId); - const storeAllowFrom = - dmPolicy === "allowlist" - ? [] - : await core.channel.pairing.readAllowFromStore("matrix").catch(() => []); - const effectiveAllowFrom = normalizeMatrixAllowList([...allowFrom, ...storeAllowFrom]); + const senderUsername = resolveMatrixSenderUsername(senderId); + const senderLabel = resolveMatrixInboundSenderLabel({ + senderName, + senderId, + senderUsername, + }); const groupAllowFrom = cfg.channels?.matrix?.groupAllowFrom ?? 
[]; - const effectiveGroupAllowFrom = normalizeMatrixAllowList(groupAllowFrom); - const groupAllowConfigured = effectiveGroupAllowFrom.length > 0; + const { access, effectiveAllowFrom, effectiveGroupAllowFrom, groupAllowConfigured } = + await resolveMatrixAccessState({ + isDirectMessage, + resolvedAccountId, + dmPolicy, + groupPolicy, + allowFrom, + groupAllowFrom, + senderId, + readStoreForDmPolicy: pairing.readStoreForDmPolicy, + }); if (isDirectMessage) { - if (!dmEnabled || dmPolicy === "disabled") { + const allowedDirectMessage = await enforceMatrixDirectMessageAccess({ + dmEnabled, + dmPolicy, + accessDecision: access.decision, + senderId, + senderName, + effectiveAllowFrom, + upsertPairingRequest: pairing.upsertPairingRequest, + sendPairingReply: async (text) => { + await sendMessageMatrix(`room:${roomId}`, text, { client }); + }, + logVerboseMessage, + }); + if (!allowedDirectMessage) { return; } - if (dmPolicy !== "open") { - const allowMatch = resolveMatrixAllowListMatch({ - allowList: effectiveAllowFrom, - userId: senderId, - }); - const allowMatchMeta = formatAllowlistMatchMeta(allowMatch); - if (!allowMatch.allowed) { - if (dmPolicy === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: "matrix", - id: senderId, - meta: { name: senderName }, - }); - if (created) { - logVerboseMessage( - `matrix pairing request sender=${senderId} name=${senderName ?? 
"unknown"} (${allowMatchMeta})`, - ); - try { - await sendMessageMatrix( - `room:${roomId}`, - [ - "OpenClaw: access not configured.", - "", - `Pairing code: ${code}`, - "", - "Ask the bot owner to approve with:", - "openclaw pairing approve matrix ", - ].join("\n"), - { client }, - ); - } catch (err) { - logVerboseMessage(`matrix pairing reply failed for ${senderId}: ${String(err)}`); - } - } - } - if (dmPolicy !== "pairing") { - logVerboseMessage( - `matrix: blocked dm sender ${senderId} (dmPolicy=${dmPolicy}, ${allowMatchMeta})`, - ); - } - return; - } - } } const roomUsers = roomConfig?.users ?? []; @@ -286,7 +280,7 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam return; } } - if (isRoom && groupPolicy === "allowlist" && roomUsers.length === 0 && groupAllowConfigured) { + if (isRoom && roomUsers.length === 0 && groupAllowConfigured && access.decision !== "allow") { const groupAllowMatch = resolveMatrixAllowListMatch({ allowList: effectiveGroupAllowFrom, userId: senderId, @@ -498,19 +492,25 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam storePath, sessionKey: route.sessionKey, }); - const body = core.channel.reply.formatAgentEnvelope({ + const body = core.channel.reply.formatInboundEnvelope({ channel: "Matrix", from: envelopeFrom, timestamp: eventTs ?? undefined, previousTimestamp, envelope: envelopeOptions, body: textWithId, + chatType: isDirectMessage ? "direct" : "channel", + senderLabel, }); const groupSystemPrompt = roomConfig?.systemPrompt?.trim() || undefined; const ctxPayload = core.channel.reply.finalizeInboundContext({ Body: body, - BodyForAgent: bodyText, + BodyForAgent: resolveMatrixBodyForAgent({ + isDirectMessage, + bodyText, + senderLabel, + }), RawBody: bodyText, CommandBody: bodyText, From: isDirectMessage ? 
`matrix:${senderId}` : `matrix:channel:${roomId}`, @@ -521,7 +521,7 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam ConversationLabel: envelopeFrom, SenderName: senderName, SenderId: senderId, - SenderUsername: senderId.split(":")[0]?.replace(/^@/, ""), + SenderUsername: senderUsername, GroupSubject: isRoom ? (roomName ?? roomId) : undefined, GroupChannel: isRoom ? (roomInfo.canonicalAlias ?? roomId) : undefined, GroupSystemPrompt: isRoom ? groupSystemPrompt : undefined, @@ -655,17 +655,23 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam }, }); - const { queuedFinal, counts } = await core.channel.reply.dispatchReplyFromConfig({ - ctx: ctxPayload, - cfg, + const { queuedFinal, counts } = await core.channel.reply.withReplyDispatcher({ dispatcher, - replyOptions: { - ...replyOptions, - skillFilter: roomConfig?.skills, - onModelSelected, + onSettled: () => { + markDispatchIdle(); }, + run: () => + core.channel.reply.dispatchReplyFromConfig({ + ctx: ctxPayload, + cfg, + dispatcher, + replyOptions: { + ...replyOptions, + skillFilter: roomConfig?.skills, + onModelSelected, + }, + }), }); - markDispatchIdle(); if (!queuedFinal) { return; } diff --git a/extensions/matrix/src/matrix/monitor/inbound-body.test.ts b/extensions/matrix/src/matrix/monitor/inbound-body.test.ts new file mode 100644 index 00000000000..8b5c63c89a9 --- /dev/null +++ b/extensions/matrix/src/matrix/monitor/inbound-body.test.ts @@ -0,0 +1,73 @@ +import { describe, expect, it } from "vitest"; +import { + resolveMatrixBodyForAgent, + resolveMatrixInboundSenderLabel, + resolveMatrixSenderUsername, +} from "./inbound-body.js"; + +describe("resolveMatrixSenderUsername", () => { + it("extracts localpart without leading @", () => { + expect(resolveMatrixSenderUsername("@bu:matrix.example.org")).toBe("bu"); + }); +}); + +describe("resolveMatrixInboundSenderLabel", () => { + it("uses provided senderUsername when present", () => { + expect( + 
resolveMatrixInboundSenderLabel({ + senderName: "Bu", + senderId: "@bu:matrix.example.org", + senderUsername: "BU_CUSTOM", + }), + ).toBe("Bu (BU_CUSTOM)"); + }); + + it("includes sender username when it differs from display name", () => { + expect( + resolveMatrixInboundSenderLabel({ + senderName: "Bu", + senderId: "@bu:matrix.example.org", + }), + ).toBe("Bu (bu)"); + }); + + it("falls back to sender username when display name is blank", () => { + expect( + resolveMatrixInboundSenderLabel({ + senderName: " ", + senderId: "@zhang:matrix.example.org", + }), + ).toBe("zhang"); + }); + + it("falls back to sender id when username cannot be parsed", () => { + expect( + resolveMatrixInboundSenderLabel({ + senderName: "", + senderId: "matrix-user-without-colon", + }), + ).toBe("matrix-user-without-colon"); + }); +}); + +describe("resolveMatrixBodyForAgent", () => { + it("keeps direct message body unchanged", () => { + expect( + resolveMatrixBodyForAgent({ + isDirectMessage: true, + bodyText: "show me my commits", + senderLabel: "Bu (bu)", + }), + ).toBe("show me my commits"); + }); + + it("prefixes non-direct message body with sender label", () => { + expect( + resolveMatrixBodyForAgent({ + isDirectMessage: false, + bodyText: "show me my commits", + senderLabel: "Bu (bu)", + }), + ).toBe("Bu (bu): show me my commits"); + }); +}); diff --git a/extensions/matrix/src/matrix/monitor/inbound-body.ts b/extensions/matrix/src/matrix/monitor/inbound-body.ts new file mode 100644 index 00000000000..48ad8d31e79 --- /dev/null +++ b/extensions/matrix/src/matrix/monitor/inbound-body.ts @@ -0,0 +1,28 @@ +export function resolveMatrixSenderUsername(senderId: string): string | undefined { + const username = senderId.split(":")[0]?.replace(/^@/, "").trim(); + return username ? 
username : undefined; +} + +export function resolveMatrixInboundSenderLabel(params: { + senderName: string; + senderId: string; + senderUsername?: string; +}): string { + const senderName = params.senderName.trim(); + const senderUsername = params.senderUsername ?? resolveMatrixSenderUsername(params.senderId); + if (senderName && senderUsername && senderName !== senderUsername) { + return `${senderName} (${senderUsername})`; + } + return senderName || senderUsername || params.senderId; +} + +export function resolveMatrixBodyForAgent(params: { + isDirectMessage: boolean; + bodyText: string; + senderLabel: string; +}): string { + if (params.isDirectMessage) { + return params.bodyText; + } + return `${params.senderLabel}: ${params.bodyText}`; +} diff --git a/extensions/mattermost/package.json b/extensions/mattermost/package.json index b9dfe770ee1..91cf1986c31 100644 --- a/extensions/mattermost/package.json +++ b/extensions/mattermost/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/mattermost", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Mattermost channel plugin", "type": "module", "openclaw": { diff --git a/extensions/mattermost/src/mattermost/monitor-auth.ts b/extensions/mattermost/src/mattermost/monitor-auth.ts new file mode 100644 index 00000000000..2b968c5f117 --- /dev/null +++ b/extensions/mattermost/src/mattermost/monitor-auth.ts @@ -0,0 +1,58 @@ +import { resolveAllowlistMatchSimple, resolveEffectiveAllowFromLists } from "openclaw/plugin-sdk"; + +export function normalizeMattermostAllowEntry(entry: string): string { + const trimmed = entry.trim(); + if (!trimmed) { + return ""; + } + if (trimmed === "*") { + return "*"; + } + return trimmed + .replace(/^(mattermost|user):/i, "") + .replace(/^@/, "") + .toLowerCase(); +} + +export function normalizeMattermostAllowList(entries: Array): string[] { + const normalized = entries + .map((entry) => normalizeMattermostAllowEntry(String(entry))) + .filter(Boolean); + return 
Array.from(new Set(normalized)); +} + +export function resolveMattermostEffectiveAllowFromLists(params: { + allowFrom?: Array | null; + groupAllowFrom?: Array | null; + storeAllowFrom?: Array | null; + dmPolicy?: string | null; +}): { + effectiveAllowFrom: string[]; + effectiveGroupAllowFrom: string[]; +} { + return resolveEffectiveAllowFromLists({ + allowFrom: normalizeMattermostAllowList(params.allowFrom ?? []), + groupAllowFrom: normalizeMattermostAllowList(params.groupAllowFrom ?? []), + storeAllowFrom: normalizeMattermostAllowList(params.storeAllowFrom ?? []), + dmPolicy: params.dmPolicy, + }); +} + +export function isMattermostSenderAllowed(params: { + senderId: string; + senderName?: string; + allowFrom: string[]; + allowNameMatching?: boolean; +}): boolean { + const allowFrom = normalizeMattermostAllowList(params.allowFrom); + if (allowFrom.length === 0) { + return false; + } + const match = resolveAllowlistMatchSimple({ + allowFrom, + senderId: normalizeMattermostAllowEntry(params.senderId), + senderName: params.senderName ? 
normalizeMattermostAllowEntry(params.senderName) : undefined, + allowNameMatching: params.allowNameMatching, + }); + return match.allowed; +} diff --git a/extensions/mattermost/src/mattermost/monitor.authz.test.ts b/extensions/mattermost/src/mattermost/monitor.authz.test.ts new file mode 100644 index 00000000000..9b6a296a34e --- /dev/null +++ b/extensions/mattermost/src/mattermost/monitor.authz.test.ts @@ -0,0 +1,59 @@ +import { resolveControlCommandGate } from "openclaw/plugin-sdk"; +import { describe, expect, it } from "vitest"; +import { resolveMattermostEffectiveAllowFromLists } from "./monitor-auth.js"; + +describe("mattermost monitor authz", () => { + it("keeps DM allowlist merged with pairing-store entries", () => { + const resolved = resolveMattermostEffectiveAllowFromLists({ + dmPolicy: "pairing", + allowFrom: ["@trusted-user"], + groupAllowFrom: ["@group-owner"], + storeAllowFrom: ["user:attacker"], + }); + + expect(resolved.effectiveAllowFrom).toEqual(["trusted-user", "attacker"]); + }); + + it("uses explicit groupAllowFrom without pairing-store inheritance", () => { + const resolved = resolveMattermostEffectiveAllowFromLists({ + dmPolicy: "pairing", + allowFrom: ["@trusted-user"], + groupAllowFrom: ["@group-owner"], + storeAllowFrom: ["user:attacker"], + }); + + expect(resolved.effectiveGroupAllowFrom).toEqual(["group-owner"]); + }); + + it("does not inherit pairing-store entries into group allowlist", () => { + const resolved = resolveMattermostEffectiveAllowFromLists({ + dmPolicy: "pairing", + allowFrom: ["@trusted-user"], + storeAllowFrom: ["user:attacker"], + }); + + expect(resolved.effectiveAllowFrom).toEqual(["trusted-user", "attacker"]); + expect(resolved.effectiveGroupAllowFrom).toEqual(["trusted-user"]); + }); + + it("does not auto-authorize DM commands in open mode without allowlists", () => { + const resolved = resolveMattermostEffectiveAllowFromLists({ + dmPolicy: "open", + allowFrom: [], + groupAllowFrom: [], + storeAllowFrom: [], + }); + + 
const commandGate = resolveControlCommandGate({ + useAccessGroups: true, + authorizers: [ + { configured: resolved.effectiveAllowFrom.length > 0, allowed: false }, + { configured: resolved.effectiveGroupAllowFrom.length > 0, allowed: false }, + ], + allowTextCommands: true, + hasControlCommand: true, + }); + + expect(commandGate.commandAuthorized).toBe(false); + }); +}); diff --git a/extensions/mattermost/src/mattermost/monitor.ts b/extensions/mattermost/src/mattermost/monitor.ts index 6056c3fef15..b66c15812ae 100644 --- a/extensions/mattermost/src/mattermost/monitor.ts +++ b/extensions/mattermost/src/mattermost/monitor.ts @@ -7,6 +7,8 @@ import type { } from "openclaw/plugin-sdk"; import { buildAgentMediaPayload, + DM_GROUP_ACCESS_REASON, + createScopedPairingAccess, createReplyPrefixOptions, createTypingCallbacks, logInboundDrop, @@ -17,6 +19,8 @@ import { recordPendingHistoryEntryIfEnabled, isDangerousNameMatchingEnabled, resolveControlCommandGate, + readStoreAllowFromForDmPolicy, + resolveDmGroupAccessWithLists, resolveAllowlistProviderRuntimeGroupPolicy, resolveDefaultGroupPolicy, resolveChannelMediaMaxBytes, @@ -36,6 +40,7 @@ import { type MattermostPost, type MattermostUser, } from "./client.js"; +import { isMattermostSenderAllowed, normalizeMattermostAllowList } from "./monitor-auth.js"; import { createDedupeCache, formatInboundFromLabel, @@ -61,7 +66,6 @@ export type MonitorMattermostOpts = { webSocketFactory?: MattermostWebSocketFactory; }; -type FetchLike = (input: URL | RequestInfo, init?: RequestInit) => Promise; type MediaKind = "image" | "audio" | "video" | "document" | "unknown"; type MattermostReaction = { @@ -130,51 +134,6 @@ function channelChatType(kind: ChatType): "direct" | "group" | "channel" { return "channel"; } -function normalizeAllowEntry(entry: string): string { - const trimmed = entry.trim(); - if (!trimmed) { - return ""; - } - if (trimmed === "*") { - return "*"; - } - return trimmed - .replace(/^(mattermost|user):/i, "") - 
.replace(/^@/, "") - .toLowerCase(); -} - -function normalizeAllowList(entries: Array): string[] { - const normalized = entries.map((entry) => normalizeAllowEntry(String(entry))).filter(Boolean); - return Array.from(new Set(normalized)); -} - -function isSenderAllowed(params: { - senderId: string; - senderName?: string; - allowFrom: string[]; - allowNameMatching?: boolean; -}): boolean { - const allowFrom = params.allowFrom; - if (allowFrom.length === 0) { - return false; - } - if (allowFrom.includes("*")) { - return true; - } - const normalizedSenderId = normalizeAllowEntry(params.senderId); - const normalizedSenderName = params.senderName ? normalizeAllowEntry(params.senderName) : ""; - return allowFrom.some((entry) => { - if (entry === normalizedSenderId) { - return true; - } - if (params.allowNameMatching !== true) { - return false; - } - return normalizedSenderName ? entry === normalizedSenderName : false; - }); -} - type MattermostMediaInfo = { path: string; contentType?: string; @@ -213,6 +172,11 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} cfg, accountId: opts.accountId, }); + const pairing = createScopedPairingAccess({ + core, + channel: "mattermost", + accountId: account.accountId, + }); const allowNameMatching = isDangerousNameMatchingEnabled(account.config); const botToken = opts.botToken?.trim() || account.botToken?.trim(); if (!botToken) { @@ -267,12 +231,6 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} log: (message) => logVerboseMessage(message), }); - const fetchWithAuth: FetchLike = (input, init) => { - const headers = new Headers(init?.headers); - headers.set("Authorization", `Bearer ${client.token}`); - return fetch(input, { ...init, headers }); - }; - const resolveMattermostMedia = async ( fileIds?: string[] | null, ): Promise => { @@ -285,7 +243,11 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} try { const fetched = await 
core.channel.media.fetchRemoteMedia({ url: `${client.apiBaseUrl}/files/${fileId}`, - fetchImpl: fetchWithAuth, + requestInit: { + headers: { + Authorization: `Bearer ${client.token}`, + }, + }, filePathHint: fileId, maxBytes: mediaMaxBytes, }); @@ -399,20 +361,35 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} senderId; const rawText = post.message?.trim() || ""; const dmPolicy = account.config.dmPolicy ?? "pairing"; - const configAllowFrom = normalizeAllowList(account.config.allowFrom ?? []); - const configGroupAllowFrom = normalizeAllowList(account.config.groupAllowFrom ?? []); - const storeAllowFrom = normalizeAllowList( - dmPolicy === "allowlist" - ? [] - : await core.channel.pairing.readAllowFromStore("mattermost").catch(() => []), + const normalizedAllowFrom = normalizeMattermostAllowList(account.config.allowFrom ?? []); + const normalizedGroupAllowFrom = normalizeMattermostAllowList( + account.config.groupAllowFrom ?? [], ); - const effectiveAllowFrom = Array.from(new Set([...configAllowFrom, ...storeAllowFrom])); - const effectiveGroupAllowFrom = Array.from( - new Set([ - ...(configGroupAllowFrom.length > 0 ? 
configGroupAllowFrom : configAllowFrom), - ...storeAllowFrom, - ]), + const storeAllowFrom = normalizeMattermostAllowList( + await readStoreAllowFromForDmPolicy({ + provider: "mattermost", + accountId: account.accountId, + dmPolicy, + readStore: pairing.readStoreForDmPolicy, + }), ); + const accessDecision = resolveDmGroupAccessWithLists({ + isGroup: kind !== "direct", + dmPolicy, + groupPolicy, + allowFrom: normalizedAllowFrom, + groupAllowFrom: normalizedGroupAllowFrom, + storeAllowFrom, + isSenderAllowed: (allowFrom) => + isMattermostSenderAllowed({ + senderId, + senderName, + allowFrom, + allowNameMatching, + }), + }); + const effectiveAllowFrom = accessDecision.effectiveAllowFrom; + const effectiveGroupAllowFrom = accessDecision.effectiveGroupAllowFrom; const allowTextCommands = core.channel.commands.shouldHandleTextCommands({ cfg, surface: "mattermost", @@ -420,13 +397,14 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} const hasControlCommand = core.channel.text.hasControlCommand(rawText, cfg); const isControlCommand = allowTextCommands && hasControlCommand; const useAccessGroups = cfg.commands?.useAccessGroups !== false; - const senderAllowedForCommands = isSenderAllowed({ + const commandDmAllowFrom = kind === "direct" ? 
effectiveAllowFrom : normalizedAllowFrom; + const senderAllowedForCommands = isMattermostSenderAllowed({ senderId, senderName, - allowFrom: effectiveAllowFrom, + allowFrom: commandDmAllowFrom, allowNameMatching, }); - const groupAllowedForCommands = isSenderAllowed({ + const groupAllowedForCommands = isMattermostSenderAllowed({ senderId, senderName, allowFrom: effectiveGroupAllowFrom, @@ -435,7 +413,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} const commandGate = resolveControlCommandGate({ useAccessGroups, authorizers: [ - { configured: effectiveAllowFrom.length > 0, allowed: senderAllowedForCommands }, + { configured: commandDmAllowFrom.length > 0, allowed: senderAllowedForCommands }, { configured: effectiveGroupAllowFrom.length > 0, allowed: groupAllowedForCommands, @@ -444,20 +422,16 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} allowTextCommands, hasControlCommand, }); - const commandAuthorized = - kind === "direct" - ? 
dmPolicy === "open" || senderAllowedForCommands - : commandGate.commandAuthorized; + const commandAuthorized = commandGate.commandAuthorized; - if (kind === "direct") { - if (dmPolicy === "disabled") { - logVerboseMessage(`mattermost: drop dm (dmPolicy=disabled sender=${senderId})`); - return; - } - if (dmPolicy !== "open" && !senderAllowedForCommands) { - if (dmPolicy === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: "mattermost", + if (accessDecision.decision !== "allow") { + if (kind === "direct") { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.DM_POLICY_DISABLED) { + logVerboseMessage(`mattermost: drop dm (dmPolicy=disabled sender=${senderId})`); + return; + } + if (accessDecision.decision === "pairing") { + const { code, created } = await pairing.upsertPairingRequest({ id: senderId, meta: { name: senderName }, }); @@ -478,26 +452,27 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} logVerboseMessage(`mattermost: pairing reply failed for ${senderId}: ${String(err)}`); } } - } else { - logVerboseMessage(`mattermost: drop dm sender=${senderId} (dmPolicy=${dmPolicy})`); + return; } + logVerboseMessage(`mattermost: drop dm sender=${senderId} (dmPolicy=${dmPolicy})`); return; } - } else { - if (groupPolicy === "disabled") { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_DISABLED) { logVerboseMessage("mattermost: drop group message (groupPolicy=disabled)"); return; } - if (groupPolicy === "allowlist") { - if (effectiveGroupAllowFrom.length === 0) { - logVerboseMessage("mattermost: drop group message (no group allowlist)"); - return; - } - if (!groupAllowedForCommands) { - logVerboseMessage(`mattermost: drop group sender=${senderId} (not in groupAllowFrom)`); - return; - } + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_EMPTY_ALLOWLIST) { + logVerboseMessage("mattermost: drop group message (no group allowlist)"); + 
return; } + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_NOT_ALLOWLISTED) { + logVerboseMessage(`mattermost: drop group sender=${senderId} (not in groupAllowFrom)`); + return; + } + logVerboseMessage( + `mattermost: drop group message (groupPolicy=${groupPolicy} reason=${accessDecision.reason})`, + ); + return; } if (kind !== "direct" && commandGate.shouldBlock) { @@ -807,18 +782,24 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} }, }); - await core.channel.reply.dispatchReplyFromConfig({ - ctx: ctxPayload, - cfg, + await core.channel.reply.withReplyDispatcher({ dispatcher, - replyOptions: { - ...replyOptions, - disableBlockStreaming: - typeof account.blockStreaming === "boolean" ? !account.blockStreaming : undefined, - onModelSelected, + onSettled: () => { + markDispatchIdle(); }, + run: () => + core.channel.reply.dispatchReplyFromConfig({ + ctx: ctxPayload, + cfg, + dispatcher, + replyOptions: { + ...replyOptions, + disableBlockStreaming: + typeof account.blockStreaming === "boolean" ? !account.blockStreaming : undefined, + onModelSelected, + }, + }), }); - markDispatchIdle(); if (historyKey) { clearHistoryEntriesIfEnabled({ historyMap: channelHistories, @@ -883,68 +864,41 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} const kind = channelKind(channelInfo.type); // Enforce DM/group policy and allowlist checks (same as normal messages) - if (kind === "direct") { - const dmPolicy = account.config.dmPolicy ?? "pairing"; - if (dmPolicy === "disabled") { - logVerboseMessage(`mattermost: drop reaction (dmPolicy=disabled sender=${userId})`); - return; - } - // For pairing/allowlist modes, only allow reactions from approved senders - if (dmPolicy !== "open") { - const configAllowFrom = normalizeAllowList(account.config.allowFrom ?? []); - const storeAllowFrom = normalizeAllowList( - dmPolicy === "allowlist" - ? 
[] - : await core.channel.pairing.readAllowFromStore("mattermost").catch(() => []), - ); - const effectiveAllowFrom = Array.from(new Set([...configAllowFrom, ...storeAllowFrom])); - const allowed = isSenderAllowed({ + const dmPolicy = account.config.dmPolicy ?? "pairing"; + const storeAllowFrom = normalizeMattermostAllowList( + await readStoreAllowFromForDmPolicy({ + provider: "mattermost", + accountId: account.accountId, + dmPolicy, + readStore: pairing.readStoreForDmPolicy, + }), + ); + const reactionAccess = resolveDmGroupAccessWithLists({ + isGroup: kind !== "direct", + dmPolicy, + groupPolicy, + allowFrom: normalizeMattermostAllowList(account.config.allowFrom ?? []), + groupAllowFrom: normalizeMattermostAllowList(account.config.groupAllowFrom ?? []), + storeAllowFrom, + isSenderAllowed: (allowFrom) => + isMattermostSenderAllowed({ senderId: userId, senderName, - allowFrom: effectiveAllowFrom, + allowFrom, allowNameMatching, - }); - if (!allowed) { - logVerboseMessage( - `mattermost: drop reaction (dmPolicy=${dmPolicy} sender=${userId} not allowed)`, - ); - return; - } - } - } else if (kind) { - if (groupPolicy === "disabled") { - logVerboseMessage(`mattermost: drop reaction (groupPolicy=disabled channel=${channelId})`); - return; - } - if (groupPolicy === "allowlist") { - const dmPolicyForStore = account.config.dmPolicy ?? "pairing"; - const configAllowFrom = normalizeAllowList(account.config.allowFrom ?? []); - const configGroupAllowFrom = normalizeAllowList(account.config.groupAllowFrom ?? []); - const storeAllowFrom = normalizeAllowList( - dmPolicyForStore === "allowlist" - ? 
[] - : await core.channel.pairing.readAllowFromStore("mattermost").catch(() => []), + }), + }); + if (reactionAccess.decision !== "allow") { + if (kind === "direct") { + logVerboseMessage( + `mattermost: drop reaction (dmPolicy=${dmPolicy} sender=${userId} reason=${reactionAccess.reason})`, ); - const effectiveGroupAllowFrom = Array.from( - new Set([ - ...(configGroupAllowFrom.length > 0 ? configGroupAllowFrom : configAllowFrom), - ...storeAllowFrom, - ]), + } else { + logVerboseMessage( + `mattermost: drop reaction (groupPolicy=${groupPolicy} sender=${userId} reason=${reactionAccess.reason} channel=${channelId})`, ); - // Drop when allowlist is empty (same as normal message handler) - const allowed = - effectiveGroupAllowFrom.length > 0 && - isSenderAllowed({ - senderId: userId, - senderName, - allowFrom: effectiveGroupAllowFrom, - allowNameMatching, - }); - if (!allowed) { - logVerboseMessage(`mattermost: drop reaction (groupPolicy=allowlist sender=${userId})`); - return; - } } + return; } const teamId = channelInfo?.team_id ?? 
undefined; diff --git a/extensions/memory-core/package.json b/extensions/memory-core/package.json index 98bdbe76f73..ca80fd77278 100644 --- a/extensions/memory-core/package.json +++ b/extensions/memory-core/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/memory-core", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw core memory search plugin", "type": "module", diff --git a/extensions/memory-lancedb/package.json b/extensions/memory-lancedb/package.json index a658940881e..da88bf069fe 100644 --- a/extensions/memory-lancedb/package.json +++ b/extensions/memory-lancedb/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/memory-lancedb", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw LanceDB-backed long-term memory plugin with auto-recall/capture", "type": "module", diff --git a/extensions/minimax-portal-auth/package.json b/extensions/minimax-portal-auth/package.json index 4a0dfc6121d..c5744e546c1 100644 --- a/extensions/minimax-portal-auth/package.json +++ b/extensions/minimax-portal-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/minimax-portal-auth", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw MiniMax Portal OAuth provider plugin", "type": "module", diff --git a/extensions/msteams/CHANGELOG.md b/extensions/msteams/CHANGELOG.md index b6760627b46..2402bf1a4fa 100644 --- a/extensions/msteams/CHANGELOG.md +++ b/extensions/msteams/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.26 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.25 ### Changes diff --git a/extensions/msteams/package.json b/extensions/msteams/package.json index efee0ce8554..9cd947a3ba8 100644 --- a/extensions/msteams/package.json +++ b/extensions/msteams/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/msteams", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Microsoft Teams channel plugin", "type": "module", "dependencies": { diff --git a/extensions/msteams/src/attachments.test.ts b/extensions/msteams/src/attachments.test.ts index b67289aea9d..167075d1c6e 100644 --- a/extensions/msteams/src/attachments.test.ts +++ b/extensions/msteams/src/attachments.test.ts @@ -1,4 +1,4 @@ -import type { PluginRuntime } from "openclaw/plugin-sdk"; +import type { PluginRuntime, SsrFPolicy } from "openclaw/plugin-sdk"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { buildMSTeamsAttachmentPlaceholder, @@ -9,16 +9,6 @@ import { } from "./attachments.js"; import { setMSTeamsRuntime } from "./runtime.js"; -vi.mock("openclaw/plugin-sdk", async (importOriginal) => { - const actual = await importOriginal(); - return { - ...actual, - isPrivateIpAddress: () => false, - }; -}); - -/** Mock DNS resolver that always returns a public IP (for anti-SSRF validation in tests). 
*/ -const publicResolveFn = async () => ({ address: "13.107.136.10" }); const GRAPH_HOST = "graph.microsoft.com"; const SHAREPOINT_HOST = "contoso.sharepoint.com"; const AZUREEDGE_HOST = "azureedge.net"; @@ -50,6 +40,7 @@ type RemoteMediaFetchParams = { url: string; maxBytes?: number; filePathHint?: string; + ssrfPolicy?: SsrFPolicy; fetchImpl?: (input: RequestInfo | URL, init?: RequestInit) => Promise; }; @@ -75,10 +66,44 @@ const readRemoteMediaResponse = async ( fileName: params.filePathHint, }; }; + +function isHostnameAllowedByPattern(hostname: string, pattern: string): boolean { + if (pattern.startsWith("*.")) { + const suffix = pattern.slice(2); + return suffix.length > 0 && hostname !== suffix && hostname.endsWith(`.${suffix}`); + } + return hostname === pattern; +} + +function isUrlAllowedBySsrfPolicy(url: string, policy?: SsrFPolicy): boolean { + if (!policy?.hostnameAllowlist || policy.hostnameAllowlist.length === 0) { + return true; + } + const hostname = new URL(url).hostname.toLowerCase(); + return policy.hostnameAllowlist.some((pattern) => + isHostnameAllowedByPattern(hostname, pattern.toLowerCase()), + ); +} + const fetchRemoteMediaMock = vi.fn(async (params: RemoteMediaFetchParams) => { const fetchFn = params.fetchImpl ?? 
fetch; - const res = await fetchFn(params.url); - return readRemoteMediaResponse(res, params); + let currentUrl = params.url; + for (let i = 0; i <= MAX_REDIRECT_HOPS; i += 1) { + if (!isUrlAllowedBySsrfPolicy(currentUrl, params.ssrfPolicy)) { + throw new Error(`Blocked hostname (not in allowlist): ${currentUrl}`); + } + const res = await fetchFn(currentUrl, { redirect: "manual" }); + if (REDIRECT_STATUS_CODES.includes(res.status)) { + const location = res.headers.get("location"); + if (!location) { + throw new Error("redirect missing location"); + } + currentUrl = new URL(location, currentUrl).toString(); + continue; + } + return readRemoteMediaResponse(res, params); + } + throw new Error("too many redirects"); }); const runtimeStub = { @@ -100,16 +125,13 @@ type DownloadGraphMediaParams = Parameters[0]; type DownloadedMedia = Awaited>; type MSTeamsMediaPayload = ReturnType; type DownloadAttachmentsBuildOverrides = Partial< - Omit + Omit > & - Pick; + Pick; type DownloadAttachmentsNoFetchOverrides = Partial< - Omit< - DownloadAttachmentsParams, - "attachments" | "maxBytes" | "allowHosts" | "resolveFn" | "fetchFn" - > + Omit > & - Pick; + Pick; type DownloadGraphMediaOverrides = Partial< Omit >; @@ -210,7 +232,6 @@ const buildDownloadParams = ( attachments, maxBytes: DEFAULT_MAX_BYTES, allowHosts: DEFAULT_ALLOW_HOSTS, - resolveFn: publicResolveFn, ...overrides, }; }; @@ -680,13 +701,37 @@ describe("msteams attachments", () => { fetchMock, { allowHosts: [GRAPH_HOST], - resolveFn: undefined, }, { expectFetchCalled: false }, ); expectAttachmentMediaLength(media, 0); }); + + it("blocks redirects to non-https URLs", async () => { + const insecureUrl = "http://x/insecure.png"; + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = typeof input === "string" ? 
input : input.toString(); + if (url === TEST_URL_IMAGE) { + return createRedirectResponse(insecureUrl); + } + if (url === insecureUrl) { + return createBufferResponse("insecure", CONTENT_TYPE_IMAGE_PNG); + } + return createNotFoundResponse(); + }); + + const media = await downloadAttachmentsWithFetch( + createImageAttachments(TEST_URL_IMAGE), + fetchMock, + { + allowHosts: [TEST_HOST], + }, + ); + + expectAttachmentMediaLength(media, 0); + expect(fetchMock).toHaveBeenCalledTimes(1); + }); }); describe("buildMSTeamsGraphMessageUrls", () => { @@ -701,24 +746,6 @@ describe("msteams attachments", () => { it("blocks SharePoint redirects to hosts outside allowHosts", async () => { const escapedUrl = "https://evil.example/internal.pdf"; - fetchRemoteMediaMock.mockImplementationOnce(async (params) => { - const fetchFn = params.fetchImpl ?? fetch; - let currentUrl = params.url; - for (let i = 0; i < MAX_REDIRECT_HOPS; i += 1) { - const res = await fetchFn(currentUrl, { redirect: "manual" }); - if (REDIRECT_STATUS_CODES.includes(res.status)) { - const location = res.headers.get("location"); - if (!location) { - throw new Error("redirect missing location"); - } - currentUrl = new URL(location, currentUrl).toString(); - continue; - } - return readRemoteMediaResponse(res, params); - } - throw new Error("too many redirects"); - }); - const { fetchMock, media } = await downloadGraphMediaWithMockOptions( { ...buildDefaultShareReferenceGraphFetchOptions({ diff --git a/extensions/msteams/src/attachments/download.ts b/extensions/msteams/src/attachments/download.ts index bb3c5867205..f6f16ff803e 100644 --- a/extensions/msteams/src/attachments/download.ts +++ b/extensions/msteams/src/attachments/download.ts @@ -1,3 +1,4 @@ +import { fetchWithBearerAuthScopeFallback } from "openclaw/plugin-sdk"; import { getMSTeamsRuntime } from "../runtime.js"; import { downloadAndStoreMSTeamsRemoteMedia } from "./remote-media.js"; import { @@ -7,10 +8,10 @@ import { isRecord, isUrlAllowed, 
normalizeContentType, + resolveMediaSsrfPolicy, resolveRequestUrl, resolveAuthAllowedHosts, resolveAllowedHosts, - safeFetch, } from "./shared.js"; import type { MSTeamsAccessTokenProvider, @@ -90,81 +91,17 @@ async function fetchWithAuthFallback(params: { tokenProvider?: MSTeamsAccessTokenProvider; fetchFn?: typeof fetch; requestInit?: RequestInit; - allowHosts: string[]; authAllowHosts: string[]; - resolveFn?: (hostname: string) => Promise<{ address: string }>; }): Promise { - const fetchFn = params.fetchFn ?? fetch; - - // Use safeFetch for the initial attempt — redirect: "manual" with - // allowlist + DNS/IP validation on every hop (prevents SSRF via redirect). - const firstAttempt = await safeFetch({ + return await fetchWithBearerAuthScopeFallback({ url: params.url, - allowHosts: params.allowHosts, - fetchFn, + scopes: scopeCandidatesForUrl(params.url), + tokenProvider: params.tokenProvider, + fetchFn: params.fetchFn, requestInit: params.requestInit, - resolveFn: params.resolveFn, + requireHttps: true, + shouldAttachAuth: (url) => isUrlAllowed(url, params.authAllowHosts), }); - if (firstAttempt.ok) { - return firstAttempt; - } - if (!params.tokenProvider) { - return firstAttempt; - } - if (firstAttempt.status !== 401 && firstAttempt.status !== 403) { - return firstAttempt; - } - if (!isUrlAllowed(params.url, params.authAllowHosts)) { - return firstAttempt; - } - - const scopes = scopeCandidatesForUrl(params.url); - for (const scope of scopes) { - try { - const token = await params.tokenProvider.getAccessToken(scope); - const authHeaders = new Headers(params.requestInit?.headers); - authHeaders.set("Authorization", `Bearer ${token}`); - const authAttempt = await safeFetch({ - url: params.url, - allowHosts: params.allowHosts, - fetchFn, - requestInit: { - ...params.requestInit, - headers: authHeaders, - }, - resolveFn: params.resolveFn, - }); - if (authAttempt.ok) { - return authAttempt; - } - if (authAttempt.status !== 401 && authAttempt.status !== 403) { - 
continue; - } - - const finalUrl = - typeof authAttempt.url === "string" && authAttempt.url ? authAttempt.url : ""; - if (!finalUrl || finalUrl === params.url || !isUrlAllowed(finalUrl, params.authAllowHosts)) { - continue; - } - const redirectedAuthAttempt = await safeFetch({ - url: finalUrl, - allowHosts: params.allowHosts, - fetchFn, - requestInit: { - ...params.requestInit, - headers: authHeaders, - }, - resolveFn: params.resolveFn, - }); - if (redirectedAuthAttempt.ok) { - return redirectedAuthAttempt; - } - } catch { - // Try the next scope. - } - } - - return firstAttempt; } /** @@ -180,8 +117,6 @@ export async function downloadMSTeamsAttachments(params: { fetchFn?: typeof fetch; /** When true, embeds original filename in stored path for later extraction. */ preserveFilenames?: boolean; - /** Override DNS resolver for testing (anti-SSRF IP validation). */ - resolveFn?: (hostname: string) => Promise<{ address: string }>; }): Promise { const list = Array.isArray(params.attachments) ? 
params.attachments : []; if (list.length === 0) { @@ -189,6 +124,7 @@ export async function downloadMSTeamsAttachments(params: { } const allowHosts = resolveAllowedHosts(params.allowHosts); const authAllowHosts = resolveAuthAllowedHosts(params.authAllowHosts); + const ssrfPolicy = resolveMediaSsrfPolicy(allowHosts); // Download ANY downloadable attachment (not just images) const downloadable = list.filter(isDownloadableAttachment); @@ -257,15 +193,14 @@ export async function downloadMSTeamsAttachments(params: { contentTypeHint: candidate.contentTypeHint, placeholder: candidate.placeholder, preserveFilenames: params.preserveFilenames, + ssrfPolicy, fetchImpl: (input, init) => fetchWithAuthFallback({ url: resolveRequestUrl(input), tokenProvider: params.tokenProvider, fetchFn: params.fetchFn, requestInit: init, - allowHosts, authAllowHosts, - resolveFn: params.resolveFn, }), }); out.push(media); diff --git a/extensions/msteams/src/attachments/graph.ts b/extensions/msteams/src/attachments/graph.ts index 8ae4b3f424b..1097d0caeb1 100644 --- a/extensions/msteams/src/attachments/graph.ts +++ b/extensions/msteams/src/attachments/graph.ts @@ -1,3 +1,4 @@ +import { fetchWithSsrFGuard, type SsrFPolicy } from "openclaw/plugin-sdk"; import { getMSTeamsRuntime } from "../runtime.js"; import { downloadMSTeamsAttachments } from "./download.js"; import { downloadAndStoreMSTeamsRemoteMedia } from "./remote-media.js"; @@ -7,9 +8,9 @@ import { isRecord, isUrlAllowed, normalizeContentType, + resolveMediaSsrfPolicy, resolveRequestUrl, resolveAllowedHosts, - safeFetch, } from "./shared.js"; import type { MSTeamsAccessTokenProvider, @@ -119,20 +120,31 @@ async function fetchGraphCollection(params: { url: string; accessToken: string; fetchFn?: typeof fetch; + ssrfPolicy?: SsrFPolicy; }): Promise<{ status: number; items: T[] }> { const fetchFn = params.fetchFn ?? 
fetch; - const res = await fetchFn(params.url, { - headers: { Authorization: `Bearer ${params.accessToken}` }, + const { response, release } = await fetchWithSsrFGuard({ + url: params.url, + fetchImpl: fetchFn, + init: { + headers: { Authorization: `Bearer ${params.accessToken}` }, + }, + policy: params.ssrfPolicy, + auditContext: "msteams.graph.collection", }); - const status = res.status; - if (!res.ok) { - return { status, items: [] }; - } try { - const data = (await res.json()) as { value?: T[] }; - return { status, items: Array.isArray(data.value) ? data.value : [] }; - } catch { - return { status, items: [] }; + const status = response.status; + if (!response.ok) { + return { status, items: [] }; + } + try { + const data = (await response.json()) as { value?: T[] }; + return { status, items: Array.isArray(data.value) ? data.value : [] }; + } catch { + return { status, items: [] }; + } + } finally { + await release(); } } @@ -164,11 +176,13 @@ async function downloadGraphHostedContent(params: { maxBytes: number; fetchFn?: typeof fetch; preserveFilenames?: boolean; + ssrfPolicy?: SsrFPolicy; }): Promise<{ media: MSTeamsInboundMedia[]; status: number; count: number }> { const hosted = await fetchGraphCollection({ url: `${params.messageUrl}/hostedContents`, accessToken: params.accessToken, fetchFn: params.fetchFn, + ssrfPolicy: params.ssrfPolicy, }); if (hosted.items.length === 0) { return { media: [], status: hosted.status, count: 0 }; @@ -228,6 +242,7 @@ export async function downloadMSTeamsGraphMedia(params: { return { media: [] }; } const allowHosts = resolveAllowedHosts(params.allowHosts); + const ssrfPolicy = resolveMediaSsrfPolicy(allowHosts); const messageUrl = params.messageUrl; let accessToken: string; try { @@ -241,64 +256,67 @@ export async function downloadMSTeamsGraphMedia(params: { const sharePointMedia: MSTeamsInboundMedia[] = []; const downloadedReferenceUrls = new Set(); try { - const msgRes = await fetchFn(messageUrl, { - headers: { 
Authorization: `Bearer ${accessToken}` }, + const { response: msgRes, release } = await fetchWithSsrFGuard({ + url: messageUrl, + fetchImpl: fetchFn, + init: { + headers: { Authorization: `Bearer ${accessToken}` }, + }, + policy: ssrfPolicy, + auditContext: "msteams.graph.message", }); - if (msgRes.ok) { - const msgData = (await msgRes.json()) as { - body?: { content?: string; contentType?: string }; - attachments?: Array<{ - id?: string; - contentUrl?: string; - contentType?: string; - name?: string; - }>; - }; + try { + if (msgRes.ok) { + const msgData = (await msgRes.json()) as { + body?: { content?: string; contentType?: string }; + attachments?: Array<{ + id?: string; + contentUrl?: string; + contentType?: string; + name?: string; + }>; + }; - // Extract SharePoint file attachments (contentType: "reference") - // Download any file type, not just images - const spAttachments = (msgData.attachments ?? []).filter( - (a) => a.contentType === "reference" && a.contentUrl && a.name, - ); - for (const att of spAttachments) { - const name = att.name ?? "file"; + // Extract SharePoint file attachments (contentType: "reference") + // Download any file type, not just images + const spAttachments = (msgData.attachments ?? []).filter( + (a) => a.contentType === "reference" && a.contentUrl && a.name, + ); + for (const att of spAttachments) { + const name = att.name ?? 
"file"; - try { - // SharePoint URLs need to be accessed via Graph shares API - const shareUrl = att.contentUrl!; - if (!isUrlAllowed(shareUrl, allowHosts)) { - continue; + try { + // SharePoint URLs need to be accessed via Graph shares API + const shareUrl = att.contentUrl!; + if (!isUrlAllowed(shareUrl, allowHosts)) { + continue; + } + const encodedUrl = Buffer.from(shareUrl).toString("base64url"); + const sharesUrl = `${GRAPH_ROOT}/shares/u!${encodedUrl}/driveItem/content`; + + const media = await downloadAndStoreMSTeamsRemoteMedia({ + url: sharesUrl, + filePathHint: name, + maxBytes: params.maxBytes, + contentTypeHint: "application/octet-stream", + preserveFilenames: params.preserveFilenames, + ssrfPolicy, + fetchImpl: async (input, init) => { + const requestUrl = resolveRequestUrl(input); + const headers = new Headers(init?.headers); + headers.set("Authorization", `Bearer ${accessToken}`); + return await fetchFn(requestUrl, { ...init, headers }); + }, + }); + sharePointMedia.push(media); + downloadedReferenceUrls.add(shareUrl); + } catch { + // Ignore SharePoint download failures. } - const encodedUrl = Buffer.from(shareUrl).toString("base64url"); - const sharesUrl = `${GRAPH_ROOT}/shares/u!${encodedUrl}/driveItem/content`; - - const media = await downloadAndStoreMSTeamsRemoteMedia({ - url: sharesUrl, - filePathHint: name, - maxBytes: params.maxBytes, - contentTypeHint: "application/octet-stream", - preserveFilenames: params.preserveFilenames, - fetchImpl: async (input, init) => { - const requestUrl = resolveRequestUrl(input); - const headers = new Headers(init?.headers); - headers.set("Authorization", `Bearer ${accessToken}`); - return await safeFetch({ - url: requestUrl, - allowHosts, - fetchFn, - requestInit: { - ...init, - headers, - }, - }); - }, - }); - sharePointMedia.push(media); - downloadedReferenceUrls.add(shareUrl); - } catch { - // Ignore SharePoint download failures. 
} } + } finally { + await release(); } } catch { // Ignore message fetch failures. @@ -310,12 +328,14 @@ export async function downloadMSTeamsGraphMedia(params: { maxBytes: params.maxBytes, fetchFn: params.fetchFn, preserveFilenames: params.preserveFilenames, + ssrfPolicy, }); const attachments = await fetchGraphCollection({ url: `${messageUrl}/attachments`, accessToken, fetchFn: params.fetchFn, + ssrfPolicy, }); const normalizedAttachments = attachments.items.map(normalizeGraphAttachment); diff --git a/extensions/msteams/src/attachments/remote-media.ts b/extensions/msteams/src/attachments/remote-media.ts index 20842b2b5a0..162a797b57f 100644 --- a/extensions/msteams/src/attachments/remote-media.ts +++ b/extensions/msteams/src/attachments/remote-media.ts @@ -1,3 +1,4 @@ +import type { SsrFPolicy } from "openclaw/plugin-sdk"; import { getMSTeamsRuntime } from "../runtime.js"; import { inferPlaceholder } from "./shared.js"; import type { MSTeamsInboundMedia } from "./types.js"; @@ -9,6 +10,7 @@ export async function downloadAndStoreMSTeamsRemoteMedia(params: { filePathHint: string; maxBytes: number; fetchImpl?: FetchLike; + ssrfPolicy?: SsrFPolicy; contentTypeHint?: string; placeholder?: string; preserveFilenames?: boolean; @@ -18,6 +20,7 @@ export async function downloadAndStoreMSTeamsRemoteMedia(params: { fetchImpl: params.fetchImpl, filePathHint: params.filePathHint, maxBytes: params.maxBytes, + ssrfPolicy: params.ssrfPolicy, }); const mime = await getMSTeamsRuntime().media.detectMime({ buffer: fetched.buffer, diff --git a/extensions/msteams/src/attachments/shared.test.ts b/extensions/msteams/src/attachments/shared.test.ts index 9df64c51ab4..a5d0a4bef5a 100644 --- a/extensions/msteams/src/attachments/shared.test.ts +++ b/extensions/msteams/src/attachments/shared.test.ts @@ -1,281 +1,28 @@ -import { describe, expect, it, vi } from "vitest"; -import { isPrivateOrReservedIP, resolveAndValidateIP, safeFetch } from "./shared.js"; +import { describe, expect, it } from 
"vitest"; +import { + isUrlAllowed, + resolveAllowedHosts, + resolveAuthAllowedHosts, + resolveMediaSsrfPolicy, +} from "./shared.js"; -// ─── Helpers ───────────────────────────────────────────────────────────────── - -const publicResolve = async () => ({ address: "13.107.136.10" }); -const privateResolve = (ip: string) => async () => ({ address: ip }); -const failingResolve = async () => { - throw new Error("DNS failure"); -}; - -function mockFetchWithRedirect(redirectMap: Record, finalBody = "ok") { - return vi.fn(async (url: string, init?: RequestInit) => { - const target = redirectMap[url]; - if (target && init?.redirect === "manual") { - return new Response(null, { - status: 302, - headers: { location: target }, - }); - } - return new Response(finalBody, { status: 200 }); - }); -} - -// ─── isPrivateOrReservedIP ─────────────────────────────────────────────────── - -describe("isPrivateOrReservedIP", () => { - it.each([ - ["10.0.0.1", true], - ["10.255.255.255", true], - ["172.16.0.1", true], - ["172.31.255.255", true], - ["172.15.0.1", false], - ["172.32.0.1", false], - ["192.168.0.1", true], - ["192.168.255.255", true], - ["127.0.0.1", true], - ["127.255.255.255", true], - ["169.254.0.1", true], - ["169.254.169.254", true], - ["0.0.0.0", true], - ["8.8.8.8", false], - ["13.107.136.10", false], - ["52.96.0.1", false], - ] as const)("IPv4 %s → %s", (ip, expected) => { - expect(isPrivateOrReservedIP(ip)).toBe(expected); +describe("msteams attachment allowlists", () => { + it("normalizes wildcard host lists", () => { + expect(resolveAllowedHosts(["*", "graph.microsoft.com"])).toEqual(["*"]); + expect(resolveAuthAllowedHosts(["*", "graph.microsoft.com"])).toEqual(["*"]); }); - it.each([ - ["::1", true], - ["::", true], - ["fe80::1", true], - ["fc00::1", true], - ["fd12:3456::1", true], - ["2001:0db8::1", false], - ["2620:1ec:c11::200", false], - // IPv4-mapped IPv6 addresses - ["::ffff:127.0.0.1", true], - ["::ffff:10.0.0.1", true], - ["::ffff:192.168.1.1", 
true], - ["::ffff:169.254.169.254", true], - ["::ffff:8.8.8.8", false], - ["::ffff:13.107.136.10", false], - ] as const)("IPv6 %s → %s", (ip, expected) => { - expect(isPrivateOrReservedIP(ip)).toBe(expected); + it("requires https and host suffix match", () => { + const allowHosts = resolveAllowedHosts(["sharepoint.com"]); + expect(isUrlAllowed("https://contoso.sharepoint.com/file.png", allowHosts)).toBe(true); + expect(isUrlAllowed("http://contoso.sharepoint.com/file.png", allowHosts)).toBe(false); + expect(isUrlAllowed("https://evil.example.com/file.png", allowHosts)).toBe(false); }); - it.each([ - ["999.999.999.999", true], - ["256.0.0.1", true], - ["10.0.0.256", true], - ["-1.0.0.1", false], - ["1.2.3.4.5", false], - ["0:0:0:0:0:0:0:1", true], - ] as const)("malformed/expanded %s → %s (SDK fails closed)", (ip, expected) => { - expect(isPrivateOrReservedIP(ip)).toBe(expected); - }); -}); - -// ─── resolveAndValidateIP ──────────────────────────────────────────────────── - -describe("resolveAndValidateIP", () => { - it("accepts a hostname resolving to a public IP", async () => { - const ip = await resolveAndValidateIP("teams.sharepoint.com", publicResolve); - expect(ip).toBe("13.107.136.10"); - }); - - it("rejects a hostname resolving to 10.x.x.x", async () => { - await expect(resolveAndValidateIP("evil.test", privateResolve("10.0.0.1"))).rejects.toThrow( - "private/reserved IP", - ); - }); - - it("rejects a hostname resolving to 169.254.169.254", async () => { - await expect( - resolveAndValidateIP("evil.test", privateResolve("169.254.169.254")), - ).rejects.toThrow("private/reserved IP"); - }); - - it("rejects a hostname resolving to loopback", async () => { - await expect(resolveAndValidateIP("evil.test", privateResolve("127.0.0.1"))).rejects.toThrow( - "private/reserved IP", - ); - }); - - it("rejects a hostname resolving to IPv6 loopback", async () => { - await expect(resolveAndValidateIP("evil.test", privateResolve("::1"))).rejects.toThrow( - 
"private/reserved IP", - ); - }); - - it("throws on DNS resolution failure", async () => { - await expect(resolveAndValidateIP("nonexistent.test", failingResolve)).rejects.toThrow( - "DNS resolution failed", - ); - }); -}); - -// ─── safeFetch ─────────────────────────────────────────────────────────────── - -describe("safeFetch", () => { - it("fetches a URL directly when no redirect occurs", async () => { - const fetchMock = vi.fn(async (_url: string, _init?: RequestInit) => { - return new Response("ok", { status: 200 }); - }); - const res = await safeFetch({ - url: "https://teams.sharepoint.com/file.pdf", - allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, - }); - expect(res.status).toBe(200); - expect(fetchMock).toHaveBeenCalledOnce(); - // Should have used redirect: "manual" - expect(fetchMock.mock.calls[0][1]).toHaveProperty("redirect", "manual"); - }); - - it("follows a redirect to an allowlisted host with public IP", async () => { - const fetchMock = mockFetchWithRedirect({ - "https://teams.sharepoint.com/file.pdf": "https://cdn.sharepoint.com/storage/file.pdf", - }); - const res = await safeFetch({ - url: "https://teams.sharepoint.com/file.pdf", - allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, - }); - expect(res.status).toBe(200); - expect(fetchMock).toHaveBeenCalledTimes(2); - }); - - it("blocks a redirect to a non-allowlisted host", async () => { - const fetchMock = mockFetchWithRedirect({ - "https://teams.sharepoint.com/file.pdf": "https://evil.example.com/steal", - }); - await expect( - safeFetch({ - url: "https://teams.sharepoint.com/file.pdf", - allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, - }), - ).rejects.toThrow("blocked by allowlist"); - // Should not have fetched the evil URL - expect(fetchMock).toHaveBeenCalledTimes(1); - }); - - it("blocks a redirect to an 
allowlisted host that resolves to a private IP (DNS rebinding)", async () => { - let callCount = 0; - const rebindingResolve = async () => { - callCount++; - // First call (initial URL) resolves to public IP - if (callCount === 1) return { address: "13.107.136.10" }; - // Second call (redirect target) resolves to private IP - return { address: "169.254.169.254" }; - }; - - const fetchMock = mockFetchWithRedirect({ - "https://teams.sharepoint.com/file.pdf": "https://evil.trafficmanager.net/metadata", - }); - await expect( - safeFetch({ - url: "https://teams.sharepoint.com/file.pdf", - allowHosts: ["sharepoint.com", "trafficmanager.net"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: rebindingResolve, - }), - ).rejects.toThrow("private/reserved IP"); - expect(fetchMock).toHaveBeenCalledTimes(1); - }); - - it("blocks when the initial URL resolves to a private IP", async () => { - const fetchMock = vi.fn(); - await expect( - safeFetch({ - url: "https://evil.sharepoint.com/file.pdf", - allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: privateResolve("10.0.0.1"), - }), - ).rejects.toThrow("Initial download URL blocked"); - expect(fetchMock).not.toHaveBeenCalled(); - }); - - it("blocks when initial URL DNS resolution fails", async () => { - const fetchMock = vi.fn(); - await expect( - safeFetch({ - url: "https://nonexistent.sharepoint.com/file.pdf", - allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: failingResolve, - }), - ).rejects.toThrow("Initial download URL blocked"); - expect(fetchMock).not.toHaveBeenCalled(); - }); - - it("follows multiple redirects when all are valid", async () => { - const fetchMock = vi.fn(async (url: string, init?: RequestInit) => { - if (url === "https://a.sharepoint.com/1" && init?.redirect === "manual") { - return new Response(null, { - status: 302, - headers: { location: "https://b.sharepoint.com/2" }, - }); - } - if (url === 
"https://b.sharepoint.com/2" && init?.redirect === "manual") { - return new Response(null, { - status: 302, - headers: { location: "https://c.sharepoint.com/3" }, - }); - } - return new Response("final", { status: 200 }); - }); - - const res = await safeFetch({ - url: "https://a.sharepoint.com/1", - allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, - }); - expect(res.status).toBe(200); - expect(fetchMock).toHaveBeenCalledTimes(3); - }); - - it("throws on too many redirects", async () => { - let counter = 0; - const fetchMock = vi.fn(async (_url: string, init?: RequestInit) => { - if (init?.redirect === "manual") { - counter++; - return new Response(null, { - status: 302, - headers: { location: `https://loop${counter}.sharepoint.com/x` }, - }); - } - return new Response("ok", { status: 200 }); - }); - - await expect( - safeFetch({ - url: "https://start.sharepoint.com/x", - allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, - }), - ).rejects.toThrow("Too many redirects"); - }); - - it("blocks redirect to HTTP (non-HTTPS)", async () => { - const fetchMock = mockFetchWithRedirect({ - "https://teams.sharepoint.com/file": "http://internal.sharepoint.com/file", - }); - await expect( - safeFetch({ - url: "https://teams.sharepoint.com/file", - allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, - }), - ).rejects.toThrow("blocked by allowlist"); + it("builds shared SSRF policy from suffix allowlist", () => { + expect(resolveMediaSsrfPolicy(["sharepoint.com"])).toEqual({ + hostnameAllowlist: ["sharepoint.com", "*.sharepoint.com"], + }); + expect(resolveMediaSsrfPolicy(["*"])).toBeUndefined(); }); }); diff --git a/extensions/msteams/src/attachments/shared.ts b/extensions/msteams/src/attachments/shared.ts index 50221e8eb9a..abb98791b32 100644 --- a/extensions/msteams/src/attachments/shared.ts +++ 
b/extensions/msteams/src/attachments/shared.ts @@ -1,5 +1,9 @@ -import { lookup } from "node:dns/promises"; -import { isPrivateIpAddress } from "openclaw/plugin-sdk"; +import { + buildHostnameAllowlistPolicyFromSuffixAllowlist, + isHttpsUrlAllowedByHostnameSuffixAllowlist, + normalizeHostnameSuffixAllowlist, +} from "openclaw/plugin-sdk"; +import type { SsrFPolicy } from "openclaw/plugin-sdk"; import type { MSTeamsAttachmentLike } from "./types.js"; type InlineImageCandidate = @@ -252,153 +256,18 @@ export function safeHostForUrl(url: string): string { } } -function normalizeAllowHost(value: string): string { - const trimmed = value.trim().toLowerCase(); - if (!trimmed) { - return ""; - } - if (trimmed === "*") { - return "*"; - } - return trimmed.replace(/^\*\.?/, ""); -} - export function resolveAllowedHosts(input?: string[]): string[] { - if (!Array.isArray(input) || input.length === 0) { - return DEFAULT_MEDIA_HOST_ALLOWLIST.slice(); - } - const normalized = input.map(normalizeAllowHost).filter(Boolean); - if (normalized.includes("*")) { - return ["*"]; - } - return normalized; + return normalizeHostnameSuffixAllowlist(input, DEFAULT_MEDIA_HOST_ALLOWLIST); } export function resolveAuthAllowedHosts(input?: string[]): string[] { - if (!Array.isArray(input) || input.length === 0) { - return DEFAULT_MEDIA_AUTH_HOST_ALLOWLIST.slice(); - } - const normalized = input.map(normalizeAllowHost).filter(Boolean); - if (normalized.includes("*")) { - return ["*"]; - } - return normalized; -} - -function isHostAllowed(host: string, allowlist: string[]): boolean { - if (allowlist.includes("*")) { - return true; - } - const normalized = host.toLowerCase(); - return allowlist.some((entry) => normalized === entry || normalized.endsWith(`.${entry}`)); + return normalizeHostnameSuffixAllowlist(input, DEFAULT_MEDIA_AUTH_HOST_ALLOWLIST); } export function isUrlAllowed(url: string, allowlist: string[]): boolean { - try { - const parsed = new URL(url); - if (parsed.protocol !== 
"https:") { - return false; - } - return isHostAllowed(parsed.hostname, allowlist); - } catch { - return false; - } + return isHttpsUrlAllowedByHostnameSuffixAllowlist(url, allowlist); } -/** - * Returns true if the given IPv4 or IPv6 address is in a private, loopback, - * or link-local range that must never be reached from media downloads. - * - * Delegates to the SDK's `isPrivateIpAddress` which handles IPv4-mapped IPv6, - * expanded notation, NAT64, 6to4, Teredo, octal IPv4, and fails closed on - * parse errors. - */ -export const isPrivateOrReservedIP: (ip: string) => boolean = isPrivateIpAddress; - -/** - * Resolve a hostname via DNS and reject private/reserved IPs. - * Throws if the resolved IP is private or resolution fails. - */ -export async function resolveAndValidateIP( - hostname: string, - resolveFn?: (hostname: string) => Promise<{ address: string }>, -): Promise { - const resolve = resolveFn ?? lookup; - let resolved: { address: string }; - try { - resolved = await resolve(hostname); - } catch { - throw new Error(`DNS resolution failed for "${hostname}"`); - } - if (isPrivateOrReservedIP(resolved.address)) { - throw new Error(`Hostname "${hostname}" resolves to private/reserved IP (${resolved.address})`); - } - return resolved.address; -} - -/** Maximum number of redirects to follow in safeFetch. */ -const MAX_SAFE_REDIRECTS = 5; - -/** - * Fetch a URL with redirect: "manual", validating each redirect target - * against the hostname allowlist and DNS-resolved IP (anti-SSRF). - * - * This prevents: - * - Auto-following redirects to non-allowlisted hosts - * - DNS rebinding attacks where an allowlisted domain resolves to a private IP - */ -export async function safeFetch(params: { - url: string; - allowHosts: string[]; - fetchFn?: typeof fetch; - requestInit?: RequestInit; - resolveFn?: (hostname: string) => Promise<{ address: string }>; -}): Promise { - const fetchFn = params.fetchFn ?? 
fetch; - const resolveFn = params.resolveFn; - let currentUrl = params.url; - - // Validate the initial URL's resolved IP - try { - const initialHost = new URL(currentUrl).hostname; - await resolveAndValidateIP(initialHost, resolveFn); - } catch { - throw new Error(`Initial download URL blocked: ${currentUrl}`); - } - - for (let i = 0; i <= MAX_SAFE_REDIRECTS; i++) { - const res = await fetchFn(currentUrl, { - ...params.requestInit, - redirect: "manual", - }); - - if (![301, 302, 303, 307, 308].includes(res.status)) { - return res; - } - - const location = res.headers.get("location"); - if (!location) { - return res; - } - - let redirectUrl: string; - try { - redirectUrl = new URL(location, currentUrl).toString(); - } catch { - throw new Error(`Invalid redirect URL: ${location}`); - } - - // Validate redirect target against hostname allowlist - if (!isUrlAllowed(redirectUrl, params.allowHosts)) { - throw new Error(`Media redirect target blocked by allowlist: ${redirectUrl}`); - } - - // Validate redirect target's resolved IP - const redirectHost = new URL(redirectUrl).hostname; - await resolveAndValidateIP(redirectHost, resolveFn); - - currentUrl = redirectUrl; - } - - throw new Error(`Too many redirects (>${MAX_SAFE_REDIRECTS})`); +export function resolveMediaSsrfPolicy(allowHosts: string[]): SsrFPolicy | undefined { + return buildHostnameAllowlistPolicyFromSuffixAllowlist(allowHosts); } diff --git a/extensions/msteams/src/messenger.test.ts b/extensions/msteams/src/messenger.test.ts index ba176019994..0f27cf2d382 100644 --- a/extensions/msteams/src/messenger.test.ts +++ b/extensions/msteams/src/messenger.test.ts @@ -92,12 +92,12 @@ describe("msteams messenger", () => { expect(messages).toEqual([]); }); - it("filters silent reply prefixes", () => { + it("does not filter non-exact silent reply prefixes", () => { const messages = renderReplyPayloadsToMessages( [{ text: `${SILENT_REPLY_TOKEN} -- ignored` }], { textChunkLimit: 4000, tableMode: "code" }, ); - 
expect(messages).toEqual([]); + expect(messages).toEqual([{ text: `${SILENT_REPLY_TOKEN} -- ignored` }]); }); it("splits media into separate messages by default", () => { diff --git a/extensions/msteams/src/monitor-handler.file-consent.test.ts b/extensions/msteams/src/monitor-handler.file-consent.test.ts new file mode 100644 index 00000000000..1fc6714a451 --- /dev/null +++ b/extensions/msteams/src/monitor-handler.file-consent.test.ts @@ -0,0 +1,234 @@ +import type { OpenClawConfig, PluginRuntime, RuntimeEnv } from "openclaw/plugin-sdk"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { MSTeamsConversationStore } from "./conversation-store.js"; +import type { MSTeamsAdapter } from "./messenger.js"; +import { + type MSTeamsActivityHandler, + type MSTeamsMessageHandlerDeps, + registerMSTeamsHandlers, +} from "./monitor-handler.js"; +import { clearPendingUploads, getPendingUpload, storePendingUpload } from "./pending-uploads.js"; +import type { MSTeamsPollStore } from "./polls.js"; +import { setMSTeamsRuntime } from "./runtime.js"; +import type { MSTeamsTurnContext } from "./sdk-types.js"; + +const fileConsentMockState = vi.hoisted(() => ({ + uploadToConsentUrl: vi.fn(), +})); + +vi.mock("./file-consent.js", async () => { + const actual = await vi.importActual("./file-consent.js"); + return { + ...actual, + uploadToConsentUrl: fileConsentMockState.uploadToConsentUrl, + }; +}); + +const runtimeStub: PluginRuntime = { + logging: { + shouldLogVerbose: () => false, + }, + channel: { + debounce: { + resolveInboundDebounceMs: () => 0, + createInboundDebouncer: () => ({ + enqueue: async () => {}, + }), + }, + }, +} as unknown as PluginRuntime; + +function createDeps(): MSTeamsMessageHandlerDeps { + const adapter: MSTeamsAdapter = { + continueConversation: async () => {}, + process: async () => {}, + }; + const conversationStore: MSTeamsConversationStore = { + upsert: async () => {}, + get: async () => null, + list: async () => [], + remove: async 
() => false, + findByUserId: async () => null, + }; + const pollStore: MSTeamsPollStore = { + createPoll: async () => {}, + getPoll: async () => null, + recordVote: async () => null, + }; + return { + cfg: {} as OpenClawConfig, + runtime: { + error: vi.fn(), + } as unknown as RuntimeEnv, + appId: "test-app-id", + adapter, + tokenProvider: { + getAccessToken: async () => "token", + }, + textLimit: 4000, + mediaMaxBytes: 8 * 1024 * 1024, + conversationStore, + pollStore, + log: { + info: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, + }; +} + +function createActivityHandler(): MSTeamsActivityHandler { + let handler: MSTeamsActivityHandler; + handler = { + onMessage: () => handler, + onMembersAdded: () => handler, + run: async () => {}, + }; + return handler; +} + +function createInvokeContext(params: { + conversationId: string; + uploadId: string; + action: "accept" | "decline"; +}): { context: MSTeamsTurnContext; sendActivity: ReturnType } { + const sendActivity = vi.fn(async () => ({ id: "activity-id" })); + const uploadInfo = + params.action === "accept" + ? 
{ + name: "secret.txt", + uploadUrl: "https://upload.example.com/put", + contentUrl: "https://content.example.com/file", + uniqueId: "unique-id", + fileType: "txt", + } + : undefined; + return { + context: { + activity: { + type: "invoke", + name: "fileConsent/invoke", + conversation: { id: params.conversationId }, + value: { + type: "fileUpload", + action: params.action, + uploadInfo, + context: { uploadId: params.uploadId }, + }, + }, + sendActivity, + sendActivities: async () => [], + } as unknown as MSTeamsTurnContext, + sendActivity, + }; +} + +describe("msteams file consent invoke authz", () => { + beforeEach(() => { + setMSTeamsRuntime(runtimeStub); + clearPendingUploads(); + fileConsentMockState.uploadToConsentUrl.mockReset(); + fileConsentMockState.uploadToConsentUrl.mockResolvedValue(undefined); + }); + + it("uploads when invoke conversation matches pending upload conversation", async () => { + const uploadId = storePendingUpload({ + buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), + filename: "secret.txt", + contentType: "text/plain", + conversationId: "19:victim@thread.v2", + }); + const deps = createDeps(); + const handler = registerMSTeamsHandlers(createActivityHandler(), deps); + const { context, sendActivity } = createInvokeContext({ + conversationId: "19:victim@thread.v2;messageid=abc123", + uploadId, + action: "accept", + }); + + await handler.run?.(context); + + // invokeResponse should be sent immediately + expect(sendActivity).toHaveBeenCalledWith( + expect.objectContaining({ + type: "invokeResponse", + }), + ); + + // Wait for async upload to complete + await vi.waitFor(() => { + expect(fileConsentMockState.uploadToConsentUrl).toHaveBeenCalledTimes(1); + }); + + expect(fileConsentMockState.uploadToConsentUrl).toHaveBeenCalledWith( + expect.objectContaining({ + url: "https://upload.example.com/put", + }), + ); + expect(getPendingUpload(uploadId)).toBeUndefined(); + }); + + it("rejects cross-conversation accept invoke and keeps pending upload", 
async () => { + const uploadId = storePendingUpload({ + buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), + filename: "secret.txt", + contentType: "text/plain", + conversationId: "19:victim@thread.v2", + }); + const deps = createDeps(); + const handler = registerMSTeamsHandlers(createActivityHandler(), deps); + const { context, sendActivity } = createInvokeContext({ + conversationId: "19:attacker@thread.v2", + uploadId, + action: "accept", + }); + + await handler.run?.(context); + + // invokeResponse should be sent immediately + expect(sendActivity).toHaveBeenCalledWith( + expect.objectContaining({ + type: "invokeResponse", + }), + ); + + // Wait for async handler to complete + await vi.waitFor(() => { + expect(sendActivity).toHaveBeenCalledWith( + "The file upload request has expired. Please try sending the file again.", + ); + }); + + expect(fileConsentMockState.uploadToConsentUrl).not.toHaveBeenCalled(); + expect(getPendingUpload(uploadId)).toBeDefined(); + }); + + it("ignores cross-conversation decline invoke and keeps pending upload", async () => { + const uploadId = storePendingUpload({ + buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), + filename: "secret.txt", + contentType: "text/plain", + conversationId: "19:victim@thread.v2", + }); + const deps = createDeps(); + const handler = registerMSTeamsHandlers(createActivityHandler(), deps); + const { context, sendActivity } = createInvokeContext({ + conversationId: "19:attacker@thread.v2", + uploadId, + action: "decline", + }); + + await handler.run?.(context); + + // invokeResponse should be sent immediately + expect(sendActivity).toHaveBeenCalledWith( + expect.objectContaining({ + type: "invokeResponse", + }), + ); + + expect(fileConsentMockState.uploadToConsentUrl).not.toHaveBeenCalled(); + expect(getPendingUpload(uploadId)).toBeDefined(); + expect(sendActivity).toHaveBeenCalledTimes(1); + }); +}); diff --git a/extensions/msteams/src/monitor-handler.ts b/extensions/msteams/src/monitor-handler.ts index 
d4b848fde5a..27d3e06929f 100644 --- a/extensions/msteams/src/monitor-handler.ts +++ b/extensions/msteams/src/monitor-handler.ts @@ -1,6 +1,7 @@ import type { OpenClawConfig, RuntimeEnv } from "openclaw/plugin-sdk"; import type { MSTeamsConversationStore } from "./conversation-store.js"; import { buildFileInfoCard, parseFileConsentInvoke, uploadToConsentUrl } from "./file-consent.js"; +import { normalizeMSTeamsConversationId } from "./inbound.js"; import type { MSTeamsAdapter } from "./messenger.js"; import { createMSTeamsMessageHandler } from "./monitor-handler/message-handler.js"; import type { MSTeamsMonitorLogger } from "./monitor-types.js"; @@ -42,6 +43,8 @@ async function handleFileConsentInvoke( context: MSTeamsTurnContext, log: MSTeamsMonitorLogger, ): Promise { + const expiredUploadMessage = + "The file upload request has expired. Please try sending the file again."; const activity = context.activity; if (activity.type !== "invoke" || activity.name !== "fileConsent/invoke") { return false; @@ -57,9 +60,24 @@ async function handleFileConsentInvoke( typeof consentResponse.context?.uploadId === "string" ? consentResponse.context.uploadId : undefined; + const pendingFile = getPendingUpload(uploadId); + if (pendingFile) { + const pendingConversationId = normalizeMSTeamsConversationId(pendingFile.conversationId); + const invokeConversationId = normalizeMSTeamsConversationId(activity.conversation?.id ?? 
""); + if (!invokeConversationId || pendingConversationId !== invokeConversationId) { + log.info("file consent conversation mismatch", { + uploadId, + expectedConversationId: pendingConversationId, + receivedConversationId: invokeConversationId || undefined, + }); + if (consentResponse.action === "accept") { + await context.sendActivity(expiredUploadMessage); + } + return true; + } + } if (consentResponse.action === "accept" && consentResponse.uploadInfo) { - const pendingFile = getPendingUpload(uploadId); if (pendingFile) { log.debug?.("user accepted file consent, uploading", { uploadId, @@ -101,9 +119,7 @@ async function handleFileConsentInvoke( } } else { log.debug?.("pending file not found for consent", { uploadId }); - await context.sendActivity( - "The file upload request has expired. Please try sending the file again.", - ); + await context.sendActivity(expiredUploadMessage); } } else { // User declined @@ -127,12 +143,14 @@ export function registerMSTeamsHandlers( const ctx = context as MSTeamsTurnContext; // Handle file consent invokes before passing to normal flow if (ctx.activity?.type === "invoke" && ctx.activity?.name === "fileConsent/invoke") { - const handled = await handleFileConsentInvoke(ctx, deps.log); - if (handled) { - // Send invoke response for file consent - await ctx.sendActivity({ type: "invokeResponse", value: { status: 200 } }); - return; - } + // Send invoke response IMMEDIATELY to prevent Teams timeout + await ctx.sendActivity({ type: "invokeResponse", value: { status: 200 } }); + + // Handle file upload asynchronously (don't await) + handleFileConsentInvoke(ctx, deps.log).catch((err) => { + deps.log.debug?.("file consent handler error", { error: String(err) }); + }); + return; } return originalRun.call(handler, context); }; diff --git a/extensions/msteams/src/monitor-handler/message-handler.authz.test.ts b/extensions/msteams/src/monitor-handler/message-handler.authz.test.ts index 124599147a8..2be36f89732 100644 --- 
a/extensions/msteams/src/monitor-handler/message-handler.authz.test.ts +++ b/extensions/msteams/src/monitor-handler/message-handler.authz.test.ts @@ -90,7 +90,10 @@ describe("msteams monitor handler authz", () => { sendActivity: vi.fn(async () => undefined), } as unknown as Parameters[0]); - expect(readAllowFromStore).toHaveBeenCalledWith("msteams"); + expect(readAllowFromStore).toHaveBeenCalledWith({ + channel: "msteams", + accountId: "default", + }); expect(conversationStore.upsert).not.toHaveBeenCalled(); }); }); diff --git a/extensions/msteams/src/monitor-handler/message-handler.ts b/extensions/msteams/src/monitor-handler/message-handler.ts index a87f704a340..520a158321e 100644 --- a/extensions/msteams/src/monitor-handler/message-handler.ts +++ b/extensions/msteams/src/monitor-handler/message-handler.ts @@ -1,14 +1,19 @@ import { + DEFAULT_ACCOUNT_ID, buildPendingHistoryContextFromMap, clearHistoryEntriesIfEnabled, DEFAULT_GROUP_HISTORY_LIMIT, + createScopedPairingAccess, logInboundDrop, recordPendingHistoryEntryIfEnabled, resolveControlCommandGate, resolveDefaultGroupPolicy, isDangerousNameMatchingEnabled, + readStoreAllowFromForDmPolicy, resolveMentionGating, formatAllowlistMatchMeta, + resolveEffectiveAllowFromLists, + resolveDmGroupAccessWithLists, type HistoryEntry, } from "openclaw/plugin-sdk"; import { @@ -54,6 +59,11 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) { log, } = deps; const core = getMSTeamsRuntime(); + const pairing = createScopedPairingAccess({ + core, + channel: "msteams", + accountId: DEFAULT_ACCOUNT_ID, + }); const logVerboseMessage = (message: string) => { if (core.logging.shouldLogVerbose()) { log.debug?.(message); @@ -127,70 +137,31 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) { const senderName = from.name ?? from.id; const senderId = from.aadObjectId ?? from.id; const dmPolicy = msteamsCfg?.dmPolicy ?? 
"pairing"; - const storedAllowFrom = - dmPolicy === "allowlist" - ? [] - : await core.channel.pairing.readAllowFromStore("msteams").catch(() => []); + const storedAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "msteams", + accountId: pairing.accountId, + dmPolicy, + readStore: pairing.readStoreForDmPolicy, + }); const useAccessGroups = cfg.commands?.useAccessGroups !== false; // Check DM policy for direct messages. const dmAllowFrom = msteamsCfg?.allowFrom ?? []; const configuredDmAllowFrom = dmAllowFrom.map((v) => String(v)); - const effectiveDmAllowFrom = [...configuredDmAllowFrom, ...storedAllowFrom]; - if (isDirectMessage && msteamsCfg) { - const allowFrom = dmAllowFrom; - - if (dmPolicy === "disabled") { - log.debug?.("dropping dm (dms disabled)"); - return; - } - - if (dmPolicy !== "open") { - const effectiveAllowFrom = [...allowFrom.map((v) => String(v)), ...storedAllowFrom]; - const allowNameMatching = isDangerousNameMatchingEnabled(msteamsCfg); - const allowMatch = resolveMSTeamsAllowlistMatch({ - allowFrom: effectiveAllowFrom, - senderId, - senderName, - allowNameMatching, - }); - - if (!allowMatch.allowed) { - if (dmPolicy === "pairing") { - const request = await core.channel.pairing.upsertPairingRequest({ - channel: "msteams", - id: senderId, - meta: { name: senderName }, - }); - if (request) { - log.info("msteams pairing request created", { - sender: senderId, - label: senderName, - }); - } - } - log.debug?.("dropping dm (not allowlisted)", { - sender: senderId, - label: senderName, - allowlistMatch: formatAllowlistMatchMeta(allowMatch), - }); - return; - } - } - } + const groupAllowFrom = msteamsCfg?.groupAllowFrom; + const resolvedAllowFromLists = resolveEffectiveAllowFromLists({ + allowFrom: configuredDmAllowFrom, + groupAllowFrom, + storeAllowFrom: storedAllowFrom, + dmPolicy, + }); const defaultGroupPolicy = resolveDefaultGroupPolicy(cfg); const groupPolicy = !isDirectMessage && msteamsCfg ? (msteamsCfg.groupPolicy ?? 
defaultGroupPolicy ?? "allowlist") : "disabled"; - const groupAllowFrom = - !isDirectMessage && msteamsCfg - ? (msteamsCfg.groupAllowFrom ?? - (msteamsCfg.allowFrom && msteamsCfg.allowFrom.length > 0 ? msteamsCfg.allowFrom : [])) - : []; - const effectiveGroupAllowFrom = - !isDirectMessage && msteamsCfg ? groupAllowFrom.map((v) => String(v)) : []; + const effectiveGroupAllowFrom = resolvedAllowFromLists.effectiveGroupAllowFrom; const teamId = activity.channelData?.team?.id; const teamName = activity.channelData?.team?.name; const channelName = activity.channelData?.channel?.name; @@ -201,6 +172,60 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) { conversationId, channelName, }); + const senderGroupPolicy = + groupPolicy === "disabled" + ? "disabled" + : effectiveGroupAllowFrom.length > 0 + ? "allowlist" + : "open"; + const access = resolveDmGroupAccessWithLists({ + isGroup: !isDirectMessage, + dmPolicy, + groupPolicy: senderGroupPolicy, + allowFrom: configuredDmAllowFrom, + groupAllowFrom, + storeAllowFrom: storedAllowFrom, + groupAllowFromFallbackToAllowFrom: false, + isSenderAllowed: (allowFrom) => + resolveMSTeamsAllowlistMatch({ + allowFrom, + senderId, + senderName, + allowNameMatching: isDangerousNameMatchingEnabled(msteamsCfg), + }).allowed, + }); + const effectiveDmAllowFrom = access.effectiveAllowFrom; + + if (isDirectMessage && msteamsCfg && access.decision !== "allow") { + if (access.reason === "dmPolicy=disabled") { + log.debug?.("dropping dm (dms disabled)"); + return; + } + const allowMatch = resolveMSTeamsAllowlistMatch({ + allowFrom: effectiveDmAllowFrom, + senderId, + senderName, + allowNameMatching: isDangerousNameMatchingEnabled(msteamsCfg), + }); + if (access.decision === "pairing") { + const request = await pairing.upsertPairingRequest({ + id: senderId, + meta: { name: senderName }, + }); + if (request) { + log.info("msteams pairing request created", { + sender: senderId, + label: senderName, + }); + } + } + 
log.debug?.("dropping dm (not allowlisted)", { + sender: senderId, + label: senderName, + allowlistMatch: formatAllowlistMatchMeta(allowMatch), + }); + return; + } if (!isDirectMessage && msteamsCfg) { if (groupPolicy === "disabled") { @@ -227,13 +252,12 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) { }); return; } - if (effectiveGroupAllowFrom.length > 0) { - const allowNameMatching = isDangerousNameMatchingEnabled(msteamsCfg); + if (effectiveGroupAllowFrom.length > 0 && access.decision !== "allow") { const allowMatch = resolveMSTeamsAllowlistMatch({ allowFrom: effectiveGroupAllowFrom, senderId, senderName, - allowNameMatching, + allowNameMatching: isDangerousNameMatchingEnabled(msteamsCfg), }); if (!allowMatch.allowed) { log.debug?.("dropping group message (not in groupAllowFrom)", { @@ -533,14 +557,20 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) { log.info("dispatching to agent", { sessionKey: route.sessionKey }); try { - const { queuedFinal, counts } = await core.channel.reply.dispatchReplyFromConfig({ - ctx: ctxPayload, - cfg, + const { queuedFinal, counts } = await core.channel.reply.withReplyDispatcher({ dispatcher, - replyOptions, + onSettled: () => { + markDispatchIdle(); + }, + run: () => + core.channel.reply.dispatchReplyFromConfig({ + ctx: ctxPayload, + cfg, + dispatcher, + replyOptions, + }), }); - markDispatchIdle(); log.info("dispatch complete", { queuedFinal, counts }); if (!queuedFinal) { diff --git a/extensions/nextcloud-talk/package.json b/extensions/nextcloud-talk/package.json index cd4639b1c0f..09aa3b9ed28 100644 --- a/extensions/nextcloud-talk/package.json +++ b/extensions/nextcloud-talk/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nextcloud-talk", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Nextcloud Talk channel plugin", "type": "module", "openclaw": { diff --git a/extensions/nextcloud-talk/src/channel.startup.test.ts 
b/extensions/nextcloud-talk/src/channel.startup.test.ts new file mode 100644 index 00000000000..68f8490efb9 --- /dev/null +++ b/extensions/nextcloud-talk/src/channel.startup.test.ts @@ -0,0 +1,115 @@ +import type { + ChannelAccountSnapshot, + ChannelGatewayContext, + OpenClawConfig, +} from "openclaw/plugin-sdk"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { createRuntimeEnv } from "../../test-utils/runtime-env.js"; +import type { ResolvedNextcloudTalkAccount } from "./accounts.js"; + +const hoisted = vi.hoisted(() => ({ + monitorNextcloudTalkProvider: vi.fn(), +})); + +vi.mock("./monitor.js", async () => { + const actual = await vi.importActual("./monitor.js"); + return { + ...actual, + monitorNextcloudTalkProvider: hoisted.monitorNextcloudTalkProvider, + }; +}); + +import { nextcloudTalkPlugin } from "./channel.js"; + +function createStartAccountCtx(params: { + account: ResolvedNextcloudTalkAccount; + abortSignal: AbortSignal; +}): ChannelGatewayContext { + const snapshot: ChannelAccountSnapshot = { + accountId: params.account.accountId, + configured: true, + enabled: true, + running: false, + }; + return { + accountId: params.account.accountId, + account: params.account, + cfg: {} as OpenClawConfig, + runtime: createRuntimeEnv(), + abortSignal: params.abortSignal, + log: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, + getStatus: () => snapshot, + setStatus: (next) => { + Object.assign(snapshot, next); + }, + }; +} + +function buildAccount(): ResolvedNextcloudTalkAccount { + return { + accountId: "default", + enabled: true, + baseUrl: "https://nextcloud.example.com", + secret: "secret", + secretSource: "config", + config: { + baseUrl: "https://nextcloud.example.com", + botSecret: "secret", + webhookPath: "/nextcloud-talk-webhook", + webhookPort: 8788, + }, + }; +} + +describe("nextcloudTalkPlugin gateway.startAccount", () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + it("keeps startAccount pending until 
abort, then stops the monitor", async () => { + const stop = vi.fn(); + hoisted.monitorNextcloudTalkProvider.mockResolvedValue({ stop }); + const abort = new AbortController(); + + const task = nextcloudTalkPlugin.gateway!.startAccount!( + createStartAccountCtx({ + account: buildAccount(), + abortSignal: abort.signal, + }), + ); + + await new Promise((resolve) => setTimeout(resolve, 20)); + + let settled = false; + void task.then(() => { + settled = true; + }); + + await new Promise((resolve) => setTimeout(resolve, 20)); + expect(settled).toBe(false); + expect(hoisted.monitorNextcloudTalkProvider).toHaveBeenCalledOnce(); + expect(stop).not.toHaveBeenCalled(); + + abort.abort(); + await task; + + expect(stop).toHaveBeenCalledOnce(); + }); + + it("stops immediately when startAccount receives an already-aborted signal", async () => { + const stop = vi.fn(); + hoisted.monitorNextcloudTalkProvider.mockResolvedValue({ stop }); + const abort = new AbortController(); + abort.abort(); + + await nextcloudTalkPlugin.gateway!.startAccount!( + createStartAccountCtx({ + account: buildAccount(), + abortSignal: abort.signal, + }), + ); + + expect(hoisted.monitorNextcloudTalkProvider).toHaveBeenCalledOnce(); + expect(stop).toHaveBeenCalledOnce(); + }); +}); diff --git a/extensions/nextcloud-talk/src/channel.ts b/extensions/nextcloud-talk/src/channel.ts index c0cfa8e44be..e49f057878c 100644 --- a/extensions/nextcloud-talk/src/channel.ts +++ b/extensions/nextcloud-talk/src/channel.ts @@ -12,6 +12,7 @@ import { type OpenClawConfig, type ChannelSetupInput, } from "openclaw/plugin-sdk"; +import { waitForAbortSignal } from "../../../src/infra/abort-signal.js"; import { listNextcloudTalkAccountIds, resolveDefaultNextcloudTalkAccountId, @@ -332,7 +333,9 @@ export const nextcloudTalkPlugin: ChannelPlugin = statusSink: (patch) => ctx.setStatus({ accountId: ctx.accountId, ...patch }), }); - return { stop }; + // Keep webhook channels pending for the account lifecycle. 
+ await waitForAbortSignal(ctx.abortSignal); + stop(); }, logoutAccount: async ({ accountId, cfg }) => { const nextCfg = { ...cfg } as OpenClawConfig; diff --git a/extensions/nextcloud-talk/src/inbound.authz.test.ts b/extensions/nextcloud-talk/src/inbound.authz.test.ts index 88a655ec442..6ceca861ad8 100644 --- a/extensions/nextcloud-talk/src/inbound.authz.test.ts +++ b/extensions/nextcloud-talk/src/inbound.authz.test.ts @@ -75,7 +75,10 @@ describe("nextcloud-talk inbound authz", () => { } as unknown as RuntimeEnv, }); - expect(readAllowFromStore).toHaveBeenCalledWith("nextcloud-talk"); + expect(readAllowFromStore).toHaveBeenCalledWith({ + channel: "nextcloud-talk", + accountId: "default", + }); expect(buildMentionRegexes).not.toHaveBeenCalled(); }); }); diff --git a/extensions/nextcloud-talk/src/inbound.ts b/extensions/nextcloud-talk/src/inbound.ts index 526249aa977..69b983b68cd 100644 --- a/extensions/nextcloud-talk/src/inbound.ts +++ b/extensions/nextcloud-talk/src/inbound.ts @@ -1,10 +1,12 @@ import { GROUP_POLICY_BLOCKED_LABEL, + createScopedPairingAccess, createNormalizedOutboundDeliverer, createReplyPrefixOptions, formatTextWithAttachmentLinks, logInboundDrop, - resolveControlCommandGate, + readStoreAllowFromForDmPolicy, + resolveDmGroupAccessWithCommandGate, resolveOutboundMediaUrls, resolveAllowlistProviderRuntimeGroupPolicy, resolveDefaultGroupPolicy, @@ -57,6 +59,11 @@ export async function handleNextcloudTalkInbound(params: { }): Promise { const { message, account, config, runtime, statusSink } = params; const core = getNextcloudTalkRuntime(); + const pairing = createScopedPairingAccess({ + core, + channel: CHANNEL_ID, + accountId: account.accountId, + }); const rawBody = message.text?.trim() ?? 
""; if (!rawBody) { @@ -96,10 +103,12 @@ export async function handleNextcloudTalkInbound(params: { const configAllowFrom = normalizeNextcloudTalkAllowlist(account.config.allowFrom); const configGroupAllowFrom = normalizeNextcloudTalkAllowlist(account.config.groupAllowFrom); - const storeAllowFrom = - dmPolicy === "allowlist" - ? [] - : await core.channel.pairing.readAllowFromStore(CHANNEL_ID).catch(() => []); + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: CHANNEL_ID, + accountId: account.accountId, + dmPolicy, + readStore: pairing.readStoreForDmPolicy, + }); const storeAllowList = normalizeNextcloudTalkAllowlist(storeAllowFrom); const roomMatch = resolveNextcloudTalkRoomMatch({ @@ -118,11 +127,6 @@ export async function handleNextcloudTalkInbound(params: { } const roomAllowFrom = normalizeNextcloudTalkAllowlist(roomConfig?.allowFrom); - const baseGroupAllowFrom = - configGroupAllowFrom.length > 0 ? configGroupAllowFrom : configAllowFrom; - - const effectiveAllowFrom = [...configAllowFrom, ...storeAllowList].filter(Boolean); - const effectiveGroupAllowFrom = [...baseGroupAllowFrom].filter(Boolean); const allowTextCommands = core.channel.commands.shouldHandleTextCommands({ cfg: config as OpenClawConfig, @@ -130,25 +134,33 @@ export async function handleNextcloudTalkInbound(params: { }); const useAccessGroups = (config.commands as Record | undefined)?.useAccessGroups !== false; - const senderAllowedForCommands = resolveNextcloudTalkAllowlistMatch({ - allowFrom: isGroup ? effectiveGroupAllowFrom : effectiveAllowFrom, - senderId, - }).allowed; const hasControlCommand = core.channel.text.hasControlCommand(rawBody, config as OpenClawConfig); - const commandGate = resolveControlCommandGate({ - useAccessGroups, - authorizers: [ - { - configured: (isGroup ? 
effectiveGroupAllowFrom : effectiveAllowFrom).length > 0, - allowed: senderAllowedForCommands, - }, - ], - allowTextCommands, - hasControlCommand, + const access = resolveDmGroupAccessWithCommandGate({ + isGroup, + dmPolicy, + groupPolicy, + allowFrom: configAllowFrom, + groupAllowFrom: configGroupAllowFrom, + storeAllowFrom: storeAllowList, + isSenderAllowed: (allowFrom) => + resolveNextcloudTalkAllowlistMatch({ + allowFrom, + senderId, + }).allowed, + command: { + useAccessGroups, + allowTextCommands, + hasControlCommand, + }, }); - const commandAuthorized = commandGate.commandAuthorized; + const commandAuthorized = access.commandAuthorized; + const effectiveGroupAllowFrom = access.effectiveGroupAllowFrom; if (isGroup) { + if (access.decision !== "allow") { + runtime.log?.(`nextcloud-talk: drop group sender ${senderId} (reason=${access.reason})`); + return; + } const groupAllow = resolveNextcloudTalkGroupAllow({ groupPolicy, outerAllowFrom: effectiveGroupAllowFrom, @@ -160,48 +172,35 @@ export async function handleNextcloudTalkInbound(params: { return; } } else { - if (dmPolicy === "disabled") { - runtime.log?.(`nextcloud-talk: drop DM sender=${senderId} (dmPolicy=disabled)`); - return; - } - if (dmPolicy !== "open") { - const dmAllowed = resolveNextcloudTalkAllowlistMatch({ - allowFrom: effectiveAllowFrom, - senderId, - }).allowed; - if (!dmAllowed) { - if (dmPolicy === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: CHANNEL_ID, - id: senderId, - meta: { name: senderName || undefined }, - }); - if (created) { - try { - await sendMessageNextcloudTalk( - roomToken, - core.channel.pairing.buildPairingReply({ - channel: CHANNEL_ID, - idLine: `Your Nextcloud user id: ${senderId}`, - code, - }), - { accountId: account.accountId }, - ); - statusSink?.({ lastOutboundAt: Date.now() }); - } catch (err) { - runtime.error?.( - `nextcloud-talk: pairing reply failed for ${senderId}: ${String(err)}`, - ); - } + if 
(access.decision !== "allow") { + if (access.decision === "pairing") { + const { code, created } = await pairing.upsertPairingRequest({ + id: senderId, + meta: { name: senderName || undefined }, + }); + if (created) { + try { + await sendMessageNextcloudTalk( + roomToken, + core.channel.pairing.buildPairingReply({ + channel: CHANNEL_ID, + idLine: `Your Nextcloud user id: ${senderId}`, + code, + }), + { accountId: account.accountId }, + ); + statusSink?.({ lastOutboundAt: Date.now() }); + } catch (err) { + runtime.error?.(`nextcloud-talk: pairing reply failed for ${senderId}: ${String(err)}`); } } - runtime.log?.(`nextcloud-talk: drop DM sender ${senderId} (dmPolicy=${dmPolicy})`); - return; } + runtime.log?.(`nextcloud-talk: drop DM sender ${senderId} (reason=${access.reason})`); + return; } } - if (isGroup && commandGate.shouldBlock) { + if (access.shouldBlockControlCommand) { logInboundDrop({ log: (message) => runtime.log?.(message), channel: CHANNEL_ID, diff --git a/extensions/nextcloud-talk/src/monitor.auth-order.test.ts b/extensions/nextcloud-talk/src/monitor.auth-order.test.ts index f2b4b65054d..6cc149dde47 100644 --- a/extensions/nextcloud-talk/src/monitor.auth-order.test.ts +++ b/extensions/nextcloud-talk/src/monitor.auth-order.test.ts @@ -1,50 +1,5 @@ -import { type AddressInfo } from "node:net"; -import { afterEach, describe, expect, it, vi } from "vitest"; -import { createNextcloudTalkWebhookServer } from "./monitor.js"; - -type WebhookHarness = { - webhookUrl: string; - stop: () => Promise; -}; - -const cleanupFns: Array<() => Promise> = []; - -afterEach(async () => { - while (cleanupFns.length > 0) { - const cleanup = cleanupFns.pop(); - if (cleanup) { - await cleanup(); - } - } -}); - -async function startWebhookServer(params: { - path: string; - maxBodyBytes: number; - readBody?: (req: import("node:http").IncomingMessage, maxBodyBytes: number) => Promise; -}): Promise { - const { server, start } = createNextcloudTalkWebhookServer({ - port: 0, - host: 
"127.0.0.1", - path: params.path, - secret: "nextcloud-secret", - maxBodyBytes: params.maxBodyBytes, - readBody: params.readBody, - onMessage: vi.fn(), - }); - await start(); - const address = server.address() as AddressInfo | null; - if (!address) { - throw new Error("missing server address"); - } - return { - webhookUrl: `http://127.0.0.1:${address.port}${params.path}`, - stop: () => - new Promise((resolve) => { - server.close(() => resolve()); - }), - }; -} +import { describe, expect, it, vi } from "vitest"; +import { startWebhookServer } from "./monitor.test-harness.js"; describe("createNextcloudTalkWebhookServer auth order", () => { it("rejects missing signature headers before reading request body", async () => { @@ -55,8 +10,8 @@ describe("createNextcloudTalkWebhookServer auth order", () => { path: "/nextcloud-auth-order", maxBodyBytes: 128, readBody, + onMessage: vi.fn(), }); - cleanupFns.push(harness.stop); const response = await fetch(harness.webhookUrl, { method: "POST", diff --git a/extensions/nextcloud-talk/src/monitor.backend.test.ts b/extensions/nextcloud-talk/src/monitor.backend.test.ts new file mode 100644 index 00000000000..aaf9a30a9c8 --- /dev/null +++ b/extensions/nextcloud-talk/src/monitor.backend.test.ts @@ -0,0 +1,46 @@ +import { describe, expect, it, vi } from "vitest"; +import { startWebhookServer } from "./monitor.test-harness.js"; +import { generateNextcloudTalkSignature } from "./signature.js"; + +describe("createNextcloudTalkWebhookServer backend allowlist", () => { + it("rejects requests from unexpected backend origins", async () => { + const onMessage = vi.fn(async () => {}); + const harness = await startWebhookServer({ + path: "/nextcloud-backend-check", + isBackendAllowed: (backend) => backend === "https://nextcloud.expected", + onMessage, + }); + + const payload = { + type: "Create", + actor: { type: "Person", id: "alice", name: "Alice" }, + object: { + type: "Note", + id: "msg-1", + name: "hello", + content: "hello", + mediaType: 
"text/plain", + }, + target: { type: "Collection", id: "room-1", name: "Room 1" }, + }; + const body = JSON.stringify(payload); + const { random, signature } = generateNextcloudTalkSignature({ + body, + secret: "nextcloud-secret", + }); + const response = await fetch(harness.webhookUrl, { + method: "POST", + headers: { + "content-type": "application/json", + "x-nextcloud-talk-random": random, + "x-nextcloud-talk-signature": signature, + "x-nextcloud-talk-backend": "https://nextcloud.unexpected", + }, + body, + }); + + expect(response.status).toBe(401); + expect(await response.json()).toEqual({ error: "Invalid backend" }); + expect(onMessage).not.toHaveBeenCalled(); + }); +}); diff --git a/extensions/nextcloud-talk/src/monitor.replay.test.ts b/extensions/nextcloud-talk/src/monitor.replay.test.ts new file mode 100644 index 00000000000..387e7a8304f --- /dev/null +++ b/extensions/nextcloud-talk/src/monitor.replay.test.ts @@ -0,0 +1,67 @@ +import { describe, expect, it, vi } from "vitest"; +import { startWebhookServer } from "./monitor.test-harness.js"; +import { generateNextcloudTalkSignature } from "./signature.js"; +import type { NextcloudTalkInboundMessage } from "./types.js"; + +function createSignedRequest(body: string): { random: string; signature: string } { + return generateNextcloudTalkSignature({ + body, + secret: "nextcloud-secret", + }); +} + +describe("createNextcloudTalkWebhookServer replay handling", () => { + it("acknowledges replayed requests and skips onMessage side effects", async () => { + const seen = new Set(); + const onMessage = vi.fn(async () => {}); + const shouldProcessMessage = vi.fn(async (message: NextcloudTalkInboundMessage) => { + if (seen.has(message.messageId)) { + return false; + } + seen.add(message.messageId); + return true; + }); + const harness = await startWebhookServer({ + path: "/nextcloud-replay", + shouldProcessMessage, + onMessage, + }); + + const payload = { + type: "Create", + actor: { type: "Person", id: "alice", name: 
"Alice" }, + object: { + type: "Note", + id: "msg-1", + name: "hello", + content: "hello", + mediaType: "text/plain", + }, + target: { type: "Collection", id: "room-1", name: "Room 1" }, + }; + const body = JSON.stringify(payload); + const { random, signature } = createSignedRequest(body); + const headers = { + "content-type": "application/json", + "x-nextcloud-talk-random": random, + "x-nextcloud-talk-signature": signature, + "x-nextcloud-talk-backend": "https://nextcloud.example", + }; + + const first = await fetch(harness.webhookUrl, { + method: "POST", + headers, + body, + }); + const second = await fetch(harness.webhookUrl, { + method: "POST", + headers, + body, + }); + + expect(first.status).toBe(200); + expect(second.status).toBe(200); + expect(shouldProcessMessage).toHaveBeenCalledTimes(2); + expect(onMessage).toHaveBeenCalledTimes(1); + }); +}); diff --git a/extensions/nextcloud-talk/src/monitor.test-harness.ts b/extensions/nextcloud-talk/src/monitor.test-harness.ts new file mode 100644 index 00000000000..f0daf42e8d5 --- /dev/null +++ b/extensions/nextcloud-talk/src/monitor.test-harness.ts @@ -0,0 +1,59 @@ +import { type AddressInfo } from "node:net"; +import { afterEach } from "vitest"; +import { createNextcloudTalkWebhookServer } from "./monitor.js"; +import type { NextcloudTalkWebhookServerOptions } from "./types.js"; + +export type WebhookHarness = { + webhookUrl: string; + stop: () => Promise; +}; + +const cleanupFns: Array<() => Promise> = []; + +afterEach(async () => { + while (cleanupFns.length > 0) { + const cleanup = cleanupFns.pop(); + if (cleanup) { + await cleanup(); + } + } +}); + +export type StartWebhookServerParams = Omit< + NextcloudTalkWebhookServerOptions, + "port" | "host" | "path" | "secret" +> & { + path: string; + secret?: string; + host?: string; + port?: number; +}; + +export async function startWebhookServer( + params: StartWebhookServerParams, +): Promise { + const host = params.host ?? "127.0.0.1"; + const port = params.port ?? 
0; + const secret = params.secret ?? "nextcloud-secret"; + const { server, start } = createNextcloudTalkWebhookServer({ + ...params, + port, + host, + secret, + }); + await start(); + const address = server.address() as AddressInfo | null; + if (!address) { + throw new Error("missing server address"); + } + + const harness: WebhookHarness = { + webhookUrl: `http://${host}:${address.port}${params.path}`, + stop: () => + new Promise((resolve) => { + server.close(() => resolve()); + }), + }; + cleanupFns.push(harness.stop); + return harness; +} diff --git a/extensions/nextcloud-talk/src/monitor.ts b/extensions/nextcloud-talk/src/monitor.ts index 4b68a3c4d0b..2de886864b7 100644 --- a/extensions/nextcloud-talk/src/monitor.ts +++ b/extensions/nextcloud-talk/src/monitor.ts @@ -1,4 +1,5 @@ import { createServer, type IncomingMessage, type Server, type ServerResponse } from "node:http"; +import os from "node:os"; import { createLoggerBackedRuntime, type RuntimeEnv, @@ -8,11 +9,13 @@ import { } from "openclaw/plugin-sdk"; import { resolveNextcloudTalkAccount } from "./accounts.js"; import { handleNextcloudTalkInbound } from "./inbound.js"; +import { createNextcloudTalkReplayGuard } from "./replay-guard.js"; import { getNextcloudTalkRuntime } from "./runtime.js"; import { extractNextcloudTalkHeaders, verifyNextcloudTalkSignature } from "./signature.js"; import type { CoreConfig, NextcloudTalkInboundMessage, + NextcloudTalkWebhookHeaders, NextcloudTalkWebhookPayload, NextcloudTalkWebhookServerOptions, } from "./types.js"; @@ -23,6 +26,14 @@ const DEFAULT_WEBHOOK_PATH = "/nextcloud-talk-webhook"; const DEFAULT_WEBHOOK_MAX_BODY_BYTES = 1024 * 1024; const DEFAULT_WEBHOOK_BODY_TIMEOUT_MS = 30_000; const HEALTH_PATH = "/healthz"; +const WEBHOOK_ERRORS = { + missingSignatureHeaders: "Missing signature headers", + invalidBackend: "Invalid backend", + invalidSignature: "Invalid signature", + invalidPayloadFormat: "Invalid payload format", + payloadTooLarge: "Payload too large", + 
internalServerError: "Internal server error", +} as const; function formatError(err: unknown): string { if (err instanceof Error) { @@ -31,6 +42,14 @@ function formatError(err: unknown): string { return typeof err === "string" ? err : JSON.stringify(err); } +function normalizeOrigin(value: string): string | null { + try { + return new URL(value).origin.toLowerCase(); + } catch { + return null; + } +} + function parseWebhookPayload(body: string): NextcloudTalkWebhookPayload | null { try { const data = JSON.parse(body); @@ -51,6 +70,83 @@ function parseWebhookPayload(body: string): NextcloudTalkWebhookPayload | null { } } +function writeJsonResponse( + res: ServerResponse, + status: number, + body?: Record, +): void { + if (body) { + res.writeHead(status, { "Content-Type": "application/json" }); + res.end(JSON.stringify(body)); + return; + } + res.writeHead(status); + res.end(); +} + +function writeWebhookError(res: ServerResponse, status: number, error: string): void { + if (res.headersSent) { + return; + } + writeJsonResponse(res, status, { error }); +} + +function validateWebhookHeaders(params: { + req: IncomingMessage; + res: ServerResponse; + isBackendAllowed?: (backend: string) => boolean; +}): NextcloudTalkWebhookHeaders | null { + const headers = extractNextcloudTalkHeaders( + params.req.headers as Record, + ); + if (!headers) { + writeWebhookError(params.res, 400, WEBHOOK_ERRORS.missingSignatureHeaders); + return null; + } + if (params.isBackendAllowed && !params.isBackendAllowed(headers.backend)) { + writeWebhookError(params.res, 401, WEBHOOK_ERRORS.invalidBackend); + return null; + } + return headers; +} + +function verifyWebhookSignature(params: { + headers: NextcloudTalkWebhookHeaders; + body: string; + secret: string; + res: ServerResponse; +}): boolean { + const isValid = verifyNextcloudTalkSignature({ + signature: params.headers.signature, + random: params.headers.random, + body: params.body, + secret: params.secret, + }); + if (!isValid) { + 
writeWebhookError(params.res, 401, WEBHOOK_ERRORS.invalidSignature); + return false; + } + return true; +} + +function decodeWebhookCreateMessage(params: { + body: string; + res: ServerResponse; +}): + | { kind: "message"; message: NextcloudTalkInboundMessage } + | { kind: "ignore" } + | { kind: "invalid" } { + const payload = parseWebhookPayload(params.body); + if (!payload) { + writeWebhookError(params.res, 400, WEBHOOK_ERRORS.invalidPayloadFormat); + return { kind: "invalid" }; + } + if (payload.type !== "Create") { + return { kind: "ignore" }; + } + return { kind: "message", message: payloadToInboundMessage(payload) }; +} + function payloadToInboundMessage( payload: NextcloudTalkWebhookPayload, ): NextcloudTalkInboundMessage { @@ -93,6 +189,8 @@ export function createNextcloudTalkWebhookServer(opts: NextcloudTalkWebhookServe ? Math.floor(opts.maxBodyBytes) : DEFAULT_WEBHOOK_MAX_BODY_BYTES; const readBody = opts.readBody ?? readNextcloudTalkWebhookBody; + const isBackendAllowed = opts.isBackendAllowed; + const shouldProcessMessage = opts.shouldProcessMessage; const server = createServer(async (req: IncomingMessage, res: ServerResponse) => { if (req.url === HEALTH_PATH) { @@ -108,47 +206,49 @@ export function createNextcloudTalkWebhookServer(opts: NextcloudTalkWebhookServe } try { - const headers = extractNextcloudTalkHeaders( - req.headers as Record, - ); + const headers = validateWebhookHeaders({ + req, + res, + isBackendAllowed, + }); if (!headers) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Missing signature headers" })); return; } const body = await readBody(req, maxBodyBytes); - const isValid = verifyNextcloudTalkSignature({ - signature: headers.signature, - random: headers.random, + const hasValidSignature = verifyWebhookSignature({ + headers, body, secret, + res, }); - - if (!isValid) { - res.writeHead(401, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Invalid signature" 
})); + if (!hasValidSignature) { return; } - const payload = parseWebhookPayload(body); - if (!payload) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Invalid payload format" })); + const decoded = decodeWebhookCreateMessage({ + body, + res, + }); + if (decoded.kind === "invalid") { + return; + } + if (decoded.kind === "ignore") { + writeJsonResponse(res, 200); return; } - if (payload.type !== "Create") { - res.writeHead(200); - res.end(); - return; + const message = decoded.message; + if (shouldProcessMessage) { + const shouldProcess = await shouldProcessMessage(message); + if (!shouldProcess) { + writeJsonResponse(res, 200); + return; + } } - const message = payloadToInboundMessage(payload); - - res.writeHead(200); - res.end(); + writeJsonResponse(res, 200); try { await onMessage(message); @@ -157,25 +257,16 @@ export function createNextcloudTalkWebhookServer(opts: NextcloudTalkWebhookServe } } catch (err) { if (isRequestBodyLimitError(err, "PAYLOAD_TOO_LARGE")) { - if (!res.headersSent) { - res.writeHead(413, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Payload too large" })); - } + writeWebhookError(res, 413, WEBHOOK_ERRORS.payloadTooLarge); return; } if (isRequestBodyLimitError(err, "REQUEST_BODY_TIMEOUT")) { - if (!res.headersSent) { - res.writeHead(408, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: requestBodyErrorToText("REQUEST_BODY_TIMEOUT") })); - } + writeWebhookError(res, 408, requestBodyErrorToText("REQUEST_BODY_TIMEOUT")); return; } const error = err instanceof Error ? 
err : new Error(formatError(err)); onError?.(error); - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Internal server error" })); - } + writeWebhookError(res, 500, WEBHOOK_ERRORS.internalServerError); } }); @@ -185,12 +276,25 @@ export function createNextcloudTalkWebhookServer(opts: NextcloudTalkWebhookServe }); }; + let stopped = false; const stop = () => { - server.close(); + if (stopped) { + return; + } + stopped = true; + try { + server.close(); + } catch { + // ignore close races while shutting down + } }; if (abortSignal) { - abortSignal.addEventListener("abort", stop, { once: true }); + if (abortSignal.aborted) { + stop(); + } else { + abortSignal.addEventListener("abort", stop, { once: true }); + } } return { server, start, stop }; @@ -233,12 +337,41 @@ export async function monitorNextcloudTalkProvider( channel: "nextcloud-talk", accountId: account.accountId, }); + const expectedBackendOrigin = normalizeOrigin(account.baseUrl); + const replayGuard = createNextcloudTalkReplayGuard({ + stateDir: core.state.resolveStateDir(process.env, os.homedir), + onDiskError: (error) => { + logger.warn( + `[nextcloud-talk:${account.accountId}] replay guard disk error: ${String(error)}`, + ); + }, + }); const { start, stop } = createNextcloudTalkWebhookServer({ port, host, path, secret: account.secret, + isBackendAllowed: (backend) => { + if (!expectedBackendOrigin) { + return true; + } + const backendOrigin = normalizeOrigin(backend); + return backendOrigin === expectedBackendOrigin; + }, + shouldProcessMessage: async (message) => { + const shouldProcess = await replayGuard.shouldProcessMessage({ + accountId: account.accountId, + roomToken: message.roomToken, + messageId: message.messageId, + }); + if (!shouldProcess) { + logger.warn( + `[nextcloud-talk:${account.accountId}] replayed webhook ignored room=${message.roomToken} messageId=${message.messageId}`, + ); + } + return shouldProcess; + }, 
onMessage: async (message) => { core.channel.activity.record({ channel: "nextcloud-talk", @@ -264,7 +397,14 @@ export async function monitorNextcloudTalkProvider( abortSignal: opts.abortSignal, }); + if (opts.abortSignal?.aborted) { + return { stop }; + } await start(); + if (opts.abortSignal?.aborted) { + stop(); + return { stop }; + } const publicUrl = account.config.webhookPublicUrl ?? diff --git a/extensions/nextcloud-talk/src/replay-guard.test.ts b/extensions/nextcloud-talk/src/replay-guard.test.ts new file mode 100644 index 00000000000..0bf18acb600 --- /dev/null +++ b/extensions/nextcloud-talk/src/replay-guard.test.ts @@ -0,0 +1,70 @@ +import { mkdtemp, rm } from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, describe, expect, it } from "vitest"; +import { createNextcloudTalkReplayGuard } from "./replay-guard.js"; + +const tempDirs: string[] = []; + +afterEach(async () => { + while (tempDirs.length > 0) { + const dir = tempDirs.pop(); + if (dir) { + await rm(dir, { recursive: true, force: true }); + } + } +}); + +async function makeTempDir(): Promise { + const dir = await mkdtemp(path.join(os.tmpdir(), "nextcloud-talk-replay-")); + tempDirs.push(dir); + return dir; +} + +describe("createNextcloudTalkReplayGuard", () => { + it("persists replay decisions across guard instances", async () => { + const stateDir = await makeTempDir(); + + const firstGuard = createNextcloudTalkReplayGuard({ stateDir }); + const firstAttempt = await firstGuard.shouldProcessMessage({ + accountId: "account-a", + roomToken: "room-1", + messageId: "msg-1", + }); + const replayAttempt = await firstGuard.shouldProcessMessage({ + accountId: "account-a", + roomToken: "room-1", + messageId: "msg-1", + }); + + const secondGuard = createNextcloudTalkReplayGuard({ stateDir }); + const restartReplayAttempt = await secondGuard.shouldProcessMessage({ + accountId: "account-a", + roomToken: "room-1", + messageId: "msg-1", + }); + + 
expect(firstAttempt).toBe(true); + expect(replayAttempt).toBe(false); + expect(restartReplayAttempt).toBe(false); + }); + + it("scopes replay state by account namespace", async () => { + const stateDir = await makeTempDir(); + const guard = createNextcloudTalkReplayGuard({ stateDir }); + + const accountAFirst = await guard.shouldProcessMessage({ + accountId: "account-a", + roomToken: "room-1", + messageId: "msg-9", + }); + const accountBFirst = await guard.shouldProcessMessage({ + accountId: "account-b", + roomToken: "room-1", + messageId: "msg-9", + }); + + expect(accountAFirst).toBe(true); + expect(accountBFirst).toBe(true); + }); +}); diff --git a/extensions/nextcloud-talk/src/replay-guard.ts b/extensions/nextcloud-talk/src/replay-guard.ts new file mode 100644 index 00000000000..14b074ed2ab --- /dev/null +++ b/extensions/nextcloud-talk/src/replay-guard.ts @@ -0,0 +1,65 @@ +import path from "node:path"; +import { createPersistentDedupe } from "openclaw/plugin-sdk"; + +const DEFAULT_REPLAY_TTL_MS = 24 * 60 * 60 * 1000; +const DEFAULT_MEMORY_MAX_SIZE = 1_000; +const DEFAULT_FILE_MAX_ENTRIES = 10_000; + +function sanitizeSegment(value: string): string { + const trimmed = value.trim(); + if (!trimmed) { + return "default"; + } + return trimmed.replace(/[^a-zA-Z0-9_-]/g, "_"); +} + +function buildReplayKey(params: { roomToken: string; messageId: string }): string | null { + const roomToken = params.roomToken.trim(); + const messageId = params.messageId.trim(); + if (!roomToken || !messageId) { + return null; + } + return `${roomToken}:${messageId}`; +} + +export type NextcloudTalkReplayGuardOptions = { + stateDir: string; + ttlMs?: number; + memoryMaxSize?: number; + fileMaxEntries?: number; + onDiskError?: (error: unknown) => void; +}; + +export type NextcloudTalkReplayGuard = { + shouldProcessMessage: (params: { + accountId: string; + roomToken: string; + messageId: string; + }) => Promise; +}; + +export function createNextcloudTalkReplayGuard( + options: 
NextcloudTalkReplayGuardOptions, +): NextcloudTalkReplayGuard { + const stateDir = options.stateDir.trim(); + const persistentDedupe = createPersistentDedupe({ + ttlMs: options.ttlMs ?? DEFAULT_REPLAY_TTL_MS, + memoryMaxSize: options.memoryMaxSize ?? DEFAULT_MEMORY_MAX_SIZE, + fileMaxEntries: options.fileMaxEntries ?? DEFAULT_FILE_MAX_ENTRIES, + resolveFilePath: (namespace) => + path.join(stateDir, "nextcloud-talk", "replay-dedupe", `${sanitizeSegment(namespace)}.json`), + }); + + return { + shouldProcessMessage: async ({ accountId, roomToken, messageId }) => { + const replayKey = buildReplayKey({ roomToken, messageId }); + if (!replayKey) { + return true; + } + return await persistentDedupe.checkAndRecord(replayKey, { + namespace: accountId, + onDiskError: options.onDiskError, + }); + }, + }; +} diff --git a/extensions/nextcloud-talk/src/types.ts b/extensions/nextcloud-talk/src/types.ts index a9fe49be36d..e7af64a965c 100644 --- a/extensions/nextcloud-talk/src/types.ts +++ b/extensions/nextcloud-talk/src/types.ts @@ -170,6 +170,8 @@ export type NextcloudTalkWebhookServerOptions = { secret: string; maxBodyBytes?: number; readBody?: (req: import("node:http").IncomingMessage, maxBodyBytes: number) => Promise; + isBackendAllowed?: (backend: string) => boolean; + shouldProcessMessage?: (message: NextcloudTalkInboundMessage) => boolean | Promise; onMessage: (message: NextcloudTalkInboundMessage) => void | Promise; onError?: (error: Error) => void; abortSignal?: AbortSignal; diff --git a/extensions/nostr/CHANGELOG.md b/extensions/nostr/CHANGELOG.md index 3ab7bf7a136..b99f48bd8df 100644 --- a/extensions/nostr/CHANGELOG.md +++ b/extensions/nostr/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.26 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.25 ### Changes diff --git a/extensions/nostr/package.json b/extensions/nostr/package.json index 72b1a2cee62..2cff8f09ec9 100644 --- a/extensions/nostr/package.json +++ b/extensions/nostr/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nostr", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Nostr channel plugin for NIP-04 encrypted DMs", "type": "module", "dependencies": { diff --git a/extensions/open-prose/package.json b/extensions/open-prose/package.json index 4d28edc8e68..ae46e3fba4a 100644 --- a/extensions/open-prose/package.json +++ b/extensions/open-prose/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/open-prose", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenProse VM skill pack plugin (slash command + telemetry).", "type": "module", diff --git a/extensions/signal/package.json b/extensions/signal/package.json index 1005503eff1..eb047ab7e73 100644 --- a/extensions/signal/package.json +++ b/extensions/signal/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/signal", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw Signal channel plugin", "type": "module", diff --git a/extensions/slack/package.json b/extensions/slack/package.json index adbd311981f..ca4558764b7 100644 --- a/extensions/slack/package.json +++ b/extensions/slack/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/slack", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw Slack channel plugin", "type": "module", diff --git a/extensions/synology-chat/package.json b/extensions/synology-chat/package.json index e4474651f07..0d6e3427123 100644 --- a/extensions/synology-chat/package.json +++ b/extensions/synology-chat/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/synology-chat", - "version": "2026.2.25", + "version": "2026.2.26", "description": "Synology Chat channel plugin for OpenClaw", "type": "module", "dependencies": { diff 
--git a/extensions/telegram/package.json b/extensions/telegram/package.json index 83586d5da0e..4cf2a6276ef 100644 --- a/extensions/telegram/package.json +++ b/extensions/telegram/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/telegram", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw Telegram channel plugin", "type": "module", diff --git a/extensions/tlon/package.json b/extensions/tlon/package.json index b989fb957a8..c0e93868085 100644 --- a/extensions/tlon/package.json +++ b/extensions/tlon/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/tlon", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Tlon/Urbit channel plugin", "type": "module", "dependencies": { diff --git a/extensions/twitch/CHANGELOG.md b/extensions/twitch/CHANGELOG.md index 94e20c4cf6a..970f756d73e 100644 --- a/extensions/twitch/CHANGELOG.md +++ b/extensions/twitch/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.26 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.2.25 ### Changes diff --git a/extensions/twitch/package.json b/extensions/twitch/package.json index 1efd4d0814f..720bf7af3d8 100644 --- a/extensions/twitch/package.json +++ b/extensions/twitch/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/twitch", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Twitch channel plugin", "type": "module", "dependencies": { diff --git a/extensions/voice-call/CHANGELOG.md b/extensions/voice-call/CHANGELOG.md index 48f4d2573a0..41f8685d304 100644 --- a/extensions/voice-call/CHANGELOG.md +++ b/extensions/voice-call/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.26 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.25 ### Changes diff --git a/extensions/voice-call/package.json b/extensions/voice-call/package.json index e09e59fef8d..374c658631f 100644 --- a/extensions/voice-call/package.json +++ b/extensions/voice-call/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/voice-call", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw voice-call plugin", "type": "module", "dependencies": { diff --git a/extensions/voice-call/src/http-headers.test.ts b/extensions/voice-call/src/http-headers.test.ts new file mode 100644 index 00000000000..5141d1d2759 --- /dev/null +++ b/extensions/voice-call/src/http-headers.test.ts @@ -0,0 +1,16 @@ +import { describe, expect, it } from "vitest"; +import { getHeader } from "./http-headers.js"; + +describe("getHeader", () => { + it("returns first value when header is an array", () => { + expect(getHeader({ "x-test": ["first", "second"] }, "x-test")).toBe("first"); + }); + + it("matches headers case-insensitively", () => { + expect(getHeader({ "X-Twilio-Signature": "sig-1" }, "x-twilio-signature")).toBe("sig-1"); + }); + + it("returns undefined for missing header", () => { + expect(getHeader({ host: "example.com" }, "x-missing")).toBeUndefined(); + }); +}); diff --git a/extensions/voice-call/src/http-headers.ts b/extensions/voice-call/src/http-headers.ts new file mode 100644 index 00000000000..1e50658b6bb --- /dev/null +++ b/extensions/voice-call/src/http-headers.ts @@ -0,0 +1,12 @@ +export type HttpHeaderMap = Record; + +export function getHeader(headers: HttpHeaderMap, name: string): string | undefined { + const target = name.toLowerCase(); + const direct = headers[target]; + const value = + direct ?? 
Object.entries(headers).find(([key]) => key.toLowerCase() === target)?.[1]; + if (Array.isArray(value)) { + return value[0]; + } + return value; +} diff --git a/extensions/voice-call/src/providers/base.ts b/extensions/voice-call/src/providers/base.ts index 63a9a047181..2d76cc15a7e 100644 --- a/extensions/voice-call/src/providers/base.ts +++ b/extensions/voice-call/src/providers/base.ts @@ -4,6 +4,7 @@ import type { InitiateCallResult, PlayTtsInput, ProviderName, + WebhookParseOptions, ProviderWebhookParseResult, StartListeningInput, StopListeningInput, @@ -36,7 +37,7 @@ export interface VoiceCallProvider { * Parse provider-specific webhook payload into normalized events. * Returns events and optional response to send back to provider. */ - parseWebhookEvent(ctx: WebhookContext): ProviderWebhookParseResult; + parseWebhookEvent(ctx: WebhookContext, options?: WebhookParseOptions): ProviderWebhookParseResult; /** * Initiate an outbound call. diff --git a/extensions/voice-call/src/providers/mock.ts b/extensions/voice-call/src/providers/mock.ts index bc6a52efa71..6602d6e71f9 100644 --- a/extensions/voice-call/src/providers/mock.ts +++ b/extensions/voice-call/src/providers/mock.ts @@ -6,6 +6,7 @@ import type { InitiateCallResult, NormalizedEvent, PlayTtsInput, + WebhookParseOptions, ProviderWebhookParseResult, StartListeningInput, StopListeningInput, @@ -28,7 +29,10 @@ export class MockProvider implements VoiceCallProvider { return { ok: true }; } - parseWebhookEvent(ctx: WebhookContext): ProviderWebhookParseResult { + parseWebhookEvent( + ctx: WebhookContext, + _options?: WebhookParseOptions, + ): ProviderWebhookParseResult { try { const payload = JSON.parse(ctx.rawBody); const events: NormalizedEvent[] = []; diff --git a/extensions/voice-call/src/providers/plivo.test.ts b/extensions/voice-call/src/providers/plivo.test.ts index 1f46e2d47a5..7652c3777cd 100644 --- a/extensions/voice-call/src/providers/plivo.test.ts +++ b/extensions/voice-call/src/providers/plivo.test.ts 
@@ -24,4 +24,26 @@ describe("PlivoProvider", () => { expect(result.providerResponseBody).toContain(" { + const provider = new PlivoProvider({ + authId: "MA000000000000000000", + authToken: "test-token", + }); + + const result = provider.parseWebhookEvent( + { + headers: { host: "example.com", "x-plivo-signature-v3-nonce": "nonce-1" }, + rawBody: + "CallUUID=call-uuid&CallStatus=in-progress&Direction=outbound&From=%2B15550000000&To=%2B15550000001&Event=StartApp", + url: "https://example.com/voice/webhook?provider=plivo&flow=answer&callId=internal-call-id", + method: "POST", + query: { provider: "plivo", flow: "answer", callId: "internal-call-id" }, + }, + { verifiedRequestKey: "plivo:v3:verified" }, + ); + + expect(result.events).toHaveLength(1); + expect(result.events[0]?.dedupeKey).toBe("plivo:v3:verified"); + }); }); diff --git a/extensions/voice-call/src/providers/plivo.ts b/extensions/voice-call/src/providers/plivo.ts index 5b5311acc73..6db603d0639 100644 --- a/extensions/voice-call/src/providers/plivo.ts +++ b/extensions/voice-call/src/providers/plivo.ts @@ -1,5 +1,6 @@ import crypto from "node:crypto"; import type { PlivoConfig, WebhookSecurityConfig } from "../config.js"; +import { getHeader } from "../http-headers.js"; import type { HangupCallInput, InitiateCallInput, @@ -10,11 +11,13 @@ import type { StartListeningInput, StopListeningInput, WebhookContext, + WebhookParseOptions, WebhookVerificationResult, } from "../types.js"; import { escapeXml } from "../voice-mapping.js"; import { reconstructWebhookUrl, verifyPlivoWebhook } from "../webhook-security.js"; import type { VoiceCallProvider } from "./base.js"; +import { guardedJsonApiRequest } from "./shared/guarded-json-api.js"; export interface PlivoProviderOptions { /** Override public URL origin for signature verification */ @@ -30,17 +33,6 @@ export interface PlivoProviderOptions { type PendingSpeak = { text: string; locale?: string }; type PendingListen = { language?: string }; -function getHeader( - 
headers: Record, - name: string, -): string | undefined { - const value = headers[name.toLowerCase()]; - if (Array.isArray(value)) { - return value[0]; - } - return value; -} - function createPlivoRequestDedupeKey(ctx: WebhookContext): string { const nonceV3 = getHeader(ctx.headers, "x-plivo-signature-v3-nonce"); if (nonceV3) { @@ -60,6 +52,7 @@ export class PlivoProvider implements VoiceCallProvider { private readonly authToken: string; private readonly baseUrl: string; private readonly options: PlivoProviderOptions; + private readonly apiHost: string; // Best-effort mapping between create-call request UUID and call UUID. private requestUuidToCallUuid = new Map(); @@ -82,6 +75,7 @@ export class PlivoProvider implements VoiceCallProvider { this.authId = config.authId; this.authToken = config.authToken; this.baseUrl = `https://api.plivo.com/v1/Account/${this.authId}`; + this.apiHost = new URL(this.baseUrl).hostname; this.options = options; } @@ -92,25 +86,19 @@ export class PlivoProvider implements VoiceCallProvider { allowNotFound?: boolean; }): Promise { const { method, endpoint, body, allowNotFound } = params; - const response = await fetch(`${this.baseUrl}${endpoint}`, { + return await guardedJsonApiRequest({ + url: `${this.baseUrl}${endpoint}`, method, headers: { Authorization: `Basic ${Buffer.from(`${this.authId}:${this.authToken}`).toString("base64")}`, "Content-Type": "application/json", }, - body: body ? JSON.stringify(body) : undefined, + body, + allowNotFound, + allowedHostnames: [this.apiHost], + auditContext: "voice-call.plivo.api", + errorPrefix: "Plivo API error", }); - - if (!response.ok) { - if (allowNotFound && response.status === 404) { - return undefined as T; - } - const errorText = await response.text(); - throw new Error(`Plivo API error: ${response.status} ${errorText}`); - } - - const text = await response.text(); - return text ? 
(JSON.parse(text) as T) : (undefined as T); } verifyWebhook(ctx: WebhookContext): WebhookVerificationResult { @@ -127,10 +115,18 @@ export class PlivoProvider implements VoiceCallProvider { console.warn(`[plivo] Webhook verification failed: ${result.reason}`); } - return { ok: result.ok, reason: result.reason, isReplay: result.isReplay }; + return { + ok: result.ok, + reason: result.reason, + isReplay: result.isReplay, + verifiedRequestKey: result.verifiedRequestKey, + }; } - parseWebhookEvent(ctx: WebhookContext): ProviderWebhookParseResult { + parseWebhookEvent( + ctx: WebhookContext, + options?: WebhookParseOptions, + ): ProviderWebhookParseResult { const flow = typeof ctx.query?.flow === "string" ? ctx.query.flow.trim() : ""; const parsed = this.parseBody(ctx.rawBody); @@ -196,7 +192,7 @@ export class PlivoProvider implements VoiceCallProvider { // Normal events. const callIdFromQuery = this.getCallIdFromQuery(ctx); - const dedupeKey = createPlivoRequestDedupeKey(ctx); + const dedupeKey = options?.verifiedRequestKey ?? 
createPlivoRequestDedupeKey(ctx); const event = this.normalizeEvent(parsed, callIdFromQuery, dedupeKey); return { diff --git a/extensions/voice-call/src/providers/shared/guarded-json-api.ts b/extensions/voice-call/src/providers/shared/guarded-json-api.ts new file mode 100644 index 00000000000..6790cae5d76 --- /dev/null +++ b/extensions/voice-call/src/providers/shared/guarded-json-api.ts @@ -0,0 +1,42 @@ +import { fetchWithSsrFGuard } from "openclaw/plugin-sdk"; + +type GuardedJsonApiRequestParams = { + url: string; + method: "GET" | "POST" | "DELETE" | "PUT" | "PATCH"; + headers: Record; + body?: Record; + allowNotFound?: boolean; + allowedHostnames: string[]; + auditContext: string; + errorPrefix: string; +}; + +export async function guardedJsonApiRequest( + params: GuardedJsonApiRequestParams, +): Promise { + const { response, release } = await fetchWithSsrFGuard({ + url: params.url, + init: { + method: params.method, + headers: params.headers, + body: params.body ? JSON.stringify(params.body) : undefined, + }, + policy: { allowedHostnames: params.allowedHostnames }, + auditContext: params.auditContext, + }); + + try { + if (!response.ok) { + if (params.allowNotFound && response.status === 404) { + return undefined as T; + } + const errorText = await response.text(); + throw new Error(`${params.errorPrefix}: ${response.status} ${errorText}`); + } + + const text = await response.text(); + return text ? 
(JSON.parse(text) as T) : (undefined as T); + } finally { + await release(); + } +} diff --git a/extensions/voice-call/src/providers/telnyx.test.ts b/extensions/voice-call/src/providers/telnyx.test.ts index 7fcd756b943..c083070229f 100644 --- a/extensions/voice-call/src/providers/telnyx.test.ts +++ b/extensions/voice-call/src/providers/telnyx.test.ts @@ -133,7 +133,34 @@ describe("TelnyxProvider.verifyWebhook", () => { expect(first.ok).toBe(true); expect(first.isReplay).toBeFalsy(); + expect(first.verifiedRequestKey).toBeTruthy(); expect(second.ok).toBe(true); expect(second.isReplay).toBe(true); + expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + }); +}); + +describe("TelnyxProvider.parseWebhookEvent", () => { + it("uses verified request key for manager dedupe", () => { + const provider = new TelnyxProvider({ + apiKey: "KEY123", + connectionId: "CONN456", + publicKey: undefined, + }); + const result = provider.parseWebhookEvent( + createCtx({ + rawBody: JSON.stringify({ + data: { + id: "evt-123", + event_type: "call.initiated", + payload: { call_control_id: "call-1" }, + }, + }), + }), + { verifiedRequestKey: "telnyx:req:abc" }, + ); + + expect(result.events).toHaveLength(1); + expect(result.events[0]?.dedupeKey).toBe("telnyx:req:abc"); }); }); diff --git a/extensions/voice-call/src/providers/telnyx.ts b/extensions/voice-call/src/providers/telnyx.ts index e81844f1f65..80a46ce2192 100644 --- a/extensions/voice-call/src/providers/telnyx.ts +++ b/extensions/voice-call/src/providers/telnyx.ts @@ -11,10 +11,12 @@ import type { StartListeningInput, StopListeningInput, WebhookContext, + WebhookParseOptions, WebhookVerificationResult, } from "../types.js"; import { verifyTelnyxWebhook } from "../webhook-security.js"; import type { VoiceCallProvider } from "./base.js"; +import { guardedJsonApiRequest } from "./shared/guarded-json-api.js"; /** * Telnyx Voice API provider implementation. 
@@ -35,6 +37,7 @@ export class TelnyxProvider implements VoiceCallProvider { private readonly publicKey: string | undefined; private readonly options: TelnyxProviderOptions; private readonly baseUrl = "https://api.telnyx.com/v2"; + private readonly apiHost = "api.telnyx.com"; constructor(config: TelnyxConfig, options: TelnyxProviderOptions = {}) { if (!config.apiKey) { @@ -58,25 +61,19 @@ export class TelnyxProvider implements VoiceCallProvider { body: Record, options?: { allowNotFound?: boolean }, ): Promise { - const response = await fetch(`${this.baseUrl}${endpoint}`, { + return await guardedJsonApiRequest({ + url: `${this.baseUrl}${endpoint}`, method: "POST", headers: { Authorization: `Bearer ${this.apiKey}`, "Content-Type": "application/json", }, - body: JSON.stringify(body), + body, + allowNotFound: options?.allowNotFound, + allowedHostnames: [this.apiHost], + auditContext: "voice-call.telnyx.api", + errorPrefix: "Telnyx API error", }); - - if (!response.ok) { - if (options?.allowNotFound && response.status === 404) { - return undefined as T; - } - const errorText = await response.text(); - throw new Error(`Telnyx API error: ${response.status} ${errorText}`); - } - - const text = await response.text(); - return text ? (JSON.parse(text) as T) : (undefined as T); } /** @@ -87,13 +84,21 @@ export class TelnyxProvider implements VoiceCallProvider { skipVerification: this.options.skipVerification, }); - return { ok: result.ok, reason: result.reason, isReplay: result.isReplay }; + return { + ok: result.ok, + reason: result.reason, + isReplay: result.isReplay, + verifiedRequestKey: result.verifiedRequestKey, + }; } /** * Parse Telnyx webhook event into normalized format. 
*/ - parseWebhookEvent(ctx: WebhookContext): ProviderWebhookParseResult { + parseWebhookEvent( + ctx: WebhookContext, + options?: WebhookParseOptions, + ): ProviderWebhookParseResult { try { const payload = JSON.parse(ctx.rawBody); const data = payload.data; @@ -102,7 +107,7 @@ export class TelnyxProvider implements VoiceCallProvider { return { events: [], statusCode: 200 }; } - const event = this.normalizeEvent(data); + const event = this.normalizeEvent(data, options?.verifiedRequestKey); return { events: event ? [event] : [], statusCode: 200, @@ -115,7 +120,7 @@ export class TelnyxProvider implements VoiceCallProvider { /** * Convert Telnyx event to normalized event format. */ - private normalizeEvent(data: TelnyxEvent): NormalizedEvent | null { + private normalizeEvent(data: TelnyxEvent, dedupeKey?: string): NormalizedEvent | null { // Decode client_state from Base64 (we encode it in initiateCall) let callId = ""; if (data.payload?.client_state) { @@ -132,6 +137,7 @@ export class TelnyxProvider implements VoiceCallProvider { const baseEvent = { id: data.id || crypto.randomUUID(), + dedupeKey, callId, providerCallId: data.payload?.call_control_id, timestamp: Date.now(), diff --git a/extensions/voice-call/src/providers/twilio.test.ts b/extensions/voice-call/src/providers/twilio.test.ts index 0d5c6de03d0..92cbe0fec32 100644 --- a/extensions/voice-call/src/providers/twilio.test.ts +++ b/extensions/voice-call/src/providers/twilio.test.ts @@ -60,7 +60,7 @@ describe("TwilioProvider", () => { expect(result.providerResponseBody).toContain(""); }); - it("uses a stable dedupeKey for identical request payloads", () => { + it("uses a stable fallback dedupeKey for identical request payloads", () => { const provider = createProvider(); const rawBody = "CallSid=CA789&Direction=inbound&SpeechResult=hello"; const ctxA = { @@ -78,10 +78,31 @@ describe("TwilioProvider", () => { expect(eventA).toBeDefined(); expect(eventB).toBeDefined(); expect(eventA?.id).not.toBe(eventB?.id); - 
expect(eventA?.dedupeKey).toBe("twilio:idempotency:idem-123"); + expect(eventA?.dedupeKey).toContain("twilio:fallback:"); expect(eventA?.dedupeKey).toBe(eventB?.dedupeKey); }); + it("uses verified request key for dedupe and ignores idempotency header changes", () => { + const provider = createProvider(); + const rawBody = "CallSid=CA790&Direction=inbound&SpeechResult=hello"; + const ctxA = { + ...createContext(rawBody, { callId: "call-1", turnToken: "turn-1" }), + headers: { "i-twilio-idempotency-token": "idem-a" }, + }; + const ctxB = { + ...createContext(rawBody, { callId: "call-1", turnToken: "turn-1" }), + headers: { "i-twilio-idempotency-token": "idem-b" }, + }; + + const eventA = provider.parseWebhookEvent(ctxA, { verifiedRequestKey: "twilio:req:abc" }) + .events[0]; + const eventB = provider.parseWebhookEvent(ctxB, { verifiedRequestKey: "twilio:req:abc" }) + .events[0]; + + expect(eventA?.dedupeKey).toBe("twilio:req:abc"); + expect(eventB?.dedupeKey).toBe("twilio:req:abc"); + }); + it("keeps turnToken from query on speech events", () => { const provider = createProvider(); const ctx = createContext("CallSid=CA222&Direction=inbound&SpeechResult=hello", { diff --git a/extensions/voice-call/src/providers/twilio.ts b/extensions/voice-call/src/providers/twilio.ts index c1dbf6c7f4f..bf551567722 100644 --- a/extensions/voice-call/src/providers/twilio.ts +++ b/extensions/voice-call/src/providers/twilio.ts @@ -1,5 +1,6 @@ import crypto from "node:crypto"; import type { TwilioConfig, WebhookSecurityConfig } from "../config.js"; +import { getHeader } from "../http-headers.js"; import type { MediaStreamHandler } from "../media-stream.js"; import { chunkAudio } from "../telephony-audio.js"; import type { TelephonyTtsProvider } from "../telephony-tts.js"; @@ -13,6 +14,7 @@ import type { StartListeningInput, StopListeningInput, WebhookContext, + WebhookParseOptions, WebhookVerificationResult, } from "../types.js"; import { escapeXml, mapVoiceToPolly } from 
"../voice-mapping.js"; @@ -20,30 +22,24 @@ import type { VoiceCallProvider } from "./base.js"; import { twilioApiRequest } from "./twilio/api.js"; import { verifyTwilioProviderWebhook } from "./twilio/webhook.js"; -function getHeader( - headers: Record, - name: string, -): string | undefined { - const value = headers[name.toLowerCase()]; - if (Array.isArray(value)) { - return value[0]; - } - return value; -} - -function createTwilioRequestDedupeKey(ctx: WebhookContext): string { - const idempotencyToken = getHeader(ctx.headers, "i-twilio-idempotency-token"); - if (idempotencyToken) { - return `twilio:idempotency:${idempotencyToken}`; +function createTwilioRequestDedupeKey(ctx: WebhookContext, verifiedRequestKey?: string): string { + if (verifiedRequestKey) { + return verifiedRequestKey; } const signature = getHeader(ctx.headers, "x-twilio-signature") ?? ""; + const params = new URLSearchParams(ctx.rawBody); + const callSid = params.get("CallSid") ?? ""; + const callStatus = params.get("CallStatus") ?? ""; + const direction = params.get("Direction") ?? ""; const callId = typeof ctx.query?.callId === "string" ? ctx.query.callId.trim() : ""; const flow = typeof ctx.query?.flow === "string" ? ctx.query.flow.trim() : ""; const turnToken = typeof ctx.query?.turnToken === "string" ? ctx.query.turnToken.trim() : ""; return `twilio:fallback:${crypto .createHash("sha256") - .update(`${signature}\n${callId}\n${flow}\n${turnToken}\n${ctx.rawBody}`) + .update( + `${signature}\n${callSid}\n${callStatus}\n${direction}\n${callId}\n${flow}\n${turnToken}\n${ctx.rawBody}`, + ) .digest("hex")}`; } @@ -232,7 +228,10 @@ export class TwilioProvider implements VoiceCallProvider { /** * Parse Twilio webhook event into normalized format. 
*/ - parseWebhookEvent(ctx: WebhookContext): ProviderWebhookParseResult { + parseWebhookEvent( + ctx: WebhookContext, + options?: WebhookParseOptions, + ): ProviderWebhookParseResult { try { const params = new URLSearchParams(ctx.rawBody); const callIdFromQuery = @@ -243,7 +242,7 @@ export class TwilioProvider implements VoiceCallProvider { typeof ctx.query?.turnToken === "string" && ctx.query.turnToken.trim() ? ctx.query.turnToken.trim() : undefined; - const dedupeKey = createTwilioRequestDedupeKey(ctx); + const dedupeKey = createTwilioRequestDedupeKey(ctx, options?.verifiedRequestKey); const event = this.normalizeEvent(params, { callIdOverride: callIdFromQuery, dedupeKey, diff --git a/extensions/voice-call/src/providers/twilio/webhook.ts b/extensions/voice-call/src/providers/twilio/webhook.ts index 072e7f4f399..4b38050959b 100644 --- a/extensions/voice-call/src/providers/twilio/webhook.ts +++ b/extensions/voice-call/src/providers/twilio/webhook.ts @@ -29,5 +29,6 @@ export function verifyTwilioProviderWebhook(params: { ok: result.ok, reason: result.reason, isReplay: result.isReplay, + verifiedRequestKey: result.verifiedRequestKey, }; } diff --git a/extensions/voice-call/src/types.ts b/extensions/voice-call/src/types.ts index 835b8ad8a1d..6806b7cc728 100644 --- a/extensions/voice-call/src/types.ts +++ b/extensions/voice-call/src/types.ts @@ -177,6 +177,13 @@ export type WebhookVerificationResult = { reason?: string; /** Signature is valid, but request was seen before within replay window. */ isReplay?: boolean; + /** Stable key derived from authenticated request material. */ + verifiedRequestKey?: string; +}; + +export type WebhookParseOptions = { + /** Stable request key from verifyWebhook. 
*/ + verifiedRequestKey?: string; }; export type WebhookContext = { diff --git a/extensions/voice-call/src/webhook-security.test.ts b/extensions/voice-call/src/webhook-security.test.ts index e85838a1383..dd7fb69502e 100644 --- a/extensions/voice-call/src/webhook-security.test.ts +++ b/extensions/voice-call/src/webhook-security.test.ts @@ -198,8 +198,26 @@ describe("verifyPlivoWebhook", () => { expect(first.ok).toBe(true); expect(first.isReplay).toBeFalsy(); + expect(first.verifiedRequestKey).toBeTruthy(); expect(second.ok).toBe(true); expect(second.isReplay).toBe(true); + expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + }); + + it("returns a stable request key when verification is skipped", () => { + const ctx = { + headers: {}, + rawBody: "CallUUID=uuid&CallStatus=in-progress", + url: "https://example.com/voice/webhook", + method: "POST" as const, + }; + const first = verifyPlivoWebhook(ctx, "token", { skipVerification: true }); + const second = verifyPlivoWebhook(ctx, "token", { skipVerification: true }); + + expect(first.ok).toBe(true); + expect(first.verifiedRequestKey).toMatch(/^plivo:skip:/); + expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expect(second.isReplay).toBe(true); }); }); @@ -229,8 +247,26 @@ describe("verifyTelnyxWebhook", () => { expect(first.ok).toBe(true); expect(first.isReplay).toBeFalsy(); + expect(first.verifiedRequestKey).toBeTruthy(); expect(second.ok).toBe(true); expect(second.isReplay).toBe(true); + expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + }); + + it("returns a stable request key when verification is skipped", () => { + const ctx = { + headers: {}, + rawBody: JSON.stringify({ data: { event_type: "call.initiated" } }), + url: "https://example.com/voice/webhook", + method: "POST" as const, + }; + const first = verifyTelnyxWebhook(ctx, undefined, { skipVerification: true }); + const second = verifyTelnyxWebhook(ctx, undefined, { skipVerification: true }); + + 
expect(first.ok).toBe(true); + expect(first.verifiedRequestKey).toMatch(/^telnyx:skip:/); + expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expect(second.isReplay).toBe(true); }); }); @@ -304,8 +340,58 @@ describe("verifyTwilioWebhook", () => { expect(first.ok).toBe(true); expect(first.isReplay).toBeFalsy(); + expect(first.verifiedRequestKey).toBeTruthy(); expect(second.ok).toBe(true); expect(second.isReplay).toBe(true); + expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + }); + + it("treats changed idempotency header as replay for identical signed requests", () => { + const authToken = "test-auth-token"; + const publicUrl = "https://example.com/voice/webhook"; + const urlWithQuery = `${publicUrl}?callId=abc`; + const postBody = "CallSid=CS778&CallStatus=completed&From=%2B15550000000"; + const signature = twilioSignature({ authToken, url: urlWithQuery, postBody }); + + const first = verifyTwilioWebhook( + { + headers: { + host: "example.com", + "x-forwarded-proto": "https", + "x-twilio-signature": signature, + "i-twilio-idempotency-token": "idem-replay-a", + }, + rawBody: postBody, + url: "http://local/voice/webhook?callId=abc", + method: "POST", + query: { callId: "abc" }, + }, + authToken, + { publicUrl }, + ); + const second = verifyTwilioWebhook( + { + headers: { + host: "example.com", + "x-forwarded-proto": "https", + "x-twilio-signature": signature, + "i-twilio-idempotency-token": "idem-replay-b", + }, + rawBody: postBody, + url: "http://local/voice/webhook?callId=abc", + method: "POST", + query: { callId: "abc" }, + }, + authToken, + { publicUrl }, + ); + + expect(first.ok).toBe(true); + expect(first.isReplay).toBe(false); + expect(first.verifiedRequestKey).toBeTruthy(); + expect(second.ok).toBe(true); + expect(second.isReplay).toBe(true); + expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); }); it("rejects invalid signatures even when attacker injects forwarded host", () => { @@ -517,4 +603,20 @@ 
describe("verifyTwilioWebhook", () => { expect(result.ok).toBe(false); expect(result.verificationUrl).toBe("https://legitimate.example.com/voice/webhook"); }); + + it("returns a stable request key when verification is skipped", () => { + const ctx = { + headers: {}, + rawBody: "CallSid=CS123&CallStatus=completed", + url: "https://example.com/voice/webhook", + method: "POST" as const, + }; + const first = verifyTwilioWebhook(ctx, "token", { skipVerification: true }); + const second = verifyTwilioWebhook(ctx, "token", { skipVerification: true }); + + expect(first.ok).toBe(true); + expect(first.verifiedRequestKey).toMatch(/^twilio:skip:/); + expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expect(second.isReplay).toBe(true); + }); }); diff --git a/extensions/voice-call/src/webhook-security.ts b/extensions/voice-call/src/webhook-security.ts index d190ed8f9ff..75d1ca490d0 100644 --- a/extensions/voice-call/src/webhook-security.ts +++ b/extensions/voice-call/src/webhook-security.ts @@ -1,4 +1,5 @@ import crypto from "node:crypto"; +import { getHeader } from "./http-headers.js"; import type { WebhookContext } from "./types.js"; const REPLAY_WINDOW_MS = 10 * 60 * 1000; @@ -29,6 +30,10 @@ function sha256Hex(input: string): string { return crypto.createHash("sha256").update(input).digest("hex"); } +function createSkippedVerificationReplayKey(provider: string, ctx: WebhookContext): string { + return `${provider}:skip:${sha256Hex(`${ctx.method}\n${ctx.url}\n${ctx.rawBody}`)}`; +} + function pruneReplayCache(cache: ReplayCache, now: number): void { for (const [key, expiresAt] of cache.seenUntil) { if (expiresAt <= now) { @@ -81,17 +86,7 @@ export function validateTwilioSignature( return false; } - // Build the string to sign: URL + sorted params (key+value pairs) - let dataToSign = url; - - // Sort params alphabetically and append key+value - const sortedParams = Array.from(params.entries()).toSorted((a, b) => - a[0] < b[0] ? -1 : a[0] > b[0] ? 
1 : 0, - ); - - for (const [key, value] of sortedParams) { - dataToSign += key + value; - } + const dataToSign = buildTwilioDataToSign(url, params); // HMAC-SHA1 with auth token, then base64 encode const expectedSignature = crypto @@ -103,6 +98,24 @@ export function validateTwilioSignature( return timingSafeEqual(signature, expectedSignature); } +function buildTwilioDataToSign(url: string, params: URLSearchParams): string { + let dataToSign = url; + const sortedParams = Array.from(params.entries()).toSorted((a, b) => + a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0, + ); + for (const [key, value] of sortedParams) { + dataToSign += key + value; + } + return dataToSign; +} + +function buildCanonicalTwilioParamString(params: URLSearchParams): string { + return Array.from(params.entries()) + .toSorted((a, b) => (a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0)) + .map(([key, value]) => `${key}=${value}`) + .join("&"); +} + /** * Timing-safe string comparison to prevent timing attacks. */ @@ -353,20 +366,6 @@ function buildTwilioVerificationUrl( } } -/** - * Get a header value, handling both string and string[] types. - */ -function getHeader( - headers: Record, - name: string, -): string | undefined { - const value = headers[name.toLowerCase()]; - if (Array.isArray(value)) { - return value[0]; - } - return value; -} - function isLoopbackAddress(address?: string): boolean { if (!address) { return false; @@ -392,6 +391,8 @@ export interface TwilioVerificationResult { isNgrokFreeTier?: boolean; /** Request is cryptographically valid but was already processed recently. */ isReplay?: boolean; + /** Stable request identity derived from signed Twilio material. */ + verifiedRequestKey?: string; } export interface TelnyxVerificationResult { @@ -399,19 +400,18 @@ export interface TelnyxVerificationResult { reason?: string; /** Request is cryptographically valid but was already processed recently. */ isReplay?: boolean; + /** Stable request identity derived from signed Telnyx material. 
*/ + verifiedRequestKey?: string; } function createTwilioReplayKey(params: { - ctx: WebhookContext; - signature: string; verificationUrl: string; + signature: string; + requestParams: URLSearchParams; }): string { - const idempotencyToken = getHeader(params.ctx.headers, "i-twilio-idempotency-token"); - if (idempotencyToken) { - return `twilio:idempotency:${idempotencyToken}`; - } - return `twilio:fallback:${sha256Hex( - `${params.verificationUrl}\n${params.signature}\n${params.ctx.rawBody}`, + const canonicalParams = buildCanonicalTwilioParamString(params.requestParams); + return `twilio:req:${sha256Hex( + `${params.verificationUrl}\n${canonicalParams}\n${params.signature}`, )}`; } @@ -470,7 +470,14 @@ export function verifyTelnyxWebhook( }, ): TelnyxVerificationResult { if (options?.skipVerification) { - return { ok: true, reason: "verification skipped (dev mode)" }; + const replayKey = createSkippedVerificationReplayKey("telnyx", ctx); + const isReplay = markReplay(telnyxReplayCache, replayKey); + return { + ok: true, + reason: "verification skipped (dev mode)", + isReplay, + verifiedRequestKey: replayKey, + }; } if (!publicKey) { @@ -508,7 +515,7 @@ export function verifyTelnyxWebhook( const replayKey = `telnyx:${sha256Hex(`${timestamp}\n${signature}\n${ctx.rawBody}`)}`; const isReplay = markReplay(telnyxReplayCache, replayKey); - return { ok: true, isReplay }; + return { ok: true, isReplay, verifiedRequestKey: replayKey }; } catch (err) { return { ok: false, @@ -560,7 +567,14 @@ export function verifyTwilioWebhook( ): TwilioVerificationResult { // Allow skipping verification for development/testing if (options?.skipVerification) { - return { ok: true, reason: "verification skipped (dev mode)" }; + const replayKey = createSkippedVerificationReplayKey("twilio", ctx); + const isReplay = markReplay(twilioReplayCache, replayKey); + return { + ok: true, + reason: "verification skipped (dev mode)", + isReplay, + verifiedRequestKey: replayKey, + }; } const signature = 
getHeader(ctx.headers, "x-twilio-signature"); @@ -583,13 +597,16 @@ export function verifyTwilioWebhook( // Parse the body as URL-encoded params const params = new URLSearchParams(ctx.rawBody); - // Validate signature const isValid = validateTwilioSignature(authToken, signature, verificationUrl, params); if (isValid) { - const replayKey = createTwilioReplayKey({ ctx, signature, verificationUrl }); + const replayKey = createTwilioReplayKey({ + verificationUrl, + signature, + requestParams: params, + }); const isReplay = markReplay(twilioReplayCache, replayKey); - return { ok: true, verificationUrl, isReplay }; + return { ok: true, verificationUrl, isReplay, verifiedRequestKey: replayKey }; } // Check if this is ngrok free tier - the URL might have different format @@ -619,6 +636,8 @@ export interface PlivoVerificationResult { version?: "v3" | "v2"; /** Request is cryptographically valid but was already processed recently. */ isReplay?: boolean; + /** Stable request identity derived from signed Plivo material. 
*/ + verifiedRequestKey?: string; } function normalizeSignatureBase64(input: string): string { @@ -791,7 +810,14 @@ export function verifyPlivoWebhook( }, ): PlivoVerificationResult { if (options?.skipVerification) { - return { ok: true, reason: "verification skipped (dev mode)" }; + const replayKey = createSkippedVerificationReplayKey("plivo", ctx); + const isReplay = markReplay(plivoReplayCache, replayKey); + return { + ok: true, + reason: "verification skipped (dev mode)", + isReplay, + verifiedRequestKey: replayKey, + }; } const signatureV3 = getHeader(ctx.headers, "x-plivo-signature-v3"); @@ -849,7 +875,7 @@ export function verifyPlivoWebhook( } const replayKey = `plivo:v3:${sha256Hex(`${verificationUrl}\n${nonceV3}`)}`; const isReplay = markReplay(plivoReplayCache, replayKey); - return { ok: true, version: "v3", verificationUrl, isReplay }; + return { ok: true, version: "v3", verificationUrl, isReplay, verifiedRequestKey: replayKey }; } if (signatureV2 && nonceV2) { @@ -869,7 +895,7 @@ export function verifyPlivoWebhook( } const replayKey = `plivo:v2:${sha256Hex(`${verificationUrl}\n${nonceV2}`)}`; const isReplay = markReplay(plivoReplayCache, replayKey); - return { ok: true, version: "v2", verificationUrl, isReplay }; + return { ok: true, version: "v2", verificationUrl, isReplay, verifiedRequestKey: replayKey }; } return { diff --git a/extensions/voice-call/src/webhook.test.ts b/extensions/voice-call/src/webhook.test.ts index 8dcf3346342..759ff85d010 100644 --- a/extensions/voice-call/src/webhook.test.ts +++ b/extensions/voice-call/src/webhook.test.ts @@ -7,7 +7,7 @@ import { VoiceCallWebhookServer } from "./webhook.js"; const provider: VoiceCallProvider = { name: "mock", - verifyWebhook: () => ({ ok: true }), + verifyWebhook: () => ({ ok: true, verifiedRequestKey: "mock:req:base" }), parseWebhookEvent: () => ({ events: [] }), initiateCall: async () => ({ providerCallId: "provider-call", status: "initiated" }), hangupCall: async () => {}, @@ -123,7 +123,7 @@ 
describe("VoiceCallWebhookServer replay handling", () => { it("acknowledges replayed webhook requests and skips event side effects", async () => { const replayProvider: VoiceCallProvider = { ...provider, - verifyWebhook: () => ({ ok: true, isReplay: true }), + verifyWebhook: () => ({ ok: true, isReplay: true, verifiedRequestKey: "mock:req:replay" }), parseWebhookEvent: () => ({ events: [ { @@ -165,4 +165,89 @@ describe("VoiceCallWebhookServer replay handling", () => { await server.stop(); } }); + + it("passes verified request key from verifyWebhook into parseWebhookEvent", async () => { + const parseWebhookEvent = vi.fn((_ctx: unknown, options?: { verifiedRequestKey?: string }) => ({ + events: [ + { + id: "evt-verified", + dedupeKey: options?.verifiedRequestKey, + type: "call.speech" as const, + callId: "call-1", + providerCallId: "provider-call-1", + timestamp: Date.now(), + transcript: "hello", + isFinal: true, + }, + ], + statusCode: 200, + })); + const verifiedProvider: VoiceCallProvider = { + ...provider, + verifyWebhook: () => ({ ok: true, verifiedRequestKey: "verified:req:123" }), + parseWebhookEvent, + }; + const { manager, processEvent } = createManager([]); + const config = createConfig({ serve: { port: 0, bind: "127.0.0.1", path: "/voice/webhook" } }); + const server = new VoiceCallWebhookServer(config, manager, verifiedProvider); + + try { + const baseUrl = await server.start(); + const address = ( + server as unknown as { server?: { address?: () => unknown } } + ).server?.address?.(); + const requestUrl = new URL(baseUrl); + if (address && typeof address === "object" && "port" in address && address.port) { + requestUrl.port = String(address.port); + } + const response = await fetch(requestUrl.toString(), { + method: "POST", + headers: { "content-type": "application/x-www-form-urlencoded" }, + body: "CallSid=CA123&SpeechResult=hello", + }); + + expect(response.status).toBe(200); + expect(parseWebhookEvent).toHaveBeenCalledTimes(1); + 
expect(parseWebhookEvent.mock.calls[0]?.[1]).toEqual({ + verifiedRequestKey: "verified:req:123", + }); + expect(processEvent).toHaveBeenCalledTimes(1); + expect(processEvent.mock.calls[0]?.[0]?.dedupeKey).toBe("verified:req:123"); + } finally { + await server.stop(); + } + }); + + it("rejects requests when verification succeeds without a request key", async () => { + const parseWebhookEvent = vi.fn(() => ({ events: [], statusCode: 200 })); + const badProvider: VoiceCallProvider = { + ...provider, + verifyWebhook: () => ({ ok: true }), + parseWebhookEvent, + }; + const { manager } = createManager([]); + const config = createConfig({ serve: { port: 0, bind: "127.0.0.1", path: "/voice/webhook" } }); + const server = new VoiceCallWebhookServer(config, manager, badProvider); + + try { + const baseUrl = await server.start(); + const address = ( + server as unknown as { server?: { address?: () => unknown } } + ).server?.address?.(); + const requestUrl = new URL(baseUrl); + if (address && typeof address === "object" && "port" in address && address.port) { + requestUrl.port = String(address.port); + } + const response = await fetch(requestUrl.toString(), { + method: "POST", + headers: { "content-type": "application/x-www-form-urlencoded" }, + body: "CallSid=CA123&SpeechResult=hello", + }); + + expect(response.status).toBe(401); + expect(parseWebhookEvent).not.toHaveBeenCalled(); + } finally { + await server.stop(); + } + }); }); diff --git a/extensions/voice-call/src/webhook.ts b/extensions/voice-call/src/webhook.ts index 4b778e3a8d7..95d6628b5a8 100644 --- a/extensions/voice-call/src/webhook.ts +++ b/extensions/voice-call/src/webhook.ts @@ -15,6 +15,7 @@ import type { VoiceCallProvider } from "./providers/base.js"; import { OpenAIRealtimeSTTProvider } from "./providers/stt-openai-realtime.js"; import type { TwilioProvider } from "./providers/twilio.js"; import type { NormalizedEvent, WebhookContext } from "./types.js"; +import { startStaleCallReaper } from 
"./webhook/stale-call-reaper.js"; const MAX_WEBHOOK_BODY_BYTES = 1024 * 1024; @@ -28,7 +29,7 @@ export class VoiceCallWebhookServer { private manager: CallManager; private provider: VoiceCallProvider; private coreConfig: CoreConfig | null; - private staleCallReaperInterval: ReturnType | null = null; + private stopStaleCallReaper: (() => void) | null = null; /** Media stream handler for bidirectional audio (when streaming enabled) */ private mediaStreamHandler: MediaStreamHandler | null = null; @@ -217,48 +218,21 @@ export class VoiceCallWebhookServer { resolve(url); // Start the stale call reaper if configured - this.startStaleCallReaper(); + this.stopStaleCallReaper = startStaleCallReaper({ + manager: this.manager, + staleCallReaperSeconds: this.config.staleCallReaperSeconds, + }); }); }); } - /** - * Start a periodic reaper that ends calls older than the configured threshold. - * Catches calls stuck in unexpected states (e.g., notify-mode calls that never - * receive a terminal webhook from the provider). - */ - private startStaleCallReaper(): void { - const maxAgeSeconds = this.config.staleCallReaperSeconds; - if (!maxAgeSeconds || maxAgeSeconds <= 0) { - return; - } - - const CHECK_INTERVAL_MS = 30_000; // Check every 30 seconds - const maxAgeMs = maxAgeSeconds * 1000; - - this.staleCallReaperInterval = setInterval(() => { - const now = Date.now(); - for (const call of this.manager.getActiveCalls()) { - const age = now - call.startedAt; - if (age > maxAgeMs) { - console.log( - `[voice-call] Reaping stale call ${call.callId} (age: ${Math.round(age / 1000)}s, state: ${call.state})`, - ); - void this.manager.endCall(call.callId).catch((err) => { - console.warn(`[voice-call] Reaper failed to end call ${call.callId}:`, err); - }); - } - } - }, CHECK_INTERVAL_MS); - } - /** * Stop the webhook server. 
*/ async stop(): Promise { - if (this.staleCallReaperInterval) { - clearInterval(this.staleCallReaperInterval); - this.staleCallReaperInterval = null; + if (this.stopStaleCallReaper) { + this.stopStaleCallReaper(); + this.stopStaleCallReaper = null; } return new Promise((resolve) => { if (this.server) { @@ -341,9 +315,17 @@ export class VoiceCallWebhookServer { res.end("Unauthorized"); return; } + if (!verification.verifiedRequestKey) { + console.warn("[voice-call] Webhook verification succeeded without request identity key"); + res.statusCode = 401; + res.end("Unauthorized"); + return; + } // Parse events - const result = this.provider.parseWebhookEvent(ctx); + const result = this.provider.parseWebhookEvent(ctx, { + verifiedRequestKey: verification.verifiedRequestKey, + }); // Process each event if (verification.isReplay) { diff --git a/extensions/voice-call/src/webhook/stale-call-reaper.ts b/extensions/voice-call/src/webhook/stale-call-reaper.ts new file mode 100644 index 00000000000..4c9661153d5 --- /dev/null +++ b/extensions/voice-call/src/webhook/stale-call-reaper.ts @@ -0,0 +1,33 @@ +import type { CallManager } from "../manager.js"; + +const CHECK_INTERVAL_MS = 30_000; + +export function startStaleCallReaper(params: { + manager: CallManager; + staleCallReaperSeconds?: number; +}): (() => void) | null { + const maxAgeSeconds = params.staleCallReaperSeconds; + if (!maxAgeSeconds || maxAgeSeconds <= 0) { + return null; + } + + const maxAgeMs = maxAgeSeconds * 1000; + const interval = setInterval(() => { + const now = Date.now(); + for (const call of params.manager.getActiveCalls()) { + const age = now - call.startedAt; + if (age > maxAgeMs) { + console.log( + `[voice-call] Reaping stale call ${call.callId} (age: ${Math.round(age / 1000)}s, state: ${call.state})`, + ); + void params.manager.endCall(call.callId).catch((err) => { + console.warn(`[voice-call] Reaper failed to end call ${call.callId}:`, err); + }); + } + } + }, CHECK_INTERVAL_MS); + + return () => { 
+ clearInterval(interval); + }; +} diff --git a/extensions/whatsapp/package.json b/extensions/whatsapp/package.json index 8cabcd7bf57..e2ba8ba8487 100644 --- a/extensions/whatsapp/package.json +++ b/extensions/whatsapp/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/whatsapp", - "version": "2026.2.25", + "version": "2026.2.26", "private": true, "description": "OpenClaw WhatsApp channel plugin", "type": "module", diff --git a/extensions/zalo/CHANGELOG.md b/extensions/zalo/CHANGELOG.md index 2cf799f217f..341a8e37d1b 100644 --- a/extensions/zalo/CHANGELOG.md +++ b/extensions/zalo/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.26 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.2.25 ### Changes diff --git a/extensions/zalo/package.json b/extensions/zalo/package.json index 3154002f997..c6e64ee121a 100644 --- a/extensions/zalo/package.json +++ b/extensions/zalo/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/zalo", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Zalo channel plugin", "type": "module", "dependencies": { diff --git a/extensions/zalo/src/monitor.ts b/extensions/zalo/src/monitor.ts index 76e656af7de..3063e231a21 100644 --- a/extensions/zalo/src/monitor.ts +++ b/extensions/zalo/src/monitor.ts @@ -1,6 +1,7 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import type { MarkdownTableMode, OpenClawConfig, OutboundReplyPayload } from "openclaw/plugin-sdk"; import { + createScopedPairingAccess, createReplyPrefixOptions, resolveSenderCommandAuthorization, resolveOutboundMediaUrls, @@ -303,6 +304,11 @@ async function processMessageWithPipeline(params: { statusSink, fetcher, } = params; + const pairing = createScopedPairingAccess({ + core, + channel: "zalo", + accountId: account.accountId, + }); const { from, chat, message_id, date } = message; const isGroup = chat.chat_type === "GROUP"; @@ -355,9 +361,10 @@ async function processMessageWithPipeline(params: { isGroup, 
dmPolicy, configuredAllowFrom: configAllowFrom, + configuredGroupAllowFrom: groupAllowFrom, senderId, isSenderAllowed: isZaloSenderAllowed, - readAllowFromStore: () => core.channel.pairing.readAllowFromStore("zalo"), + readAllowFromStore: pairing.readAllowFromStore, shouldComputeCommandAuthorized: (body, cfg) => core.channel.commands.shouldComputeCommandAuthorized(body, cfg), resolveCommandAuthorizedFromAuthorizers: (params) => @@ -375,8 +382,7 @@ async function processMessageWithPipeline(params: { if (!allowed) { if (dmPolicy === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: "zalo", + const { code, created } = await pairing.upsertPairingRequest({ id: senderId, meta: { name: senderName ?? undefined }, }); diff --git a/extensions/zalouser/CHANGELOG.md b/extensions/zalouser/CHANGELOG.md index c247e93b967..2a59860c1b1 100644 --- a/extensions/zalouser/CHANGELOG.md +++ b/extensions/zalouser/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.26 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.25 ### Changes diff --git a/extensions/zalouser/package.json b/extensions/zalouser/package.json index 49cede39b76..feb0ce9cfc4 100644 --- a/extensions/zalouser/package.json +++ b/extensions/zalouser/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/zalouser", - "version": "2026.2.25", + "version": "2026.2.26", "description": "OpenClaw Zalo Personal Account plugin via zca-cli", "type": "module", "dependencies": { diff --git a/extensions/zalouser/src/monitor.ts b/extensions/zalouser/src/monitor.ts index 7e2ff850d40..c6aee6adcc8 100644 --- a/extensions/zalouser/src/monitor.ts +++ b/extensions/zalouser/src/monitor.ts @@ -6,6 +6,7 @@ import type { RuntimeEnv, } from "openclaw/plugin-sdk"; import { + createScopedPairingAccess, createReplyPrefixOptions, resolveOutboundMediaUrls, mergeAllowlist, @@ -177,6 +178,11 @@ async function processMessage( statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void, ): Promise { const { threadId, content, timestamp, metadata } = message; + const pairing = createScopedPairingAccess({ + core, + channel: "zalouser", + accountId: account.accountId, + }); if (!content?.trim()) { return; } @@ -225,7 +231,7 @@ async function processMessage( configuredAllowFrom: configAllowFrom, senderId, isSenderAllowed, - readAllowFromStore: () => core.channel.pairing.readAllowFromStore("zalouser"), + readAllowFromStore: pairing.readAllowFromStore, shouldComputeCommandAuthorized: (body, cfg) => core.channel.commands.shouldComputeCommandAuthorized(body, cfg), resolveCommandAuthorizedFromAuthorizers: (params) => @@ -243,8 +249,7 @@ async function processMessage( if (!allowed) { if (dmPolicy === "pairing") { - const { code, created } = await core.channel.pairing.upsertPairingRequest({ - channel: "zalouser", + const { code, created } = await pairing.upsertPairingRequest({ id: senderId, meta: { name: senderName || undefined }, }); diff --git a/package.json b/package.json index 81a8a66cb4b..18760b29b88 100644 --- a/package.json 
+++ b/package.json @@ -1,6 +1,6 @@ { "name": "openclaw", - "version": "2026.2.25", + "version": "2026.2.26", "description": "Multi-channel AI gateway with extensible messaging integrations", "keywords": [], "homepage": "https://github.com/openclaw/openclaw#readme", @@ -54,8 +54,9 @@ "build": "pnpm canvas:a2ui:bundle && tsdown && pnpm build:plugin-sdk:dts && node --import tsx scripts/write-plugin-sdk-entry-dts.ts && node --import tsx scripts/canvas-a2ui-copy.ts && node --import tsx scripts/copy-hook-metadata.ts && node --import tsx scripts/copy-export-html-templates.ts && node --import tsx scripts/write-build-info.ts && node --import tsx scripts/write-cli-compat.ts", "build:plugin-sdk:dts": "tsc -p tsconfig.plugin-sdk.dts.json", "canvas:a2ui:bundle": "bash scripts/bundle-a2ui.sh", - "check": "pnpm format:check && pnpm tsgo && pnpm lint && pnpm lint:tmp:no-random-messaging", + "check": "pnpm format:check && pnpm tsgo && pnpm lint && pnpm lint:tmp:no-random-messaging && pnpm lint:tmp:channel-agnostic-boundaries && pnpm lint:tmp:no-raw-channel-fetch && pnpm lint:auth:no-pairing-store-group && pnpm lint:auth:pairing-account-scope && pnpm check:host-env-policy:swift", "check:docs": "pnpm format:docs:check && pnpm lint:docs && pnpm docs:check-links", + "check:host-env-policy:swift": "node scripts/generate-host-env-security-policy-swift.mjs --check", "check:loc": "node --import tsx scripts/check-ts-max-loc.ts --max 500", "deadcode:ci": "pnpm deadcode:report:ci:knip && pnpm deadcode:report:ci:ts-prune && pnpm deadcode:report:ci:ts-unused", "deadcode:knip": "pnpm dlx knip --no-progress", @@ -83,17 +84,23 @@ "gateway:dev": "OPENCLAW_SKIP_CHANNELS=1 CLAWDBOT_SKIP_CHANNELS=1 node scripts/run-node.mjs --dev gateway", "gateway:dev:reset": "OPENCLAW_SKIP_CHANNELS=1 CLAWDBOT_SKIP_CHANNELS=1 node scripts/run-node.mjs --dev gateway --reset", "gateway:watch": "node scripts/watch-node.mjs gateway --force", + "gen:host-env-policy:swift": "node 
scripts/generate-host-env-security-policy-swift.mjs --write", + "ghsa:patch": "node scripts/ghsa-patch.mjs", "ios:build": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build'", "ios:gen": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate'", "ios:open": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && open OpenClaw.xcodeproj'", "ios:run": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build && xcrun simctl boot \"${IOS_SIM:-iPhone 17}\" || true && xcrun simctl launch booted ai.openclaw.ios'", "lint": "oxlint --type-aware", "lint:all": "pnpm lint && pnpm lint:swift", + "lint:auth:no-pairing-store-group": "node scripts/check-no-pairing-store-group-auth.mjs", + "lint:auth:pairing-account-scope": "node scripts/check-pairing-account-scope.mjs", "lint:docs": "pnpm dlx markdownlint-cli2", "lint:docs:fix": "pnpm dlx markdownlint-cli2 --fix", "lint:fix": "oxlint --type-aware --fix && pnpm format", "lint:swift": "swiftlint lint --config .swiftlint.yml && (cd apps/ios && swiftlint lint --config .swiftlint.yml)", + "lint:tmp:channel-agnostic-boundaries": "node scripts/check-channel-agnostic-boundaries.mjs", "lint:tmp:no-random-messaging": "node scripts/check-no-random-messaging-tmp.mjs", + "lint:tmp:no-raw-channel-fetch": "node scripts/check-no-raw-channel-fetch.mjs", "lint:ui:no-raw-window-open": "node scripts/check-no-raw-window-open.mjs", "mac:open": "open dist/OpenClaw.app", "mac:package": "bash scripts/package-mac-app.sh", @@ -130,6 +137,7 @@ "test:install:smoke": "bash scripts/test-install-sh-docker.sh", "test:live": "OPENCLAW_LIVE_TEST=1 
CLAWDBOT_LIVE_TEST=1 vitest run --config vitest.live.config.ts", "test:macmini": "OPENCLAW_TEST_VM_FORKS=0 OPENCLAW_TEST_PROFILE=serial node scripts/test-parallel.mjs", + "test:sectriage": "pnpm exec vitest run --config vitest.gateway.config.ts && vitest run --config vitest.unit.config.ts --exclude src/daemon/launchd.integration.test.ts --exclude src/process/exec.test.ts", "test:ui": "pnpm lint:ui:no-raw-window-open && pnpm --dir ui test", "test:voicecall:closedloop": "vitest run extensions/voice-call/src/manager.test.ts extensions/voice-call/src/media-stream.test.ts src/plugins/voice-call.plugin.test.ts --maxWorkers=1", "test:watch": "vitest", @@ -141,7 +149,7 @@ }, "dependencies": { "@agentclientprotocol/sdk": "0.14.1", - "@aws-sdk/client-bedrock": "^3.997.0", + "@aws-sdk/client-bedrock": "^3.998.0", "@buape/carbon": "0.0.0-beta-20260216184201", "@clack/prompts": "^1.0.1", "@discordjs/voice": "^0.19.0", @@ -151,10 +159,10 @@ "@larksuiteoapi/node-sdk": "^1.59.0", "@line/bot-sdk": "^10.6.0", "@lydell/node-pty": "1.2.0-beta.3", - "@mariozechner/pi-agent-core": "0.55.0", - "@mariozechner/pi-ai": "0.55.0", - "@mariozechner/pi-coding-agent": "0.55.0", - "@mariozechner/pi-tui": "0.55.0", + "@mariozechner/pi-agent-core": "0.55.1", + "@mariozechner/pi-ai": "0.55.1", + "@mariozechner/pi-coding-agent": "0.55.1", + "@mariozechner/pi-tui": "0.55.1", "@mozilla/readability": "^0.6.0", "@sinclair/typebox": "0.34.48", "@slack/bolt": "^4.6.0", @@ -171,7 +179,7 @@ "dotenv": "^17.3.1", "express": "^5.2.1", "file-type": "^21.3.0", - "grammy": "^1.40.0", + "grammy": "^1.40.1", "https-proxy-agent": "^7.0.6", "ipaddr.js": "^2.3.0", "jiti": "^2.6.1", @@ -201,10 +209,10 @@ "@lit/context": "^1.1.6", "@types/express": "^5.0.6", "@types/markdown-it": "^14.1.2", - "@types/node": "^25.3.0", + "@types/node": "^25.3.1", "@types/qrcode-terminal": "^0.12.2", "@types/ws": "^8.18.1", - "@typescript/native-preview": "7.0.0-dev.20260224.1", + "@typescript/native-preview": "7.0.0-dev.20260225.1", 
"@vitest/coverage-v8": "^4.0.18", "lit": "^3.3.2", "oxfmt": "0.35.0", @@ -235,7 +243,7 @@ "request": "npm:@cypress/request@3.0.10", "request-promise": "npm:@cypress/request-promise@5.0.0", "form-data": "2.5.4", - "minimatch": "10.2.1", + "minimatch": "10.2.4", "qs": "6.14.2", "@sinclair/typebox": "0.34.48", "tar": "7.5.9", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 365b0ee1707..e692b8c58a6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -6,11 +6,11 @@ settings: overrides: hono: 4.11.10 + fast-xml-parser: 5.3.6 request: npm:@cypress/request@3.0.10 request-promise: npm:@cypress/request-promise@5.0.0 - fast-xml-parser: 5.3.6 form-data: 2.5.4 - minimatch: 10.2.1 + minimatch: 10.2.4 qs: 6.14.2 '@sinclair/typebox': 0.34.48 tar: 7.5.9 @@ -24,8 +24,8 @@ importers: specifier: 0.14.1 version: 0.14.1(zod@4.3.6) '@aws-sdk/client-bedrock': - specifier: ^3.997.0 - version: 3.997.0 + specifier: ^3.998.0 + version: 3.998.0 '@buape/carbon': specifier: 0.0.0-beta-20260216184201 version: 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.11.10)(opusscript@0.1.1) @@ -37,10 +37,10 @@ importers: version: 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1) '@grammyjs/runner': specifier: ^2.0.3 - version: 2.0.3(grammy@1.40.0) + version: 2.0.3(grammy@1.40.1) '@grammyjs/transformer-throttler': specifier: ^1.2.1 - version: 1.2.1(grammy@1.40.0) + version: 1.2.1(grammy@1.40.1) '@homebridge/ciao': specifier: ^1.3.5 version: 1.3.5 @@ -54,23 +54,23 @@ importers: specifier: 1.2.0-beta.3 version: 1.2.0-beta.3 '@mariozechner/pi-agent-core': - specifier: 0.55.0 - version: 0.55.0(ws@8.19.0)(zod@4.3.6) + specifier: 0.55.1 + version: 0.55.1(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-ai': - specifier: 0.55.0 - version: 0.55.0(ws@8.19.0)(zod@4.3.6) + specifier: 0.55.1 + version: 0.55.1(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-coding-agent': - specifier: 0.55.0 - version: 0.55.0(ws@8.19.0)(zod@4.3.6) + specifier: 0.55.1 + version: 0.55.1(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': - specifier: 
0.55.0 - version: 0.55.0 + specifier: 0.55.1 + version: 0.55.1 '@mozilla/readability': specifier: ^0.6.0 version: 0.6.0 '@napi-rs/canvas': specifier: ^0.1.89 - version: 0.1.92 + version: 0.1.95 '@sinclair/typebox': specifier: 0.34.48 version: 0.34.48 @@ -117,8 +117,8 @@ importers: specifier: ^21.3.0 version: 21.3.0 grammy: - specifier: ^1.40.0 - version: 1.40.0 + specifier: ^1.40.1 + version: 1.40.1 https-proxy-agent: specifier: ^7.0.6 version: 7.0.6 @@ -205,8 +205,8 @@ importers: specifier: ^14.1.2 version: 14.1.2 '@types/node': - specifier: ^25.3.0 - version: 25.3.0 + specifier: ^25.3.1 + version: 25.3.1 '@types/qrcode-terminal': specifier: ^0.12.2 version: 0.12.2 @@ -214,11 +214,11 @@ importers: specifier: ^8.18.1 version: 8.18.1 '@typescript/native-preview': - specifier: 7.0.0-dev.20260224.1 - version: 7.0.0-dev.20260224.1 + specifier: 7.0.0-dev.20260225.1 + version: 7.0.0-dev.20260225.1 '@vitest/coverage-v8': specifier: ^4.0.18 - version: 4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18) + version: 4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18) lit: specifier: ^3.3.2 version: 3.3.2 @@ -236,7 +236,7 @@ importers: version: 0.21.1(signal-polyfill@0.2.2) tsdown: specifier: ^0.20.3 - version: 0.20.3(@typescript/native-preview@7.0.0-dev.20260224.1)(typescript@5.9.3) + version: 0.20.3(@typescript/native-preview@7.0.0-dev.20260225.1)(typescript@5.9.3) tsx: specifier: ^4.21.0 version: 4.21.0 @@ -245,12 +245,18 @@ importers: version: 5.9.3 vitest: specifier: ^4.0.18 - version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 
4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.1)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) optionalDependencies: '@discordjs/opus': specifier: ^0.10.0 version: 0.10.0 + extensions/acpx: + dependencies: + acpx: + specifier: ^0.1.13 + version: 0.1.13(zod@4.3.6) + extensions/bluebubbles: {} extensions/copilot-proxy: {} @@ -314,7 +320,7 @@ importers: version: 10.6.1 openclaw: specifier: '>=2026.1.26' - version: 2026.2.23(@napi-rs/canvas@0.1.94)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.11.10)(node-llama-cpp@3.15.1(typescript@5.9.3)) + version: 2026.2.24(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.11.10)(node-llama-cpp@3.15.1(typescript@5.9.3)) extensions/imessage: {} @@ -350,7 +356,7 @@ importers: dependencies: openclaw: specifier: '>=2026.1.26' - version: 2026.2.23(@napi-rs/canvas@0.1.94)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.11.10)(node-llama-cpp@3.15.1(typescript@5.9.3)) + version: 2026.2.24(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.11.10)(node-llama-cpp@3.15.1(typescript@5.9.3)) extensions/memory-lancedb: dependencies: @@ -487,17 +493,17 @@ importers: version: 0.21.1(signal-polyfill@0.2.2) vite: specifier: 7.3.1 - version: 7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) devDependencies: '@vitest/browser-playwright': specifier: 4.0.18 - version: 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + version: 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) playwright: specifier: ^1.58.2 version: 1.58.2 vitest: specifier: 4.0.18 - version: 
4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.1)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) packages: @@ -532,226 +538,111 @@ packages: '@aws-crypto/util@5.2.0': resolution: {integrity: sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==} - '@aws-sdk/client-bedrock-runtime@3.995.0': - resolution: {integrity: sha512-nI7tT11L9s34AKr95GHmxs6k2+3ie+rEOew2cXOwsMC9k/5aifrZwh0JjAkBop4FqbmS8n0ZjCKDjBZFY/0YxQ==} + '@aws-sdk/client-bedrock-runtime@3.998.0': + resolution: {integrity: sha512-orRgpdNmdRLik+en3xDxlGuT5AxQU+GFUTMn97ZdRuPLnAiY7Y6/8VTsod6y97/3NB8xuTZbH9wNXzW97IWNMA==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock-runtime@3.997.0': - resolution: {integrity: sha512-yEgCc/HvI7dLeXQLCuc4cnbzwE/NbNpKX8NmSSWTy3jnjiMZwrNKdHMBgPoNvaEb0klHhnTyO+JCHVVCPI/eYw==} + '@aws-sdk/client-bedrock@3.998.0': + resolution: {integrity: sha512-NeSBIdsJwVtACGHXVoguJOsKhq6oR5Q2B6BUU7LWGqIl1skwPors77aLpOa2240ZFtX3Br/0lJYfxAhB8692KA==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock@3.995.0': - resolution: {integrity: sha512-ONw5c7pOeHe78kC+jK2j73hP727Kqp7cc9lZqkfshlBD8MWxXmZM9GihIQLrNBCSUKRhc19NH7DUM6B7uN0mMQ==} + '@aws-sdk/core@3.973.14': + resolution: {integrity: sha512-iAQ1jIGESTVjoqNNY9VlsE9FnCz+Hc8s+dgurF6WrgFyVIw+uggH+V102RFhwjRv4dLSSLfzjDwvQnLszov7TQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock@3.997.0': - resolution: {integrity: sha512-PMRqxSzfkQHbU7ADVlT4jYLB7beFQWLXN9CGI9D9P8eqCIaDVv3YxTfwcT3FcBVucqktdTBTEowhvKn0whr/rA==} + '@aws-sdk/credential-provider-env@3.972.12': + resolution: {integrity: sha512-WPtj/iAYHHd+NDM6AZoilZwUz0nMaPxbTPGLA7nhyIYRZN2L8trqfbNvm7g/Jr3gzfKp1LpO6AtBTnrhz9WW2g==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-sso@3.993.0': - resolution: {integrity: 
sha512-VLUN+wIeNX24fg12SCbzTUBnBENlL014yMKZvRhPkcn4wHR6LKgNrjsG3fZ03Xs0XoKaGtNFi1VVrq666sGBoQ==} + '@aws-sdk/credential-provider-http@3.972.14': + resolution: {integrity: sha512-umtjCicH2o/Fcc8Fu1562UkDyt6gql4czTYVlUfHfAM8S4QEKggzmtHYYYpPfQcjFj1ajyy68ahYSuF67x4ptQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/core@3.973.11': - resolution: {integrity: sha512-wdQ8vrvHkKIV7yNUKXyjPWKCdYEUrZTHJ8Ojd5uJxXp9vqPCkUR1dpi1NtOLcrDgueJH7MUH5lQZxshjFPSbDA==} + '@aws-sdk/credential-provider-ini@3.972.12': + resolution: {integrity: sha512-qjzgnMl6GIBbVeK74jBqSF07+s6kyeZl5R88qjMs302JlqkxE57jkvflDmZ9I017ffEWqIUa9/M4Hfp28qyu1g==} engines: {node: '>=20.0.0'} - '@aws-sdk/core@3.973.13': - resolution: {integrity: sha512-eCFiLyBhJR7c/i8hZOETdzj2wsLFzi2L/w9/jajOgwmGqO8xrUExqkTZqdjROkwU62owqeqSuw4sIzlCv1E/ww==} + '@aws-sdk/credential-provider-login@3.972.12': + resolution: {integrity: sha512-AO57y46PzG24bJzxWLk+FYJG6MzxvXoFXnOKnmKUGV43ub4/FS/4Rz7zCC6ThqUotgqEFd30l5LTAd65RP65pg==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-env@3.972.11': - resolution: {integrity: sha512-hbyoFuVm3qOAGfIPS9t7jCs8GFLFoaOs8ZmYp/chqciuHDyEGv+J365ip7YSvXSrxxUbeW9NyB1hTLt40NBMRg==} + '@aws-sdk/credential-provider-node@3.972.13': + resolution: {integrity: sha512-ME2sgus+gFRtiudy5Xqj9iT/tj8lHOIGrFgktuO5skJU4EngOvTZ1Hpj8mknrW4FgWXmpWhc88NtEscUuuDpKw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-env@3.972.9': - resolution: {integrity: sha512-ZptrOwQynfupubvcngLkbdIq/aXvl/czdpEG8XJ8mN8Nb19BR0jaK0bR+tfuMU36Ez9q4xv7GGkHFqEEP2hUUQ==} + '@aws-sdk/credential-provider-process@3.972.12': + resolution: {integrity: sha512-msxrHBpVP5AOIDohNPCINUtL47f7XI1TEru3N13uM3nWUMvIRA1vFa8Tlxbxm1EntPPvLAxRmvE5EbjDjOZkbw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-http@3.972.11': - resolution: {integrity: sha512-hECWoOoH386bGr89NQc9vA/abkGf5TJrMREt+lhNcnSNmoBS04fK7vc3LrJBSQAUGGVj0Tz3f4dHB3w5veovig==} + '@aws-sdk/credential-provider-sso@3.972.12': + resolution: {integrity: 
sha512-D5iC5546hJyhobJN0szOT4KVeJQ8z/meZq2B3lEDZFcvHONKw+tzq36DAJUy3qLTueeB2geSxiHXngQlA11eoA==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-http@3.972.13': - resolution: {integrity: sha512-a864QxQWFkdCZ5wQF0QZNKTbqAc/DFQNeARp4gOyZZdql5RHjj4CppUSfwAzS9cpw2IPY3eeJjWqLZ1QiDB/6w==} + '@aws-sdk/credential-provider-web-identity@3.972.12': + resolution: {integrity: sha512-yluBahBVsduoA/zgV0NAXtwwXvQ6tNn95dNA3Hg+vISdiPWA46QY0d9PLO2KpNbjtm+1oGcWxemS4fYTwJ0W1w==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-ini@3.972.11': - resolution: {integrity: sha512-kvPFn626ABLzxmjFMoqMRtmFKMeiUdWPhwxhmuPu233tqHnNuXzHv0MtrZlkzHd+rwlh9j0zCbQo89B54wIazQ==} + '@aws-sdk/eventstream-handler-node@3.972.8': + resolution: {integrity: sha512-tVrf8X7hKnqv3HyVraUbsQW5mfHlD++S5NSIbfQEx0sCRvIwUbTPDl/lJCxhNmZ2zjgUyBIXIKrWilFWBxzv+w==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-ini@3.972.9': - resolution: {integrity: sha512-zr1csEu9n4eDiHMTYJabX1mDGuGLgjgUnNckIivvk43DocJC9/f6DefFrnUPZXE+GHtbW50YuXb+JIxKykU74A==} + '@aws-sdk/middleware-eventstream@3.972.5': + resolution: {integrity: sha512-j8sFerTrzS9tEJhiW2k+T9hsELE+13D5H+mqMjTRyPSgAOebkiK9d4t8vjbLOXuk7yi5lop40x15MubgcjpLmQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-login@3.972.11': - resolution: {integrity: sha512-stdy09EpBTmsxGiXe1vB5qtXNww9wact36/uWLlSV0/vWbCOUAY2JjhPXoDVLk8n+E6r0M5HeZseLk+iTtifxg==} + '@aws-sdk/middleware-host-header@3.972.5': + resolution: {integrity: sha512-dVA0m1cEQ2iA6yB19aHvWNeUVTuvTt3AXzT0aiIu2uxk0S7AcmwDCDaRgYa/v+eFHcJVxEnpYTozqA7X62xinw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-login@3.972.9': - resolution: {integrity: sha512-m4RIpVgZChv0vWS/HKChg1xLgZPpx8Z+ly9Fv7FwA8SOfuC6I3htcSaBz2Ch4bneRIiBUhwP4ziUo0UZgtJStQ==} + '@aws-sdk/middleware-logger@3.972.5': + resolution: {integrity: sha512-03RqplLZjUTkYi0dDPR/bbOLnDLFNdaVvNENgA3XK7Ph1MhEBhUYlgoGfOyRAKApDZ+WG4ykOoA8jI8J04jmFA==} engines: {node: '>=20.0.0'} - 
'@aws-sdk/credential-provider-node@3.972.10': - resolution: {integrity: sha512-70nCESlvnzjo4LjJ8By8MYIiBogkYPSXl3WmMZfH9RZcB/Nt9qVWbFpYj6Fk1vLa4Vk8qagFVeXgxdieMxG1QA==} + '@aws-sdk/middleware-recursion-detection@3.972.5': + resolution: {integrity: sha512-2QSuuVkpHTe84+mDdnFjHX8rAP3g0yYwLVAhS3lQN1rW5Z/zNsf8/pYQrLjLO4n4sPCsUAkTa0Vrod0lk+o1Tg==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-node@3.972.12': - resolution: {integrity: sha512-gMWGnHbNSKWRj+PAiuSg0EDpEwpyIgk0v9U6EuZ1C/5/BUv25Way+E+UFB7r+YYkscuBJMJ+ai8E2K0Q8dx50g==} + '@aws-sdk/middleware-user-agent@3.972.14': + resolution: {integrity: sha512-PzDz+yRAQuIzd+4ZY3s6/TYRzlNKAn4Gae3E5uLV7NnYHqrZHFoAfKE4beXcu3C51pA2/FQ3X2qOGSYqUoN1WQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-process@3.972.11': - resolution: {integrity: sha512-B049fvbv41vf0Fs5bCtbzHpruBDp61sPiFDxUmkAJ/zvgSAturpj2rqzV1rj2clg4mb44Uxp9rgpcODexNFlFA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-process@3.972.9': - resolution: {integrity: sha512-gOWl0Fe2gETj5Bk151+LYKpeGi2lBDLNu+NMNpHRlIrKHdBmVun8/AalwMK8ci4uRfG5a3/+zvZBMpuen1SZ0A==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-sso@3.972.11': - resolution: {integrity: sha512-vX9z8skN8vPtamVWmSCm4KQohub+1uMuRzIo4urZ2ZUMBAl1bqHatVD/roCb3qRfAyIGvZXCA/AWS03BQRMyCQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-sso@3.972.9': - resolution: {integrity: sha512-ey7S686foGTArvFhi3ifQXmgptKYvLSGE2250BAQceMSXZddz7sUSNERGJT2S7u5KIe/kgugxrt01hntXVln6w==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-web-identity@3.972.11': - resolution: {integrity: sha512-VR2Ju/QBdOjnWNIYuxRml63eFDLGc6Zl8aDwLi1rzgWo3rLBgtaWhWVBAijhVXzyPdQIOqdL8hvll5ybqumjeQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-web-identity@3.972.9': - resolution: {integrity: sha512-8LnfS76nHXoEc9aRRiMMpxZxJeDG0yusdyo3NvPhCgESmBUgpMa4luhGbClW5NoX/qRcGxxM6Z/esqANSNMTow==} - engines: {node: '>=20.0.0'} - - 
'@aws-sdk/eventstream-handler-node@3.972.5': - resolution: {integrity: sha512-xEmd3dnyn83K6t4AJxBJA63wpEoCD45ERFG0XMTViD2E/Ohls9TLxjOWPb1PAxR9/46cKy/TImez1GoqP6xVNQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/eventstream-handler-node@3.972.7': - resolution: {integrity: sha512-p8k2ZWKJVrR3KIcBbI+/+FcWXdwe3LLgGnixsA7w8lDwWjzSVDHFp6uPeSqBt5PQpRxzak9EheJ1xTmOnHGf4g==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-eventstream@3.972.3': - resolution: {integrity: sha512-pbvZ6Ye/Ks6BAZPa3RhsNjHrvxU9li25PMhSdDpbX0jzdpKpAkIR65gXSNKmA/REnSdEMWSD4vKUW+5eMFzB6w==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-eventstream@3.972.4': - resolution: {integrity: sha512-0t+2Dn46cRE9iu5ynUXINBtR0wNHi/Jz3FbrqS5k3dGot2O7Ln1xCqXbJUAtGM5ZAqN77SbnpETAgVWC84DeoA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-host-header@3.972.3': - resolution: {integrity: sha512-aknPTb2M+G3s+0qLCx4Li/qGZH8IIYjugHMv15JTYMe6mgZO8VBpYgeGYsNMGCqCZOcWzuf900jFBG5bopfzmA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-host-header@3.972.4': - resolution: {integrity: sha512-4q2Vg7/zOB10huDBLjzzTwVjBpG22X3J3ief2XrJEgTaANZrNfA3/cGbCVNAibSbu/nIYA7tDk8WCdsIzDDc4Q==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-logger@3.972.3': - resolution: {integrity: sha512-Ftg09xNNRqaz9QNzlfdQWfpqMCJbsQdnZVJP55jfhbKi1+FTWxGuvfPoBhDHIovqWKjqbuiew3HuhxbJ0+OjgA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-logger@3.972.4': - resolution: {integrity: sha512-xFqPvTysuZAHSkdygT+ken/5rzkR7fhOoDPejAJQslZpp0XBepmCJnDOqA57ERtCTBpu8wpjTFI1ETd4S0AXEw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-recursion-detection@3.972.3': - resolution: {integrity: sha512-PY57QhzNuXHnwbJgbWYTrqIDHYSeOlhfYERTAuc16LKZpTZRJUjzBFokp9hF7u1fuGeE3D70ERXzdbMBOqQz7Q==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-recursion-detection@3.972.4': - resolution: {integrity: sha512-tVbRaayUZ7y2bOb02hC3oEPTqQf2A0HpPDwdMl1qTmye/q8Mq1F1WiIoFkQwG/YQFvbyErYIDMbYzIlxzzLtjQ==} - engines: {node: 
'>=20.0.0'} - - '@aws-sdk/middleware-user-agent@3.972.11': - resolution: {integrity: sha512-R8CvPsPHXwzIHCAza+bllY6PrctEk4lYq/SkHJz9NLoBHCcKQrbOcsfXxO6xmipSbUNIbNIUhH0lBsJGgsRdiw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-user-agent@3.972.13': - resolution: {integrity: sha512-p1kVYbzBxRmhuOHoL/ANJPCedqUxnVgkEjxPoxt5pQv/yzppHM7aBWciYEE9TZY59M421D3GjLfZIZBoEFboVQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-websocket@3.972.6': - resolution: {integrity: sha512-1DedO6N3m8zQ/vG6twNiHtsdwBgk773VdavLEbB3NXeKZDlzSK1BTviqWwvJdKx5UnIy4kGGP6WWpCEFEt/bhQ==} + '@aws-sdk/middleware-websocket@3.972.9': + resolution: {integrity: sha512-O+FSwU9UvKd+QNuGLHqvmP33kkH4jh8pAgdMo3wbFLf+u30fS9/2gbSSWWtNCcWkSNFyG6RUlKU7jPSLApFfGw==} engines: {node: '>= 14.0.0'} - '@aws-sdk/middleware-websocket@3.972.8': - resolution: {integrity: sha512-KPUXz8lRw73Rh12/QkELxiryC9Wi9Ah1xNzFe2Vtbz2/81c2ZA0yM8er+u0iCF/SRMMhDQshLcmRNgn/ueA+gA==} - engines: {node: '>= 14.0.0'} - - '@aws-sdk/nested-clients@3.993.0': - resolution: {integrity: sha512-iOq86f2H67924kQUIPOAvlmMaOAvOLoDOIb66I2YqSUpMYB6ufiuJW3RlREgskxv86S5qKzMnfy/X6CqMjK6XQ==} + '@aws-sdk/nested-clients@3.996.2': + resolution: {integrity: sha512-W+u6EM8WRxOIhAhR2mXMHSaUygqItpTehkgxLwJngXqr9RlAR4t6CtECH7o7QK0ct3oyi5Z8ViDHtPbel+D2Rg==} engines: {node: '>=20.0.0'} - '@aws-sdk/nested-clients@3.995.0': - resolution: {integrity: sha512-7gq9gismVhESiRsSt0eYe1y1b6jS20LqLk+e/YSyPmGi9yHdndHQLIq73RbEJnK/QPpkQGFqq70M1mI46M1HGw==} + '@aws-sdk/region-config-resolver@3.972.5': + resolution: {integrity: sha512-AOitrygDwfTNCLCW7L+GScDy1p49FZ6WutTUFWROouoPetfVNmpL4q8TWD3MhfY/ynhoGhleUQENrBH374EU8w==} engines: {node: '>=20.0.0'} - '@aws-sdk/nested-clients@3.996.1': - resolution: {integrity: sha512-XHVLFRGkuV2gh2uwBahCt65ALMb5wMpqplXEZIvFnWOCPlk60B7h7M5J9Em243K8iICDiWY6KhBEqVGfjTqlLA==} + '@aws-sdk/token-providers@3.998.0': + resolution: {integrity: sha512-JFzi44tQnENZQ+1DYcHfoa/wTRKkccz0VsNMow0rvsxZtqUEkeV2pYFbir35mHTyUKju9995ay1MAGxLt1dpRA==} 
engines: {node: '>=20.0.0'} - '@aws-sdk/region-config-resolver@3.972.3': - resolution: {integrity: sha512-v4J8qYAWfOMcZ4MJUyatntOicTzEMaU7j3OpkRCGGFSL2NgXQ5VbxauIyORA+pxdKZ0qQG2tCQjQjZDlXEC3Ow==} + '@aws-sdk/types@3.973.3': + resolution: {integrity: sha512-tma6D8/xHZHJEUqmr6ksZjZ0onyIUqKDQLyp50ttZJmS0IwFYzxBgp5CxFvpYAnah52V3UtgrqGA6E83gtT7NQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/region-config-resolver@3.972.4': - resolution: {integrity: sha512-3GrJYv5eI65oCKveBZP7Q246dVP+tqeys9aKMB0dfX1glUWfppWlxIu52derqdNb9BX9lxYmeiaBcBIqOAYSgQ==} + '@aws-sdk/util-endpoints@3.996.2': + resolution: {integrity: sha512-83E6T1CKi0/IozPzqRBKqduW0mS4UQdI3soBH6CG7UgupTADWunqEMOTuPWCs9XGjpJJ4ujj+yu7pn8svhp5yg==} engines: {node: '>=20.0.0'} - '@aws-sdk/token-providers@3.993.0': - resolution: {integrity: sha512-+35g4c+8r7sB9Sjp1KPdM8qxGn6B/shBjJtEUN4e+Edw9UEQlZKIzioOGu3UAbyE0a/s450LdLZr4wbJChtmww==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/token-providers@3.995.0': - resolution: {integrity: sha512-lYSadNdZZ513qCKoj/KlJ+PgCycL3n8ZNS37qLVFC0t7TbHzoxvGquu9aD2n9OCERAn43OMhQ7dXjYDYdjAXzA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/token-providers@3.997.0': - resolution: {integrity: sha512-UdG36F7lU9aTqGFRieEyuRUJlgEJBqKeKKekC0esH21DbUSKhPR1kZBah214kYasIaWe1hLJLaqUigoTa5hZAQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/types@3.973.1': - resolution: {integrity: sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/types@3.973.2': - resolution: {integrity: sha512-maTZwGsALtnAw4TJr/S6yERAosTwPduu0XhUV+SdbvRZtCOgSgk1ttL2R0XYzvkYSpvbtJocn77tBXq2AKglBw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-endpoints@3.993.0': - resolution: {integrity: sha512-j6vioBeRZ4eHX4SWGvGPpwGg/xSOcK7f1GL0VM+rdf3ZFTIsUEhCFmD78B+5r2PgztcECSzEfvHQX01k8dPQPw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-endpoints@3.995.0': - resolution: {integrity: 
sha512-aym/pjB8SLbo9w2nmkrDdAAVKVlf7CM71B9mKhjDbJTzwpSFBPHqJIMdDyj0mLumKC0aIVDr1H6U+59m9GvMFw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-endpoints@3.996.1': - resolution: {integrity: sha512-7cJyd+M5i0IoqWkJa1KFx8KNCGIx+Ywu+lT53KpqX7ReVwz03DCKUqvZ/y65vdKwo9w9/HptSAeLDluO5MpGIg==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-format-url@3.972.3': - resolution: {integrity: sha512-n7F2ycckcKFXa01vAsT/SJdjFHfKH9s96QHcs5gn8AaaigASICeME8WdUL9uBp8XV/OVwEt8+6gzn6KFUgQa8g==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-format-url@3.972.4': - resolution: {integrity: sha512-rPm9g4WvgTz4ko5kqseIG5Vp5LUAbWBBDalm4ogHLMc0i20ChwQWqwuTUPJSu8zXn43jIM0xO2KZaYQsFJb+ew==} + '@aws-sdk/util-format-url@3.972.5': + resolution: {integrity: sha512-PccfrPQVOEQSL8xaSvu988ESMlqdH1Qfk3AWPZksCOYPHyzYeUV988E+DBachXNV7tBVTUvK85cZYEZu7JtPxQ==} engines: {node: '>=20.0.0'} '@aws-sdk/util-locate-window@3.965.4': resolution: {integrity: sha512-H1onv5SkgPBK2P6JR2MjGgbOnttoNzSPIRoeZTNPZYyaplwGg50zS3amXvXqF0/qfXpWEC9rLWU564QTB9bSog==} engines: {node: '>=20.0.0'} - '@aws-sdk/util-user-agent-browser@3.972.3': - resolution: {integrity: sha512-JurOwkRUcXD/5MTDBcqdyQ9eVedtAsZgw5rBwktsPTN7QtPiS2Ld1jkJepNgYoCufz1Wcut9iup7GJDoIHp8Fw==} + '@aws-sdk/util-user-agent-browser@3.972.5': + resolution: {integrity: sha512-2ja1WqtuBaEAMgVoHYuWx393DF6ULqdt3OozeO7BosqouYaoU47Adtp9vEF+GImSG/Q8A+dqfwDULTTdMkHGUQ==} - '@aws-sdk/util-user-agent-browser@3.972.4': - resolution: {integrity: sha512-GHb+8XHv6hfLWKQKAKaSOm+vRvogg07s+FWtbR3+eCXXPSFn9XVmiYF4oypAxH7dGIvoxkVG/buHEnzYukyJiA==} - - '@aws-sdk/util-user-agent-node@3.972.10': - resolution: {integrity: sha512-LVXzICPlsheET+sE6tkcS47Q5HkSTrANIlqL1iFxGAY/wRQ236DX/PCAK56qMh9QJoXAfXfoRW0B0Og4R+X7Nw==} + '@aws-sdk/util-user-agent-node@3.972.13': + resolution: {integrity: sha512-PHErmuu+v6iAST48zcsB2cYwDKW45gk6qCp49t1p0NGZ4EaFPr/tA5jl0X/ekDwvWbuT0LTj++fjjdVQAbuh0Q==} engines: {node: '>=20.0.0'} peerDependencies: aws-crt: '>=1.0.0' @@ -759,21 +650,8 @@ packages: aws-crt: 
optional: true - '@aws-sdk/util-user-agent-node@3.972.12': - resolution: {integrity: sha512-c1n3wBK6te+Vd9qU86nF8AsYuiBsxLn0AADGWyFX7vEADr3btaAg5iPQT6GYj6rvzSOEVVisvaAatOWInlJUbQ==} - engines: {node: '>=20.0.0'} - peerDependencies: - aws-crt: '>=1.0.0' - peerDependenciesMeta: - aws-crt: - optional: true - - '@aws-sdk/xml-builder@3.972.5': - resolution: {integrity: sha512-mCae5Ys6Qm1LDu0qdGwx2UQ63ONUe+FHw908fJzLDqFKTDBK4LDZUqKWm4OkTCNFq19bftjsBSESIGLD/s3/rA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/xml-builder@3.972.6': - resolution: {integrity: sha512-YrXu+UnfC8IdARa4ZkrpcyuRmA/TVgYW6Lcdtvi34NQgRjM1hTirNirN+rGb+s/kNomby8oJiIAu0KNbiZC7PA==} + '@aws-sdk/xml-builder@3.972.7': + resolution: {integrity: sha512-9GF86s6mHuc1TYCbuKatMDWl2PyK3KIkpRaI7ul2/gYZPfaLzKZ+ISHhxzVb9KVeakf75tUQe6CXW2gugSCXNw==} engines: {node: '>=20.0.0'} '@aws/lambda-invoke-store@0.2.3': @@ -792,12 +670,12 @@ packages: resolution: {integrity: sha512-XPArKLzsvl0Hf0CaGyKHUyVgF7oDnhKoP85Xv6M4StF/1AhfORhZudHtOyf2s+FcbuQ9dPRAjB8J2KvRRMUK2A==} engines: {node: '>=20.0.0'} - '@azure/msal-common@16.0.4': - resolution: {integrity: sha512-0KZ9/wbUyZN65JLAx5bGNfWjkD0kRMUgM99oSpZFg7wEOb3XcKIiHrFnIpgyc8zZ70fHodyh8JKEOel1oN24Gw==} + '@azure/msal-common@16.1.0': + resolution: {integrity: sha512-uiX0ChrRFbreXlPlDR8LwHKmZpJudDAr124iNWJKJ+b7MJUWXmvVU3idSi/c5lk1FwLVZeMxhQir3BGdV09I+g==} engines: {node: '>=0.8.0'} - '@azure/msal-node@5.0.4': - resolution: {integrity: sha512-WbA77m68noCw4qV+1tMm5nodll34JCDF0KmrSrp9LskS0bGbgHt98ZRxq69BQK5mjMqDD5ThHJOrrGSfzPybxw==} + '@azure/msal-node@5.0.5': + resolution: {integrity: sha512-CxUYSZgFiviUC3d8Hc+tT7uxre6QkPEWYEHWXmyEBzaO6tfFY4hs5KbXWU6s4q9Zv1NP/04qiR3mcujYLRuYuw==} engines: {node: '>=20'} '@babel/generator@8.0.0-rc.1': @@ -1103,6 +981,15 @@ packages: '@modelcontextprotocol/sdk': optional: true + '@google/genai@1.43.0': + resolution: {integrity: sha512-hklCsJNdMlDM1IwcCVcGQFBg2izY0+t5BIGbRsxi2UnKi6AGKL7pqJqmBDNRbw0bYCs4y3NA7TB+fkKfP/Nrdw==} + engines: {node: '>=20.0.0'} 
+ peerDependencies: + '@modelcontextprotocol/sdk': ^1.25.2 + peerDependenciesMeta: + '@modelcontextprotocol/sdk': + optional: true + '@grammyjs/runner@2.0.3': resolution: {integrity: sha512-nckmTs1dPWfVQteK9cxqxzE+0m1VRvluLWB8UgFzsjg62w3qthPJt0TYtJBEdG7OedvfQq4vnFAyE6iaMkR42A==} engines: {node: '>=12.20.0 || >=14.13.1'} @@ -1492,26 +1379,21 @@ packages: resolution: {integrity: sha512-faGUlTcXka5l7rv0lP3K3vGW/ejRuOS24RR2aSFWREUQqzjgdsuWNo/IiPqL3kWRGt6Ahl2+qcDAwtdeWeuGUw==} hasBin: true - '@mariozechner/pi-agent-core@0.54.1': - resolution: {integrity: sha512-AC0SqEbR62PckWOyP0CmhYtfcC+Q6e1DGghwEcKpomTtmNfHTy7iTVy64mmtB2CFiN8j4rJFCqh2xJHgucUvkA==} - engines: {node: '>=20.0.0'} - '@mariozechner/pi-agent-core@0.55.0': resolution: {integrity: sha512-8RLaOpmESBSqTSpA/6E9ihxYybhrkNa5LOYNdJst57LuDSDytfvkiTXlKA4DjsHua4PKopG9p0Wgqaem+kKvCA==} engines: {node: '>=20.0.0'} - '@mariozechner/pi-ai@0.54.1': - resolution: {integrity: sha512-tiVvoNQV+3dpWgRQ1U/3bwJoDVSYwL17BE/kc00nXmaSLAPwNZoxLagtQ+HBr/rGzkq5viOgQf2dk+ud+/4UCg==} + '@mariozechner/pi-agent-core@0.55.1': + resolution: {integrity: sha512-t9FAb4ouy8HJSIa8gSRC7j8oeUOb2XDdhvBiHj7FhfpYafj1vRPrvGIEXUV8fPJDCI07vhK9iztP27EPk+yEWw==} engines: {node: '>=20.0.0'} - hasBin: true '@mariozechner/pi-ai@0.55.0': resolution: {integrity: sha512-G5rutF5h1hFZgU1W2yYktZJegKUZVDhdGCxvl7zPOonrGBczuNBKmM87VXvl1m+t9718rYMsgTSBseGN0RhYug==} engines: {node: '>=20.0.0'} hasBin: true - '@mariozechner/pi-coding-agent@0.54.1': - resolution: {integrity: sha512-pPFrdaKZ16oIcdhZVcfWPhCDFx8PWHaACjQS9aFFcMOhLBduyKAGyf8bQtfysekl+gIbBSGDT2rgCxsOwK2bQw==} + '@mariozechner/pi-ai@0.55.1': + resolution: {integrity: sha512-JJX1LrVWPUPMExu0f89XR4nMNP37+FNLjEE4cIHq9Hi6xQtOiiEi7OjDFMx58hWsq81xH1CwmQXqGTWBjbXKTw==} engines: {node: '>=20.0.0'} hasBin: true @@ -1520,14 +1402,19 @@ packages: engines: {node: '>=20.0.0'} hasBin: true - '@mariozechner/pi-tui@0.54.1': - resolution: {integrity: 
sha512-FY8QcLlr9T276oZAwMSSPo1drg+J9Y7B+A0S9g8Jh6IFJxymKZZq29/Vit6XDziJfZIgJDraC6lpobtxgTEoFQ==} + '@mariozechner/pi-coding-agent@0.55.1': + resolution: {integrity: sha512-H2M8mbBNyDqhON6+3m4H8CjqJ9taGq/CM3B8dG73+VJJIXFm5SExhU9bdgcw2xh0wWj8yEumsj0of6Tu+F7Ffg==} engines: {node: '>=20.0.0'} + hasBin: true '@mariozechner/pi-tui@0.55.0': resolution: {integrity: sha512-qFdBsA0CTIQbUlN5hp1yJOSgJJiuTegx+oNPzpHxaMMBPjwMuh3Y8szBqE/2HxroA6mGSQfp/fzuPinTK1+Iyg==} engines: {node: '>=20.0.0'} + '@mariozechner/pi-tui@0.55.1': + resolution: {integrity: sha512-rnqDUp2fm/ySevC0Ltj/ZFRbEc1kZ1A4qHESejj9hA8NVrb/pX9g82XwTE762JOieEGrRWAtmHLNOm7/e4dJMw==} + engines: {node: '>=20.0.0'} + '@matrix-org/matrix-sdk-crypto-nodejs@0.4.0': resolution: {integrity: sha512-+qqgpn39XFSbsD0dFjssGO9vHEP7sTyfs8yTpt8vuqWpUpF20QMwpCZi0jpYw7GxjErNTsMshopuo8677DfGEA==} engines: {node: '>= 22'} @@ -1547,144 +1434,74 @@ packages: resolution: {integrity: sha512-juG5VWh4qAivzTAeMzvY9xs9HY5rAcr2E4I7tiSSCokRFi7XIZCAu92ZkSTsIj1OPceCifL3cpfteP3pDT9/QQ==} engines: {node: '>=14.0.0'} - '@napi-rs/canvas-android-arm64@0.1.92': - resolution: {integrity: sha512-rDOtq53ujfOuevD5taxAuIFALuf1QsQWZe1yS/N4MtT+tNiDBEdjufvQRPWZ11FubL2uwgP8ApYU3YOaNu1ZsQ==} + '@napi-rs/canvas-android-arm64@0.1.95': + resolution: {integrity: sha512-SqTh0wsYbetckMXEvHqmR7HKRJujVf1sYv1xdlhkifg6TlCSysz1opa49LlS3+xWuazcQcfRfmhA07HxxxGsAA==} engines: {node: '>= 10'} cpu: [arm64] os: [android] - '@napi-rs/canvas-android-arm64@0.1.94': - resolution: {integrity: sha512-YQ6K83RWNMQOtgpk1aIML97QTE3zxPmVCHTi5eA8Nss4+B9JZi5J7LHQr7B5oD7VwSfWd++xsPdUiJ1+frqsMg==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [android] - - '@napi-rs/canvas-darwin-arm64@0.1.92': - resolution: {integrity: sha512-4PT6GRGCr7yMRehp42x0LJb1V0IEy1cDZDDayv7eKbFUIGbPFkV7CRC9Bee5MPkjg1EB4ZPXXUyy3gjQm7mR8Q==} + '@napi-rs/canvas-darwin-arm64@0.1.95': + resolution: {integrity: sha512-F7jT0Syu+B9DGBUBcMk3qCRIxAWiDXmvEjamwbYfbZl7asI1pmXZUnCOoIu49Wt0RNooToYfRDxU9omD6t5Xuw==} engines: {node: 
'>= 10'} cpu: [arm64] os: [darwin] - '@napi-rs/canvas-darwin-arm64@0.1.94': - resolution: {integrity: sha512-h1yl9XjqSrYZAbBUHCVLAhwd2knM8D8xt081Pv40KqNJXfeMmBrhG1SfroRymG2ak+pl42iQlWjFZ2Z8AWFdSw==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [darwin] - - '@napi-rs/canvas-darwin-x64@0.1.92': - resolution: {integrity: sha512-5e/3ZapP7CqPtDcZPtmowCsjoyQwuNMMD7c0GKPtZQ8pgQhLkeq/3fmk0HqNSD1i227FyJN/9pDrhw/UMTkaWA==} + '@napi-rs/canvas-darwin-x64@0.1.95': + resolution: {integrity: sha512-54eb2Ho15RDjYGXO/harjRznBrAvu+j5nQ85Z4Qd6Qg3slR8/Ja+Yvvy9G4yo7rdX6NR9GPkZeSTf2UcKXwaXw==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@napi-rs/canvas-darwin-x64@0.1.94': - resolution: {integrity: sha512-rkr/lrafbU0IIHebst+sQJf1HjdHvTMN0GGqWvw5OfaVS0K/sVxhNHtxi8oCfaRSvRE62aJZjWTcdc2ue/o6yw==} - engines: {node: '>= 10'} - cpu: [x64] - os: [darwin] - - '@napi-rs/canvas-linux-arm-gnueabihf@0.1.92': - resolution: {integrity: sha512-j6KaLL9iir68lwpzzY+aBGag1PZp3+gJE2mQ3ar4VJVmyLRVOh+1qsdNK1gfWoAVy5w6U7OEYFrLzN2vOFUSng==} + '@napi-rs/canvas-linux-arm-gnueabihf@0.1.95': + resolution: {integrity: sha512-hYaLCSLx5bmbnclzQc3ado3PgZ66blJWzjXp0wJmdwpr/kH+Mwhj6vuytJIomgksyJoCdIqIa4N6aiqBGJtJ5Q==} engines: {node: '>= 10'} cpu: [arm] os: [linux] - '@napi-rs/canvas-linux-arm-gnueabihf@0.1.94': - resolution: {integrity: sha512-q95TDo32YkTKdi+Sp2yQ2Npm7pmfKEruNoJ3RUIw1KvQQ9EHKL3fii/iuU60tnzP0W+c8BKN7BFstNFcm2KXCQ==} - engines: {node: '>= 10'} - cpu: [arm] - os: [linux] - - '@napi-rs/canvas-linux-arm64-gnu@0.1.92': - resolution: {integrity: sha512-s3NlnJMHOSotUYVoTCoC1OcomaChFdKmZg0VsHFeIkeHbwX0uPHP4eCX1irjSfMykyvsGHTQDfBAtGYuqxCxhQ==} + '@napi-rs/canvas-linux-arm64-gnu@0.1.95': + resolution: {integrity: sha512-J7VipONahKsmScPZsipHVQBqpbZx4favaD8/enWzzlGcjiwycOoymL7f4tNeqdjK0su19bDOUt6mjp9gsPWYlw==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@napi-rs/canvas-linux-arm64-gnu@0.1.94': - resolution: {integrity: 
sha512-Je5/gKVybWAoIGyDOcJF1zYgBTKWkPIkfOgvCzrQcl8h7DiDvRvEY70EapA+NicGe4X3DW9VsCT34KZJnerShA==} + '@napi-rs/canvas-linux-arm64-musl@0.1.95': + resolution: {integrity: sha512-PXy0UT1J/8MPG8UAkWp6Fd51ZtIZINFzIjGH909JjQrtCuJf3X6nanHYdz1A+Wq9o4aoPAw1YEUpFS1lelsVlg==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@napi-rs/canvas-linux-arm64-musl@0.1.92': - resolution: {integrity: sha512-xV0GQnukYq5qY+ebkAwHjnP2OrSGBxS3vSi1zQNQj0bkXU6Ou+Tw7JjCM7pZcQ28MUyEBS1yKfo7rc7ip2IPFQ==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [linux] - - '@napi-rs/canvas-linux-arm64-musl@0.1.94': - resolution: {integrity: sha512-9YleDDauDEZNsFnfz3HyZvp1LK1ECu8N2gDUg1wtL7uWLQv8dUbfVeFtp5HOdxht1o7LsWRmQeqeIbnD4EqE2A==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [linux] - - '@napi-rs/canvas-linux-riscv64-gnu@0.1.92': - resolution: {integrity: sha512-+GKvIFbQ74eB/TopEdH6XIXcvOGcuKvCITLGXy7WLJAyNp3Kdn1ncjxg91ihatBaPR+t63QOE99yHuIWn3UQ9w==} + '@napi-rs/canvas-linux-riscv64-gnu@0.1.95': + resolution: {integrity: sha512-2IzCkW2RHRdcgF9W5/plHvYFpc6uikyjMb5SxjqmNxfyDFz9/HB89yhi8YQo0SNqrGRI7yBVDec7Pt+uMyRWsg==} engines: {node: '>= 10'} cpu: [riscv64] os: [linux] - '@napi-rs/canvas-linux-riscv64-gnu@0.1.94': - resolution: {integrity: sha512-lQUy9Xvz7ch8+0AXq8RkioLD41iQ6EqdKFu5uV40BxkBDijB2SCm1jna/BRhqitQRSjwAk2KlLUxTjHChyfNGg==} - engines: {node: '>= 10'} - cpu: [riscv64] - os: [linux] - - '@napi-rs/canvas-linux-x64-gnu@0.1.92': - resolution: {integrity: sha512-tFd6MwbEhZ1g64iVY2asV+dOJC+GT3Yd6UH4G3Hp0/VHQ6qikB+nvXEULskFYZ0+wFqlGPtXjG1Jmv7sJy+3Ww==} + '@napi-rs/canvas-linux-x64-gnu@0.1.95': + resolution: {integrity: sha512-OV/ol/OtcUr4qDhQg8G7SdViZX8XyQeKpPsVv/j3+7U178FGoU4M+yIocdVo1ih/A8GQ63+LjF4jDoEjaVU8Pw==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@napi-rs/canvas-linux-x64-gnu@0.1.94': - resolution: {integrity: sha512-0IYgyuUaugHdWxXRhDQUCMxTou8kAHHmpIBFtbmdRlciPlfK7AYQW5agvUU1PghPc5Ja3Zzp5qZfiiLu36vIWQ==} + '@napi-rs/canvas-linux-x64-musl@0.1.95': + resolution: {integrity: 
sha512-Z5KzqBK/XzPz5+SFHKz7yKqClEQ8pOiEDdgk5SlphBLVNb8JFIJkxhtJKSvnJyHh2rjVgiFmvtJzMF0gNwwKyQ==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@napi-rs/canvas-linux-x64-musl@0.1.92': - resolution: {integrity: sha512-uSuqeSveB/ZGd72VfNbHCSXO9sArpZTvznMVsb42nqPP7gBGEH6NJQ0+hmF+w24unEmxBhPYakP/Wiosm16KkA==} - engines: {node: '>= 10'} - cpu: [x64] - os: [linux] - - '@napi-rs/canvas-linux-x64-musl@0.1.94': - resolution: {integrity: sha512-xuetfzzcflCIiBw2HJlOU4/+zTqhdxoe1BEcwdBsHAd/5wAQ4Pp+FGPi5g74gDvtcXQmTdEU3fLQvHc/j3wbxQ==} - engines: {node: '>= 10'} - cpu: [x64] - os: [linux] - - '@napi-rs/canvas-win32-arm64-msvc@0.1.92': - resolution: {integrity: sha512-20SK5AU/OUNz9ZuoAPj5ekWai45EIBDh/XsdrVZ8le/pJVlhjFU3olbumSQUXRFn7lBRS+qwM8kA//uLaDx6iQ==} + '@napi-rs/canvas-win32-arm64-msvc@0.1.95': + resolution: {integrity: sha512-aj0YbRpe8qVJ4OzMsK7NfNQePgcf9zkGFzNZ9mSuaxXzhpLHmlF2GivNdCdNOg8WzA/NxV6IU4c5XkXadUMLeA==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@napi-rs/canvas-win32-arm64-msvc@0.1.94': - resolution: {integrity: sha512-2F3p8wci4Q4vjbENlQtSibqFWxBdpzYk1c8Jh1mqqLE92rBKElG018dBJ6C8Dp49vE350Hmy5LrfdLgFKMG8sg==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [win32] - - '@napi-rs/canvas-win32-x64-msvc@0.1.92': - resolution: {integrity: sha512-KEhyZLzq1MXCNlXybz4k25MJmHFp+uK1SIb8yJB0xfrQjz5aogAMhyseSzewo+XxAq3OAOdyKvfHGNzT3w1RPg==} + '@napi-rs/canvas-win32-x64-msvc@0.1.95': + resolution: {integrity: sha512-GA8leTTCfdjuHi8reICTIxU0081PhXvl3lzIniLUjeLACx9GubUiyzkwFb+oyeKLS5IAGZFLKnzAf4wm2epRlA==} engines: {node: '>= 10'} cpu: [x64] os: [win32] - '@napi-rs/canvas-win32-x64-msvc@0.1.94': - resolution: {integrity: sha512-hjwaIKMrQLoNiu3724octSGhDVKkBwJtMeQ3qUXOi+y60h2q6Sxq3+MM2za3V88+XQzzwn0DgG0Xo6v6gzV8kQ==} - engines: {node: '>= 10'} - cpu: [x64] - os: [win32] - - '@napi-rs/canvas@0.1.92': - resolution: {integrity: sha512-q7ZaUCJkEU5BeOdE7fBx1XWRd2T5Ady65nxq4brMf5L4cE1VV/ACq5w9Z5b/IVJs8CwSSIwc30nlthH0gFo4Ig==} - engines: {node: '>= 10'} - - 
'@napi-rs/canvas@0.1.94': - resolution: {integrity: sha512-8jBkvqynXNdQPNZjLJxB/Rp9PdnnMSHFBLzPmMc615nlt/O6w0ergBbkEDEOr8EbjL8nRQDpEklPx4pzD7zrbg==} + '@napi-rs/canvas@0.1.95': + resolution: {integrity: sha512-lkg23ge+rgyhgUwXmlbkPEhuhHq/hUi/gXKH+4I7vO+lJrbNfEYcQdJLIGjKyXLQzgFiiyDAwh5vAe/tITAE+w==} engines: {node: '>= 10'} '@napi-rs/wasm-runtime@1.1.1': @@ -1815,8 +1632,8 @@ packages: resolution: {integrity: sha512-DhGl4xMVFGVIyMwswXeyzdL4uXD5OGILGX5N8Y+f6W7LhC1Ze2poSNrkF/fedpVDHEEZ+PHFW0vL14I+mm8K3Q==} engines: {node: '>= 20'} - '@octokit/endpoint@11.0.2': - resolution: {integrity: sha512-4zCpzP1fWc7QlqunZ5bSEjxc6yLAlRTnDwKtgXfcI/FxxGoqedDG8V2+xJ60bV2kODqcGB+nATdtap/XYq2NZQ==} + '@octokit/endpoint@11.0.3': + resolution: {integrity: sha512-FWFlNxghg4HrXkD3ifYbS/IdL/mDHjh9QcsNyhQjN8dplUoZbejsdpmuqdA76nxj2xoWPs7p8uX2SNr9rYu0Ag==} engines: {node: '>= 20'} '@octokit/graphql@9.0.3': @@ -1859,8 +1676,8 @@ packages: peerDependencies: '@octokit/core': '>=6' - '@octokit/plugin-retry@8.0.3': - resolution: {integrity: sha512-vKGx1i3MC0za53IzYBSBXcrhmd+daQDzuZfYDd52X5S0M2otf3kVZTVP8bLA3EkU0lTvd1WEC2OlNNa4G+dohA==} + '@octokit/plugin-retry@8.1.0': + resolution: {integrity: sha512-O1FZgXeiGb2sowEr/hYTr6YunGdSAFWnr2fyW39Ah85H8O33ELASQxcvOFF5LE6Tjekcyu2ms4qAzJVhSaJxTw==} engines: {node: '>= 20'} peerDependencies: '@octokit/core': '>=7' @@ -1875,8 +1692,8 @@ packages: resolution: {integrity: sha512-KMQIfq5sOPpkQYajXHwnhjCC0slzCNScLHs9JafXc4RAJI+9f+jNDlBNaIMTvazOPLgb4BnlhGJOTbnN0wIjPw==} engines: {node: '>= 20'} - '@octokit/request@10.0.7': - resolution: {integrity: sha512-v93h0i1yu4idj8qFPZwjehoJx4j3Ntn+JhXsdJrG9pYaX6j/XRz2RmasMUHtNgQD39nrv/VwTWSqK0RNXR8upA==} + '@octokit/request@10.0.8': + resolution: {integrity: sha512-SJZNwY9pur9Agf7l87ywFi14W+Hd9Jg6Ifivsd33+/bGUQIjNujdFiXII2/qSlN2ybqUHfp5xpekMEjIBTjlSw==} engines: {node: '>= 20'} '@octokit/types@16.0.0': @@ -2667,22 +2484,10 @@ packages: resolution: {integrity: 
sha512-qocxM/X4XGATqQtUkbE9SPUB6wekBi+FyJOMbPj0AhvyvFGYEmOlz6VB22iMePCQsFmMIvFSeViDvA7mZJG47g==} engines: {node: '>=18.0.0'} - '@smithy/abort-controller@4.2.8': - resolution: {integrity: sha512-peuVfkYHAmS5ybKxWcfraK7WBBP0J+rkfUcbHJJKQ4ir3UAUNQI+Y4Vt/PqSzGqgloJ5O1dk7+WzNL8wcCSXbw==} - engines: {node: '>=18.0.0'} - - '@smithy/config-resolver@4.4.6': - resolution: {integrity: sha512-qJpzYC64kaj3S0fueiu3kXm8xPrR3PcXDPEgnaNMRn0EjNSZFoFjvbUp0YUDsRhN1CB90EnHJtbxWKevnH99UQ==} - engines: {node: '>=18.0.0'} - '@smithy/config-resolver@4.4.9': resolution: {integrity: sha512-ejQvXqlcU30h7liR9fXtj7PIAau1t/sFbJpgWPfiYDs7zd16jpH0IsSXKcba2jF6ChTXvIjACs27kNMc5xxE2Q==} engines: {node: '>=18.0.0'} - '@smithy/core@3.23.2': - resolution: {integrity: sha512-HaaH4VbGie4t0+9nY3tNBRSxVTr96wzIqexUa6C2qx3MPePAuz7lIxPxYtt1Wc//SPfJLNoZJzfdt0B6ksj2jA==} - engines: {node: '>=18.0.0'} - '@smithy/core@3.23.6': resolution: {integrity: sha512-4xE+0L2NrsFKpEVFlFELkIHQddBvMbQ41LRIP74dGCXnY1zQ9DgksrBcRBDJT+iOzGy4VEJIeU3hkUK5mn06kg==} engines: {node: '>=18.0.0'} @@ -2691,82 +2496,42 @@ packages: resolution: {integrity: sha512-3bsMLJJLTZGZqVGGeBVFfLzuRulVsGTj12BzRKODTHqUABpIr0jMN1vN3+u6r2OfyhAQ2pXaMZWX/swBK5I6PQ==} engines: {node: '>=18.0.0'} - '@smithy/credential-provider-imds@4.2.8': - resolution: {integrity: sha512-FNT0xHS1c/CPN8upqbMFP83+ul5YgdisfCfkZ86Jh2NSmnqw/AJ6x5pEogVCTVvSm7j9MopRU89bmDelxuDMYw==} - engines: {node: '>=18.0.0'} - '@smithy/eventstream-codec@4.2.10': resolution: {integrity: sha512-A4ynrsFFfSXUHicfTcRehytppFBcY3HQxEGYiyGktPIOye3Ot7fxpiy4VR42WmtGI4Wfo6OXt/c1Ky1nUFxYYQ==} engines: {node: '>=18.0.0'} - '@smithy/eventstream-codec@4.2.8': - resolution: {integrity: sha512-jS/O5Q14UsufqoGhov7dHLOPCzkYJl9QDzusI2Psh4wyYx/izhzvX9P4D69aTxcdfVhEPhjK+wYyn/PzLjKbbw==} - engines: {node: '>=18.0.0'} - '@smithy/eventstream-serde-browser@4.2.10': resolution: {integrity: sha512-0xupsu9yj9oDVuQ50YCTS9nuSYhGlrwqdaKQel9y2Fz7LU9fNErVlw9N0o4pm4qqvWEGbSTI4HKc6XJfB30MVw==} engines: {node: '>=18.0.0'} - 
'@smithy/eventstream-serde-browser@4.2.8': - resolution: {integrity: sha512-MTfQT/CRQz5g24ayXdjg53V0mhucZth4PESoA5IhvaWVDTOQLfo8qI9vzqHcPsdd2v6sqfTYqF5L/l+pea5Uyw==} - engines: {node: '>=18.0.0'} - '@smithy/eventstream-serde-config-resolver@4.3.10': resolution: {integrity: sha512-8kn6sinrduk0yaYHMJDsNuiFpXwQwibR7n/4CDUqn4UgaG+SeBHu5jHGFdU9BLFAM7Q4/gvr9RYxBHz9/jKrhA==} engines: {node: '>=18.0.0'} - '@smithy/eventstream-serde-config-resolver@4.3.8': - resolution: {integrity: sha512-ah12+luBiDGzBruhu3efNy1IlbwSEdNiw8fOZksoKoWW1ZHvO/04MQsdnws/9Aj+5b0YXSSN2JXKy/ClIsW8MQ==} - engines: {node: '>=18.0.0'} - '@smithy/eventstream-serde-node@4.2.10': resolution: {integrity: sha512-uUrxPGgIffnYfvIOUmBM5i+USdEBRTdh7mLPttjphgtooxQ8CtdO1p6K5+Q4BBAZvKlvtJ9jWyrWpBJYzBKsyQ==} engines: {node: '>=18.0.0'} - '@smithy/eventstream-serde-node@4.2.8': - resolution: {integrity: sha512-cYpCpp29z6EJHa5T9WL0KAlq3SOKUQkcgSoeRfRVwjGgSFl7Uh32eYGt7IDYCX20skiEdRffyDpvF2efEZPC0A==} - engines: {node: '>=18.0.0'} - '@smithy/eventstream-serde-universal@4.2.10': resolution: {integrity: sha512-aArqzOEvcs2dK+xQVCgLbpJQGfZihw8SD4ymhkwNTtwKbnrzdhJsFDKuMQnam2kF69WzgJYOU5eJlCx+CA32bw==} engines: {node: '>=18.0.0'} - '@smithy/eventstream-serde-universal@4.2.8': - resolution: {integrity: sha512-iJ6YNJd0bntJYnX6s52NC4WFYcZeKrPUr1Kmmr5AwZcwCSzVpS7oavAmxMR7pMq7V+D1G4s9F5NJK0xwOsKAlQ==} - engines: {node: '>=18.0.0'} - '@smithy/fetch-http-handler@5.3.11': resolution: {integrity: sha512-wbTRjOxdFuyEg0CpumjZO0hkUl+fetJFqxNROepuLIoijQh51aMBmzFLfoQdwRjxsuuS2jizzIUTjPWgd8pd7g==} engines: {node: '>=18.0.0'} - '@smithy/fetch-http-handler@5.3.9': - resolution: {integrity: sha512-I4UhmcTYXBrct03rwzQX1Y/iqQlzVQaPxWjCjula++5EmWq9YGBrx6bbGqluGc1f0XEfhSkiY4jhLgbsJUMKRA==} - engines: {node: '>=18.0.0'} - '@smithy/hash-node@4.2.10': resolution: {integrity: sha512-1VzIOI5CcsvMDvP3iv1vG/RfLJVVVc67dCRyLSB2Hn9SWCZrDO3zvcIzj3BfEtqRW5kcMg5KAeVf1K3dR6nD3w==} engines: {node: '>=18.0.0'} - '@smithy/hash-node@4.2.8': - resolution: 
{integrity: sha512-7ZIlPbmaDGxVoxErDZnuFG18WekhbA/g2/i97wGj+wUBeS6pcUeAym8u4BXh/75RXWhgIJhyC11hBzig6MljwA==} - engines: {node: '>=18.0.0'} - '@smithy/invalid-dependency@4.2.10': resolution: {integrity: sha512-vy9KPNSFUU0ajFYk0sDZIYiUlAWGEAhRfehIr5ZkdFrRFTAuXEPUd41USuqHU6vvLX4r6Q9X7MKBco5+Il0Org==} engines: {node: '>=18.0.0'} - '@smithy/invalid-dependency@4.2.8': - resolution: {integrity: sha512-N9iozRybwAQ2dn9Fot9kI6/w9vos2oTXLhtK7ovGqwZjlOcxu6XhPlpLpC+INsxktqHinn5gS2DXDjDF2kG5sQ==} - engines: {node: '>=18.0.0'} - '@smithy/is-array-buffer@2.2.0': resolution: {integrity: sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA==} engines: {node: '>=14.0.0'} - '@smithy/is-array-buffer@4.2.0': - resolution: {integrity: sha512-DZZZBvC7sjcYh4MazJSGiWMI2L7E0oCiRHREDzIxi/M2LY79/21iXt6aPLHge82wi5LsuRF5A06Ds3+0mlh6CQ==} - engines: {node: '>=18.0.0'} - '@smithy/is-array-buffer@4.2.1': resolution: {integrity: sha512-Yfu664Qbf1B4IYIsYgKoABt010daZjkaCRvdU/sPnZG6TtHOB0md0RjNdLGzxe5UIdn9js4ftPICzmkRa9RJ4Q==} engines: {node: '>=18.0.0'} @@ -2775,22 +2540,10 @@ packages: resolution: {integrity: sha512-TQZ9kX5c6XbjhaEBpvhSvMEZ0klBs1CFtOdPFwATZSbC9UeQfKHPLPN9Y+I6wZGMOavlYTOlHEPDrt42PMSH9w==} engines: {node: '>=18.0.0'} - '@smithy/middleware-content-length@4.2.8': - resolution: {integrity: sha512-RO0jeoaYAB1qBRhfVyq0pMgBoUK34YEJxVxyjOWYZiOKOq2yMZ4MnVXMZCUDenpozHue207+9P5ilTV1zeda0A==} - engines: {node: '>=18.0.0'} - - '@smithy/middleware-endpoint@4.4.16': - resolution: {integrity: sha512-L5GICFCSsNhbJ5JSKeWFGFy16Q2OhoBizb3X2DrxaJwXSEujVvjG9Jt386dpQn2t7jINglQl0b4K/Su69BdbMA==} - engines: {node: '>=18.0.0'} - '@smithy/middleware-endpoint@4.4.20': resolution: {integrity: sha512-9W6Np4ceBP3XCYAGLoMCmn8t2RRVzuD1ndWPLBbv7H9CrwM9Bprf6Up6BM9ZA/3alodg0b7Kf6ftBK9R1N04vw==} engines: {node: '>=18.0.0'} - '@smithy/middleware-retry@4.4.33': - resolution: {integrity: sha512-jLqZOdJhtIL4lnA9hXnAG6GgnJlo1sD3FqsTxm9wSfjviqgWesY/TMBVnT84yr4O0Vfe0jWoXlfFbzsBVph3WA==} 
- engines: {node: '>=18.0.0'} - '@smithy/middleware-retry@4.4.37': resolution: {integrity: sha512-/1psZZllBBSQ7+qo5+hhLz7AEPGLx3Z0+e3ramMBEuPK2PfvLK4SrncDB9VegX5mBn+oP/UTDrM6IHrFjvX1ZA==} engines: {node: '>=18.0.0'} @@ -2799,30 +2552,14 @@ packages: resolution: {integrity: sha512-STQdONGPwbbC7cusL60s7vOa6He6A9w2jWhoapL0mgVjmR19pr26slV+yoSP76SIssMTX/95e5nOZ6UQv6jolg==} engines: {node: '>=18.0.0'} - '@smithy/middleware-serde@4.2.9': - resolution: {integrity: sha512-eMNiej0u/snzDvlqRGSN3Vl0ESn3838+nKyVfF2FKNXFbi4SERYT6PR392D39iczngbqqGG0Jl1DlCnp7tBbXQ==} - engines: {node: '>=18.0.0'} - '@smithy/middleware-stack@4.2.10': resolution: {integrity: sha512-pmts/WovNcE/tlyHa8z/groPeOtqtEpp61q3W0nW1nDJuMq/x+hWa/OVQBtgU0tBqupeXq0VBOLA4UZwE8I0YA==} engines: {node: '>=18.0.0'} - '@smithy/middleware-stack@4.2.8': - resolution: {integrity: sha512-w6LCfOviTYQjBctOKSwy6A8FIkQy7ICvglrZFl6Bw4FmcQ1Z420fUtIhxaUZZshRe0VCq4kvDiPiXrPZAe8oRA==} - engines: {node: '>=18.0.0'} - '@smithy/node-config-provider@4.3.10': resolution: {integrity: sha512-UALRbJtVX34AdP2VECKVlnNgidLHA2A7YgcJzwSBg1hzmnO/bZBHl/LDQQyYifzUwp1UOODnl9JJ3KNawpUJ9w==} engines: {node: '>=18.0.0'} - '@smithy/node-config-provider@4.3.8': - resolution: {integrity: sha512-aFP1ai4lrbVlWjfpAfRSL8KFcnJQYfTl5QxLJXY32vghJrDuFyPZ6LtUL+JEGYiFRG1PfPLHLoxj107ulncLIg==} - engines: {node: '>=18.0.0'} - - '@smithy/node-http-handler@4.4.10': - resolution: {integrity: sha512-u4YeUwOWRZaHbWaebvrs3UhwQwj+2VNmcVCwXcYTvPIuVyM7Ex1ftAj+fdbG/P4AkBwLq/+SKn+ydOI4ZJE9PA==} - engines: {node: '>=18.0.0'} - '@smithy/node-http-handler@4.4.12': resolution: {integrity: sha512-zo1+WKJkR9x7ZtMeMDAAsq2PufwiLDmkhcjpWPRRkmeIuOm6nq1qjFICSZbnjBvD09ei8KMo26BWxsu2BUU+5w==} engines: {node: '>=18.0.0'} @@ -2831,46 +2568,22 @@ packages: resolution: {integrity: sha512-5jm60P0CU7tom0eNrZ7YrkgBaoLFXzmqB0wVS+4uK8PPGmosSrLNf6rRd50UBvukztawZ7zyA8TxlrKpF5z9jw==} engines: {node: '>=18.0.0'} - '@smithy/property-provider@4.2.8': - resolution: {integrity: 
sha512-EtCTbyIveCKeOXDSWSdze3k612yCPq1YbXsbqX3UHhkOSW8zKsM9NOJG5gTIya0vbY2DIaieG8pKo1rITHYL0w==} - engines: {node: '>=18.0.0'} - '@smithy/protocol-http@5.3.10': resolution: {integrity: sha512-2NzVWpYY0tRdfeCJLsgrR89KE3NTWT2wGulhNUxYlRmtRmPwLQwKzhrfVaiNlA9ZpJvbW7cjTVChYKgnkqXj1A==} engines: {node: '>=18.0.0'} - '@smithy/protocol-http@5.3.8': - resolution: {integrity: sha512-QNINVDhxpZ5QnP3aviNHQFlRogQZDfYlCkQT+7tJnErPQbDhysondEjhikuANxgMsZrkGeiAxXy4jguEGsDrWQ==} - engines: {node: '>=18.0.0'} - '@smithy/querystring-builder@4.2.10': resolution: {integrity: sha512-HeN7kEvuzO2DmAzLukE9UryiUvejD3tMp9a1D1NJETerIfKobBUCLfviP6QEk500166eD2IATaXM59qgUI+YDA==} engines: {node: '>=18.0.0'} - '@smithy/querystring-builder@4.2.8': - resolution: {integrity: sha512-Xr83r31+DrE8CP3MqPgMJl+pQlLLmOfiEUnoyAlGzzJIrEsbKsPy1hqH0qySaQm4oWrCBlUqRt+idEgunKB+iw==} - engines: {node: '>=18.0.0'} - '@smithy/querystring-parser@4.2.10': resolution: {integrity: sha512-4Mh18J26+ao1oX5wXJfWlTT+Q1OpDR8ssiC9PDOuEgVBGloqg18Fw7h5Ct8DyT9NBYwJgtJ2nLjKKFU6RP1G1Q==} engines: {node: '>=18.0.0'} - '@smithy/querystring-parser@4.2.8': - resolution: {integrity: sha512-vUurovluVy50CUlazOiXkPq40KGvGWSdmusa3130MwrR1UNnNgKAlj58wlOe61XSHRpUfIIh6cE0zZ8mzKaDPA==} - engines: {node: '>=18.0.0'} - '@smithy/service-error-classification@4.2.10': resolution: {integrity: sha512-0R/+/Il5y8nB/By90o8hy/bWVYptbIfvoTYad0igYQO5RefhNCDmNzqxaMx7K1t/QWo0d6UynqpqN5cCQt1MCg==} engines: {node: '>=18.0.0'} - '@smithy/service-error-classification@4.2.8': - resolution: {integrity: sha512-mZ5xddodpJhEt3RkCjbmUQuXUOaPNTkbMGR0bcS8FE0bJDLMZlhmpgrvPNCYglVw5rsYTpSnv19womw9WWXKQQ==} - engines: {node: '>=18.0.0'} - - '@smithy/shared-ini-file-loader@4.4.3': - resolution: {integrity: sha512-DfQjxXQnzC5UbCUPeC3Ie8u+rIWZTvuDPAGU/BxzrOGhRvgUanaP68kDZA+jaT3ZI+djOf+4dERGlm9mWfFDrg==} - engines: {node: '>=18.0.0'} - '@smithy/shared-ini-file-loader@4.4.5': resolution: {integrity: 
sha512-pHgASxl50rrtOztgQCPmOXFjRW+mCd7ALr/3uXNzRrRoGV5G2+78GOsQ3HlQuBVHCh9o6xqMNvlIKZjWn4Euug==} engines: {node: '>=18.0.0'} @@ -2879,22 +2592,10 @@ packages: resolution: {integrity: sha512-Wab3wW8468WqTKIxI+aZe3JYO52/RYT/8sDOdzkUhjnLakLe9qoQqIcfih/qxcF4qWEFoWBszY0mj5uxffaVXA==} engines: {node: '>=18.0.0'} - '@smithy/signature-v4@5.3.8': - resolution: {integrity: sha512-6A4vdGj7qKNRF16UIcO8HhHjKW27thsxYci+5r/uVRkdcBEkOEiY8OMPuydLX4QHSrJqGHPJzPRwwVTqbLZJhg==} - engines: {node: '>=18.0.0'} - - '@smithy/smithy-client@4.11.5': - resolution: {integrity: sha512-xixwBRqoeP2IUgcAl3U9dvJXc+qJum4lzo3maaJxifsZxKUYLfVfCXvhT4/jD01sRrHg5zjd1cw2Zmjr4/SuKQ==} - engines: {node: '>=18.0.0'} - '@smithy/smithy-client@4.12.0': resolution: {integrity: sha512-R8bQ9K3lCcXyZmBnQqUZJF4ChZmtWT5NLi6x5kgWx5D+/j0KorXcA0YcFg/X5TOgnTCy1tbKc6z2g2y4amFupQ==} engines: {node: '>=18.0.0'} - '@smithy/types@4.12.0': - resolution: {integrity: sha512-9YcuJVTOBDjg9LWo23Qp0lTQ3D7fQsQtwle0jVfpbUHy9qBwCEgKuVH4FqFB3VYu0nwdHKiEMA+oXz7oV8X1kw==} - engines: {node: '>=18.0.0'} - '@smithy/types@4.13.0': resolution: {integrity: sha512-COuLsZILbbQsdrwKQpkkpyep7lCsByxwj7m0Mg5v66/ZTyenlfBc40/QFQ5chO0YN/PNEH1Bi3fGtfXPnYNeDw==} engines: {node: '>=18.0.0'} @@ -2903,30 +2604,14 @@ packages: resolution: {integrity: sha512-uypjF7fCDsRk26u3qHmFI/ePL7bxxB9vKkE+2WKEciHhz+4QtbzWiHRVNRJwU3cKhrYDYQE3b0MRFtqfLYdA4A==} engines: {node: '>=18.0.0'} - '@smithy/url-parser@4.2.8': - resolution: {integrity: sha512-NQho9U68TGMEU639YkXnVMV3GEFFULmmaWdlu1E9qzyIePOHsoSnagTGSDv1Zi8DCNN6btxOSdgmy5E/hsZwhA==} - engines: {node: '>=18.0.0'} - - '@smithy/util-base64@4.3.0': - resolution: {integrity: sha512-GkXZ59JfyxsIwNTWFnjmFEI8kZpRNIBfxKjv09+nkAWPt/4aGaEWMM04m4sxgNVWkbt2MdSvE3KF/PfX4nFedQ==} - engines: {node: '>=18.0.0'} - '@smithy/util-base64@4.3.1': resolution: {integrity: sha512-BKGuawX4Doq/bI/uEmg+Zyc36rJKWuin3py89PquXBIBqmbnJwBBsmKhdHfNEp0+A4TDgLmT/3MSKZ1SxHcR6w==} engines: {node: '>=18.0.0'} - '@smithy/util-body-length-browser@4.2.0': - 
resolution: {integrity: sha512-Fkoh/I76szMKJnBXWPdFkQJl2r9SjPt3cMzLdOB6eJ4Pnpas8hVoWPYemX/peO0yrrvldgCUVJqOAjUrOLjbxg==} - engines: {node: '>=18.0.0'} - '@smithy/util-body-length-browser@4.2.1': resolution: {integrity: sha512-SiJeLiozrAoCrgDBUgsVbmqHmMgg/2bA15AzcbcW+zan7SuyAVHN4xTSbq0GlebAIwlcaX32xacnrG488/J/6g==} engines: {node: '>=18.0.0'} - '@smithy/util-body-length-node@4.2.1': - resolution: {integrity: sha512-h53dz/pISVrVrfxV1iqXlx5pRg3V2YWFcSQyPyXZRrZoZj4R4DeWRDo1a7dd3CPTcFi3kE+98tuNyD2axyZReA==} - engines: {node: '>=18.0.0'} - '@smithy/util-body-length-node@4.2.2': resolution: {integrity: sha512-4rHqBvxtJEBvsZcFQSPQqXP2b/yy/YlB66KlcEgcH2WNoOKCKB03DSLzXmOsXjbl8dJ4OEYTn31knhdznwk7zw==} engines: {node: '>=18.0.0'} @@ -2935,50 +2620,26 @@ packages: resolution: {integrity: sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA==} engines: {node: '>=14.0.0'} - '@smithy/util-buffer-from@4.2.0': - resolution: {integrity: sha512-kAY9hTKulTNevM2nlRtxAG2FQ3B2OR6QIrPY3zE5LqJy1oxzmgBGsHLWTcNhWXKchgA0WHW+mZkQrng/pgcCew==} - engines: {node: '>=18.0.0'} - '@smithy/util-buffer-from@4.2.1': resolution: {integrity: sha512-/swhmt1qTiVkaejlmMPPDgZhEaWb/HWMGRBheaxwuVkusp/z+ErJyQxO6kaXumOciZSWlmq6Z5mNylCd33X7Ig==} engines: {node: '>=18.0.0'} - '@smithy/util-config-provider@4.2.0': - resolution: {integrity: sha512-YEjpl6XJ36FTKmD+kRJJWYvrHeUvm5ykaUS5xK+6oXffQPHeEM4/nXlZPe+Wu0lsgRUcNZiliYNh/y7q9c2y6Q==} - engines: {node: '>=18.0.0'} - '@smithy/util-config-provider@4.2.1': resolution: {integrity: sha512-462id/00U8JWFw6qBuTSWfN5TxOHvDu4WliI97qOIOnuC/g+NDAknTU8eoGXEPlLkRVgWEr03jJBLV4o2FL8+A==} engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-browser@4.3.32': - resolution: {integrity: sha512-092sjYfFMQ/iaPH798LY/OJFBcYu0sSK34Oy9vdixhsU36zlZu8OcYjF3TD4e2ARupyK7xaxPXl+T0VIJTEkkg==} - engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-browser@4.3.36': resolution: {integrity: 
sha512-R0smq7EHQXRVMxkAxtH5akJ/FvgAmNF6bUy/GwY/N20T4GrwjT633NFm0VuRpC+8Bbv8R9A0DoJ9OiZL/M3xew==} engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-node@4.2.35': - resolution: {integrity: sha512-miz/ggz87M8VuM29y7jJZMYkn7+IErM5p5UgKIf8OtqVs/h2bXr1Bt3uTsREsI/4nK8a0PQERbAPsVPVNIsG7Q==} - engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-node@4.2.39': resolution: {integrity: sha512-otWuoDm35btJV1L8MyHrPl462B07QCdMTktKc7/yM+Psv6KbED/ziXiHnmr7yPHUjfIwE9S8Max0LO24Mo3ZVg==} engines: {node: '>=18.0.0'} - '@smithy/util-endpoints@3.2.8': - resolution: {integrity: sha512-8JaVTn3pBDkhZgHQ8R0epwWt+BqPSLCjdjXXusK1onwJlRuN69fbvSK66aIKKO7SwVFM6x2J2ox5X8pOaWcUEw==} - engines: {node: '>=18.0.0'} - '@smithy/util-endpoints@3.3.1': resolution: {integrity: sha512-xyctc4klmjmieQiF9I1wssBWleRV0RhJ2DpO8+8yzi2LO1Z+4IWOZNGZGNj4+hq9kdo+nyfrRLmQTzc16Op2Vg==} engines: {node: '>=18.0.0'} - '@smithy/util-hex-encoding@4.2.0': - resolution: {integrity: sha512-CCQBwJIvXMLKxVbO88IukazJD9a4kQ9ZN7/UMGBjBcJYvatpWk+9g870El4cB8/EJxfe+k+y0GmR9CAzkF+Nbw==} - engines: {node: '>=18.0.0'} - '@smithy/util-hex-encoding@4.2.1': resolution: {integrity: sha512-c1hHtkgAWmE35/50gmdKajgGAKV3ePJ7t6UtEmpfCWJmQE9BQAQPz0URUVI89eSkcDqCtzqllxzG28IQoZPvwA==} engines: {node: '>=18.0.0'} @@ -2987,30 +2648,14 @@ packages: resolution: {integrity: sha512-LxaQIWLp4y0r72eA8mwPNQ9va4h5KeLM0I3M/HV9klmFaY2kN766wf5vsTzmaOpNNb7GgXAd9a25P3h8T49PSA==} engines: {node: '>=18.0.0'} - '@smithy/util-middleware@4.2.8': - resolution: {integrity: sha512-PMqfeJxLcNPMDgvPbbLl/2Vpin+luxqTGPpW3NAQVLbRrFRzTa4rNAASYeIGjRV9Ytuhzny39SpyU04EQreF+A==} - engines: {node: '>=18.0.0'} - '@smithy/util-retry@4.2.10': resolution: {integrity: sha512-HrBzistfpyE5uqTwiyLsFHscgnwB0kgv8vySp7q5kZ0Eltn/tjosaSGGDj/jJ9ys7pWzIP/icE2d+7vMKXLv7A==} engines: {node: '>=18.0.0'} - '@smithy/util-retry@4.2.8': - resolution: {integrity: sha512-CfJqwvoRY0kTGe5AkQokpURNCT1u/MkRzMTASWMPPo2hNSnKtF1D45dQl3DE2LKLr4m+PW9mCeBMJr5mCAVThg==} - engines: {node: '>=18.0.0'} - - 
'@smithy/util-stream@4.5.12': - resolution: {integrity: sha512-D8tgkrmhAX/UNeCZbqbEO3uqyghUnEmmoO9YEvRuwxjlkKKUE7FOgCJnqpTlQPe9MApdWPky58mNQQHbnCzoNg==} - engines: {node: '>=18.0.0'} - '@smithy/util-stream@4.5.15': resolution: {integrity: sha512-OlOKnaqnkU9X+6wEkd7mN+WB7orPbCVDauXOj22Q7VtiTkvy7ZdSsOg4QiNAZMgI4OkvNf+/VLUC3VXkxuWJZw==} engines: {node: '>=18.0.0'} - '@smithy/util-uri-escape@4.2.0': - resolution: {integrity: sha512-igZpCKV9+E/Mzrpq6YacdTQ0qTiLm85gD6N/IrmyDvQFA4UnU3d5g3m8tMT/6zG/vVkWSU+VxeUyGonL62DuxA==} - engines: {node: '>=18.0.0'} - '@smithy/util-uri-escape@4.2.1': resolution: {integrity: sha512-YmiUDn2eo2IOiWYYvGQkgX5ZkBSiTQu4FlDo5jNPpAxng2t6Sjb6WutnZV9l6VR4eJul1ABmCrnWBC9hKHQa6Q==} engines: {node: '>=18.0.0'} @@ -3019,18 +2664,10 @@ packages: resolution: {integrity: sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A==} engines: {node: '>=14.0.0'} - '@smithy/util-utf8@4.2.0': - resolution: {integrity: sha512-zBPfuzoI8xyBtR2P6WQj63Rz8i3AmfAaJLuNG8dWsfvPe8lO4aCPYLn879mEgHndZH1zQ2oXmG8O1GGzzaoZiw==} - engines: {node: '>=18.0.0'} - '@smithy/util-utf8@4.2.1': resolution: {integrity: sha512-DSIwNaWtmzrNQHv8g7DBGR9mulSit65KSj5ymGEIAknmIN8IpbZefEep10LaMG/P/xquwbmJ1h9ectz8z6mV6g==} engines: {node: '>=18.0.0'} - '@smithy/uuid@1.1.0': - resolution: {integrity: sha512-4aUIteuyxtBUhVdiQqcDhKFitwfd9hqoSDYY2KRXiWtgoWJ9Bmise+KfEPDiVHWeJepvF8xJO9/9+WDIciMFFw==} - engines: {node: '>=18.0.0'} - '@smithy/uuid@1.1.1': resolution: {integrity: sha512-dSfDCeihDmZlV2oyr0yWPTUfh07suS+R5OB+FZGiv/hHyK3hrFBW5rR1UYjfa57vBsrP9lciFkRPzebaV1Qujw==} engines: {node: '>=18.0.0'} @@ -3247,14 +2884,14 @@ packages: '@types/node@10.17.60': resolution: {integrity: sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==} - '@types/node@20.19.33': - resolution: {integrity: sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw==} + '@types/node@20.19.34': + resolution: 
{integrity: sha512-by3/Z0Qp+L9cAySEsSNNwZ6WWw8ywgGLPQGgbQDhNRSitqYgkgp4pErd23ZSCavbtUA2CN4jQtoB3T8nk4j3Rg==} - '@types/node@24.10.13': - resolution: {integrity: sha512-oH72nZRfDv9lADUBSo104Aq7gPHpQZc4BTx38r9xf9pg5LfP6EzSyH2n7qFmmxRQXh7YlUXODcYsg6PuTDSxGg==} + '@types/node@24.10.14': + resolution: {integrity: sha512-OowOUbD1lBCOFIPOZ8xnMIhgqA4sCutMiYOmPHL1PTLt5+y1XA+g2+yC9OOyz8p+deMZqPZLxfMjYIfrKsPeFg==} - '@types/node@25.3.0': - resolution: {integrity: sha512-4K3bqJpXpqfg2XKGK9bpDTc6xO/xoUP/RBWS7AtRMug6zZFaRekiLzjVtAoZMquxoAbzBvy5nxQ7veS5eYzf8A==} + '@types/node@25.3.1': + resolution: {integrity: sha512-hj9YIJimBCipHVfHKRMnvmHg+wfhKc0o4mTtXh9pKBjC8TLJzz0nzGmLi5UJsYAUgSvXFHgb0V2oY10DUFtImw==} '@types/qrcode-terminal@0.12.2': resolution: {integrity: sha512-v+RcIEJ+Uhd6ygSQ0u5YYY7ZM+la7GgPbs0V/7l/kFs2uO4S8BcIUEMoP7za4DNIqNnUD5npf0A/7kBhrCKG5Q==} @@ -3292,43 +2929,46 @@ packages: '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260224.1': - resolution: {integrity: sha512-9VHXRhB7sM5DFqdlKaeDww8vuklgfzhYCjBazLCEnuFvb4J+rJ1DodLykc2bL+6kE8k6sdhYi3x8ipfbjtO44g==} + '@types/yauzl@2.10.3': + resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} + + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260225.1': + resolution: {integrity: sha512-3qSsqv7FmM4z09wEpEXdhmgMfiJF/OMOZa41AdgMsXTTRpX2/38hDg2KGhi3fc24M2T3MnLPLTqw6HyTOBaV1Q==} cpu: [arm64] os: [darwin] - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260224.1': - resolution: {integrity: sha512-uCHipPRcIhHnvb7lAM29MQ1QT9pZ+uirqtH630aOMFm8VG3j8mkxVM9iGRLx829n38DMSDLjc3joCrQO3+sDcQ==} + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260225.1': + resolution: {integrity: sha512-F8ZCCX2UESHcbxvnkd1Dn5PTnOOgpGddFHYgn4usyWRMzNZLPP+YjyGALZe9zdR/D8L0uraND0Haok+TPq8xYg==} cpu: [x64] os: [darwin] - 
'@typescript/native-preview-linux-arm64@7.0.0-dev.20260224.1': - resolution: {integrity: sha512-yFEEq6hD2R70+lTogb211sPdCwz3H5hpYh0+YuKVMPsKo0oM8/jMvgjj2pyutmj/uCKLdbcJ9HP2vJ/13Szbcg==} + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260225.1': + resolution: {integrity: sha512-Up8Z/QNcwce5C4rWnbLNW5w7lRARdyKZcNbB1NMnaswaGOBdeDmdP0wbVsOgJMoDp6vnun+EkvrSft8hWLLhIg==} cpu: [arm64] os: [linux] - '@typescript/native-preview-linux-arm@7.0.0-dev.20260224.1': - resolution: {integrity: sha512-cEWSRQ8b+CXdMJvoG18IjNTvBo+qT22B5imqm6nAssMpyHHQb62PvZGnrA8mPRQNPzLpa5F956j8GwAjyP8hBQ==} + '@typescript/native-preview-linux-arm@7.0.0-dev.20260225.1': + resolution: {integrity: sha512-Iu5rnCmqwGIMUu//BXkl9VQaxAAsqVvFhU4mJoNexNkMxPqVcu9quqYAouY7tN/95WcKzUsPpyRfkThdbNFO/g==} cpu: [arm] os: [linux] - '@typescript/native-preview-linux-x64@7.0.0-dev.20260224.1': - resolution: {integrity: sha512-zGz5kVcCeBRheQwA4jVTAxtbLsBsTkp9AEvWK5AlyCs1rQCUQobBhtx37X4VEmxn4ekIDMxYgaZdlZb7/PGp8w==} + '@typescript/native-preview-linux-x64@7.0.0-dev.20260225.1': + resolution: {integrity: sha512-WWjIfHCWlcriempYYc/sPJ3HFt6znNZKp60nvDNih0+wmxNqEfT5Yzu5zAY0awIe7XLelFSY+bolkpzMYVWEIQ==} cpu: [x64] os: [linux] - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260224.1': - resolution: {integrity: sha512-A0f9ZDQqKvGk/an59HuAJuzoI/wMyrgTd69oX9gFCx7+5E/ajSdgv0Eg1Fco+nyLfT/UVM0CV3ERyWrKzx277w==} + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260225.1': + resolution: {integrity: sha512-lmfQO+HdmPMk0dtPoNo8dZereTUYNQuapsAI7nFHCP8F25I8eGKKXY2nD1R8W1hp/LmVtske1pqKFNN6IOCt5g==} cpu: [arm64] os: [win32] - '@typescript/native-preview-win32-x64@7.0.0-dev.20260224.1': - resolution: {integrity: sha512-Se9JrcMdVLeDYMLn+CKEV3qy1yiildb5N23USGvnC9siNFalz8tVgd589dhRP+ywDhXnbIsZiFKDrZF/7B4wSQ==} + '@typescript/native-preview-win32-x64@7.0.0-dev.20260225.1': + resolution: {integrity: sha512-e4eJyzR9ne0XreqYgQNqfX7SNuaePxggnUtVrLERgBv25QKwdQl72GnSXDhdxZHzrb97YwumiXWMQQJj9h8NCg==} cpu: [x64] os: [win32] - 
'@typescript/native-preview@7.0.0-dev.20260224.1': - resolution: {integrity: sha512-PU0zBXLvz6RKxbIubT66RCnJXgScdDIhfmNMkvRhOnX/C4SZom5TFSn7BEHC3w8JPj7OSz5OYoubtV1Haty2GA==} + '@typescript/native-preview@7.0.0-dev.20260225.1': + resolution: {integrity: sha512-mUf1aON+eZLupLorX4214n4W6uWIz/lvNv81ErzjJylD/GyJPEJkvDLmgIK3bbvLpMwTRWdVJLhpLCah5Qe8iA==} hasBin: true '@typespec/ts-http-runtime@0.3.3': @@ -3449,6 +3089,11 @@ packages: engines: {node: '>=0.4.0'} hasBin: true + acpx@0.1.13: + resolution: {integrity: sha512-C032VkV3cNa13ubq9YhskTWvDTsciNAQfNHZLW3PIN3atdkrzkV0v2yi6Znp7UZDw+pzgpKUsOrZWl64Lwr+3w==} + engines: {node: '>=18'} + hasBin: true + agent-base@6.0.2: resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} engines: {node: '>= 6.0.0'} @@ -3586,10 +3231,26 @@ packages: axios@1.13.5: resolution: {integrity: sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==} + b4a@1.8.0: + resolution: {integrity: sha512-qRuSmNSkGQaHwNbM7J78Wwy+ghLEYF1zNrSeMxj4Kgw6y33O3mXcQ6Ie9fRvfU/YnxWkOchPXbaLb73TkIsfdg==} + peerDependencies: + react-native-b4a: '*' + peerDependenciesMeta: + react-native-b4a: + optional: true + balanced-match@4.0.4: resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==} engines: {node: 18 || 20 || >=22} + bare-events@2.8.2: + resolution: {integrity: sha512-riJjyv1/mHLIPX4RwiK+oW9/4c3TEUeORHKefKAKnZ5kyslbN+HXowtbaVEqt4IMUB7OXlfixcs6gsFeo/jhiQ==} + peerDependencies: + bare-abort-controller: '*' + peerDependenciesMeta: + bare-abort-controller: + optional: true + base64-js@1.5.1: resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} @@ -3637,6 +3298,9 @@ packages: resolution: {integrity: sha512-fy6KJm2RawA5RcHkLa1z/ScpBeA762UF9KmZQxwIbDtRJrgLzM10depAiEQ+CXYcoiqW1/m96OAAoke2nE9EeA==} engines: {node: 18 || 20 || >=22} + 
buffer-crc32@0.2.13: + resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==} + buffer-equal-constant-time@1.0.1: resolution: {integrity: sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==} @@ -3757,6 +3421,10 @@ packages: resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} engines: {node: '>=14'} + commander@13.1.0: + resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==} + engines: {node: '>=18'} + commander@14.0.3: resolution: {integrity: sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==} engines: {node: '>=20'} @@ -3949,6 +3617,9 @@ packages: resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} engines: {node: '>= 0.8'} + end-of-stream@1.4.5: + resolution: {integrity: sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==} + entities@4.5.0: resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} @@ -4031,6 +3702,9 @@ packages: eventemitter3@5.0.4: resolution: {integrity: sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==} + events-universal@1.0.1: + resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==} + expect-type@1.3.0: resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} engines: {node: '>=12.0.0'} @@ -4046,6 +3720,11 @@ packages: extend@3.0.2: resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + extract-zip@2.0.1: + resolution: 
{integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==} + engines: {node: '>= 10.17.0'} + hasBin: true + extsprintf@1.3.0: resolution: {integrity: sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==} engines: {'0': node >=0.6.0} @@ -4056,6 +3735,9 @@ packages: fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + fast-fifo@1.3.2: + resolution: {integrity: sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==} + fast-uri@3.1.0: resolution: {integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} @@ -4063,6 +3745,9 @@ packages: resolution: {integrity: sha512-QNI3sAvSvaOiaMl8FYU4trnEzCwiRr8XMWgAHzlrWpTSj+QaCSvOf1h82OEP1s4hiAXhnbXSyFWCf4ldZzZRVA==} hasBin: true + fd-slicer@1.1.0: + resolution: {integrity: sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==} + fdir@6.5.0: resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} engines: {node: '>=12.0.0'} @@ -4182,10 +3867,6 @@ packages: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} - get-east-asian-width@1.4.0: - resolution: {integrity: sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==} - engines: {node: '>=18'} - get-east-asian-width@1.5.0: resolution: {integrity: sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==} engines: {node: '>=18'} @@ -4198,6 +3879,10 @@ packages: resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} engines: {node: '>= 0.4'} + 
get-stream@5.2.0: + resolution: {integrity: sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==} + engines: {node: '>=8'} + get-tsconfig@4.13.6: resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==} @@ -4239,8 +3924,8 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - grammy@1.40.0: - resolution: {integrity: sha512-ssuE7fc1AwqlUxHr931OCVW3fU+oFDjHZGgvIedPKXfTdjXvzP19xifvVGCnPtYVUig1Kz+gwxe4A9M5WdkT4Q==} + grammy@1.40.1: + resolution: {integrity: sha512-bTe8SWXD8/Sdt2LGAAAsFGhuxI9RG8zL2gGk3V42A/RxriPqBQqwMGoNSldNK1qIFD2EaVuq7NQM8+ZAmNgHLw==} engines: {node: ^12.20.0 || >=14.13.1} has-flag@4.0.0: @@ -4376,8 +4061,8 @@ packages: resolution: {integrity: sha512-Zv/pA+ciVFbCSBBjGfaKUya/CcGmUHzTydLMaTwrUUEM2DIEO3iZvueGxmacvmN50fGpGVKeTXpb2LcYQxeVdg==} engines: {node: '>= 10'} - ipull@3.9.3: - resolution: {integrity: sha512-ZMkxaopfwKHwmEuGDYx7giNBdLxbHbRCWcQVA1D2eqE4crUguupfxej6s7UqbidYEwT69dkyumYkY8DPHIxF9g==} + ipull@3.9.5: + resolution: {integrity: sha512-5w/yZB5lXmTfsvNawmvkCjYo4SJNuKQz/av8TC1UiOyfOHyaM+DReqbpU2XpWYfmY+NIUbRRH8PUAWsxaS+IfA==} engines: {node: '>=18.0.0'} hasBin: true @@ -4490,6 +4175,9 @@ packages: json-stringify-safe@5.0.1: resolution: {integrity: sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==} + json-with-bigint@3.5.3: + resolution: {integrity: sha512-QObKu6nxy7NsxqR0VK4rkXnsNr5L9ElJaGEg+ucJ6J7/suoKZ0n+p76cu9aCqowytxEbwYNzvrMerfMkXneF5A==} + json5@2.2.3: resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} engines: {node: '>=6'} @@ -4538,8 +4226,8 @@ packages: lifecycle-utils@2.1.0: resolution: {integrity: sha512-AnrXnE2/OF9PHCyFg0RSqsnQTzV991XaZA/buhFDoc58xU7rhSCDgCz/09Lqpsn4MpoPHt7TRAXV1kWZypFVsA==} - 
lifecycle-utils@3.1.0: - resolution: {integrity: sha512-kVvegv+r/icjIo1dkHv1hznVQi4FzEVglJD2IU4w07HzevIyH3BAYsFZzEIbBk/nNZjXHGgclJ5g9rz9QdBCLw==} + lifecycle-utils@3.1.1: + resolution: {integrity: sha512-gNd3OvhFNjHykJE3uGntz7UuPzWlK9phrIdXxU9Adis0+ExkwnZibfxCJWiWWZ+a6VbKiZrb+9D9hCQWd4vjTg==} lightningcss-android-arm64@1.30.2: resolution: {integrity: sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==} @@ -4802,9 +4490,9 @@ packages: minimalistic-assert@1.0.1: resolution: {integrity: sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==} - minimatch@10.2.1: - resolution: {integrity: sha512-MClCe8IL5nRRmawL6ib/eT4oLyeKMGCghibcDWK+J0hh0Q8kqSdia6BvbRMVk6mPa6WqUa5uR2oxt6C5jd533A==} - engines: {node: 20 || >=22} + minimatch@10.2.4: + resolution: {integrity: sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==} + engines: {node: 18 || 20 || >=22} minimist@1.2.8: resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} @@ -5019,8 +4707,8 @@ packages: zod: optional: true - openclaw@2026.2.23: - resolution: {integrity: sha512-7I7G898212v3OzUidgM8kZdZYAziT78Dc5zgeqsV2tfCbINtHK0Pdc2rg2eDLoDYAcheLh0fvH5qn/15Yu9q7A==} + openclaw@2026.2.24: + resolution: {integrity: sha512-a6zrcS6v5tUWqzsFh5cNtyu5+Tra1UW5yvPtYhRYCKSS/q6lXrLu+dj0ylJPOHRPAho2alZZL1gw1Qd2hAd2sQ==} engines: {node: '>=22.12.0'} hasBin: true peerDependencies: @@ -5030,9 +4718,6 @@ packages: opus-decoder@0.7.11: resolution: {integrity: sha512-+e+Jz3vGQLxRTBHs8YJQPRPc1Tr+/aC6coV/DlZylriA29BdHQAYXhvNRKtjftof17OFng0+P4wsFIqQu3a48A==} - opusscript@0.0.8: - resolution: {integrity: sha512-VSTi1aWFuCkRCVq+tx/BQ5q9fMnQ9pVZ3JU4UHKqTkf0ED3fKEPdr+gKAAl3IA2hj9rrP6iyq3hlcJq3HELtNQ==} - opusscript@0.1.1: resolution: {integrity: sha512-mL0fZZOUnXdZ78woRXp18lApwpp0lF5tozJOD1Wut0dgrA9WuQTgSels/CSmFleaAZrJi/nci5KOVtbuxeWoQA==} @@ -5163,6 +4848,9 
@@ packages: peberminta@0.9.0: resolution: {integrity: sha512-XIxfHpEuSJbITd1H3EeQwpcZbTLHc+VVr8ANI9t5sit565tsI4/xK3KWTUFE2e6QiangUkh3B0jihzmGnNrRsQ==} + pend@1.2.0: + resolution: {integrity: sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==} + performance-now@2.1.0: resolution: {integrity: sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==} @@ -5277,6 +4965,9 @@ packages: psl@1.15.0: resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==} + pump@3.0.3: + resolution: {integrity: sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==} + punycode.js@2.3.1: resolution: {integrity: sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==} engines: {node: '>=6'} @@ -5520,8 +5211,8 @@ packages: peerDependencies: signal-polyfill: ^0.2.0 - simple-git@3.31.1: - resolution: {integrity: sha512-oiWP4Q9+kO8q9hHqkX35uuHmxiEbZNTrZ5IPxgMGrJwN76pzjm/jabkZO0ItEcqxAincqGAzL3QHSaHt4+knBg==} + simple-git@3.32.3: + resolution: {integrity: sha512-56a5oxFdWlsGygOXHWrG+xjj5w9ZIt2uQbzqiIGdR/6i5iococ7WQ/bNPzWxCJdEUGUCmyMH0t9zMpRJTaKxmw==} simple-yenc@1.0.4: resolution: {integrity: sha512-5gvxpSd79e9a3V4QDYUqnqxeD4HGlhCakVpb6gMnDD7lexJggSBJRBO5h52y/iJrdXRilX9UCuDaIJhSWm5OWw==} @@ -5533,6 +5224,11 @@ packages: sisteransi@1.0.5: resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} + skillflag@0.1.4: + resolution: {integrity: sha512-egFg+XCF5sloOWdtzxZivTX7n4UDj5pxQoY33wbT8h+YSDjMQJ76MZUg2rXQIBXmIDtlZhLgirS1g/3R5/qaHA==} + engines: {node: '>=18'} + hasBin: true + sleep-promise@9.1.0: resolution: {integrity: sha512-UHYzVpz9Xn8b+jikYSD6bqvf754xL2uBUzDFwiU6NcdZeifPr6UfgU43xpkPu67VMS88+TI2PSI7Eohgqf2fKA==} @@ -5632,6 +5328,9 @@ packages: resolution: {integrity: 
sha512-yhPIQXjrlt1xv7dyPQg2P17URmXbuM5pdGkpiMB3RenprfiBlvK415Lctfe0eshk90oA7/tNq7WEiMK8RSP39A==} engines: {node: '>=18'} + streamx@2.23.0: + resolution: {integrity: sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg==} + string-width@4.2.3: resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} engines: {node: '>=8'} @@ -5658,6 +5357,10 @@ packages: resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==} engines: {node: '>=12'} + strip-ansi@7.2.0: + resolution: {integrity: sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==} + engines: {node: '>=12'} + strip-json-comments@2.0.1: resolution: {integrity: sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==} engines: {node: '>=0.10.0'} @@ -5677,11 +5380,17 @@ packages: resolution: {integrity: sha512-iK5/YhZxq5GO5z8wb0bY1317uDF3Zjpha0QFFLA8/trAoiLbQD0HUbMesEaxyzUgDxi2QlcbM8IvqOlEjgoXBA==} engines: {node: '>=12.17'} + tar-stream@3.1.7: + resolution: {integrity: sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==} + tar@7.5.9: resolution: {integrity: sha512-BTLcK0xsDh2+PUe9F6c2TlRp4zOOBMTkoQHQIWSIzI0R7KG46uEwq4OPk2W7bZcprBMsuaeFsqwYr7pjh6CuHg==} engines: {node: '>=18'} deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + text-decoder@1.2.7: + resolution: {integrity: sha512-vlLytXkeP4xvEq2otHeJfSQIRyWxo/oZGEbXrtEEF9Hnmrdly59sUbzZ/QgyWuLYHctCHxFF4tRQZNQ9k60ExQ==} + thenify-all@1.6.0: resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} engines: {node: '>=0.8'} @@ -6060,6 +5769,9 @@ packages: resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} engines: {node: '>=12'} + yauzl@2.10.0: + resolution: {integrity: sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==} + yoctocolors@2.1.2: resolution: {integrity: sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==} engines: {node: '>=18'} @@ -6093,7 +5805,7 @@ snapshots: '@aws-crypto/crc32@5.2.0': dependencies: '@aws-crypto/util': 5.2.0 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 tslib: 2.8.1 '@aws-crypto/sha256-browser@5.2.0': @@ -6101,7 +5813,7 @@ snapshots: '@aws-crypto/sha256-js': 5.2.0 '@aws-crypto/supports-web-crypto': 5.2.0 '@aws-crypto/util': 5.2.0 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@aws-sdk/util-locate-window': 3.965.4 '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 @@ -6109,7 +5821,7 @@ snapshots: '@aws-crypto/sha256-js@5.2.0': dependencies: '@aws-crypto/util': 5.2.0 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 tslib: 2.8.1 '@aws-crypto/supports-web-crypto@5.2.0': @@ -6118,81 +5830,29 @@ snapshots: '@aws-crypto/util@5.2.0': dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 - '@aws-sdk/client-bedrock-runtime@3.995.0': + '@aws-sdk/client-bedrock-runtime@3.998.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.11 - '@aws-sdk/credential-provider-node': 3.972.10 - 
'@aws-sdk/eventstream-handler-node': 3.972.5 - '@aws-sdk/middleware-eventstream': 3.972.3 - '@aws-sdk/middleware-host-header': 3.972.3 - '@aws-sdk/middleware-logger': 3.972.3 - '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.11 - '@aws-sdk/middleware-websocket': 3.972.6 - '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/token-providers': 3.995.0 - '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.995.0 - '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.10 - '@smithy/config-resolver': 4.4.6 - '@smithy/core': 3.23.2 - '@smithy/eventstream-serde-browser': 4.2.8 - '@smithy/eventstream-serde-config-resolver': 4.3.8 - '@smithy/eventstream-serde-node': 4.2.8 - '@smithy/fetch-http-handler': 5.3.9 - '@smithy/hash-node': 4.2.8 - '@smithy/invalid-dependency': 4.2.8 - '@smithy/middleware-content-length': 4.2.8 - '@smithy/middleware-endpoint': 4.4.16 - '@smithy/middleware-retry': 4.4.33 - '@smithy/middleware-serde': 4.2.9 - '@smithy/middleware-stack': 4.2.8 - '@smithy/node-config-provider': 4.3.8 - '@smithy/node-http-handler': 4.4.10 - '@smithy/protocol-http': 5.3.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-base64': 4.3.0 - '@smithy/util-body-length-browser': 4.2.0 - '@smithy/util-body-length-node': 4.2.1 - '@smithy/util-defaults-mode-browser': 4.3.32 - '@smithy/util-defaults-mode-node': 4.2.35 - '@smithy/util-endpoints': 3.2.8 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-retry': 4.2.8 - '@smithy/util-stream': 4.5.12 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/client-bedrock-runtime@3.997.0': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.13 - '@aws-sdk/credential-provider-node': 3.972.12 - '@aws-sdk/eventstream-handler-node': 3.972.7 - '@aws-sdk/middleware-eventstream': 3.972.4 - 
'@aws-sdk/middleware-host-header': 3.972.4 - '@aws-sdk/middleware-logger': 3.972.4 - '@aws-sdk/middleware-recursion-detection': 3.972.4 - '@aws-sdk/middleware-user-agent': 3.972.13 - '@aws-sdk/middleware-websocket': 3.972.8 - '@aws-sdk/region-config-resolver': 3.972.4 - '@aws-sdk/token-providers': 3.997.0 - '@aws-sdk/types': 3.973.2 - '@aws-sdk/util-endpoints': 3.996.1 - '@aws-sdk/util-user-agent-browser': 3.972.4 - '@aws-sdk/util-user-agent-node': 3.972.12 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/credential-provider-node': 3.972.13 + '@aws-sdk/eventstream-handler-node': 3.972.8 + '@aws-sdk/middleware-eventstream': 3.972.5 + '@aws-sdk/middleware-host-header': 3.972.5 + '@aws-sdk/middleware-logger': 3.972.5 + '@aws-sdk/middleware-recursion-detection': 3.972.5 + '@aws-sdk/middleware-user-agent': 3.972.14 + '@aws-sdk/middleware-websocket': 3.972.9 + '@aws-sdk/region-config-resolver': 3.972.5 + '@aws-sdk/token-providers': 3.998.0 + '@aws-sdk/types': 3.973.3 + '@aws-sdk/util-endpoints': 3.996.2 + '@aws-sdk/util-user-agent-browser': 3.972.5 + '@aws-sdk/util-user-agent-node': 3.972.13 '@smithy/config-resolver': 4.4.9 '@smithy/core': 3.23.6 '@smithy/eventstream-serde-browser': 4.2.10 @@ -6226,67 +5886,22 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-bedrock@3.995.0': + '@aws-sdk/client-bedrock@3.998.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.11 - '@aws-sdk/credential-provider-node': 3.972.10 - '@aws-sdk/middleware-host-header': 3.972.3 - '@aws-sdk/middleware-logger': 3.972.3 - '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.11 - '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/token-providers': 3.995.0 - '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.995.0 - '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.10 - '@smithy/config-resolver': 4.4.6 - '@smithy/core': 3.23.2 - 
'@smithy/fetch-http-handler': 5.3.9 - '@smithy/hash-node': 4.2.8 - '@smithy/invalid-dependency': 4.2.8 - '@smithy/middleware-content-length': 4.2.8 - '@smithy/middleware-endpoint': 4.4.16 - '@smithy/middleware-retry': 4.4.33 - '@smithy/middleware-serde': 4.2.9 - '@smithy/middleware-stack': 4.2.8 - '@smithy/node-config-provider': 4.3.8 - '@smithy/node-http-handler': 4.4.10 - '@smithy/protocol-http': 5.3.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-base64': 4.3.0 - '@smithy/util-body-length-browser': 4.2.0 - '@smithy/util-body-length-node': 4.2.1 - '@smithy/util-defaults-mode-browser': 4.3.32 - '@smithy/util-defaults-mode-node': 4.2.35 - '@smithy/util-endpoints': 3.2.8 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-retry': 4.2.8 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/client-bedrock@3.997.0': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.13 - '@aws-sdk/credential-provider-node': 3.972.12 - '@aws-sdk/middleware-host-header': 3.972.4 - '@aws-sdk/middleware-logger': 3.972.4 - '@aws-sdk/middleware-recursion-detection': 3.972.4 - '@aws-sdk/middleware-user-agent': 3.972.13 - '@aws-sdk/region-config-resolver': 3.972.4 - '@aws-sdk/token-providers': 3.997.0 - '@aws-sdk/types': 3.973.2 - '@aws-sdk/util-endpoints': 3.996.1 - '@aws-sdk/util-user-agent-browser': 3.972.4 - '@aws-sdk/util-user-agent-node': 3.972.12 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/credential-provider-node': 3.972.13 + '@aws-sdk/middleware-host-header': 3.972.5 + '@aws-sdk/middleware-logger': 3.972.5 + '@aws-sdk/middleware-recursion-detection': 3.972.5 + '@aws-sdk/middleware-user-agent': 3.972.14 + '@aws-sdk/region-config-resolver': 3.972.5 + '@aws-sdk/token-providers': 3.998.0 + '@aws-sdk/types': 3.973.3 + '@aws-sdk/util-endpoints': 3.996.2 + '@aws-sdk/util-user-agent-browser': 3.972.5 + 
'@aws-sdk/util-user-agent-node': 3.972.13 '@smithy/config-resolver': 4.4.9 '@smithy/core': 3.23.6 '@smithy/fetch-http-handler': 5.3.11 @@ -6316,69 +5931,10 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-sso@3.993.0': + '@aws-sdk/core@3.973.14': dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.11 - '@aws-sdk/middleware-host-header': 3.972.3 - '@aws-sdk/middleware-logger': 3.972.3 - '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.11 - '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.993.0 - '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.10 - '@smithy/config-resolver': 4.4.6 - '@smithy/core': 3.23.2 - '@smithy/fetch-http-handler': 5.3.9 - '@smithy/hash-node': 4.2.8 - '@smithy/invalid-dependency': 4.2.8 - '@smithy/middleware-content-length': 4.2.8 - '@smithy/middleware-endpoint': 4.4.16 - '@smithy/middleware-retry': 4.4.33 - '@smithy/middleware-serde': 4.2.9 - '@smithy/middleware-stack': 4.2.8 - '@smithy/node-config-provider': 4.3.8 - '@smithy/node-http-handler': 4.4.10 - '@smithy/protocol-http': 5.3.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-base64': 4.3.0 - '@smithy/util-body-length-browser': 4.2.0 - '@smithy/util-body-length-node': 4.2.1 - '@smithy/util-defaults-mode-browser': 4.3.32 - '@smithy/util-defaults-mode-node': 4.2.35 - '@smithy/util-endpoints': 3.2.8 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-retry': 4.2.8 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/core@3.973.11': - dependencies: - '@aws-sdk/types': 3.973.1 - '@aws-sdk/xml-builder': 3.972.5 - '@smithy/core': 3.23.2 - '@smithy/node-config-provider': 4.3.8 - '@smithy/property-provider': 4.2.8 - '@smithy/protocol-http': 5.3.8 - '@smithy/signature-v4': 5.3.8 - 
'@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - '@smithy/util-base64': 4.3.0 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - - '@aws-sdk/core@3.973.13': - dependencies: - '@aws-sdk/types': 3.973.2 - '@aws-sdk/xml-builder': 3.972.6 + '@aws-sdk/types': 3.973.3 + '@aws-sdk/xml-builder': 3.972.7 '@smithy/core': 3.23.6 '@smithy/node-config-provider': 4.3.10 '@smithy/property-provider': 4.2.10 @@ -6391,39 +5947,18 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@aws-sdk/credential-provider-env@3.972.11': + '@aws-sdk/credential-provider-env@3.972.12': dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/types': 3.973.3 '@smithy/property-provider': 4.2.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/credential-provider-env@3.972.9': + '@aws-sdk/credential-provider-http@3.972.14': dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/types': 3.973.1 - '@smithy/property-provider': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/credential-provider-http@3.972.11': - dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/types': 3.973.1 - '@smithy/fetch-http-handler': 5.3.9 - '@smithy/node-http-handler': 4.4.10 - '@smithy/property-provider': 4.2.8 - '@smithy/protocol-http': 5.3.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - '@smithy/util-stream': 4.5.12 - tslib: 2.8.1 - - '@aws-sdk/credential-provider-http@3.972.13': - dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/types': 3.973.3 '@smithy/fetch-http-handler': 5.3.11 '@smithy/node-http-handler': 4.4.12 '@smithy/property-provider': 4.2.10 @@ -6433,17 +5968,17 @@ snapshots: '@smithy/util-stream': 4.5.15 tslib: 2.8.1 - '@aws-sdk/credential-provider-ini@3.972.11': + '@aws-sdk/credential-provider-ini@3.972.12': dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/credential-provider-env': 3.972.11 - 
'@aws-sdk/credential-provider-http': 3.972.13 - '@aws-sdk/credential-provider-login': 3.972.11 - '@aws-sdk/credential-provider-process': 3.972.11 - '@aws-sdk/credential-provider-sso': 3.972.11 - '@aws-sdk/credential-provider-web-identity': 3.972.11 - '@aws-sdk/nested-clients': 3.996.1 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/credential-provider-env': 3.972.12 + '@aws-sdk/credential-provider-http': 3.972.14 + '@aws-sdk/credential-provider-login': 3.972.12 + '@aws-sdk/credential-provider-process': 3.972.12 + '@aws-sdk/credential-provider-sso': 3.972.12 + '@aws-sdk/credential-provider-web-identity': 3.972.12 + '@aws-sdk/nested-clients': 3.996.2 + '@aws-sdk/types': 3.973.3 '@smithy/credential-provider-imds': 4.2.10 '@smithy/property-provider': 4.2.10 '@smithy/shared-ini-file-loader': 4.4.5 @@ -6452,30 +5987,11 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-ini@3.972.9': + '@aws-sdk/credential-provider-login@3.972.12': dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/credential-provider-env': 3.972.9 - '@aws-sdk/credential-provider-http': 3.972.11 - '@aws-sdk/credential-provider-login': 3.972.9 - '@aws-sdk/credential-provider-process': 3.972.9 - '@aws-sdk/credential-provider-sso': 3.972.9 - '@aws-sdk/credential-provider-web-identity': 3.972.9 - '@aws-sdk/nested-clients': 3.993.0 - '@aws-sdk/types': 3.973.1 - '@smithy/credential-provider-imds': 4.2.8 - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-login@3.972.11': - dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/nested-clients': 3.996.1 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/nested-clients': 3.996.2 + '@aws-sdk/types': 3.973.3 '@smithy/property-provider': 4.2.10 '@smithy/protocol-http': 5.3.10 '@smithy/shared-ini-file-loader': 4.4.5 @@ -6484,45 +6000,15 @@ snapshots: 
transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-login@3.972.9': + '@aws-sdk/credential-provider-node@3.972.13': dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/nested-clients': 3.993.0 - '@aws-sdk/types': 3.973.1 - '@smithy/property-provider': 4.2.8 - '@smithy/protocol-http': 5.3.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-node@3.972.10': - dependencies: - '@aws-sdk/credential-provider-env': 3.972.9 - '@aws-sdk/credential-provider-http': 3.972.11 - '@aws-sdk/credential-provider-ini': 3.972.9 - '@aws-sdk/credential-provider-process': 3.972.9 - '@aws-sdk/credential-provider-sso': 3.972.9 - '@aws-sdk/credential-provider-web-identity': 3.972.9 - '@aws-sdk/types': 3.973.1 - '@smithy/credential-provider-imds': 4.2.8 - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-node@3.972.12': - dependencies: - '@aws-sdk/credential-provider-env': 3.972.11 - '@aws-sdk/credential-provider-http': 3.972.13 - '@aws-sdk/credential-provider-ini': 3.972.11 - '@aws-sdk/credential-provider-process': 3.972.11 - '@aws-sdk/credential-provider-sso': 3.972.11 - '@aws-sdk/credential-provider-web-identity': 3.972.11 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/credential-provider-env': 3.972.12 + '@aws-sdk/credential-provider-http': 3.972.14 + '@aws-sdk/credential-provider-ini': 3.972.12 + '@aws-sdk/credential-provider-process': 3.972.12 + '@aws-sdk/credential-provider-sso': 3.972.12 + '@aws-sdk/credential-provider-web-identity': 3.972.12 + '@aws-sdk/types': 3.973.3 '@smithy/credential-provider-imds': 4.2.10 '@smithy/property-provider': 4.2.10 '@smithy/shared-ini-file-loader': 4.4.5 @@ -6531,30 +6017,21 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-process@3.972.11': + 
'@aws-sdk/credential-provider-process@3.972.12': dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/types': 3.973.3 '@smithy/property-provider': 4.2.10 '@smithy/shared-ini-file-loader': 4.4.5 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/credential-provider-process@3.972.9': + '@aws-sdk/credential-provider-sso@3.972.12': dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/types': 3.973.1 - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/credential-provider-sso@3.972.11': - dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/nested-clients': 3.996.1 - '@aws-sdk/token-providers': 3.997.0 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/nested-clients': 3.996.2 + '@aws-sdk/token-providers': 3.998.0 + '@aws-sdk/types': 3.973.3 '@smithy/property-provider': 4.2.10 '@smithy/shared-ini-file-loader': 4.4.5 '@smithy/types': 4.13.0 @@ -6562,24 +6039,11 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-sso@3.972.9': + '@aws-sdk/credential-provider-web-identity@3.972.12': dependencies: - '@aws-sdk/client-sso': 3.993.0 - '@aws-sdk/core': 3.973.11 - '@aws-sdk/token-providers': 3.993.0 - '@aws-sdk/types': 3.973.1 - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-web-identity@3.972.11': - dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/nested-clients': 3.996.1 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/nested-clients': 3.996.2 + '@aws-sdk/types': 3.973.3 '@smithy/property-provider': 4.2.10 '@smithy/shared-ini-file-loader': 4.4.5 '@smithy/types': 4.13.0 @@ -6587,127 +6051,55 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-web-identity@3.972.9': + 
'@aws-sdk/eventstream-handler-node@3.972.8': dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/nested-clients': 3.993.0 - '@aws-sdk/types': 3.973.1 - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/eventstream-handler-node@3.972.5': - dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/eventstream-codec': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/eventstream-handler-node@3.972.7': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/eventstream-codec': 4.2.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-eventstream@3.972.3': + '@aws-sdk/middleware-eventstream@3.972.5': dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/protocol-http': 5.3.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/middleware-eventstream@3.972.4': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/protocol-http': 5.3.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-host-header@3.972.3': + '@aws-sdk/middleware-host-header@3.972.5': dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/protocol-http': 5.3.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/middleware-host-header@3.972.4': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/protocol-http': 5.3.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-logger@3.972.3': + '@aws-sdk/middleware-logger@3.972.5': dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/middleware-logger@3.972.4': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-recursion-detection@3.972.3': + '@aws-sdk/middleware-recursion-detection@3.972.5': dependencies: - '@aws-sdk/types': 3.973.1 - '@aws/lambda-invoke-store': 0.2.3 - '@smithy/protocol-http': 5.3.8 - 
'@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/middleware-recursion-detection@3.972.4': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@aws/lambda-invoke-store': 0.2.3 '@smithy/protocol-http': 5.3.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-user-agent@3.972.11': + '@aws-sdk/middleware-user-agent@3.972.14': dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.993.0 - '@smithy/core': 3.23.2 - '@smithy/protocol-http': 5.3.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/middleware-user-agent@3.972.13': - dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/types': 3.973.2 - '@aws-sdk/util-endpoints': 3.996.1 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/types': 3.973.3 + '@aws-sdk/util-endpoints': 3.996.2 '@smithy/core': 3.23.6 '@smithy/protocol-http': 5.3.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-websocket@3.972.6': + '@aws-sdk/middleware-websocket@3.972.9': dependencies: - '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-format-url': 3.972.3 - '@smithy/eventstream-codec': 4.2.8 - '@smithy/eventstream-serde-browser': 4.2.8 - '@smithy/fetch-http-handler': 5.3.9 - '@smithy/protocol-http': 5.3.8 - '@smithy/signature-v4': 5.3.8 - '@smithy/types': 4.12.0 - '@smithy/util-base64': 4.3.0 - '@smithy/util-hex-encoding': 4.2.0 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - - '@aws-sdk/middleware-websocket@3.972.8': - dependencies: - '@aws-sdk/types': 3.973.2 - '@aws-sdk/util-format-url': 3.972.4 + '@aws-sdk/types': 3.973.3 + '@aws-sdk/util-format-url': 3.972.5 '@smithy/eventstream-codec': 4.2.10 '@smithy/eventstream-serde-browser': 4.2.10 '@smithy/fetch-http-handler': 5.3.11 @@ -6719,106 +6111,20 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@aws-sdk/nested-clients@3.993.0': + '@aws-sdk/nested-clients@3.996.2': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.11 - 
'@aws-sdk/middleware-host-header': 3.972.3 - '@aws-sdk/middleware-logger': 3.972.3 - '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.11 - '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.993.0 - '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.10 - '@smithy/config-resolver': 4.4.6 - '@smithy/core': 3.23.2 - '@smithy/fetch-http-handler': 5.3.9 - '@smithy/hash-node': 4.2.8 - '@smithy/invalid-dependency': 4.2.8 - '@smithy/middleware-content-length': 4.2.8 - '@smithy/middleware-endpoint': 4.4.16 - '@smithy/middleware-retry': 4.4.33 - '@smithy/middleware-serde': 4.2.9 - '@smithy/middleware-stack': 4.2.8 - '@smithy/node-config-provider': 4.3.8 - '@smithy/node-http-handler': 4.4.10 - '@smithy/protocol-http': 5.3.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-base64': 4.3.0 - '@smithy/util-body-length-browser': 4.2.0 - '@smithy/util-body-length-node': 4.2.1 - '@smithy/util-defaults-mode-browser': 4.3.32 - '@smithy/util-defaults-mode-node': 4.2.35 - '@smithy/util-endpoints': 3.2.8 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-retry': 4.2.8 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/nested-clients@3.995.0': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.11 - '@aws-sdk/middleware-host-header': 3.972.3 - '@aws-sdk/middleware-logger': 3.972.3 - '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.11 - '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.995.0 - '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.10 - '@smithy/config-resolver': 4.4.6 - '@smithy/core': 3.23.2 - '@smithy/fetch-http-handler': 5.3.9 - '@smithy/hash-node': 4.2.8 - 
'@smithy/invalid-dependency': 4.2.8 - '@smithy/middleware-content-length': 4.2.8 - '@smithy/middleware-endpoint': 4.4.16 - '@smithy/middleware-retry': 4.4.33 - '@smithy/middleware-serde': 4.2.9 - '@smithy/middleware-stack': 4.2.8 - '@smithy/node-config-provider': 4.3.8 - '@smithy/node-http-handler': 4.4.10 - '@smithy/protocol-http': 5.3.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-base64': 4.3.0 - '@smithy/util-body-length-browser': 4.2.0 - '@smithy/util-body-length-node': 4.2.1 - '@smithy/util-defaults-mode-browser': 4.3.32 - '@smithy/util-defaults-mode-node': 4.2.35 - '@smithy/util-endpoints': 3.2.8 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-retry': 4.2.8 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/nested-clients@3.996.1': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.13 - '@aws-sdk/middleware-host-header': 3.972.4 - '@aws-sdk/middleware-logger': 3.972.4 - '@aws-sdk/middleware-recursion-detection': 3.972.4 - '@aws-sdk/middleware-user-agent': 3.972.13 - '@aws-sdk/region-config-resolver': 3.972.4 - '@aws-sdk/types': 3.973.2 - '@aws-sdk/util-endpoints': 3.996.1 - '@aws-sdk/util-user-agent-browser': 3.972.4 - '@aws-sdk/util-user-agent-node': 3.972.12 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/middleware-host-header': 3.972.5 + '@aws-sdk/middleware-logger': 3.972.5 + '@aws-sdk/middleware-recursion-detection': 3.972.5 + '@aws-sdk/middleware-user-agent': 3.972.14 + '@aws-sdk/region-config-resolver': 3.972.5 + '@aws-sdk/types': 3.973.3 + '@aws-sdk/util-endpoints': 3.996.2 + '@aws-sdk/util-user-agent-browser': 3.972.5 + '@aws-sdk/util-user-agent-node': 3.972.13 '@smithy/config-resolver': 4.4.9 '@smithy/core': 3.23.6 '@smithy/fetch-http-handler': 5.3.11 @@ -6848,51 +6154,19 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/region-config-resolver@3.972.3': + 
'@aws-sdk/region-config-resolver@3.972.5': dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/config-resolver': 4.4.6 - '@smithy/node-config-provider': 4.3.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/region-config-resolver@3.972.4': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/config-resolver': 4.4.9 '@smithy/node-config-provider': 4.3.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/token-providers@3.993.0': + '@aws-sdk/token-providers@3.998.0': dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/nested-clients': 3.993.0 - '@aws-sdk/types': 3.973.1 - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/token-providers@3.995.0': - dependencies: - '@aws-sdk/core': 3.973.11 - '@aws-sdk/nested-clients': 3.995.0 - '@aws-sdk/types': 3.973.1 - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/token-providers@3.997.0': - dependencies: - '@aws-sdk/core': 3.973.13 - '@aws-sdk/nested-clients': 3.996.1 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/core': 3.973.14 + '@aws-sdk/nested-clients': 3.996.2 + '@aws-sdk/types': 3.973.3 '@smithy/property-provider': 4.2.10 '@smithy/shared-ini-file-loader': 4.4.5 '@smithy/types': 4.13.0 @@ -6900,50 +6174,22 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/types@3.973.1': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/types@3.973.2': + '@aws-sdk/types@3.973.3': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/util-endpoints@3.993.0': + '@aws-sdk/util-endpoints@3.996.2': dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-endpoints': 3.2.8 - tslib: 2.8.1 - - '@aws-sdk/util-endpoints@3.995.0': - dependencies: - '@aws-sdk/types': 3.973.1 - 
'@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-endpoints': 3.2.8 - tslib: 2.8.1 - - '@aws-sdk/util-endpoints@3.996.1': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/types': 4.13.0 '@smithy/url-parser': 4.2.10 '@smithy/util-endpoints': 3.3.1 tslib: 2.8.1 - '@aws-sdk/util-format-url@3.972.3': + '@aws-sdk/util-format-url@3.972.5': dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/querystring-builder': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/util-format-url@3.972.4': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/querystring-builder': 4.2.10 '@smithy/types': 4.13.0 tslib: 2.8.1 @@ -6952,43 +6198,22 @@ snapshots: dependencies: tslib: 2.8.1 - '@aws-sdk/util-user-agent-browser@3.972.3': + '@aws-sdk/util-user-agent-browser@3.972.5': dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/types': 4.12.0 - bowser: 2.14.1 - tslib: 2.8.1 - - '@aws-sdk/util-user-agent-browser@3.972.4': - dependencies: - '@aws-sdk/types': 3.973.2 + '@aws-sdk/types': 3.973.3 '@smithy/types': 4.13.0 bowser: 2.14.1 tslib: 2.8.1 - '@aws-sdk/util-user-agent-node@3.972.10': + '@aws-sdk/util-user-agent-node@3.972.13': dependencies: - '@aws-sdk/middleware-user-agent': 3.972.11 - '@aws-sdk/types': 3.973.1 - '@smithy/node-config-provider': 4.3.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@aws-sdk/util-user-agent-node@3.972.12': - dependencies: - '@aws-sdk/middleware-user-agent': 3.972.13 - '@aws-sdk/types': 3.973.2 + '@aws-sdk/middleware-user-agent': 3.972.14 + '@aws-sdk/types': 3.973.3 '@smithy/node-config-provider': 4.3.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/xml-builder@3.972.5': - dependencies: - '@smithy/types': 4.12.0 - fast-xml-parser: 5.3.6 - tslib: 2.8.1 - - '@aws-sdk/xml-builder@3.972.6': + '@aws-sdk/xml-builder@3.972.7': dependencies: '@smithy/types': 4.13.0 fast-xml-parser: 5.3.6 @@ -7016,11 +6241,11 @@ snapshots: transitivePeerDependencies: - supports-color - 
'@azure/msal-common@16.0.4': {} + '@azure/msal-common@16.1.0': {} - '@azure/msal-node@5.0.4': + '@azure/msal-node@5.0.5': dependencies: - '@azure/msal-common': 16.0.4 + '@azure/msal-common': 16.1.0 jsonwebtoken: 9.0.3 uuid: 8.3.2 @@ -7065,29 +6290,9 @@ snapshots: '@borewit/text-codec@0.2.1': {} - '@buape/carbon@0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.11.10)(opusscript@0.0.8)': - dependencies: - '@types/node': 25.3.0 - discord-api-types: 0.38.37 - optionalDependencies: - '@cloudflare/workers-types': 4.20260120.0 - '@discordjs/voice': 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.0.8) - '@hono/node-server': 1.19.9(hono@4.11.10) - '@types/bun': 1.3.9 - '@types/ws': 8.18.1 - ws: 8.19.0 - transitivePeerDependencies: - - '@discordjs/opus' - - bufferutil - - ffmpeg-static - - hono - - node-opus - - opusscript - - utf-8-validate - '@buape/carbon@0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.11.10)(opusscript@0.1.1)': dependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 discord-api-types: 0.38.37 optionalDependencies: '@cloudflare/workers-types': 4.20260120.0 @@ -7241,21 +6446,6 @@ snapshots: - supports-color optional: true - '@discordjs/voice@0.19.0(@discordjs/opus@0.10.0)(opusscript@0.0.8)': - dependencies: - '@types/ws': 8.18.1 - discord-api-types: 0.38.40 - prism-media: 1.3.5(@discordjs/opus@0.10.0)(opusscript@0.0.8) - tslib: 2.8.1 - ws: 8.19.0 - transitivePeerDependencies: - - '@discordjs/opus' - - bufferutil - - ffmpeg-static - - node-opus - - opusscript - - utf-8-validate - '@discordjs/voice@0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1)': dependencies: '@types/ws': 8.18.1 @@ -7379,15 +6569,26 @@ snapshots: - supports-color - utf-8-validate - '@grammyjs/runner@2.0.3(grammy@1.40.0)': + '@google/genai@1.43.0': + dependencies: + google-auth-library: 10.6.1 + p-retry: 4.6.2 + protobufjs: 7.5.4 + ws: 8.19.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + + '@grammyjs/runner@2.0.3(grammy@1.40.1)': 
dependencies: abort-controller: 3.0.0 - grammy: 1.40.0 + grammy: 1.40.1 - '@grammyjs/transformer-throttler@1.2.1(grammy@1.40.0)': + '@grammyjs/transformer-throttler@1.2.1(grammy@1.40.1)': dependencies: bottleneck: 2.19.5 - grammy: 1.40.0 + grammy: 1.40.1 '@grammyjs/types@3.24.0': {} @@ -7602,7 +6803,7 @@ snapshots: '@larksuiteoapi/node-sdk@1.59.0': dependencies: - axios: 1.13.5 + axios: 1.13.5(debug@4.4.3) lodash.identity: 3.0.0 lodash.merge: 4.6.2 lodash.pickby: 4.6.0 @@ -7616,9 +6817,9 @@ snapshots: '@line/bot-sdk@10.6.0': dependencies: - '@types/node': 24.10.13 + '@types/node': 24.10.14 optionalDependencies: - axios: 1.13.5 + axios: 1.13.5(debug@4.4.3) transitivePeerDependencies: - debug @@ -7713,18 +6914,6 @@ snapshots: std-env: 3.10.0 yoctocolors: 2.1.2 - '@mariozechner/pi-agent-core@0.54.1(ws@8.19.0)(zod@4.3.6)': - dependencies: - '@mariozechner/pi-ai': 0.54.1(ws@8.19.0)(zod@4.3.6) - transitivePeerDependencies: - - '@modelcontextprotocol/sdk' - - aws-crt - - bufferutil - - supports-color - - utf-8-validate - - ws - - zod - '@mariozechner/pi-agent-core@0.55.0(ws@8.19.0)(zod@4.3.6)': dependencies: '@mariozechner/pi-ai': 0.55.0(ws@8.19.0)(zod@4.3.6) @@ -7737,21 +6926,9 @@ snapshots: - ws - zod - '@mariozechner/pi-ai@0.54.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-agent-core@0.55.1(ws@8.19.0)(zod@4.3.6)': dependencies: - '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) - '@aws-sdk/client-bedrock-runtime': 3.995.0 - '@google/genai': 1.42.0 - '@mistralai/mistralai': 1.10.0 - '@sinclair/typebox': 0.34.48 - ajv: 8.18.0 - ajv-formats: 3.0.1(ajv@8.18.0) - chalk: 5.6.2 - openai: 6.10.0(ws@8.19.0)(zod@4.3.6) - partial-json: 0.1.7 - proxy-agent: 6.5.0 - undici: 7.22.0 - zod-to-json-schema: 3.25.1(zod@4.3.6) + '@mariozechner/pi-ai': 0.55.1(ws@8.19.0)(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' - aws-crt @@ -7764,7 +6941,7 @@ snapshots: '@mariozechner/pi-ai@0.55.0(ws@8.19.0)(zod@4.3.6)': dependencies: '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) - 
'@aws-sdk/client-bedrock-runtime': 3.997.0 + '@aws-sdk/client-bedrock-runtime': 3.998.0 '@google/genai': 1.42.0 '@mistralai/mistralai': 1.10.0 '@sinclair/typebox': 0.34.48 @@ -7785,26 +6962,21 @@ snapshots: - ws - zod - '@mariozechner/pi-coding-agent@0.54.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-ai@0.55.1(ws@8.19.0)(zod@4.3.6)': dependencies: - '@mariozechner/jiti': 2.6.5 - '@mariozechner/pi-agent-core': 0.54.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.54.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.54.1 - '@silvia-odwyer/photon-node': 0.3.4 + '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) + '@aws-sdk/client-bedrock-runtime': 3.998.0 + '@google/genai': 1.43.0 + '@mistralai/mistralai': 1.10.0 + '@sinclair/typebox': 0.34.48 + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) chalk: 5.6.2 - cli-highlight: 2.1.11 - diff: 8.0.3 - file-type: 21.3.0 - glob: 13.0.6 - hosted-git-info: 9.0.2 - ignore: 7.0.5 - marked: 15.0.12 - minimatch: 10.2.1 - proper-lockfile: 4.1.2 - yaml: 2.8.2 - optionalDependencies: - '@mariozechner/clipboard': 0.3.2 + openai: 6.10.0(ws@8.19.0)(zod@4.3.6) + partial-json: 0.1.7 + proxy-agent: 6.5.0 + undici: 7.22.0 + zod-to-json-schema: 3.25.1(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' - aws-crt @@ -7829,7 +7001,7 @@ snapshots: hosted-git-info: 9.0.2 ignore: 7.0.5 marked: 15.0.12 - minimatch: 10.2.1 + minimatch: 10.2.4 proper-lockfile: 4.1.2 yaml: 2.8.2 optionalDependencies: @@ -7843,7 +7015,37 @@ snapshots: - ws - zod - '@mariozechner/pi-tui@0.54.1': + '@mariozechner/pi-coding-agent@0.55.1(ws@8.19.0)(zod@4.3.6)': + dependencies: + '@mariozechner/jiti': 2.6.5 + '@mariozechner/pi-agent-core': 0.55.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.55.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.55.1 + '@silvia-odwyer/photon-node': 0.3.4 + chalk: 5.6.2 + cli-highlight: 2.1.11 + diff: 8.0.3 + extract-zip: 2.0.1 + file-type: 21.3.0 + glob: 13.0.6 + hosted-git-info: 9.0.2 + ignore: 7.0.5 + marked: 15.0.12 + minimatch: 
10.2.4 + proper-lockfile: 4.1.2 + yaml: 2.8.2 + optionalDependencies: + '@mariozechner/clipboard': 0.3.2 + transitivePeerDependencies: + - '@modelcontextprotocol/sdk' + - aws-crt + - bufferutil + - supports-color + - utf-8-validate + - ws + - zod + + '@mariozechner/pi-tui@0.55.0': dependencies: '@types/mime-types': 2.1.4 chalk: 5.6.2 @@ -7852,7 +7054,7 @@ snapshots: marked: 15.0.12 mime-types: 3.0.2 - '@mariozechner/pi-tui@0.55.0': + '@mariozechner/pi-tui@0.55.1': dependencies: '@types/mime-types': 2.1.4 chalk: 5.6.2 @@ -7879,9 +7081,9 @@ snapshots: '@microsoft/agents-hosting@1.3.1': dependencies: '@azure/core-auth': 1.10.1 - '@azure/msal-node': 5.0.4 + '@azure/msal-node': 5.0.5 '@microsoft/agents-activity': 1.3.1 - axios: 1.13.5 + axios: 1.13.5(debug@4.4.3) jsonwebtoken: 9.0.3 jwks-rsa: 3.2.2 object-path: 0.11.8 @@ -7897,99 +7099,52 @@ snapshots: '@mozilla/readability@0.6.0': {} - '@napi-rs/canvas-android-arm64@0.1.92': + '@napi-rs/canvas-android-arm64@0.1.95': optional: true - '@napi-rs/canvas-android-arm64@0.1.94': + '@napi-rs/canvas-darwin-arm64@0.1.95': optional: true - '@napi-rs/canvas-darwin-arm64@0.1.92': + '@napi-rs/canvas-darwin-x64@0.1.95': optional: true - '@napi-rs/canvas-darwin-arm64@0.1.94': + '@napi-rs/canvas-linux-arm-gnueabihf@0.1.95': optional: true - '@napi-rs/canvas-darwin-x64@0.1.92': + '@napi-rs/canvas-linux-arm64-gnu@0.1.95': optional: true - '@napi-rs/canvas-darwin-x64@0.1.94': + '@napi-rs/canvas-linux-arm64-musl@0.1.95': optional: true - '@napi-rs/canvas-linux-arm-gnueabihf@0.1.92': + '@napi-rs/canvas-linux-riscv64-gnu@0.1.95': optional: true - '@napi-rs/canvas-linux-arm-gnueabihf@0.1.94': + '@napi-rs/canvas-linux-x64-gnu@0.1.95': optional: true - '@napi-rs/canvas-linux-arm64-gnu@0.1.92': + '@napi-rs/canvas-linux-x64-musl@0.1.95': optional: true - '@napi-rs/canvas-linux-arm64-gnu@0.1.94': + '@napi-rs/canvas-win32-arm64-msvc@0.1.95': optional: true - '@napi-rs/canvas-linux-arm64-musl@0.1.92': + '@napi-rs/canvas-win32-x64-msvc@0.1.95': 
optional: true - '@napi-rs/canvas-linux-arm64-musl@0.1.94': - optional: true - - '@napi-rs/canvas-linux-riscv64-gnu@0.1.92': - optional: true - - '@napi-rs/canvas-linux-riscv64-gnu@0.1.94': - optional: true - - '@napi-rs/canvas-linux-x64-gnu@0.1.92': - optional: true - - '@napi-rs/canvas-linux-x64-gnu@0.1.94': - optional: true - - '@napi-rs/canvas-linux-x64-musl@0.1.92': - optional: true - - '@napi-rs/canvas-linux-x64-musl@0.1.94': - optional: true - - '@napi-rs/canvas-win32-arm64-msvc@0.1.92': - optional: true - - '@napi-rs/canvas-win32-arm64-msvc@0.1.94': - optional: true - - '@napi-rs/canvas-win32-x64-msvc@0.1.92': - optional: true - - '@napi-rs/canvas-win32-x64-msvc@0.1.94': - optional: true - - '@napi-rs/canvas@0.1.92': + '@napi-rs/canvas@0.1.95': optionalDependencies: - '@napi-rs/canvas-android-arm64': 0.1.92 - '@napi-rs/canvas-darwin-arm64': 0.1.92 - '@napi-rs/canvas-darwin-x64': 0.1.92 - '@napi-rs/canvas-linux-arm-gnueabihf': 0.1.92 - '@napi-rs/canvas-linux-arm64-gnu': 0.1.92 - '@napi-rs/canvas-linux-arm64-musl': 0.1.92 - '@napi-rs/canvas-linux-riscv64-gnu': 0.1.92 - '@napi-rs/canvas-linux-x64-gnu': 0.1.92 - '@napi-rs/canvas-linux-x64-musl': 0.1.92 - '@napi-rs/canvas-win32-arm64-msvc': 0.1.92 - '@napi-rs/canvas-win32-x64-msvc': 0.1.92 - - '@napi-rs/canvas@0.1.94': - optionalDependencies: - '@napi-rs/canvas-android-arm64': 0.1.94 - '@napi-rs/canvas-darwin-arm64': 0.1.94 - '@napi-rs/canvas-darwin-x64': 0.1.94 - '@napi-rs/canvas-linux-arm-gnueabihf': 0.1.94 - '@napi-rs/canvas-linux-arm64-gnu': 0.1.94 - '@napi-rs/canvas-linux-arm64-musl': 0.1.94 - '@napi-rs/canvas-linux-riscv64-gnu': 0.1.94 - '@napi-rs/canvas-linux-x64-gnu': 0.1.94 - '@napi-rs/canvas-linux-x64-musl': 0.1.94 - '@napi-rs/canvas-win32-arm64-msvc': 0.1.94 - '@napi-rs/canvas-win32-x64-msvc': 0.1.94 + '@napi-rs/canvas-android-arm64': 0.1.95 + '@napi-rs/canvas-darwin-arm64': 0.1.95 + '@napi-rs/canvas-darwin-x64': 0.1.95 + '@napi-rs/canvas-linux-arm-gnueabihf': 0.1.95 + 
'@napi-rs/canvas-linux-arm64-gnu': 0.1.95 + '@napi-rs/canvas-linux-arm64-musl': 0.1.95 + '@napi-rs/canvas-linux-riscv64-gnu': 0.1.95 + '@napi-rs/canvas-linux-x64-gnu': 0.1.95 + '@napi-rs/canvas-linux-x64-musl': 0.1.95 + '@napi-rs/canvas-win32-arm64-msvc': 0.1.95 + '@napi-rs/canvas-win32-x64-msvc': 0.1.95 '@napi-rs/wasm-runtime@1.1.1': dependencies: @@ -8061,7 +7216,7 @@ snapshots: dependencies: '@octokit/auth-oauth-app': 9.0.3 '@octokit/auth-oauth-user': 6.0.2 - '@octokit/request': 10.0.7 + '@octokit/request': 10.0.8 '@octokit/request-error': 7.1.0 '@octokit/types': 16.0.0 toad-cache: 3.7.0 @@ -8072,14 +7227,14 @@ snapshots: dependencies: '@octokit/auth-oauth-device': 8.0.3 '@octokit/auth-oauth-user': 6.0.2 - '@octokit/request': 10.0.7 + '@octokit/request': 10.0.8 '@octokit/types': 16.0.0 universal-user-agent: 7.0.3 '@octokit/auth-oauth-device@8.0.3': dependencies: '@octokit/oauth-methods': 6.0.2 - '@octokit/request': 10.0.7 + '@octokit/request': 10.0.8 '@octokit/types': 16.0.0 universal-user-agent: 7.0.3 @@ -8087,7 +7242,7 @@ snapshots: dependencies: '@octokit/auth-oauth-device': 8.0.3 '@octokit/oauth-methods': 6.0.2 - '@octokit/request': 10.0.7 + '@octokit/request': 10.0.8 '@octokit/types': 16.0.0 universal-user-agent: 7.0.3 @@ -8102,20 +7257,20 @@ snapshots: dependencies: '@octokit/auth-token': 6.0.0 '@octokit/graphql': 9.0.3 - '@octokit/request': 10.0.7 + '@octokit/request': 10.0.8 '@octokit/request-error': 7.1.0 '@octokit/types': 16.0.0 before-after-hook: 4.0.0 universal-user-agent: 7.0.3 - '@octokit/endpoint@11.0.2': + '@octokit/endpoint@11.0.3': dependencies: '@octokit/types': 16.0.0 universal-user-agent: 7.0.3 '@octokit/graphql@9.0.3': dependencies: - '@octokit/request': 10.0.7 + '@octokit/request': 10.0.8 '@octokit/types': 16.0.0 universal-user-agent: 7.0.3 @@ -8135,7 +7290,7 @@ snapshots: '@octokit/oauth-methods@6.0.2': dependencies: '@octokit/oauth-authorization-url': 8.0.0 - '@octokit/request': 10.0.7 + '@octokit/request': 10.0.8 
'@octokit/request-error': 7.1.0 '@octokit/types': 16.0.0 @@ -8157,7 +7312,7 @@ snapshots: '@octokit/core': 7.0.6 '@octokit/types': 16.0.0 - '@octokit/plugin-retry@8.0.3(@octokit/core@7.0.6)': + '@octokit/plugin-retry@8.1.0(@octokit/core@7.0.6)': dependencies: '@octokit/core': 7.0.6 '@octokit/request-error': 7.1.0 @@ -8174,12 +7329,13 @@ snapshots: dependencies: '@octokit/types': 16.0.0 - '@octokit/request@10.0.7': + '@octokit/request@10.0.8': dependencies: - '@octokit/endpoint': 11.0.2 + '@octokit/endpoint': 11.0.3 '@octokit/request-error': 7.1.0 '@octokit/types': 16.0.0 fast-content-type-parse: 3.0.0 + json-with-bigint: 3.5.3 universal-user-agent: 7.0.3 '@octokit/types@16.0.0': @@ -8782,7 +7938,7 @@ snapshots: '@slack/types': 2.20.0 '@slack/web-api': 7.14.1 '@types/express': 5.0.6 - axios: 1.13.5 + axios: 1.13.5(debug@4.4.3) express: 5.2.1 path-to-regexp: 8.3.0 raw-body: 3.0.2 @@ -8795,14 +7951,14 @@ snapshots: '@slack/logger@4.0.0': dependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@slack/oauth@3.0.4': dependencies: '@slack/logger': 4.0.0 '@slack/web-api': 7.14.1 '@types/jsonwebtoken': 9.0.10 - '@types/node': 25.3.0 + '@types/node': 25.3.1 jsonwebtoken: 9.0.3 transitivePeerDependencies: - debug @@ -8811,7 +7967,7 @@ snapshots: dependencies: '@slack/logger': 4.0.0 '@slack/web-api': 7.14.1 - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/ws': 8.18.1 eventemitter3: 5.0.4 ws: 8.19.0 @@ -8826,9 +7982,9 @@ snapshots: dependencies: '@slack/logger': 4.0.0 '@slack/types': 2.20.0 - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/retry': 0.12.0 - axios: 1.13.5 + axios: 1.13.5(debug@4.4.3) eventemitter3: 5.0.4 form-data: 2.5.4 is-electron: 2.2.2 @@ -8844,20 +8000,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/abort-controller@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@smithy/config-resolver@4.4.6': - dependencies: - '@smithy/node-config-provider': 4.3.8 - '@smithy/types': 4.12.0 - 
'@smithy/util-config-provider': 4.2.0 - '@smithy/util-endpoints': 3.2.8 - '@smithy/util-middleware': 4.2.8 - tslib: 2.8.1 - '@smithy/config-resolver@4.4.9': dependencies: '@smithy/node-config-provider': 4.3.10 @@ -8867,19 +8009,6 @@ snapshots: '@smithy/util-middleware': 4.2.10 tslib: 2.8.1 - '@smithy/core@3.23.2': - dependencies: - '@smithy/middleware-serde': 4.2.9 - '@smithy/protocol-http': 5.3.8 - '@smithy/types': 4.12.0 - '@smithy/util-base64': 4.3.0 - '@smithy/util-body-length-browser': 4.2.0 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-stream': 4.5.12 - '@smithy/util-utf8': 4.2.0 - '@smithy/uuid': 1.1.0 - tslib: 2.8.1 - '@smithy/core@3.23.6': dependencies: '@smithy/middleware-serde': 4.2.11 @@ -8901,14 +8030,6 @@ snapshots: '@smithy/url-parser': 4.2.10 tslib: 2.8.1 - '@smithy/credential-provider-imds@4.2.8': - dependencies: - '@smithy/node-config-provider': 4.3.8 - '@smithy/property-provider': 4.2.8 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - tslib: 2.8.1 - '@smithy/eventstream-codec@4.2.10': dependencies: '@aws-crypto/crc32': 5.2.0 @@ -8916,59 +8037,29 @@ snapshots: '@smithy/util-hex-encoding': 4.2.1 tslib: 2.8.1 - '@smithy/eventstream-codec@4.2.8': - dependencies: - '@aws-crypto/crc32': 5.2.0 - '@smithy/types': 4.12.0 - '@smithy/util-hex-encoding': 4.2.0 - tslib: 2.8.1 - '@smithy/eventstream-serde-browser@4.2.10': dependencies: '@smithy/eventstream-serde-universal': 4.2.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/eventstream-serde-browser@4.2.8': - dependencies: - '@smithy/eventstream-serde-universal': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/eventstream-serde-config-resolver@4.3.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/eventstream-serde-config-resolver@4.3.8': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/eventstream-serde-node@4.2.10': dependencies: '@smithy/eventstream-serde-universal': 4.2.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - 
'@smithy/eventstream-serde-node@4.2.8': - dependencies: - '@smithy/eventstream-serde-universal': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/eventstream-serde-universal@4.2.10': dependencies: '@smithy/eventstream-codec': 4.2.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/eventstream-serde-universal@4.2.8': - dependencies: - '@smithy/eventstream-codec': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/fetch-http-handler@5.3.11': dependencies: '@smithy/protocol-http': 5.3.10 @@ -8977,14 +8068,6 @@ snapshots: '@smithy/util-base64': 4.3.1 tslib: 2.8.1 - '@smithy/fetch-http-handler@5.3.9': - dependencies: - '@smithy/protocol-http': 5.3.8 - '@smithy/querystring-builder': 4.2.8 - '@smithy/types': 4.12.0 - '@smithy/util-base64': 4.3.0 - tslib: 2.8.1 - '@smithy/hash-node@4.2.10': dependencies: '@smithy/types': 4.13.0 @@ -8992,31 +8075,15 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/hash-node@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - '@smithy/util-buffer-from': 4.2.0 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - '@smithy/invalid-dependency@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/invalid-dependency@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/is-array-buffer@2.2.0': dependencies: tslib: 2.8.1 - '@smithy/is-array-buffer@4.2.0': - dependencies: - tslib: 2.8.1 - '@smithy/is-array-buffer@4.2.1': dependencies: tslib: 2.8.1 @@ -9027,23 +8094,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/middleware-content-length@4.2.8': - dependencies: - '@smithy/protocol-http': 5.3.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@smithy/middleware-endpoint@4.4.16': - dependencies: - '@smithy/core': 3.23.2 - '@smithy/middleware-serde': 4.2.9 - '@smithy/node-config-provider': 4.3.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-middleware': 4.2.8 - tslib: 2.8.1 - 
'@smithy/middleware-endpoint@4.4.20': dependencies: '@smithy/core': 3.23.6 @@ -9055,18 +8105,6 @@ snapshots: '@smithy/util-middleware': 4.2.10 tslib: 2.8.1 - '@smithy/middleware-retry@4.4.33': - dependencies: - '@smithy/node-config-provider': 4.3.8 - '@smithy/protocol-http': 5.3.8 - '@smithy/service-error-classification': 4.2.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-retry': 4.2.8 - '@smithy/uuid': 1.1.0 - tslib: 2.8.1 - '@smithy/middleware-retry@4.4.37': dependencies: '@smithy/node-config-provider': 4.3.10 @@ -9085,22 +8123,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/middleware-serde@4.2.9': - dependencies: - '@smithy/protocol-http': 5.3.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/middleware-stack@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/middleware-stack@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/node-config-provider@4.3.10': dependencies: '@smithy/property-provider': 4.2.10 @@ -9108,21 +8135,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/node-config-provider@4.3.8': - dependencies: - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@smithy/node-http-handler@4.4.10': - dependencies: - '@smithy/abort-controller': 4.2.8 - '@smithy/protocol-http': 5.3.8 - '@smithy/querystring-builder': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/node-http-handler@4.4.12': dependencies: '@smithy/abort-controller': 4.2.10 @@ -9136,56 +8148,26 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/property-provider@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/protocol-http@5.3.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/protocol-http@5.3.8': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/querystring-builder@4.2.10': dependencies: 
'@smithy/types': 4.13.0 '@smithy/util-uri-escape': 4.2.1 tslib: 2.8.1 - '@smithy/querystring-builder@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - '@smithy/util-uri-escape': 4.2.0 - tslib: 2.8.1 - '@smithy/querystring-parser@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/querystring-parser@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/service-error-classification@4.2.10': dependencies: '@smithy/types': 4.13.0 - '@smithy/service-error-classification@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - - '@smithy/shared-ini-file-loader@4.4.3': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/shared-ini-file-loader@4.4.5': dependencies: '@smithy/types': 4.13.0 @@ -9202,27 +8184,6 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/signature-v4@5.3.8': - dependencies: - '@smithy/is-array-buffer': 4.2.0 - '@smithy/protocol-http': 5.3.8 - '@smithy/types': 4.12.0 - '@smithy/util-hex-encoding': 4.2.0 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-uri-escape': 4.2.0 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - - '@smithy/smithy-client@4.11.5': - dependencies: - '@smithy/core': 3.23.2 - '@smithy/middleware-endpoint': 4.4.16 - '@smithy/middleware-stack': 4.2.8 - '@smithy/protocol-http': 5.3.8 - '@smithy/types': 4.12.0 - '@smithy/util-stream': 4.5.12 - tslib: 2.8.1 - '@smithy/smithy-client@4.12.0': dependencies: '@smithy/core': 3.23.6 @@ -9233,10 +8194,6 @@ snapshots: '@smithy/util-stream': 4.5.15 tslib: 2.8.1 - '@smithy/types@4.12.0': - dependencies: - tslib: 2.8.1 - '@smithy/types@4.13.0': dependencies: tslib: 2.8.1 @@ -9247,36 +8204,16 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/url-parser@4.2.8': - dependencies: - '@smithy/querystring-parser': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@smithy/util-base64@4.3.0': - dependencies: - '@smithy/util-buffer-from': 4.2.0 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - '@smithy/util-base64@4.3.1': dependencies: 
'@smithy/util-buffer-from': 4.2.1 '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/util-body-length-browser@4.2.0': - dependencies: - tslib: 2.8.1 - '@smithy/util-body-length-browser@4.2.1': dependencies: tslib: 2.8.1 - '@smithy/util-body-length-node@4.2.1': - dependencies: - tslib: 2.8.1 - '@smithy/util-body-length-node@4.2.2': dependencies: tslib: 2.8.1 @@ -9286,31 +8223,15 @@ snapshots: '@smithy/is-array-buffer': 2.2.0 tslib: 2.8.1 - '@smithy/util-buffer-from@4.2.0': - dependencies: - '@smithy/is-array-buffer': 4.2.0 - tslib: 2.8.1 - '@smithy/util-buffer-from@4.2.1': dependencies: '@smithy/is-array-buffer': 4.2.1 tslib: 2.8.1 - '@smithy/util-config-provider@4.2.0': - dependencies: - tslib: 2.8.1 - '@smithy/util-config-provider@4.2.1': dependencies: tslib: 2.8.1 - '@smithy/util-defaults-mode-browser@4.3.32': - dependencies: - '@smithy/property-provider': 4.2.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/util-defaults-mode-browser@4.3.36': dependencies: '@smithy/property-provider': 4.2.10 @@ -9318,16 +8239,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-defaults-mode-node@4.2.35': - dependencies: - '@smithy/config-resolver': 4.4.6 - '@smithy/credential-provider-imds': 4.2.8 - '@smithy/node-config-provider': 4.3.8 - '@smithy/property-provider': 4.2.8 - '@smithy/smithy-client': 4.11.5 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/util-defaults-mode-node@4.2.39': dependencies: '@smithy/config-resolver': 4.4.9 @@ -9338,22 +8249,12 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-endpoints@3.2.8': - dependencies: - '@smithy/node-config-provider': 4.3.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/util-endpoints@3.3.1': dependencies: '@smithy/node-config-provider': 4.3.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-hex-encoding@4.2.0': - dependencies: - tslib: 2.8.1 - '@smithy/util-hex-encoding@4.2.1': dependencies: tslib: 2.8.1 @@ -9363,34 +8264,12 @@ snapshots: 
'@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-middleware@4.2.8': - dependencies: - '@smithy/types': 4.12.0 - tslib: 2.8.1 - '@smithy/util-retry@4.2.10': dependencies: '@smithy/service-error-classification': 4.2.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-retry@4.2.8': - dependencies: - '@smithy/service-error-classification': 4.2.8 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - - '@smithy/util-stream@4.5.12': - dependencies: - '@smithy/fetch-http-handler': 5.3.9 - '@smithy/node-http-handler': 4.4.10 - '@smithy/types': 4.12.0 - '@smithy/util-base64': 4.3.0 - '@smithy/util-buffer-from': 4.2.0 - '@smithy/util-hex-encoding': 4.2.0 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - '@smithy/util-stream@4.5.15': dependencies: '@smithy/fetch-http-handler': 5.3.11 @@ -9402,10 +8281,6 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/util-uri-escape@4.2.0': - dependencies: - tslib: 2.8.1 - '@smithy/util-uri-escape@4.2.1': dependencies: tslib: 2.8.1 @@ -9415,20 +8290,11 @@ snapshots: '@smithy/util-buffer-from': 2.2.0 tslib: 2.8.1 - '@smithy/util-utf8@4.2.0': - dependencies: - '@smithy/util-buffer-from': 4.2.0 - tslib: 2.8.1 - '@smithy/util-utf8@4.2.1': dependencies: '@smithy/util-buffer-from': 4.2.1 tslib: 2.8.1 - '@smithy/uuid@1.1.0': - dependencies: - tslib: 2.8.1 - '@smithy/uuid@1.1.1': dependencies: tslib: 2.8.1 @@ -9582,7 +8448,7 @@ snapshots: '@types/body-parser@1.19.6': dependencies: '@types/connect': 3.4.38 - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/bun@1.3.9': dependencies: @@ -9602,7 +8468,7 @@ snapshots: '@types/connect@3.4.38': dependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/deep-eql@4.0.2': {} @@ -9610,14 +8476,14 @@ snapshots: '@types/express-serve-static-core@4.19.8': dependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 '@types/express-serve-static-core@5.1.1': dependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 
'@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 @@ -9642,7 +8508,7 @@ snapshots: '@types/jsonwebtoken@9.0.10': dependencies: '@types/ms': 2.1.0 - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/linkify-it@5.0.0': {} @@ -9663,15 +8529,15 @@ snapshots: '@types/node@10.17.60': {} - '@types/node@20.19.33': + '@types/node@20.19.34': dependencies: undici-types: 6.21.0 - '@types/node@24.10.13': + '@types/node@24.10.14': dependencies: undici-types: 7.16.0 - '@types/node@25.3.0': + '@types/node@25.3.1': dependencies: undici-types: 7.18.2 @@ -9684,7 +8550,7 @@ snapshots: '@types/request@2.48.13': dependencies: '@types/caseless': 0.12.5 - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/tough-cookie': 4.0.5 form-data: 2.5.4 @@ -9693,22 +8559,22 @@ snapshots: '@types/send@0.17.6': dependencies: '@types/mime': 1.3.5 - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/send@1.2.1': dependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/serve-static@1.15.10': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/send': 0.17.6 '@types/serve-static@2.2.0': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 25.3.0 + '@types/node': 25.3.1 '@types/tough-cookie@4.0.5': {} @@ -9716,38 +8582,43 @@ snapshots: '@types/ws@8.18.1': dependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260224.1': + '@types/yauzl@2.10.3': + dependencies: + '@types/node': 25.3.1 optional: true - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260224.1': + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260225.1': optional: true - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260224.1': + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260225.1': optional: true - '@typescript/native-preview-linux-arm@7.0.0-dev.20260224.1': + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260225.1': optional: true - 
'@typescript/native-preview-linux-x64@7.0.0-dev.20260224.1': + '@typescript/native-preview-linux-arm@7.0.0-dev.20260225.1': optional: true - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260224.1': + '@typescript/native-preview-linux-x64@7.0.0-dev.20260225.1': optional: true - '@typescript/native-preview-win32-x64@7.0.0-dev.20260224.1': + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260225.1': optional: true - '@typescript/native-preview@7.0.0-dev.20260224.1': + '@typescript/native-preview-win32-x64@7.0.0-dev.20260225.1': + optional: true + + '@typescript/native-preview@7.0.0-dev.20260225.1': optionalDependencies: - '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260224.1 - '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260224.1 - '@typescript/native-preview-linux-arm': 7.0.0-dev.20260224.1 - '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260224.1 - '@typescript/native-preview-linux-x64': 7.0.0-dev.20260224.1 - '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260224.1 - '@typescript/native-preview-win32-x64': 7.0.0-dev.20260224.1 + '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260225.1 + '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260225.1 + '@typescript/native-preview-linux-arm': 7.0.0-dev.20260225.1 + '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260225.1 + '@typescript/native-preview-linux-x64': 7.0.0-dev.20260225.1 + '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260225.1 + '@typescript/native-preview-win32-x64': 7.0.0-dev.20260225.1 '@typespec/ts-http-runtime@0.3.3': dependencies: @@ -9784,29 +8655,29 @@ snapshots: - '@cypress/request' - supports-color - '@vitest/browser-playwright@4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser-playwright@4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': dependencies: - 
'@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) playwright: 1.58.2 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.1)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - bufferutil - msw - utf-8-validate - vite - '@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': dependencies: - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) '@vitest/utils': 4.0.18 magic-string: 0.30.21 pixelmatch: 7.1.0 pngjs: 7.0.0 sirv: 3.0.2 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.1)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) ws: 8.19.0 transitivePeerDependencies: - bufferutil @@ -9814,7 +8685,7 @@ snapshots: - utf-8-validate - vite - 
'@vitest/coverage-v8@4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18)': + '@vitest/coverage-v8@4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18)': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.18 @@ -9826,9 +8697,9 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.1)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) optionalDependencies: - '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) '@vitest/expect@4.0.18': dependencies: @@ -9839,13 +8710,13 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.0.3 - '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.18 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: - vite: 7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) '@vitest/pretty-format@4.0.18': dependencies: @@ -9940,6 +8811,16 @@ snapshots: acorn@8.16.0: {} + acpx@0.1.13(zod@4.3.6): + dependencies: + '@agentclientprotocol/sdk': 0.14.1(zod@4.3.6) + commander: 13.1.0 + skillflag: 0.1.4 + transitivePeerDependencies: + - bare-abort-controller + - 
react-native-b4a + - zod + agent-base@6.0.2: dependencies: debug: 4.4.3 @@ -9983,7 +8864,7 @@ snapshots: '@swc/helpers': 0.5.19 '@types/command-line-args': 5.2.3 '@types/command-line-usage': 5.0.4 - '@types/node': 20.19.33 + '@types/node': 20.19.34 command-line-args: 5.2.1 command-line-usage: 7.0.3 flatbuffers: 24.12.23 @@ -10071,14 +8952,6 @@ snapshots: aws4@1.13.2: {} - axios@1.13.5: - dependencies: - follow-redirects: 1.15.11 - form-data: 2.5.4 - proxy-from-env: 1.1.0 - transitivePeerDependencies: - - debug - axios@1.13.5(debug@4.4.3): dependencies: follow-redirects: 1.15.11(debug@4.4.3) @@ -10087,8 +8960,12 @@ snapshots: transitivePeerDependencies: - debug + b4a@1.8.0: {} + balanced-match@4.0.4: {} + bare-events@2.8.2: {} + base64-js@1.5.1: {} basic-auth@2.0.1: @@ -10150,13 +9027,15 @@ snapshots: dependencies: balanced-match: 4.0.4 + buffer-crc32@0.2.13: {} + buffer-equal-constant-time@1.0.1: {} buffer-from@1.1.2: {} bun-types@1.3.9: dependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 optional: true bytes@3.1.2: {} @@ -10283,6 +9162,8 @@ snapshots: commander@10.0.1: {} + commander@13.1.0: {} + commander@14.0.3: {} console-control-strings@1.1.0: {} @@ -10426,6 +9307,10 @@ snapshots: encodeurl@2.0.0: {} + end-of-stream@1.4.5: + dependencies: + once: 1.4.0 + entities@4.5.0: {} entities@7.0.1: {} @@ -10510,6 +9395,12 @@ snapshots: eventemitter3@5.0.4: {} + events-universal@1.0.1: + dependencies: + bare-events: 2.8.2 + transitivePeerDependencies: + - bare-abort-controller + expect-type@1.3.0: {} express@4.22.1: @@ -10583,18 +9474,34 @@ snapshots: extend@3.0.2: {} + extract-zip@2.0.1: + dependencies: + debug: 4.4.3 + get-stream: 5.2.0 + yauzl: 2.10.0 + optionalDependencies: + '@types/yauzl': 2.10.3 + transitivePeerDependencies: + - supports-color + extsprintf@1.3.0: {} fast-content-type-parse@3.0.0: {} fast-deep-equal@3.1.3: {} + fast-fifo@1.3.2: {} + fast-uri@3.1.0: {} fast-xml-parser@5.3.6: dependencies: strnum: 2.1.2 + fd-slicer@1.1.0: + dependencies: + 
pend: 1.2.0 + fdir@6.5.0(picomatch@4.0.3): optionalDependencies: picomatch: 4.0.3 @@ -10648,8 +9555,6 @@ snapshots: flatbuffers@24.12.23: {} - follow-redirects@1.15.11: {} - follow-redirects@1.15.11(debug@4.4.3): optionalDependencies: debug: 4.4.3 @@ -10740,8 +9645,6 @@ snapshots: get-caller-file@2.0.5: {} - get-east-asian-width@1.4.0: {} - get-east-asian-width@1.5.0: {} get-intrinsic@1.3.0: @@ -10762,6 +9665,10 @@ snapshots: dunder-proto: 1.0.1 es-object-atoms: 1.1.1 + get-stream@5.2.0: + dependencies: + pump: 3.0.3 + get-tsconfig@4.13.6: dependencies: resolve-pkg-maps: 1.0.0 @@ -10784,14 +9691,14 @@ snapshots: dependencies: foreground-child: 3.3.1 jackspeak: 3.4.3 - minimatch: 10.2.1 + minimatch: 10.2.4 minipass: 7.1.3 package-json-from-dist: 1.0.1 path-scurry: 1.11.1 glob@13.0.6: dependencies: - minimatch: 10.2.1 + minimatch: 10.2.4 minipass: 7.1.3 path-scurry: 2.0.2 @@ -10800,7 +9707,7 @@ snapshots: fs.realpath: 1.0.0 inflight: 1.0.6 inherits: 2.0.4 - minimatch: 10.2.1 + minimatch: 10.2.4 once: 1.4.0 path-is-absolute: 1.0.1 optional: true @@ -10822,7 +9729,7 @@ snapshots: graceful-fs@4.2.11: {} - grammy@1.40.0: + grammy@1.40.1: dependencies: '@grammyjs/types': 3.24.0 abort-controller: 3.0.0 @@ -10973,7 +9880,7 @@ snapshots: ipaddr.js@2.3.0: {} - ipull@3.9.3: + ipull@3.9.5: dependencies: '@tinyhttp/content-disposition': 2.2.4 async-retry: 1.3.3 @@ -10993,7 +9900,7 @@ snapshots: sleep-promise: 9.1.0 slice-ansi: 7.1.2 stdout-update: 4.0.1 - strip-ansi: 7.1.2 + strip-ansi: 7.2.0 optionalDependencies: '@reflink/reflink': 0.1.19 @@ -11016,7 +9923,7 @@ snapshots: is-fullwidth-code-point@5.1.0: dependencies: - get-east-asian-width: 1.4.0 + get-east-asian-width: 1.5.0 is-interactive@2.0.0: {} @@ -11088,6 +9995,8 @@ snapshots: json-stringify-safe@5.0.1: {} + json-with-bigint@3.5.3: {} + json5@2.2.3: {} jsonfile@6.2.0: @@ -11160,7 +10069,7 @@ snapshots: lifecycle-utils@2.1.0: {} - lifecycle-utils@3.1.0: {} + lifecycle-utils@3.1.1: {} lightningcss-android-arm64@1.30.2: 
optional: true @@ -11379,7 +10288,7 @@ snapshots: minimalistic-assert@1.0.1: {} - minimatch@10.2.1: + minimatch@10.2.4: dependencies: brace-expansion: 5.0.3 @@ -11488,9 +10397,9 @@ snapshots: filenamify: 6.0.0 fs-extra: 11.3.3 ignore: 7.0.5 - ipull: 3.9.3 + ipull: 3.9.5 is-unicode-supported: 2.1.0 - lifecycle-utils: 3.1.0 + lifecycle-utils: 3.1.1 log-symbols: 7.0.1 nanoid: 5.1.6 node-addon-api: 8.5.0 @@ -11499,10 +10408,10 @@ snapshots: pretty-ms: 9.3.0 proper-lockfile: 4.1.2 semver: 7.7.4 - simple-git: 3.31.1 + simple-git: 3.32.3 slice-ansi: 7.1.2 stdout-update: 4.0.1 - strip-ansi: 7.1.2 + strip-ansi: 7.2.0 validate-npm-package-name: 6.0.2 which: 5.0.0 yargs: 17.7.2 @@ -11584,7 +10493,7 @@ snapshots: '@octokit/plugin-paginate-graphql': 6.0.0(@octokit/core@7.0.6) '@octokit/plugin-paginate-rest': 14.0.0(@octokit/core@7.0.6) '@octokit/plugin-rest-endpoint-methods': 17.0.0(@octokit/core@7.0.6) - '@octokit/plugin-retry': 8.0.3(@octokit/core@7.0.6) + '@octokit/plugin-retry': 8.1.0(@octokit/core@7.0.6) '@octokit/plugin-throttling': 11.0.3(@octokit/core@7.0.6) '@octokit/request-error': 7.1.0 '@octokit/types': 16.0.0 @@ -11628,28 +10537,29 @@ snapshots: ws: 8.19.0 zod: 4.3.6 - openclaw@2026.2.23(@napi-rs/canvas@0.1.94)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.11.10)(node-llama-cpp@3.15.1(typescript@5.9.3)): + openclaw@2026.2.24(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.11.10)(node-llama-cpp@3.15.1(typescript@5.9.3)): dependencies: '@agentclientprotocol/sdk': 0.14.1(zod@4.3.6) - '@aws-sdk/client-bedrock': 3.995.0 - '@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.11.10)(opusscript@0.0.8) + '@aws-sdk/client-bedrock': 3.998.0 + '@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.11.10)(opusscript@0.1.1) '@clack/prompts': 1.0.1 - '@discordjs/voice': 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.0.8) - '@grammyjs/runner': 2.0.3(grammy@1.40.0) - '@grammyjs/transformer-throttler': 
1.2.1(grammy@1.40.0) + '@discordjs/voice': 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1) + '@grammyjs/runner': 2.0.3(grammy@1.40.1) + '@grammyjs/transformer-throttler': 1.2.1(grammy@1.40.1) '@homebridge/ciao': 1.3.5 '@larksuiteoapi/node-sdk': 1.59.0 '@line/bot-sdk': 10.6.0 '@lydell/node-pty': 1.2.0-beta.3 - '@mariozechner/pi-agent-core': 0.54.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.54.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-coding-agent': 0.54.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.54.1 + '@mariozechner/pi-agent-core': 0.55.0(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.55.0(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-coding-agent': 0.55.0(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.55.0 '@mozilla/readability': 0.6.0 - '@napi-rs/canvas': 0.1.94 + '@napi-rs/canvas': 0.1.95 '@sinclair/typebox': 0.34.48 '@slack/bolt': 4.6.0(@types/express@5.0.6) '@slack/web-api': 7.14.1 + '@snazzah/davey': 0.1.9 '@whiskeysockets/baileys': 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5) ajv: 8.18.0 chalk: 5.6.2 @@ -11661,7 +10571,7 @@ snapshots: dotenv: 17.3.1 express: 5.2.1 file-type: 21.3.0 - grammy: 1.40.0 + grammy: 1.40.1 https-proxy-agent: 7.0.6 ipaddr.js: 2.3.0 jiti: 2.6.1 @@ -11672,7 +10582,7 @@ snapshots: markdown-it: 14.1.1 node-edge-tts: 1.2.10 node-llama-cpp: 3.15.1(typescript@5.9.3) - opusscript: 0.0.8 + opusscript: 0.1.1 osc-progress: 0.3.0 pdfjs-dist: 5.4.624 playwright-core: 1.58.2 @@ -11709,8 +10619,6 @@ snapshots: '@wasm-audio-decoders/common': 9.0.7 optional: true - opusscript@0.0.8: {} - opusscript@0.1.1: {} ora@8.2.0: @@ -11723,7 +10631,7 @@ snapshots: log-symbols: 6.0.0 stdin-discarder: 0.2.2 string-width: 7.2.0 - strip-ansi: 7.1.2 + strip-ansi: 7.2.0 osc-progress@0.3.0: {} @@ -11874,11 +10782,13 @@ snapshots: pdfjs-dist@5.4.624: optionalDependencies: - '@napi-rs/canvas': 0.1.94 + '@napi-rs/canvas': 0.1.95 node-readable-to-web-readable-stream: 0.4.2 peberminta@0.9.0: {} + pend@1.2.0: {} + performance-now@2.1.0: {} 
picocolors@1.1.1: {} @@ -11939,11 +10849,6 @@ snapshots: dependencies: parse-ms: 4.0.0 - prism-media@1.3.5(@discordjs/opus@0.10.0)(opusscript@0.0.8): - optionalDependencies: - '@discordjs/opus': 0.10.0 - opusscript: 0.0.8 - prism-media@1.3.5(@discordjs/opus@0.10.0)(opusscript@0.1.1): optionalDependencies: '@discordjs/opus': 0.10.0 @@ -11987,7 +10892,7 @@ snapshots: '@protobufjs/path': 1.1.2 '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 - '@types/node': 25.3.0 + '@types/node': 25.3.1 long: 5.3.2 protobufjs@8.0.0: @@ -12002,7 +10907,7 @@ snapshots: '@protobufjs/path': 1.1.2 '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 - '@types/node': 25.3.0 + '@types/node': 25.3.1 long: 5.3.2 proxy-addr@2.0.7: @@ -12029,6 +10934,11 @@ snapshots: dependencies: punycode: 2.3.1 + pump@3.0.3: + dependencies: + end-of-stream: 1.4.5 + once: 1.4.0 + punycode.js@2.3.1: {} punycode@2.3.1: {} @@ -12137,7 +11047,7 @@ snapshots: dependencies: glob: 10.5.0 - rolldown-plugin-dts@0.22.1(@typescript/native-preview@7.0.0-dev.20260224.1)(rolldown@1.0.0-rc.3)(typescript@5.9.3): + rolldown-plugin-dts@0.22.1(@typescript/native-preview@7.0.0-dev.20260225.1)(rolldown@1.0.0-rc.3)(typescript@5.9.3): dependencies: '@babel/generator': 8.0.0-rc.1 '@babel/helper-validator-identifier': 8.0.0-rc.1 @@ -12150,7 +11060,7 @@ snapshots: obug: 2.1.1 rolldown: 1.0.0-rc.3 optionalDependencies: - '@typescript/native-preview': 7.0.0-dev.20260224.1 + '@typescript/native-preview': 7.0.0-dev.20260225.1 typescript: 5.9.3 transitivePeerDependencies: - oxc-resolver @@ -12376,7 +11286,7 @@ snapshots: dependencies: signal-polyfill: 0.2.2 - simple-git@3.31.1: + simple-git@3.32.3: dependencies: '@kwsites/file-exists': 1.1.1 '@kwsites/promise-deferred': 1.1.1 @@ -12395,6 +11305,14 @@ snapshots: sisteransi@1.0.5: {} + skillflag@0.1.4: + dependencies: + '@clack/prompts': 1.0.1 + tar-stream: 3.1.7 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + sleep-promise@9.1.0: {} slice-ansi@7.1.2: @@ 
-12480,7 +11398,7 @@ snapshots: ansi-escapes: 6.2.1 ansi-styles: 6.2.3 string-width: 7.2.0 - strip-ansi: 7.1.2 + strip-ansi: 7.2.0 stealthy-require@1.1.1: {} @@ -12490,6 +11408,15 @@ snapshots: steno@4.0.2: {} + streamx@2.23.0: + dependencies: + events-universal: 1.0.1 + fast-fifo: 1.3.2 + text-decoder: 1.2.7 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + string-width@4.2.3: dependencies: emoji-regex: 8.0.0 @@ -12505,8 +11432,8 @@ snapshots: string-width@7.2.0: dependencies: emoji-regex: 10.6.0 - get-east-asian-width: 1.4.0 - strip-ansi: 7.1.2 + get-east-asian-width: 1.5.0 + strip-ansi: 7.2.0 string_decoder@1.1.1: dependencies: @@ -12524,6 +11451,10 @@ snapshots: dependencies: ansi-regex: 6.2.2 + strip-ansi@7.2.0: + dependencies: + ansi-regex: 6.2.2 + strip-json-comments@2.0.1: {} strnum@2.1.2: {} @@ -12541,6 +11472,15 @@ snapshots: array-back: 6.2.2 wordwrapjs: 5.1.1 + tar-stream@3.1.7: + dependencies: + b4a: 1.8.0 + fast-fifo: 1.3.2 + streamx: 2.23.0 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + tar@7.5.9: dependencies: '@isaacs/fs-minipass': 4.0.1 @@ -12549,6 +11489,12 @@ snapshots: minizlib: 3.1.0 yallist: 5.0.0 + text-decoder@1.2.7: + dependencies: + b4a: 1.8.0 + transitivePeerDependencies: + - react-native-b4a + thenify-all@1.6.0: dependencies: thenify: 3.3.1 @@ -12599,7 +11545,7 @@ snapshots: ts-algebra@2.0.0: {} - tsdown@0.20.3(@typescript/native-preview@7.0.0-dev.20260224.1)(typescript@5.9.3): + tsdown@0.20.3(@typescript/native-preview@7.0.0-dev.20260225.1)(typescript@5.9.3): dependencies: ansis: 4.2.0 cac: 6.7.14 @@ -12610,7 +11556,7 @@ snapshots: obug: 2.1.1 picomatch: 4.0.3 rolldown: 1.0.0-rc.3 - rolldown-plugin-dts: 0.22.1(@typescript/native-preview@7.0.0-dev.20260224.1)(rolldown@1.0.0-rc.3)(typescript@5.9.3) + rolldown-plugin-dts: 0.22.1(@typescript/native-preview@7.0.0-dev.20260225.1)(rolldown@1.0.0-rc.3)(typescript@5.9.3) semver: 7.7.4 tinyexec: 1.0.2 tinyglobby: 0.2.15 @@ -12720,7 
+11666,7 @@ snapshots: core-util-is: 1.0.2 extsprintf: 1.3.0 - vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): + vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): dependencies: esbuild: 0.27.3 fdir: 6.5.0(picomatch@4.0.3) @@ -12729,17 +11675,17 @@ snapshots: rollup: 4.59.0 tinyglobby: 0.2.15 optionalDependencies: - '@types/node': 25.3.0 + '@types/node': 25.3.1 fsevents: 2.3.3 jiti: 2.6.1 lightningcss: 1.30.2 tsx: 4.21.0 yaml: 2.8.2 - vitest@4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.1)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): dependencies: '@vitest/expect': 4.0.18 - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) '@vitest/pretty-format': 4.0.18 '@vitest/runner': 4.0.18 '@vitest/snapshot': 4.0.18 @@ -12756,12 +11702,12 @@ snapshots: tinyexec: 1.0.2 tinyglobby: 0.2.15 tinyrainbow: 3.0.3 - vite: 7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: '@opentelemetry/api': 1.9.0 - '@types/node': 25.3.0 - '@vitest/browser-playwright': 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@types/node': 25.3.1 + '@vitest/browser-playwright': 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) transitivePeerDependencies: - jiti - less @@ -12853,6 +11799,11 @@ snapshots: y18n: 5.0.8 
yargs-parser: 21.1.1 + yauzl@2.10.0: + dependencies: + buffer-crc32: 0.2.13 + fd-slicer: 1.1.0 + yoctocolors@2.1.2: {} zod-to-json-schema@3.25.1(zod@3.25.76): diff --git a/scripts/check-channel-agnostic-boundaries.mjs b/scripts/check-channel-agnostic-boundaries.mjs new file mode 100644 index 00000000000..3b63911e86d --- /dev/null +++ b/scripts/check-channel-agnostic-boundaries.mjs @@ -0,0 +1,405 @@ +#!/usr/bin/env node + +import { promises as fs } from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; +import ts from "typescript"; + +const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); + +const acpCoreProtectedSources = [ + path.join(repoRoot, "src", "acp"), + path.join(repoRoot, "src", "agents", "acp-spawn.ts"), + path.join(repoRoot, "src", "auto-reply", "reply", "commands-acp"), + path.join(repoRoot, "src", "infra", "outbound", "conversation-id.ts"), +]; + +const channelCoreProtectedSources = [ + path.join(repoRoot, "src", "channels", "thread-bindings-policy.ts"), + path.join(repoRoot, "src", "channels", "thread-bindings-messages.ts"), +]; +const acpUserFacingTextSources = [ + path.join(repoRoot, "src", "auto-reply", "reply", "commands-acp"), +]; +const systemMarkLiteralGuardSources = [ + path.join(repoRoot, "src", "auto-reply", "reply", "commands-acp"), + path.join(repoRoot, "src", "auto-reply", "reply", "dispatch-acp.ts"), + path.join(repoRoot, "src", "auto-reply", "reply", "directive-handling.shared.ts"), + path.join(repoRoot, "src", "channels", "thread-bindings-messages.ts"), +]; + +const channelIds = [ + "bluebubbles", + "discord", + "googlechat", + "imessage", + "irc", + "line", + "matrix", + "msteams", + "signal", + "slack", + "telegram", + "web", + "whatsapp", + "zalo", + "zalouser", +]; + +const channelIdSet = new Set(channelIds); +const channelSegmentRe = new RegExp(`(^|[._/-])(?:${channelIds.join("|")})([._/-]|$)`); +const comparisonOperators = new Set([ + 
ts.SyntaxKind.EqualsEqualsEqualsToken, + ts.SyntaxKind.ExclamationEqualsEqualsToken, + ts.SyntaxKind.EqualsEqualsToken, + ts.SyntaxKind.ExclamationEqualsToken, +]); + +const allowedViolations = new Set([]); + +function isTestLikeFile(filePath) { + return ( + filePath.endsWith(".test.ts") || + filePath.endsWith(".test-utils.ts") || + filePath.endsWith(".test-harness.ts") || + filePath.endsWith(".e2e-harness.ts") + ); +} + +async function collectTypeScriptFiles(targetPath) { + const stat = await fs.stat(targetPath); + if (stat.isFile()) { + if (!targetPath.endsWith(".ts") || isTestLikeFile(targetPath)) { + return []; + } + return [targetPath]; + } + + const entries = await fs.readdir(targetPath, { withFileTypes: true }); + const files = []; + for (const entry of entries) { + const entryPath = path.join(targetPath, entry.name); + if (entry.isDirectory()) { + files.push(...(await collectTypeScriptFiles(entryPath))); + continue; + } + if (!entry.isFile()) { + continue; + } + if (!entryPath.endsWith(".ts")) { + continue; + } + if (isTestLikeFile(entryPath)) { + continue; + } + files.push(entryPath); + } + return files; +} + +function toLine(sourceFile, node) { + return sourceFile.getLineAndCharacterOfPosition(node.getStart(sourceFile)).line + 1; +} + +function isChannelsPropertyAccess(node) { + if (ts.isPropertyAccessExpression(node)) { + return node.name.text === "channels"; + } + if (ts.isElementAccessExpression(node) && ts.isStringLiteral(node.argumentExpression)) { + return node.argumentExpression.text === "channels"; + } + return false; +} + +function readStringLiteral(node) { + if (ts.isStringLiteral(node)) { + return node.text; + } + if (ts.isNoSubstitutionTemplateLiteral(node)) { + return node.text; + } + return null; +} + +function isChannelLiteralNode(node) { + const text = readStringLiteral(node); + return text ? 
channelIdSet.has(text) : false; +} + +function matchesChannelModuleSpecifier(specifier) { + return channelSegmentRe.test(specifier.replaceAll("\\", "/")); +} + +function getPropertyNameText(name) { + if (ts.isIdentifier(name) || ts.isStringLiteral(name) || ts.isNumericLiteral(name)) { + return name.text; + } + return null; +} + +const userFacingChannelNameRe = + /\b(?:discord|telegram|slack|signal|imessage|whatsapp|google\s*chat|irc|line|zalo|matrix|msteams|bluebubbles)\b/i; +const systemMarkLiteral = "⚙️"; + +function isModuleSpecifierStringNode(node) { + const parent = node.parent; + if (ts.isImportDeclaration(parent) || ts.isExportDeclaration(parent)) { + return true; + } + return ( + ts.isCallExpression(parent) && + parent.expression.kind === ts.SyntaxKind.ImportKeyword && + parent.arguments[0] === node + ); +} + +export function findChannelAgnosticBoundaryViolations( + content, + fileName = "source.ts", + options = {}, +) { + const checkModuleSpecifiers = options.checkModuleSpecifiers ?? true; + const checkConfigPaths = options.checkConfigPaths ?? true; + const checkChannelComparisons = options.checkChannelComparisons ?? true; + const checkChannelAssignments = options.checkChannelAssignments ?? true; + const moduleSpecifierMatcher = options.moduleSpecifierMatcher ?? 
matchesChannelModuleSpecifier; + + const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, true); + const violations = []; + + const visit = (node) => { + if ( + checkModuleSpecifiers && + ts.isImportDeclaration(node) && + ts.isStringLiteral(node.moduleSpecifier) + ) { + const specifier = node.moduleSpecifier.text; + if (moduleSpecifierMatcher(specifier)) { + violations.push({ + line: toLine(sourceFile, node.moduleSpecifier), + reason: `imports channel module "${specifier}"`, + }); + } + } + + if ( + checkModuleSpecifiers && + ts.isExportDeclaration(node) && + node.moduleSpecifier && + ts.isStringLiteral(node.moduleSpecifier) + ) { + const specifier = node.moduleSpecifier.text; + if (moduleSpecifierMatcher(specifier)) { + violations.push({ + line: toLine(sourceFile, node.moduleSpecifier), + reason: `re-exports channel module "${specifier}"`, + }); + } + } + + if ( + checkModuleSpecifiers && + ts.isCallExpression(node) && + node.expression.kind === ts.SyntaxKind.ImportKeyword && + node.arguments.length > 0 && + ts.isStringLiteral(node.arguments[0]) + ) { + const specifier = node.arguments[0].text; + if (moduleSpecifierMatcher(specifier)) { + violations.push({ + line: toLine(sourceFile, node.arguments[0]), + reason: `dynamically imports channel module "${specifier}"`, + }); + } + } + + if ( + checkConfigPaths && + ts.isPropertyAccessExpression(node) && + channelIdSet.has(node.name.text) + ) { + if (isChannelsPropertyAccess(node.expression)) { + violations.push({ + line: toLine(sourceFile, node.name), + reason: `references config path "channels.${node.name.text}"`, + }); + } + } + + if ( + checkConfigPaths && + ts.isElementAccessExpression(node) && + ts.isStringLiteral(node.argumentExpression) && + channelIdSet.has(node.argumentExpression.text) + ) { + if (isChannelsPropertyAccess(node.expression)) { + violations.push({ + line: toLine(sourceFile, node.argumentExpression), + reason: `references config path 
"channels[${JSON.stringify(node.argumentExpression.text)}]"`, + }); + } + } + + if ( + checkChannelComparisons && + ts.isBinaryExpression(node) && + comparisonOperators.has(node.operatorToken.kind) + ) { + if (isChannelLiteralNode(node.left) || isChannelLiteralNode(node.right)) { + const leftText = node.left.getText(sourceFile); + const rightText = node.right.getText(sourceFile); + violations.push({ + line: toLine(sourceFile, node.operatorToken), + reason: `compares with channel id literal (${leftText} ${node.operatorToken.getText(sourceFile)} ${rightText})`, + }); + } + } + + if (checkChannelAssignments && ts.isPropertyAssignment(node)) { + const propName = getPropertyNameText(node.name); + if (propName === "channel" && isChannelLiteralNode(node.initializer)) { + violations.push({ + line: toLine(sourceFile, node.initializer), + reason: `assigns channel id literal to "channel" (${node.initializer.getText(sourceFile)})`, + }); + } + } + + ts.forEachChild(node, visit); + }; + + visit(sourceFile); + return violations; +} + +export function findChannelCoreReverseDependencyViolations(content, fileName = "source.ts") { + return findChannelAgnosticBoundaryViolations(content, fileName, { + checkModuleSpecifiers: true, + checkConfigPaths: false, + checkChannelComparisons: false, + checkChannelAssignments: false, + moduleSpecifierMatcher: matchesChannelModuleSpecifier, + }); +} + +export function findAcpUserFacingChannelNameViolations(content, fileName = "source.ts") { + const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, true); + const violations = []; + + const visit = (node) => { + const text = readStringLiteral(node); + if (text && userFacingChannelNameRe.test(text) && !isModuleSpecifierStringNode(node)) { + violations.push({ + line: toLine(sourceFile, node), + reason: `user-facing text references channel name (${JSON.stringify(text)})`, + }); + } + ts.forEachChild(node, visit); + }; + + visit(sourceFile); + return violations; +} + +export 
function findSystemMarkLiteralViolations(content, fileName = "source.ts") { + const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, true); + const violations = []; + + const visit = (node) => { + const text = readStringLiteral(node); + if (text && text.includes(systemMarkLiteral) && !isModuleSpecifierStringNode(node)) { + violations.push({ + line: toLine(sourceFile, node), + reason: `hardcoded system mark literal (${JSON.stringify(text)})`, + }); + } + ts.forEachChild(node, visit); + }; + + visit(sourceFile); + return violations; +} + +const boundaryRuleSets = [ + { + id: "acp-core", + sources: acpCoreProtectedSources, + scan: (content, fileName) => findChannelAgnosticBoundaryViolations(content, fileName), + }, + { + id: "channel-core-reverse-deps", + sources: channelCoreProtectedSources, + scan: (content, fileName) => findChannelCoreReverseDependencyViolations(content, fileName), + }, + { + id: "acp-user-facing-text", + sources: acpUserFacingTextSources, + scan: (content, fileName) => findAcpUserFacingChannelNameViolations(content, fileName), + }, + { + id: "system-mark-literal-usage", + sources: systemMarkLiteralGuardSources, + scan: (content, fileName) => findSystemMarkLiteralViolations(content, fileName), + }, +]; + +export async function main() { + const violations = []; + for (const ruleSet of boundaryRuleSets) { + const files = ( + await Promise.all( + ruleSet.sources.map(async (sourcePath) => { + try { + return await collectTypeScriptFiles(sourcePath); + } catch (error) { + if (error && typeof error === "object" && "code" in error && error.code === "ENOENT") { + return []; + } + throw error; + } + }), + ) + ).flat(); + for (const filePath of files) { + const relativeFile = path.relative(repoRoot, filePath); + if ( + allowedViolations.has(`${ruleSet.id}:${relativeFile}`) || + allowedViolations.has(relativeFile) + ) { + continue; + } + const content = await fs.readFile(filePath, "utf8"); + for (const violation of 
ruleSet.scan(content, relativeFile)) { + violations.push(`${ruleSet.id} ${relativeFile}:${violation.line}: ${violation.reason}`); + } + } + } + + if (violations.length === 0) { + return; + } + + console.error("Found channel-specific references in channel-agnostic sources:"); + for (const violation of violations) { + console.error(`- ${violation}`); + } + console.error( + "Move channel-specific logic to channel adapters or add a justified allowlist entry.", + ); + process.exit(1); +} + +const isDirectExecution = (() => { + const entry = process.argv[1]; + if (!entry) { + return false; + } + return path.resolve(entry) === fileURLToPath(import.meta.url); +})(); + +if (isDirectExecution) { + main().catch((error) => { + console.error(error); + process.exit(1); + }); +} diff --git a/scripts/check-no-pairing-store-group-auth.mjs b/scripts/check-no-pairing-store-group-auth.mjs new file mode 100644 index 00000000000..316411c460e --- /dev/null +++ b/scripts/check-no-pairing-store-group-auth.mjs @@ -0,0 +1,239 @@ +#!/usr/bin/env node + +import { promises as fs } from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; +import ts from "typescript"; + +const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); +const sourceRoots = [path.join(repoRoot, "src"), path.join(repoRoot, "extensions")]; + +const allowedFiles = new Set([ + path.join(repoRoot, "src", "security", "dm-policy-shared.ts"), + path.join(repoRoot, "src", "channels", "allow-from.ts"), + // Config migration/audit logic may intentionally reference store + group fields. 
+ path.join(repoRoot, "src", "security", "fix.ts"), + path.join(repoRoot, "src", "security", "audit-channel.ts"), +]); + +const storeIdentifierRe = /^(?:storeAllowFrom|storedAllowFrom|storeAllowList)$/i; +const groupNameRe = + /(?:groupAllowFrom|effectiveGroupAllowFrom|groupAllowed|groupAllow|groupAuth|groupSender)/i; +const storeSourceCallNames = new Set([ + "readChannelAllowFromStore", + "readChannelAllowFromStoreSync", + "readStoreAllowFromForDmPolicy", +]); +const allowedResolverCallNames = new Set([ + "resolveEffectiveAllowFromLists", + "resolveDmGroupAccessWithLists", + "resolveMattermostEffectiveAllowFromLists", + "resolveIrcEffectiveAllowlists", +]); + +function isTestLikeFile(filePath) { + return ( + filePath.endsWith(".test.ts") || + filePath.endsWith(".test-utils.ts") || + filePath.endsWith(".test-harness.ts") || + filePath.endsWith(".e2e-harness.ts") + ); +} + +async function collectTypeScriptFiles(dir) { + const entries = await fs.readdir(dir, { withFileTypes: true }); + const out = []; + for (const entry of entries) { + const entryPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + out.push(...(await collectTypeScriptFiles(entryPath))); + continue; + } + if (!entry.isFile() || !entryPath.endsWith(".ts") || isTestLikeFile(entryPath)) { + continue; + } + out.push(entryPath); + } + return out; +} + +function toLine(sourceFile, node) { + return sourceFile.getLineAndCharacterOfPosition(node.getStart(sourceFile)).line + 1; +} + +function getPropertyNameText(name) { + if (ts.isIdentifier(name) || ts.isStringLiteral(name) || ts.isNumericLiteral(name)) { + return name.text; + } + return null; +} + +function getDeclarationNameText(name) { + if (ts.isIdentifier(name)) { + return name.text; + } + if (ts.isObjectBindingPattern(name) || ts.isArrayBindingPattern(name)) { + return name.getText(); + } + return null; +} + +function containsPairingStoreSource(node) { + let found = false; + const visit = (current) => { + if (found) { + return; + } + if 
(ts.isIdentifier(current) && storeIdentifierRe.test(current.text)) { + found = true; + return; + } + if (ts.isCallExpression(current)) { + const callName = getCallName(current); + if (callName && storeSourceCallNames.has(callName)) { + found = true; + return; + } + } + ts.forEachChild(current, visit); + }; + visit(node); + return found; +} + +function getCallName(node) { + if (!ts.isCallExpression(node)) { + return null; + } + if (ts.isIdentifier(node.expression)) { + return node.expression.text; + } + if (ts.isPropertyAccessExpression(node.expression)) { + return node.expression.name.text; + } + return null; +} + +function isSuspiciousNormalizeWithStoreCall(node) { + if (!ts.isCallExpression(node)) { + return false; + } + if (!ts.isIdentifier(node.expression) || node.expression.text !== "normalizeAllowFromWithStore") { + return false; + } + const firstArg = node.arguments[0]; + if (!firstArg || !ts.isObjectLiteralExpression(firstArg)) { + return false; + } + let hasStoreProp = false; + let hasGroupAllowProp = false; + for (const property of firstArg.properties) { + if (!ts.isPropertyAssignment(property)) { + continue; + } + const name = getPropertyNameText(property.name); + if (!name) { + continue; + } + if (name === "storeAllowFrom" && containsPairingStoreSource(property.initializer)) { + hasStoreProp = true; + } + if (name === "allowFrom" && groupNameRe.test(property.initializer.getText())) { + hasGroupAllowProp = true; + } + } + return hasStoreProp && hasGroupAllowProp; +} + +function findViolations(content, filePath) { + const sourceFile = ts.createSourceFile(filePath, content, ts.ScriptTarget.Latest, true); + const violations = []; + + const visit = (node) => { + if (ts.isVariableDeclaration(node) && node.initializer) { + const name = getDeclarationNameText(node.name); + if (name && groupNameRe.test(name) && containsPairingStoreSource(node.initializer)) { + const callName = getCallName(node.initializer); + if (callName && 
allowedResolverCallNames.has(callName)) { + ts.forEachChild(node, visit); + return; + } + violations.push({ + line: toLine(sourceFile, node), + reason: `group-scoped variable "${name}" references pairing-store identifiers`, + }); + } + } + + if (ts.isPropertyAssignment(node)) { + const propName = getPropertyNameText(node.name); + if (propName && groupNameRe.test(propName) && containsPairingStoreSource(node.initializer)) { + violations.push({ + line: toLine(sourceFile, node), + reason: `group-scoped property "${propName}" references pairing-store identifiers`, + }); + } + } + + if (isSuspiciousNormalizeWithStoreCall(node)) { + violations.push({ + line: toLine(sourceFile, node), + reason: "group allowlist uses normalizeAllowFromWithStore(...) with pairing-store entries", + }); + } + + ts.forEachChild(node, visit); + }; + + visit(sourceFile); + return violations; +} + +async function main() { + const files = ( + await Promise.all(sourceRoots.map(async (root) => await collectTypeScriptFiles(root))) + ).flat(); + + const violations = []; + for (const filePath of files) { + if (allowedFiles.has(filePath)) { + continue; + } + const content = await fs.readFile(filePath, "utf8"); + const fileViolations = findViolations(content, filePath); + for (const violation of fileViolations) { + violations.push({ + path: path.relative(repoRoot, filePath), + ...violation, + }); + } + } + + if (violations.length === 0) { + return; + } + + console.error("Found pairing-store identifiers referenced in group auth composition:"); + for (const violation of violations) { + console.error(`- ${violation.path}:${violation.line} (${violation.reason})`); + } + console.error( + "Group auth must be composed via shared resolvers (resolveDmGroupAccessWithLists / resolveEffectiveAllowFromLists).", + ); + process.exit(1); +} + +const isDirectExecution = (() => { + const entry = process.argv[1]; + if (!entry) { + return false; + } + return path.resolve(entry) === fileURLToPath(import.meta.url); +})(); + 
+if (isDirectExecution) { + main().catch((error) => { + console.error(error); + process.exit(1); + }); +} diff --git a/scripts/check-no-raw-channel-fetch.mjs b/scripts/check-no-raw-channel-fetch.mjs new file mode 100644 index 00000000000..56008b3f1d8 --- /dev/null +++ b/scripts/check-no-raw-channel-fetch.mjs @@ -0,0 +1,214 @@ +#!/usr/bin/env node + +import { promises as fs } from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; +import ts from "typescript"; + +const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); +const sourceRoots = [ + path.join(repoRoot, "src", "telegram"), + path.join(repoRoot, "src", "discord"), + path.join(repoRoot, "src", "slack"), + path.join(repoRoot, "src", "signal"), + path.join(repoRoot, "src", "imessage"), + path.join(repoRoot, "src", "web"), + path.join(repoRoot, "src", "channels"), + path.join(repoRoot, "src", "routing"), + path.join(repoRoot, "src", "line"), + path.join(repoRoot, "extensions"), +]; + +// Temporary allowlist for legacy callsites. New raw fetch callsites in channel/plugin runtime +// code should be rejected and migrated to fetchWithSsrFGuard/shared channel helpers. 
+const allowedRawFetchCallsites = new Set([ + "extensions/bluebubbles/src/types.ts:131", + "extensions/feishu/src/streaming-card.ts:31", + "extensions/feishu/src/streaming-card.ts:100", + "extensions/feishu/src/streaming-card.ts:141", + "extensions/feishu/src/streaming-card.ts:197", + "extensions/google-gemini-cli-auth/oauth.ts:372", + "extensions/google-gemini-cli-auth/oauth.ts:408", + "extensions/google-gemini-cli-auth/oauth.ts:447", + "extensions/google-gemini-cli-auth/oauth.ts:507", + "extensions/google-gemini-cli-auth/oauth.ts:575", + "extensions/googlechat/src/api.ts:22", + "extensions/googlechat/src/api.ts:43", + "extensions/googlechat/src/api.ts:63", + "extensions/googlechat/src/api.ts:184", + "extensions/googlechat/src/auth.ts:82", + "extensions/matrix/src/directory-live.ts:41", + "extensions/matrix/src/matrix/client/config.ts:171", + "extensions/mattermost/src/mattermost/client.ts:211", + "extensions/mattermost/src/mattermost/monitor.ts:230", + "extensions/mattermost/src/mattermost/probe.ts:27", + "extensions/minimax-portal-auth/oauth.ts:71", + "extensions/minimax-portal-auth/oauth.ts:112", + "extensions/msteams/src/graph.ts:39", + "extensions/nextcloud-talk/src/room-info.ts:92", + "extensions/nextcloud-talk/src/send.ts:107", + "extensions/nextcloud-talk/src/send.ts:198", + "extensions/qwen-portal-auth/oauth.ts:46", + "extensions/qwen-portal-auth/oauth.ts:80", + "extensions/talk-voice/index.ts:27", + "extensions/thread-ownership/index.ts:105", + "extensions/voice-call/src/providers/plivo.ts:95", + "extensions/voice-call/src/providers/telnyx.ts:61", + "extensions/voice-call/src/providers/tts-openai.ts:111", + "extensions/voice-call/src/providers/twilio/api.ts:23", + "src/channels/telegram/api.ts:8", + "src/discord/send.outbound.ts:347", + "src/discord/voice-message.ts:267", + "src/slack/monitor/media.ts:64", + "src/slack/monitor/media.ts:68", + "src/slack/monitor/media.ts:82", + "src/slack/monitor/media.ts:108", +]); + +function isTestLikeFile(filePath) { 
+ return ( + filePath.endsWith(".test.ts") || + filePath.endsWith(".test-utils.ts") || + filePath.endsWith(".test-harness.ts") || + filePath.endsWith(".e2e-harness.ts") || + filePath.endsWith(".browser.test.ts") || + filePath.endsWith(".node.test.ts") + ); +} + +async function collectTypeScriptFiles(targetPath) { + const stat = await fs.stat(targetPath); + if (stat.isFile()) { + if (!targetPath.endsWith(".ts") || isTestLikeFile(targetPath)) { + return []; + } + return [targetPath]; + } + const entries = await fs.readdir(targetPath, { withFileTypes: true }); + const files = []; + for (const entry of entries) { + const entryPath = path.join(targetPath, entry.name); + if (entry.isDirectory()) { + if (entry.name === "node_modules") { + continue; + } + files.push(...(await collectTypeScriptFiles(entryPath))); + continue; + } + if (!entry.isFile()) { + continue; + } + if (!entryPath.endsWith(".ts")) { + continue; + } + if (isTestLikeFile(entryPath)) { + continue; + } + files.push(entryPath); + } + return files; +} + +function unwrapExpression(expression) { + let current = expression; + while (true) { + if (ts.isParenthesizedExpression(current)) { + current = current.expression; + continue; + } + if (ts.isAsExpression(current) || ts.isTypeAssertionExpression(current)) { + current = current.expression; + continue; + } + if (ts.isNonNullExpression(current)) { + current = current.expression; + continue; + } + return current; + } +} + +function isRawFetchCall(expression) { + const callee = unwrapExpression(expression); + if (ts.isIdentifier(callee)) { + return callee.text === "fetch"; + } + if (ts.isPropertyAccessExpression(callee)) { + return ( + ts.isIdentifier(callee.expression) && + callee.expression.text === "globalThis" && + callee.name.text === "fetch" + ); + } + return false; +} + +export function findRawFetchCallLines(content, fileName = "source.ts") { + const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, true); + const lines = []; + 
const visit = (node) => { + if (ts.isCallExpression(node) && isRawFetchCall(node.expression)) { + const line = + sourceFile.getLineAndCharacterOfPosition(node.expression.getStart(sourceFile)).line + 1; + lines.push(line); + } + ts.forEachChild(node, visit); + }; + visit(sourceFile); + return lines; +} + +export async function main() { + const files = ( + await Promise.all( + sourceRoots.map(async (sourceRoot) => { + try { + return await collectTypeScriptFiles(sourceRoot); + } catch { + return []; + } + }), + ) + ).flat(); + + const violations = []; + for (const filePath of files) { + const content = await fs.readFile(filePath, "utf8"); + const relPath = path.relative(repoRoot, filePath).replaceAll(path.sep, "/"); + for (const line of findRawFetchCallLines(content, filePath)) { + const callsite = `${relPath}:${line}`; + if (allowedRawFetchCallsites.has(callsite)) { + continue; + } + violations.push(callsite); + } + } + + if (violations.length === 0) { + return; + } + + console.error("Found raw fetch() usage in channel/plugin runtime sources outside allowlist:"); + for (const violation of violations.toSorted()) { + console.error(`- ${violation}`); + } + console.error( + "Use fetchWithSsrFGuard() or existing channel/plugin SDK wrappers for network calls.", + ); + process.exit(1); +} + +const isDirectExecution = (() => { + const entry = process.argv[1]; + if (!entry) { + return false; + } + return path.resolve(entry) === fileURLToPath(import.meta.url); +})(); + +if (isDirectExecution) { + main().catch((error) => { + console.error(error); + process.exit(1); + }); +} diff --git a/scripts/check-pairing-account-scope.mjs b/scripts/check-pairing-account-scope.mjs new file mode 100644 index 00000000000..21db11a87a2 --- /dev/null +++ b/scripts/check-pairing-account-scope.mjs @@ -0,0 +1,157 @@ +#!/usr/bin/env node + +import { promises as fs } from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; +import ts from "typescript"; + +const 
repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); +const sourceRoots = [path.join(repoRoot, "src"), path.join(repoRoot, "extensions")]; + +function isTestLikeFile(filePath) { + return ( + filePath.endsWith(".test.ts") || + filePath.endsWith(".test-utils.ts") || + filePath.endsWith(".test-harness.ts") || + filePath.endsWith(".e2e-harness.ts") + ); +} + +async function collectTypeScriptFiles(dir) { + const entries = await fs.readdir(dir, { withFileTypes: true }); + const out = []; + for (const entry of entries) { + const entryPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + out.push(...(await collectTypeScriptFiles(entryPath))); + continue; + } + if (!entry.isFile() || !entryPath.endsWith(".ts") || isTestLikeFile(entryPath)) { + continue; + } + out.push(entryPath); + } + return out; +} + +function toLine(sourceFile, node) { + return sourceFile.getLineAndCharacterOfPosition(node.getStart(sourceFile)).line + 1; +} + +function getPropertyNameText(name) { + if (ts.isIdentifier(name) || ts.isStringLiteral(name) || ts.isNumericLiteral(name)) { + return name.text; + } + return null; +} + +function isUndefinedLikeExpression(node) { + if (ts.isIdentifier(node) && node.text === "undefined") { + return true; + } + return node.kind === ts.SyntaxKind.NullKeyword; +} + +function hasRequiredAccountIdProperty(node) { + if (!ts.isObjectLiteralExpression(node)) { + return false; + } + for (const property of node.properties) { + if (ts.isShorthandPropertyAssignment(property) && property.name.text === "accountId") { + return true; + } + if (!ts.isPropertyAssignment(property)) { + continue; + } + if (getPropertyNameText(property.name) !== "accountId") { + continue; + } + if (isUndefinedLikeExpression(property.initializer)) { + return false; + } + return true; + } + return false; +} + +function findViolations(content, filePath) { + const sourceFile = ts.createSourceFile(filePath, content, ts.ScriptTarget.Latest, true); + const violations = []; + 
+ const visit = (node) => { + if (ts.isCallExpression(node) && ts.isIdentifier(node.expression)) { + const callName = node.expression.text; + if (callName === "readChannelAllowFromStore") { + if (node.arguments.length < 3 || isUndefinedLikeExpression(node.arguments[2])) { + violations.push({ + line: toLine(sourceFile, node), + reason: "readChannelAllowFromStore call must pass explicit accountId as 3rd arg", + }); + } + } else if ( + callName === "readLegacyChannelAllowFromStore" || + callName === "readLegacyChannelAllowFromStoreSync" + ) { + violations.push({ + line: toLine(sourceFile, node), + reason: `${callName} is legacy-only; use account-scoped readChannelAllowFromStore* APIs`, + }); + } else if (callName === "upsertChannelPairingRequest") { + const firstArg = node.arguments[0]; + if (!firstArg || !hasRequiredAccountIdProperty(firstArg)) { + violations.push({ + line: toLine(sourceFile, node), + reason: "upsertChannelPairingRequest call must include accountId in params", + }); + } + } + } + ts.forEachChild(node, visit); + }; + + visit(sourceFile); + return violations; +} + +async function main() { + const files = ( + await Promise.all(sourceRoots.map(async (root) => await collectTypeScriptFiles(root))) + ).flat(); + const violations = []; + + for (const filePath of files) { + const content = await fs.readFile(filePath, "utf8"); + const fileViolations = findViolations(content, filePath); + for (const violation of fileViolations) { + violations.push({ + path: path.relative(repoRoot, filePath), + ...violation, + }); + } + } + + if (violations.length === 0) { + return; + } + + console.error("Found unscoped pairing-store calls:"); + for (const violation of violations) { + console.error(`- ${violation.path}:${violation.line} (${violation.reason})`); + } + process.exit(1); +} + +const isDirectExecution = (() => { + const entry = process.argv[1]; + if (!entry) { + return false; + } + return path.resolve(entry) === fileURLToPath(import.meta.url); +})(); + +if 
(isDirectExecution) { + main().catch((error) => { + console.error(error); + process.exit(1); + }); +} diff --git a/scripts/dev/discord-acp-plain-language-smoke.ts b/scripts/dev/discord-acp-plain-language-smoke.ts new file mode 100644 index 00000000000..33b8eb0d54f --- /dev/null +++ b/scripts/dev/discord-acp-plain-language-smoke.ts @@ -0,0 +1,779 @@ +#!/usr/bin/env bun +// Manual ACP thread smoke for plain-language routing. +// Keep this script available for regression/debug validation. Do not delete. +import { randomUUID } from "node:crypto"; +import fs from "node:fs/promises"; +import path from "node:path"; + +type ThreadBindingRecord = { + accountId?: string; + channelId?: string; + threadId?: string; + targetKind?: string; + targetSessionKey?: string; + agentId?: string; + boundBy?: string; + boundAt?: number; +}; + +type ThreadBindingsPayload = { + version?: number; + bindings?: Record; +}; + +type DiscordMessage = { + id: string; + content?: string; + timestamp?: string; + author?: { + id?: string; + username?: string; + bot?: boolean; + }; +}; + +type DiscordUser = { + id: string; + username: string; + bot?: boolean; +}; + +type DriverMode = "token" | "webhook"; + +type Args = { + channelId: string; + driverMode: DriverMode; + driverToken: string; + driverTokenPrefix: string; + botToken: string; + botTokenPrefix: string; + targetAgent: string; + timeoutMs: number; + pollMs: number; + mentionUserId?: string; + instruction?: string; + threadBindingsPath: string; + json: boolean; +}; + +type SuccessResult = { + ok: true; + smokeId: string; + ackToken: string; + sentMessageId: string; + binding: { + threadId: string; + targetSessionKey: string; + targetKind: string; + agentId: string; + boundAt: number; + accountId?: string; + channelId?: string; + }; + ackMessage: { + id: string; + authorId?: string; + authorUsername?: string; + timestamp?: string; + content?: string; + }; +}; + +type FailureResult = { + ok: false; + smokeId: string; + stage: "validation" | 
"send-message" | "wait-binding" | "wait-ack" | "discord-api" | "unexpected"; + error: string; + diagnostics?: { + parentChannelRecent?: Array<{ + id: string; + author?: string; + bot?: boolean; + content?: string; + }>; + bindingCandidates?: Array<{ + threadId: string; + targetSessionKey: string; + targetKind?: string; + agentId?: string; + boundAt?: number; + }>; + }; +}; + +const DISCORD_API_BASE = "https://discord.com/api/v10"; + +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +function parseNumber(value: string | undefined, fallback: number): number { + if (!value) { + return fallback; + } + const parsed = Number.parseInt(value, 10); + return Number.isFinite(parsed) && parsed > 0 ? parsed : fallback; +} + +function resolveStateDir(): string { + const override = process.env.OPENCLAW_STATE_DIR?.trim() || process.env.CLAWDBOT_STATE_DIR?.trim(); + if (override) { + return override.startsWith("~") + ? path.resolve(process.env.HOME || "", override.slice(1)) + : path.resolve(override); + } + const home = process.env.OPENCLAW_HOME?.trim() || process.env.HOME || ""; + return path.join(home, ".openclaw"); +} + +function resolveArg(flag: string): string | undefined { + const argv = process.argv.slice(2); + const eq = argv.find((entry) => entry.startsWith(`${flag}=`)); + if (eq) { + return eq.slice(flag.length + 1); + } + const idx = argv.indexOf(flag); + if (idx >= 0 && idx + 1 < argv.length) { + return argv[idx + 1]; + } + return undefined; +} + +function hasFlag(flag: string): boolean { + return process.argv.slice(2).includes(flag); +} + +function usage(): string { + return ( + "Usage: bun scripts/dev/discord-acp-plain-language-smoke.ts " + + "--channel [--token | --driver webhook --bot-token ] [options]\n\n" + + "Manual live smoke only (not CI). 
Sends a plain-language instruction in Discord and verifies:\n" + + "1) OpenClaw spawned an ACP thread binding\n" + + "2) agent replied in that bound thread with the expected ACK token\n\n" + + "Options:\n" + + " --channel Parent Discord channel id (required)\n" + + " --driver Driver transport mode (default: token)\n" + + " --token Driver Discord token (required for driver=token)\n" + + " --token-prefix Auth prefix for --token (default: Bot)\n" + + " --bot-token Bot token for webhook driver mode\n" + + " --bot-token-prefix Auth prefix for --bot-token (default: Bot)\n" + + " --agent Expected ACP agent id (default: codex)\n" + + " --mention Mention this user in the instruction (optional)\n" + + " --instruction Custom instruction template (optional)\n" + + " --timeout-ms Total timeout in ms (default: 240000)\n" + + " --poll-ms Poll interval in ms (default: 1500)\n" + + " --thread-bindings-path

Override thread-bindings json path\n" + + " --json Emit JSON output\n" + + "\n" + + "Environment fallbacks:\n" + + " OPENCLAW_DISCORD_SMOKE_CHANNEL_ID\n" + + " OPENCLAW_DISCORD_SMOKE_DRIVER\n" + + " OPENCLAW_DISCORD_SMOKE_DRIVER_TOKEN\n" + + " OPENCLAW_DISCORD_SMOKE_DRIVER_TOKEN_PREFIX\n" + + " OPENCLAW_DISCORD_SMOKE_BOT_TOKEN\n" + + " OPENCLAW_DISCORD_SMOKE_BOT_TOKEN_PREFIX\n" + + " OPENCLAW_DISCORD_SMOKE_AGENT\n" + + " OPENCLAW_DISCORD_SMOKE_MENTION_USER_ID\n" + + " OPENCLAW_DISCORD_SMOKE_TIMEOUT_MS\n" + + " OPENCLAW_DISCORD_SMOKE_POLL_MS\n" + + " OPENCLAW_DISCORD_SMOKE_THREAD_BINDINGS_PATH" + ); +} + +function parseArgs(): Args { + const channelId = + resolveArg("--channel") || + process.env.OPENCLAW_DISCORD_SMOKE_CHANNEL_ID || + process.env.CLAWDBOT_DISCORD_SMOKE_CHANNEL_ID || + ""; + const driverModeRaw = + resolveArg("--driver") || + process.env.OPENCLAW_DISCORD_SMOKE_DRIVER || + process.env.CLAWDBOT_DISCORD_SMOKE_DRIVER || + "token"; + const normalizedDriverMode = driverModeRaw.trim().toLowerCase(); + const driverMode: DriverMode = + normalizedDriverMode === "webhook" + ? "webhook" + : normalizedDriverMode === "token" + ? 
"token" + : "token"; + const driverToken = + resolveArg("--token") || + process.env.OPENCLAW_DISCORD_SMOKE_DRIVER_TOKEN || + process.env.CLAWDBOT_DISCORD_SMOKE_DRIVER_TOKEN || + ""; + const driverTokenPrefix = + resolveArg("--token-prefix") || process.env.OPENCLAW_DISCORD_SMOKE_DRIVER_TOKEN_PREFIX || "Bot"; + const botToken = + resolveArg("--bot-token") || + process.env.OPENCLAW_DISCORD_SMOKE_BOT_TOKEN || + process.env.CLAWDBOT_DISCORD_SMOKE_BOT_TOKEN || + process.env.DISCORD_BOT_TOKEN || + ""; + const botTokenPrefix = + resolveArg("--bot-token-prefix") || + process.env.OPENCLAW_DISCORD_SMOKE_BOT_TOKEN_PREFIX || + "Bot"; + const targetAgent = + resolveArg("--agent") || + process.env.OPENCLAW_DISCORD_SMOKE_AGENT || + process.env.CLAWDBOT_DISCORD_SMOKE_AGENT || + "codex"; + const mentionUserId = + resolveArg("--mention") || + process.env.OPENCLAW_DISCORD_SMOKE_MENTION_USER_ID || + process.env.CLAWDBOT_DISCORD_SMOKE_MENTION_USER_ID || + undefined; + const instruction = + resolveArg("--instruction") || + process.env.OPENCLAW_DISCORD_SMOKE_INSTRUCTION || + process.env.CLAWDBOT_DISCORD_SMOKE_INSTRUCTION || + undefined; + const timeoutMs = parseNumber( + resolveArg("--timeout-ms") || process.env.OPENCLAW_DISCORD_SMOKE_TIMEOUT_MS, + 240_000, + ); + const pollMs = parseNumber( + resolveArg("--poll-ms") || process.env.OPENCLAW_DISCORD_SMOKE_POLL_MS, + 1_500, + ); + const defaultBindingsPath = path.join(resolveStateDir(), "discord", "thread-bindings.json"); + const threadBindingsPath = + resolveArg("--thread-bindings-path") || + process.env.OPENCLAW_DISCORD_SMOKE_THREAD_BINDINGS_PATH || + defaultBindingsPath; + const json = hasFlag("--json"); + + if (!channelId) { + throw new Error(usage()); + } + if (driverMode === "token" && !driverToken) { + throw new Error(usage()); + } + if (driverMode === "webhook" && !botToken) { + throw new Error(usage()); + } + + return { + channelId, + driverMode, + driverToken, + driverTokenPrefix, + botToken, + botTokenPrefix, + targetAgent, + 
timeoutMs, + pollMs, + mentionUserId, + instruction, + threadBindingsPath, + json, + }; +} + +function resolveAuthorizationHeader(params: { token: string; tokenPrefix: string }): string { + const token = params.token.trim(); + if (!token) { + throw new Error("Missing Discord driver token."); + } + if (token.includes(" ")) { + return token; + } + return `${params.tokenPrefix.trim() || "Bot"} ${token}`; +} + +async function discordApi(params: { + method: "GET" | "POST"; + path: string; + authHeader: string; + body?: unknown; + retries?: number; +}): Promise { + const retries = params.retries ?? 6; + for (let attempt = 0; attempt <= retries; attempt += 1) { + const response = await fetch(`${DISCORD_API_BASE}${params.path}`, { + method: params.method, + headers: { + Authorization: params.authHeader, + "Content-Type": "application/json", + }, + body: params.body === undefined ? undefined : JSON.stringify(params.body), + }); + + if (response.status === 429) { + const body = (await response.json().catch(() => ({}))) as { retry_after?: number }; + const waitSeconds = typeof body.retry_after === "number" ? body.retry_after : 1; + await sleep(Math.ceil(waitSeconds * 1000)); + continue; + } + + if (!response.ok) { + const text = await response.text().catch(() => ""); + throw new Error( + `Discord API ${params.method} ${params.path} failed: ${response.status} ${response.statusText}${text ? ` :: ${text}` : ""}`, + ); + } + + if (response.status === 204) { + return undefined as T; + } + + return (await response.json()) as T; + } + + throw new Error(`Discord API ${params.method} ${params.path} exceeded retry budget.`); +} + +async function discordWebhookApi(params: { + method: "POST" | "DELETE"; + webhookId: string; + webhookToken: string; + body?: unknown; + query?: string; + retries?: number; +}): Promise { + const retries = params.retries ?? 6; + const suffix = params.query ? 
`?${params.query}` : ""; + const path = `/webhooks/${encodeURIComponent(params.webhookId)}/${encodeURIComponent(params.webhookToken)}${suffix}`; + for (let attempt = 0; attempt <= retries; attempt += 1) { + const response = await fetch(`${DISCORD_API_BASE}${path}`, { + method: params.method, + headers: { + "Content-Type": "application/json", + }, + body: params.body === undefined ? undefined : JSON.stringify(params.body), + }); + + if (response.status === 429) { + const body = (await response.json().catch(() => ({}))) as { retry_after?: number }; + const waitSeconds = typeof body.retry_after === "number" ? body.retry_after : 1; + await sleep(Math.ceil(waitSeconds * 1000)); + continue; + } + + if (!response.ok) { + const text = await response.text().catch(() => ""); + throw new Error( + `Discord webhook API ${params.method} ${path} failed: ${response.status} ${response.statusText}${text ? ` :: ${text}` : ""}`, + ); + } + + if (response.status === 204) { + return undefined as T; + } + + return (await response.json()) as T; + } + + throw new Error(`Discord webhook API ${params.method} ${path} exceeded retry budget.`); +} + +async function readThreadBindings(filePath: string): Promise { + const raw = await fs.readFile(filePath, "utf8"); + const payload = JSON.parse(raw) as ThreadBindingsPayload; + const entries = Object.values(payload.bindings ?? 
{}); + return entries.filter((entry) => Boolean(entry?.threadId && entry?.targetSessionKey)); +} + +function normalizeBoundAt(record: ThreadBindingRecord): number { + if (typeof record.boundAt === "number" && Number.isFinite(record.boundAt)) { + return record.boundAt; + } + return 0; +} + +function resolveCandidateBindings(params: { + entries: ThreadBindingRecord[]; + minBoundAt: number; + targetAgent: string; +}): ThreadBindingRecord[] { + const normalizedTargetAgent = params.targetAgent.trim().toLowerCase(); + return params.entries + .filter((entry) => { + const targetKind = String(entry.targetKind || "") + .trim() + .toLowerCase(); + if (targetKind !== "acp") { + return false; + } + if (normalizeBoundAt(entry) < params.minBoundAt) { + return false; + } + const agentId = String(entry.agentId || "") + .trim() + .toLowerCase(); + if (normalizedTargetAgent && agentId && agentId !== normalizedTargetAgent) { + return false; + } + return true; + }) + .toSorted((a, b) => normalizeBoundAt(b) - normalizeBoundAt(a)); +} + +function buildInstruction(params: { + smokeId: string; + ackToken: string; + targetAgent: string; + mentionUserId?: string; + template?: string; +}): string { + const mentionPrefix = params.mentionUserId?.trim() ? 
`<@${params.mentionUserId.trim()}> ` : ""; + if (params.template?.trim()) { + return mentionPrefix + params.template.trim(); + } + return ( + mentionPrefix + + `Manual smoke ${params.smokeId}: Please spawn a ${params.targetAgent} ACP coding agent in a thread for this request, keep it persistent, and in that thread reply with exactly "${params.ackToken}" and nothing else.` + ); +} + +function toRecentMessageRow(message: DiscordMessage) { + return { + id: message.id, + author: message.author?.username || message.author?.id || "unknown", + bot: Boolean(message.author?.bot), + content: (message.content || "").slice(0, 500), + }; +} + +function printOutput(params: { json: boolean; payload: SuccessResult | FailureResult }) { + if (params.json) { + // eslint-disable-next-line no-console + console.log(JSON.stringify(params.payload, null, 2)); + return; + } + if (params.payload.ok) { + const success = params.payload; + // eslint-disable-next-line no-console + console.log("PASS"); + // eslint-disable-next-line no-console + console.log(`smokeId: ${success.smokeId}`); + // eslint-disable-next-line no-console + console.log(`sentMessageId: ${success.sentMessageId}`); + // eslint-disable-next-line no-console + console.log(`threadId: ${success.binding.threadId}`); + // eslint-disable-next-line no-console + console.log(`sessionKey: ${success.binding.targetSessionKey}`); + // eslint-disable-next-line no-console + console.log(`ackMessageId: ${success.ackMessage.id}`); + // eslint-disable-next-line no-console + console.log( + `ackAuthor: ${success.ackMessage.authorUsername || success.ackMessage.authorId || "unknown"}`, + ); + return; + } + const failure = params.payload; + // eslint-disable-next-line no-console + console.error("FAIL"); + // eslint-disable-next-line no-console + console.error(`stage: ${failure.stage}`); + // eslint-disable-next-line no-console + console.error(`smokeId: ${failure.smokeId}`); + // eslint-disable-next-line no-console + console.error(`error: 
${failure.error}`); + if (failure.diagnostics?.bindingCandidates?.length) { + // eslint-disable-next-line no-console + console.error("binding candidates:"); + for (const candidate of failure.diagnostics.bindingCandidates) { + // eslint-disable-next-line no-console + console.error( + ` thread=${candidate.threadId} kind=${candidate.targetKind || "?"} agent=${candidate.agentId || "?"} boundAt=${candidate.boundAt || 0} session=${candidate.targetSessionKey}`, + ); + } + } + if (failure.diagnostics?.parentChannelRecent?.length) { + // eslint-disable-next-line no-console + console.error("recent parent channel messages:"); + for (const row of failure.diagnostics.parentChannelRecent) { + // eslint-disable-next-line no-console + console.error(` ${row.id} ${row.author}${row.bot ? " [bot]" : ""}: ${row.content || ""}`); + } + } +} + +async function run(): Promise { + let args: Args; + try { + args = parseArgs(); + } catch (err) { + return { + ok: false, + stage: "validation", + smokeId: "n/a", + error: err instanceof Error ? 
err.message : String(err), + }; + } + + const smokeId = `acp-smoke-${Date.now()}-${randomUUID().slice(0, 8)}`; + const ackToken = `ACP_SMOKE_ACK_${smokeId}`; + const instruction = buildInstruction({ + smokeId, + ackToken, + targetAgent: args.targetAgent, + mentionUserId: args.mentionUserId, + template: args.instruction, + }); + + let readAuthHeader = ""; + let sentMessageId = ""; + let setupStage: "discord-api" | "send-message" = "discord-api"; + let senderAuthorId: string | undefined; + let webhookForCleanup: + | { + id: string; + token: string; + } + | undefined; + + try { + if (args.driverMode === "token") { + const authHeader = resolveAuthorizationHeader({ + token: args.driverToken, + tokenPrefix: args.driverTokenPrefix, + }); + readAuthHeader = authHeader; + + const driverUser = await discordApi({ + method: "GET", + path: "/users/@me", + authHeader, + }); + senderAuthorId = driverUser.id; + + setupStage = "send-message"; + const sent = await discordApi({ + method: "POST", + path: `/channels/${encodeURIComponent(args.channelId)}/messages`, + authHeader, + body: { + content: instruction, + allowed_mentions: args.mentionUserId + ? 
{ parse: [], users: [args.mentionUserId] } + : { parse: [] }, + }, + }); + sentMessageId = sent.id; + } else { + const botAuthHeader = resolveAuthorizationHeader({ + token: args.botToken, + tokenPrefix: args.botTokenPrefix, + }); + readAuthHeader = botAuthHeader; + + await discordApi({ + method: "GET", + path: "/users/@me", + authHeader: botAuthHeader, + }); + + setupStage = "send-message"; + const webhook = await discordApi<{ id: string; token?: string | null }>({ + method: "POST", + path: `/channels/${encodeURIComponent(args.channelId)}/webhooks`, + authHeader: botAuthHeader, + body: { + name: `openclaw-acp-smoke-${smokeId.slice(-8)}`, + }, + }); + if (!webhook.id || !webhook.token) { + return { + ok: false, + stage: "send-message", + smokeId, + error: + "Discord webhook creation succeeded but no webhook token was returned; cannot post smoke message.", + }; + } + webhookForCleanup = { id: webhook.id, token: webhook.token }; + + const sent = await discordWebhookApi({ + method: "POST", + webhookId: webhook.id, + webhookToken: webhook.token, + query: "wait=true", + body: { + content: instruction, + allowed_mentions: args.mentionUserId + ? { parse: [], users: [args.mentionUserId] } + : { parse: [] }, + }, + }); + sentMessageId = sent.id; + senderAuthorId = sent.author?.id; + } + } catch (err) { + return { + ok: false, + stage: setupStage, + smokeId, + error: err instanceof Error ? 
err.message : String(err), + }; + } + + const startedAt = Date.now(); + + const deadline = startedAt + args.timeoutMs; + let winningBinding: ThreadBindingRecord | undefined; + let latestCandidates: ThreadBindingRecord[] = []; + + try { + while (Date.now() < deadline && !winningBinding) { + try { + const entries = await readThreadBindings(args.threadBindingsPath); + latestCandidates = resolveCandidateBindings({ + entries, + minBoundAt: startedAt - 3_000, + targetAgent: args.targetAgent, + }); + winningBinding = latestCandidates[0]; + } catch { + // Keep polling; file may not exist yet or may be mid-write. + } + if (!winningBinding) { + await sleep(args.pollMs); + } + } + + if (!winningBinding?.threadId || !winningBinding?.targetSessionKey) { + let parentRecent: DiscordMessage[] = []; + try { + parentRecent = await discordApi({ + method: "GET", + path: `/channels/${encodeURIComponent(args.channelId)}/messages?limit=20`, + authHeader: readAuthHeader, + }); + } catch { + // Best effort diagnostics only. 
+ } + return { + ok: false, + stage: "wait-binding", + smokeId, + error: `Timed out waiting for new ACP thread binding (path: ${args.threadBindingsPath}).`, + diagnostics: { + bindingCandidates: latestCandidates.slice(0, 6).map((entry) => ({ + threadId: entry.threadId || "", + targetSessionKey: entry.targetSessionKey || "", + targetKind: entry.targetKind, + agentId: entry.agentId, + boundAt: entry.boundAt, + })), + parentChannelRecent: parentRecent.map(toRecentMessageRow), + }, + }; + } + + const threadId = winningBinding.threadId; + let ackMessage: DiscordMessage | undefined; + while (Date.now() < deadline && !ackMessage) { + try { + const threadMessages = await discordApi({ + method: "GET", + path: `/channels/${encodeURIComponent(threadId)}/messages?limit=50`, + authHeader: readAuthHeader, + }); + ackMessage = threadMessages.find((message) => { + const content = message.content || ""; + if (!content.includes(ackToken)) { + return false; + } + const authorId = message.author?.id || ""; + return !senderAuthorId || authorId !== senderAuthorId; + }); + } catch { + // Keep polling; thread can appear before read permissions settle. + } + if (!ackMessage) { + await sleep(args.pollMs); + } + } + + if (!ackMessage) { + let parentRecent: DiscordMessage[] = []; + try { + parentRecent = await discordApi({ + method: "GET", + path: `/channels/${encodeURIComponent(args.channelId)}/messages?limit=20`, + authHeader: readAuthHeader, + }); + } catch { + // Best effort diagnostics only. 
+ } + + return { + ok: false, + stage: "wait-ack", + smokeId, + error: `Thread bound (${threadId}) but timed out waiting for ACK token "${ackToken}" from OpenClaw.`, + diagnostics: { + bindingCandidates: [ + { + threadId: winningBinding.threadId || "", + targetSessionKey: winningBinding.targetSessionKey || "", + targetKind: winningBinding.targetKind, + agentId: winningBinding.agentId, + boundAt: winningBinding.boundAt, + }, + ], + parentChannelRecent: parentRecent.map(toRecentMessageRow), + }, + }; + } + + return { + ok: true, + smokeId, + ackToken, + sentMessageId, + binding: { + threadId, + targetSessionKey: winningBinding.targetSessionKey, + targetKind: String(winningBinding.targetKind || "acp"), + agentId: String(winningBinding.agentId || args.targetAgent), + boundAt: normalizeBoundAt(winningBinding), + accountId: winningBinding.accountId, + channelId: winningBinding.channelId, + }, + ackMessage: { + id: ackMessage.id, + authorId: ackMessage.author?.id, + authorUsername: ackMessage.author?.username, + timestamp: ackMessage.timestamp, + content: ackMessage.content, + }, + }; + } finally { + if (webhookForCleanup) { + await discordWebhookApi({ + method: "DELETE", + webhookId: webhookForCleanup.id, + webhookToken: webhookForCleanup.token, + }).catch(() => { + // Best-effort cleanup only. + }); + } + } +} + +if (hasFlag("--help") || hasFlag("-h")) { + // eslint-disable-next-line no-console + console.log(usage()); + process.exit(0); +} + +const result = await run().catch( + (err): FailureResult => ({ + ok: false, + stage: "unexpected", + smokeId: "n/a", + error: err instanceof Error ? err.message : String(err), + }), +); + +printOutput({ + json: hasFlag("--json"), + payload: result, +}); + +process.exit(result.ok ? 
0 : 1); diff --git a/scripts/e2e/onboard-docker.sh b/scripts/e2e/onboard-docker.sh index bdfb0ca6b3e..0f7a894e394 100755 --- a/scripts/e2e/onboard-docker.sh +++ b/scripts/e2e/onboard-docker.sh @@ -409,6 +409,7 @@ NODE # Seed a remote config to exercise reset path. cat > "$HOME/.openclaw/openclaw.json" <<'"'"'JSON'"'"' { + "meta": {}, "agents": { "defaults": { "workspace": "/root/old" } }, "gateway": { "mode": "remote", @@ -504,6 +505,7 @@ NODE # Seed skills config to ensure it survives the wizard. cat > "$HOME/.openclaw/openclaw.json" <<'"'"'JSON'"'"' { + "meta": {}, "skills": { "allowBundled": ["__none__"], "install": { "nodeManager": "bun" } diff --git a/scripts/generate-host-env-security-policy-swift.mjs b/scripts/generate-host-env-security-policy-swift.mjs new file mode 100644 index 00000000000..4de64ad8d98 --- /dev/null +++ b/scripts/generate-host-env-security-policy-swift.mjs @@ -0,0 +1,74 @@ +#!/usr/bin/env node +import fs from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +const args = new Set(process.argv.slice(2)); +const checkOnly = args.has("--check"); +const writeMode = args.has("--write") || !checkOnly; + +if (checkOnly && args.has("--write")) { + console.error("Use either --check or --write, not both."); + process.exit(1); +} + +const here = path.dirname(fileURLToPath(import.meta.url)); +const repoRoot = path.resolve(here, ".."); +const policyPath = path.join(repoRoot, "src", "infra", "host-env-security-policy.json"); +const outputPath = path.join( + repoRoot, + "apps", + "macos", + "Sources", + "OpenClaw", + "HostEnvSecurityPolicy.generated.swift", +); + +/** @type {{blockedKeys: string[]; blockedOverrideKeys?: string[]; blockedPrefixes: string[]}} */ +const policy = JSON.parse(fs.readFileSync(policyPath, "utf8")); + +const renderSwiftStringArray = (items) => items.map((item) => ` "${item}"`).join(",\n"); + +const generated = `// Generated file. Do not edit directly. 
+// Source: src/infra/host-env-security-policy.json +// Regenerate: node scripts/generate-host-env-security-policy-swift.mjs --write + +import Foundation + +enum HostEnvSecurityPolicy { + static let blockedKeys: Set = [ +${renderSwiftStringArray(policy.blockedKeys)} + ] + + static let blockedOverrideKeys: Set = [ +${renderSwiftStringArray(policy.blockedOverrideKeys ?? [])} + ] + + static let blockedPrefixes: [String] = [ +${renderSwiftStringArray(policy.blockedPrefixes)} + ] +} +`; + +const current = fs.existsSync(outputPath) ? fs.readFileSync(outputPath, "utf8") : null; + +if (checkOnly) { + if (current === generated) { + console.log(`OK ${path.relative(repoRoot, outputPath)}`); + process.exit(0); + } + console.error( + [ + `Out of date ${path.relative(repoRoot, outputPath)}.`, + "Run: node scripts/generate-host-env-security-policy-swift.mjs --write", + ].join("\n"), + ); + process.exit(1); +} + +if (writeMode) { + if (current !== generated) { + fs.writeFileSync(outputPath, generated); + } + console.log(`Wrote ${path.relative(repoRoot, outputPath)}`); +} diff --git a/scripts/ghsa-patch.mjs b/scripts/ghsa-patch.mjs new file mode 100644 index 00000000000..44e7daa2bee --- /dev/null +++ b/scripts/ghsa-patch.mjs @@ -0,0 +1,168 @@ +#!/usr/bin/env node +import { execFileSync, spawnSync } from "node:child_process"; +import crypto from "node:crypto"; +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; + +function usage() { + console.error( + [ + "Usage:", + " node scripts/ghsa-patch.mjs --ghsa [--repo owner/name]", + " --summary --severity ", + " --description-file ", + " --vulnerable-version-range ", + " --patched-versions ", + " [--package openclaw] [--ecosystem npm] [--cvss ]", + ].join("\n"), + ); +} + +function fail(message) { + console.error(message); + process.exit(1); +} + +function parseArgs(argv) { + const out = {}; + for (let i = 0; i < argv.length; i += 1) { + const arg = argv[i]; + if (!arg.startsWith("--")) { + 
fail(`Unexpected argument: ${arg}`); + } + const key = arg.slice(2); + const value = argv[i + 1]; + if (!value || value.startsWith("--")) { + fail(`Missing value for --${key}`); + } + out[key] = value; + i += 1; + } + return out; +} + +function runGh(args) { + const proc = spawnSync("gh", args, { encoding: "utf8" }); + if (proc.status !== 0) { + fail(proc.stderr.trim() || proc.stdout.trim() || `gh ${args.join(" ")} failed`); + } + return proc.stdout; +} + +function deriveRepoFromOrigin() { + const remote = execFileSync("git", ["remote", "get-url", "origin"], { encoding: "utf8" }).trim(); + const httpsMatch = remote.match(/github\.com[/:]([^/]+)\/([^/.]+)(?:\.git)?$/); + if (!httpsMatch) { + fail(`Could not parse origin remote: ${remote}`); + } + return `${httpsMatch[1]}/${httpsMatch[2]}`; +} + +function parseGhsaId(value) { + const match = value.match(/GHSA-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}/i); + if (!match) { + fail(`Could not parse GHSA id from: ${value}`); + } + return match[0]; +} + +function writeTempJson(data) { + const file = path.join(os.tmpdir(), `ghsa-patch-${crypto.randomUUID()}.json`); + fs.writeFileSync(file, `${JSON.stringify(data, null, 2)}\n`); + return file; +} + +const args = parseArgs(process.argv.slice(2)); +if (!args.ghsa || !args.summary || !args.severity || !args["description-file"]) { + usage(); + process.exit(1); +} + +const repo = args.repo || deriveRepoFromOrigin(); +const ghsaId = parseGhsaId(args.ghsa); +const advisoryPath = `/repos/${repo}/security-advisories/${ghsaId}`; +const descriptionPath = path.resolve(args["description-file"]); + +if (!fs.existsSync(descriptionPath)) { + fail(`Description file does not exist: ${descriptionPath}`); +} + +const current = JSON.parse(runGh(["api", "-H", "X-GitHub-Api-Version: 2022-11-28", advisoryPath])); +const restoredCvss = args.cvss || current?.cvss?.vector_string || null; + +const ecosystem = args.ecosystem || "npm"; +const packageName = args.package || "openclaw"; +const vulnerableRange = 
args["vulnerable-version-range"]; +const patchedVersionsRaw = args["patched-versions"]; + +if (!vulnerableRange) { + fail("Missing --vulnerable-version-range"); +} +if (patchedVersionsRaw === undefined) { + fail("Missing --patched-versions"); +} + +const patchedVersions = patchedVersionsRaw === "null" ? null : patchedVersionsRaw; +const description = fs.readFileSync(descriptionPath, "utf8"); + +const payload = { + summary: args.summary, + severity: args.severity, + description, + vulnerabilities: [ + { + package: { + ecosystem, + name: packageName, + }, + vulnerable_version_range: vulnerableRange, + patched_versions: patchedVersions, + vulnerable_functions: [], + }, + ], +}; + +const patchFile = writeTempJson(payload); +runGh([ + "api", + "-H", + "X-GitHub-Api-Version: 2022-11-28", + "-X", + "PATCH", + advisoryPath, + "--input", + patchFile, +]); + +if (restoredCvss) { + runGh([ + "api", + "-H", + "X-GitHub-Api-Version: 2022-11-28", + "-X", + "PATCH", + advisoryPath, + "-f", + `cvss_vector_string=${restoredCvss}`, + ]); +} + +const refreshed = JSON.parse( + runGh(["api", "-H", "X-GitHub-Api-Version: 2022-11-28", advisoryPath]), +); +console.log( + JSON.stringify( + { + html_url: refreshed.html_url, + state: refreshed.state, + severity: refreshed.severity, + summary: refreshed.summary, + vulnerabilities: refreshed.vulnerabilities, + cvss: refreshed.cvss, + updated_at: refreshed.updated_at, + }, + null, + 2, + ), +); diff --git a/scripts/pr b/scripts/pr index 90cfe029db0..215b72bcbb0 100755 --- a/scripts/pr +++ b/scripts/pr @@ -664,6 +664,99 @@ validate_changelog_entry_for_pr() { echo "changelog validated: found PR #$pr (contributor handle unavailable, skipping thanks check)" } +validate_changelog_merge_hygiene() { + local diff + diff=$(git diff --unified=0 origin/main...HEAD -- CHANGELOG.md) + + local removed_lines + removed_lines=$(printf '%s\n' "$diff" | awk ' + /^---/ { next } + /^-/ { print substr($0, 2) } + ') + if [ -z "$removed_lines" ]; then + return 0 + fi 
+ + local removed_refs + removed_refs=$(printf '%s\n' "$removed_lines" | rg -o '#[0-9]+' | sort -u || true) + if [ -z "$removed_refs" ]; then + return 0 + fi + + local added_lines + added_lines=$(printf '%s\n' "$diff" | awk ' + /^\+\+\+/ { next } + /^\+/ { print substr($0, 2) } + ') + + local ref + while IFS= read -r ref; do + [ -z "$ref" ] && continue + if ! printf '%s\n' "$added_lines" | rg -q -F "$ref"; then + echo "CHANGELOG.md drops existing entry reference $ref without re-adding it." + echo "Likely merge conflict loss; restore the dropped entry (or keep the same PR ref in rewritten text)." + exit 1 + fi + done <<<"$removed_refs" + + echo "changelog merge hygiene validated: no dropped PR references" +} + +changed_changelog_fragment_files() { + git diff --name-only origin/main...HEAD -- changelog/fragments | rg '^changelog/fragments/.*\.md$' || true +} + +validate_changelog_fragments_for_pr() { + local pr="$1" + local contrib="$2" + shift 2 + + if [ "$#" -lt 1 ]; then + echo "No changelog fragments provided for validation." + exit 1 + fi + + local pr_pattern + pr_pattern="(#$pr|openclaw#$pr)" + + local added_lines + local file + local all_added_lines="" + for file in "$@"; do + added_lines=$(git diff --unified=0 origin/main...HEAD -- "$file" | awk ' + /^\+\+\+/ { next } + /^\+/ { print substr($0, 2) } + ') + + if [ -z "$added_lines" ]; then + echo "$file is in diff but no added lines were detected." + exit 1 + fi + + all_added_lines=$(printf '%s\n%s\n' "$all_added_lines" "$added_lines") + done + + local with_pr + with_pr=$(printf '%s\n' "$all_added_lines" | rg -in "$pr_pattern" || true) + if [ -z "$with_pr" ]; then + echo "Changelog fragment update must reference PR #$pr (for example, (#$pr))." 
+ exit 1 + fi + + if [ -n "$contrib" ] && [ "$contrib" != "null" ]; then + local with_pr_and_thanks + with_pr_and_thanks=$(printf '%s\n' "$all_added_lines" | rg -in "$pr_pattern" | rg -i "thanks @$contrib" || true) + if [ -z "$with_pr_and_thanks" ]; then + echo "Changelog fragment update must include both PR #$pr and thanks @$contrib on the entry line." + exit 1 + fi + echo "changelog fragments validated: found PR #$pr + thanks @$contrib" + return 0 + fi + + echo "changelog fragments validated: found PR #$pr (contributor handle unavailable, skipping thanks check)" +} + prepare_gates() { local pr="$1" enter_worktree "$pr" false @@ -684,13 +777,31 @@ prepare_gates() { docs_only=true fi - # Enforce workflow policy: every prepared PR must include a changelog update. - if ! printf '%s\n' "$changed_files" | rg -q '^CHANGELOG\.md$'; then - echo "Missing CHANGELOG.md update in PR diff. This workflow requires a changelog entry." + local has_changelog_update=false + if printf '%s\n' "$changed_files" | rg -q '^CHANGELOG\.md$'; then + has_changelog_update=true + fi + local fragment_files + fragment_files=$(changed_changelog_fragment_files) + local has_fragment_update=false + if [ -n "$fragment_files" ]; then + has_fragment_update=true + fi + # Enforce workflow policy: every prepared PR must include either CHANGELOG.md + # or one or more changelog fragments. + if [ "$has_changelog_update" = "false" ] && [ "$has_fragment_update" = "false" ]; then + echo "Missing changelog update. Add CHANGELOG.md changes or changelog/fragments/*.md entry." 
exit 1 fi local contrib="${PR_AUTHOR:-}" - validate_changelog_entry_for_pr "$pr" "$contrib" + if [ "$has_changelog_update" = "true" ]; then + validate_changelog_merge_hygiene + validate_changelog_entry_for_pr "$pr" "$contrib" + fi + if [ "$has_fragment_update" = "true" ]; then + mapfile -t fragment_file_list <<<"$fragment_files" + validate_changelog_fragments_for_pr "$pr" "$contrib" "${fragment_file_list[@]}" + fi run_quiet_logged "pnpm build" ".local/gates-build.log" pnpm build run_quiet_logged "pnpm check" ".local/gates-check.log" pnpm check diff --git a/scripts/run-openclaw-podman.sh b/scripts/run-openclaw-podman.sh index 2be9d0a5304..9f0cd0bb6d5 100755 --- a/scripts/run-openclaw-podman.sh +++ b/scripts/run-openclaw-podman.sh @@ -75,7 +75,9 @@ OPENCLAW_IMAGE="${OPENCLAW_PODMAN_IMAGE:-openclaw:local}" PODMAN_PULL="${OPENCLAW_PODMAN_PULL:-never}" HOST_GATEWAY_PORT="${OPENCLAW_PODMAN_GATEWAY_HOST_PORT:-${OPENCLAW_GATEWAY_PORT:-18789}}" HOST_BRIDGE_PORT="${OPENCLAW_PODMAN_BRIDGE_HOST_PORT:-${OPENCLAW_BRIDGE_PORT:-18790}}" -GATEWAY_BIND="${OPENCLAW_GATEWAY_BIND:-lan}" +# Keep Podman default local-only unless explicitly overridden. +# Non-loopback binds require gateway.controlUi.allowedOrigins (security hardening). +GATEWAY_BIND="${OPENCLAW_GATEWAY_BIND:-loopback}" # Safe cwd for podman (openclaw is nologin; avoid inherited cwd from sudo) cd "$EFFECTIVE_HOME" 2>/dev/null || cd /tmp 2>/dev/null || true diff --git a/scripts/test-parallel.mjs b/scripts/test-parallel.mjs index 35afef83c3f..e866ef712ab 100644 --- a/scripts/test-parallel.mjs +++ b/scripts/test-parallel.mjs @@ -160,11 +160,31 @@ const runs = [ }, ]; const shardOverride = Number.parseInt(process.env.OPENCLAW_TEST_SHARDS ?? "", 10); -const shardCount = isWindowsCi - ? Number.isFinite(shardOverride) && shardOverride > 1 - ? shardOverride - : 2 - : 1; +const configuredShardCount = + Number.isFinite(shardOverride) && shardOverride > 1 ? shardOverride : null; +const shardCount = configuredShardCount ?? 
(isWindowsCi ? 2 : 1); +const shardIndexOverride = (() => { + const parsed = Number.parseInt(process.env.OPENCLAW_TEST_SHARD_INDEX ?? "", 10); + return Number.isFinite(parsed) && parsed > 0 ? parsed : null; +})(); + +if (shardIndexOverride !== null && shardCount <= 1) { + console.error( + `[test-parallel] OPENCLAW_TEST_SHARD_INDEX=${String( + shardIndexOverride, + )} requires OPENCLAW_TEST_SHARDS>1.`, + ); + process.exit(2); +} + +if (shardIndexOverride !== null && shardIndexOverride > shardCount) { + console.error( + `[test-parallel] OPENCLAW_TEST_SHARD_INDEX=${String( + shardIndexOverride, + )} exceeds OPENCLAW_TEST_SHARDS=${String(shardCount)}.`, + ); + process.exit(2); +} const windowsCiArgs = isWindowsCi ? ["--dangerouslyIgnoreUnhandledErrors"] : []; const silentArgs = process.env.OPENCLAW_TEST_SHOW_PASSED_LOGS === "1" ? [] : ["--silent=passed-only"]; @@ -391,6 +411,9 @@ const run = async (entry) => { if (shardCount <= 1) { return runOnce(entry); } + if (shardIndexOverride !== null) { + return runOnce(entry, ["--shard", `${shardIndexOverride}/${shardCount}`]); + } for (let shardIndex = 1; shardIndex <= shardCount; shardIndex += 1) { // eslint-disable-next-line no-await-in-loop const code = await runOnce(entry, ["--shard", `${shardIndex}/${shardCount}`]); diff --git a/skills/coding-agent/SKILL.md b/skills/coding-agent/SKILL.md index ef4e059499d..cca6ef83ad5 100644 --- a/skills/coding-agent/SKILL.md +++ b/skills/coding-agent/SKILL.md @@ -1,6 +1,6 @@ --- name: coding-agent -description: "Delegate coding tasks to Codex, Claude Code, or Pi agents via background process. Use when: (1) building/creating new features or apps, (2) reviewing PRs (spawn in temp dir), (3) refactoring large codebases, (4) iterative coding that needs file exploration. NOT for: simple one-liner fixes (just edit), reading code (use read tool), or any work in ~/clawd workspace (never spawn agents here). Requires a bash tool that supports pty:true." 
+description: 'Delegate coding tasks to Codex, Claude Code, or Pi agents via background process. Use when: (1) building/creating new features or apps, (2) reviewing PRs (spawn in temp dir), (3) refactoring large codebases, (4) iterative coding that needs file exploration. NOT for: simple one-liner fixes (just edit), reading code (use read tool), thread-bound ACP harness requests in chat (for example spawn/run Codex or Claude Code in a Discord thread; use sessions_spawn with runtime:"acp"), or any work in ~/clawd workspace (never spawn agents here). Requires a bash tool that supports pty:true.' metadata: { "openclaw": { "emoji": "🧩", "requires": { "anyBins": ["claude", "codex", "opencode", "pi"] } }, diff --git a/src/acp/control-plane/manager.core.ts b/src/acp/control-plane/manager.core.ts new file mode 100644 index 00000000000..99ec096bb7f --- /dev/null +++ b/src/acp/control-plane/manager.core.ts @@ -0,0 +1,1314 @@ +import type { OpenClawConfig } from "../../config/config.js"; +import { logVerbose } from "../../globals.js"; +import { normalizeAgentId } from "../../routing/session-key.js"; +import { isAcpSessionKey } from "../../sessions/session-key-utils.js"; +import { + AcpRuntimeError, + toAcpRuntimeError, + withAcpRuntimeErrorBoundary, +} from "../runtime/errors.js"; +import { + createIdentityFromEnsure, + identityEquals, + isSessionIdentityPending, + mergeSessionIdentity, + resolveRuntimeHandleIdentifiersFromIdentity, + resolveSessionIdentityFromMeta, +} from "../runtime/session-identity.js"; +import type { + AcpRuntime, + AcpRuntimeCapabilities, + AcpRuntimeHandle, + AcpRuntimeStatus, +} from "../runtime/types.js"; +import { reconcileManagerRuntimeSessionIdentifiers } from "./manager.identity-reconcile.js"; +import { + applyManagerRuntimeControls, + resolveManagerRuntimeCapabilities, +} from "./manager.runtime-controls.js"; +import { + type AcpCloseSessionInput, + type AcpCloseSessionResult, + type AcpInitializeSessionInput, + type 
AcpManagerObservabilitySnapshot, + type AcpRunTurnInput, + type AcpSessionManagerDeps, + type AcpSessionResolution, + type AcpSessionRuntimeOptions, + type AcpSessionStatus, + type AcpStartupIdentityReconcileResult, + type ActiveTurnState, + DEFAULT_DEPS, + type SessionAcpMeta, + type SessionEntry, + type TurnLatencyStats, +} from "./manager.types.js"; +import { + createUnsupportedControlError, + hasLegacyAcpIdentityProjection, + normalizeAcpErrorCode, + normalizeActorKey, + normalizeSessionKey, + resolveAcpAgentFromSessionKey, + resolveMissingMetaError, + resolveRuntimeIdleTtlMs, +} from "./manager.utils.js"; +import { CachedRuntimeState, RuntimeCache } from "./runtime-cache.js"; +import { + inferRuntimeOptionPatchFromConfigOption, + mergeRuntimeOptions, + normalizeRuntimeOptions, + normalizeText, + resolveRuntimeOptionsFromMeta, + runtimeOptionsEqual, + validateRuntimeConfigOptionInput, + validateRuntimeModeInput, + validateRuntimeOptionPatch, +} from "./runtime-options.js"; +import { SessionActorQueue } from "./session-actor-queue.js"; + +export class AcpSessionManager { + private readonly actorQueue = new SessionActorQueue(); + private readonly actorTailBySession = this.actorQueue.getTailMapForTesting(); + private readonly runtimeCache = new RuntimeCache(); + private readonly activeTurnBySession = new Map(); + private readonly turnLatencyStats: TurnLatencyStats = { + completed: 0, + failed: 0, + totalMs: 0, + maxMs: 0, + }; + private readonly errorCountsByCode = new Map(); + private evictedRuntimeCount = 0; + private lastEvictedAt: number | undefined; + + constructor(private readonly deps: AcpSessionManagerDeps = DEFAULT_DEPS) {} + + resolveSession(params: { cfg: OpenClawConfig; sessionKey: string }): AcpSessionResolution { + const sessionKey = normalizeSessionKey(params.sessionKey); + if (!sessionKey) { + return { + kind: "none", + sessionKey, + }; + } + const acp = this.deps.readSessionEntry({ + cfg: params.cfg, + sessionKey, + })?.acp; + if (acp) { + return 
{ + kind: "ready", + sessionKey, + meta: acp, + }; + } + if (isAcpSessionKey(sessionKey)) { + return { + kind: "stale", + sessionKey, + error: resolveMissingMetaError(sessionKey), + }; + } + return { + kind: "none", + sessionKey, + }; + } + + getObservabilitySnapshot(cfg: OpenClawConfig): AcpManagerObservabilitySnapshot { + const completedTurns = this.turnLatencyStats.completed + this.turnLatencyStats.failed; + const averageLatencyMs = + completedTurns > 0 ? Math.round(this.turnLatencyStats.totalMs / completedTurns) : 0; + return { + runtimeCache: { + activeSessions: this.runtimeCache.size(), + idleTtlMs: resolveRuntimeIdleTtlMs(cfg), + evictedTotal: this.evictedRuntimeCount, + ...(this.lastEvictedAt ? { lastEvictedAt: this.lastEvictedAt } : {}), + }, + turns: { + active: this.activeTurnBySession.size, + queueDepth: this.actorQueue.getTotalPendingCount(), + completed: this.turnLatencyStats.completed, + failed: this.turnLatencyStats.failed, + averageLatencyMs, + maxLatencyMs: this.turnLatencyStats.maxMs, + }, + errorsByCode: Object.fromEntries( + [...this.errorCountsByCode.entries()].toSorted(([a], [b]) => a.localeCompare(b)), + ), + }; + } + + async reconcilePendingSessionIdentities(params: { + cfg: OpenClawConfig; + }): Promise { + let checked = 0; + let resolved = 0; + let failed = 0; + + let acpSessions: Awaited>; + try { + acpSessions = await this.deps.listAcpSessions({ + cfg: params.cfg, + }); + } catch (error) { + logVerbose(`acp-manager: startup identity scan failed: ${String(error)}`); + return { checked, resolved, failed: failed + 1 }; + } + + for (const session of acpSessions) { + if (!session.acp || !session.sessionKey) { + continue; + } + const currentIdentity = resolveSessionIdentityFromMeta(session.acp); + if (!isSessionIdentityPending(currentIdentity)) { + continue; + } + + checked += 1; + try { + const becameResolved = await this.withSessionActor(session.sessionKey, async () => { + const resolution = this.resolveSession({ + cfg: params.cfg, + 
sessionKey: session.sessionKey, + }); + if (resolution.kind !== "ready") { + return false; + } + const { runtime, handle, meta } = await this.ensureRuntimeHandle({ + cfg: params.cfg, + sessionKey: session.sessionKey, + meta: resolution.meta, + }); + const reconciled = await this.reconcileRuntimeSessionIdentifiers({ + cfg: params.cfg, + sessionKey: session.sessionKey, + runtime, + handle, + meta, + failOnStatusError: false, + }); + return !isSessionIdentityPending(resolveSessionIdentityFromMeta(reconciled.meta)); + }); + if (becameResolved) { + resolved += 1; + } + } catch (error) { + failed += 1; + logVerbose( + `acp-manager: startup identity reconcile failed for ${session.sessionKey}: ${String(error)}`, + ); + } + } + + return { checked, resolved, failed }; + } + + async initializeSession(input: AcpInitializeSessionInput): Promise<{ + runtime: AcpRuntime; + handle: AcpRuntimeHandle; + meta: SessionAcpMeta; + }> { + const sessionKey = normalizeSessionKey(input.sessionKey); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + const agent = normalizeAgentId(input.agent); + await this.evictIdleRuntimeHandles({ cfg: input.cfg }); + return await this.withSessionActor(sessionKey, async () => { + const backend = this.deps.requireRuntimeBackend(input.backendId || input.cfg.acp?.backend); + const runtime = backend.runtime; + const initialRuntimeOptions = validateRuntimeOptionPatch({ cwd: input.cwd }); + const requestedCwd = initialRuntimeOptions.cwd; + this.enforceConcurrentSessionLimit({ + cfg: input.cfg, + sessionKey, + }); + const handle = await withAcpRuntimeErrorBoundary({ + run: async () => + await runtime.ensureSession({ + sessionKey, + agent, + mode: input.mode, + cwd: requestedCwd, + }), + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: "Could not initialize ACP session runtime.", + }); + const effectiveCwd = normalizeText(handle.cwd) ?? 
requestedCwd; + const effectiveRuntimeOptions = normalizeRuntimeOptions({ + ...initialRuntimeOptions, + ...(effectiveCwd ? { cwd: effectiveCwd } : {}), + }); + + const identityNow = Date.now(); + const initializedIdentity = + mergeSessionIdentity({ + current: undefined, + incoming: createIdentityFromEnsure({ + handle, + now: identityNow, + }), + now: identityNow, + }) ?? + ({ + state: "pending", + source: "ensure", + lastUpdatedAt: identityNow, + } as const); + const meta: SessionAcpMeta = { + backend: handle.backend || backend.id, + agent, + runtimeSessionName: handle.runtimeSessionName, + identity: initializedIdentity, + mode: input.mode, + ...(Object.keys(effectiveRuntimeOptions).length > 0 + ? { runtimeOptions: effectiveRuntimeOptions } + : {}), + cwd: effectiveCwd, + state: "idle", + lastActivityAt: Date.now(), + }; + try { + const persisted = await this.writeSessionMeta({ + cfg: input.cfg, + sessionKey, + mutate: () => meta, + failOnError: true, + }); + if (!persisted?.acp) { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Could not persist ACP metadata for ${sessionKey}.`, + ); + } + } catch (error) { + await runtime + .close({ + handle, + reason: "init-meta-failed", + }) + .catch((closeError) => { + logVerbose( + `acp-manager: cleanup close failed after metadata write error for ${sessionKey}: ${String(closeError)}`, + ); + }); + throw error; + } + this.setCachedRuntimeState(sessionKey, { + runtime, + handle, + backend: handle.backend || backend.id, + agent, + mode: input.mode, + cwd: effectiveCwd, + }); + return { + runtime, + handle, + meta, + }; + }); + } + + async getSessionStatus(params: { + cfg: OpenClawConfig; + sessionKey: string; + }): Promise { + const sessionKey = normalizeSessionKey(params.sessionKey); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + await this.evictIdleRuntimeHandles({ cfg: params.cfg }); + return await this.withSessionActor(sessionKey, async () => { 
+ const resolution = this.resolveSession({ + cfg: params.cfg, + sessionKey, + }); + if (resolution.kind === "none") { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${sessionKey}`, + ); + } + if (resolution.kind === "stale") { + throw resolution.error; + } + const { + runtime, + handle: ensuredHandle, + meta: ensuredMeta, + } = await this.ensureRuntimeHandle({ + cfg: params.cfg, + sessionKey, + meta: resolution.meta, + }); + let handle = ensuredHandle; + let meta = ensuredMeta; + const capabilities = await this.resolveRuntimeCapabilities({ runtime, handle }); + let runtimeStatus: AcpRuntimeStatus | undefined; + if (runtime.getStatus) { + runtimeStatus = await withAcpRuntimeErrorBoundary({ + run: async () => await runtime.getStatus!({ handle }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not read ACP runtime status.", + }); + } + ({ handle, meta, runtimeStatus } = await this.reconcileRuntimeSessionIdentifiers({ + cfg: params.cfg, + sessionKey, + runtime, + handle, + meta, + runtimeStatus, + failOnStatusError: true, + })); + const identity = resolveSessionIdentityFromMeta(meta); + return { + sessionKey, + backend: handle.backend || meta.backend, + agent: meta.agent, + ...(identity ? 
{ identity } : {}), + state: meta.state, + mode: meta.mode, + runtimeOptions: resolveRuntimeOptionsFromMeta(meta), + capabilities, + runtimeStatus, + lastActivityAt: meta.lastActivityAt, + lastError: meta.lastError, + }; + }); + } + + async setSessionRuntimeMode(params: { + cfg: OpenClawConfig; + sessionKey: string; + runtimeMode: string; + }): Promise { + const sessionKey = normalizeSessionKey(params.sessionKey); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + const runtimeMode = validateRuntimeModeInput(params.runtimeMode); + + await this.evictIdleRuntimeHandles({ cfg: params.cfg }); + return await this.withSessionActor(sessionKey, async () => { + const resolution = this.resolveSession({ + cfg: params.cfg, + sessionKey, + }); + if (resolution.kind === "none") { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${sessionKey}`, + ); + } + if (resolution.kind === "stale") { + throw resolution.error; + } + const { runtime, handle, meta } = await this.ensureRuntimeHandle({ + cfg: params.cfg, + sessionKey, + meta: resolution.meta, + }); + const capabilities = await this.resolveRuntimeCapabilities({ runtime, handle }); + if (!capabilities.controls.includes("session/set_mode") || !runtime.setMode) { + throw createUnsupportedControlError({ + backend: handle.backend || meta.backend, + control: "session/set_mode", + }); + } + + await withAcpRuntimeErrorBoundary({ + run: async () => + await runtime.setMode!({ + handle, + mode: runtimeMode, + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not update ACP runtime mode.", + }); + + const nextOptions = mergeRuntimeOptions({ + current: resolveRuntimeOptionsFromMeta(meta), + patch: { runtimeMode }, + }); + await this.persistRuntimeOptions({ + cfg: params.cfg, + sessionKey, + options: nextOptions, + }); + return nextOptions; + }); + } + + async setSessionConfigOption(params: { + cfg: OpenClawConfig; + 
sessionKey: string; + key: string; + value: string; + }): Promise { + const sessionKey = normalizeSessionKey(params.sessionKey); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + const normalizedOption = validateRuntimeConfigOptionInput(params.key, params.value); + const key = normalizedOption.key; + const value = normalizedOption.value; + + await this.evictIdleRuntimeHandles({ cfg: params.cfg }); + return await this.withSessionActor(sessionKey, async () => { + const resolution = this.resolveSession({ + cfg: params.cfg, + sessionKey, + }); + if (resolution.kind === "none") { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${sessionKey}`, + ); + } + if (resolution.kind === "stale") { + throw resolution.error; + } + const { runtime, handle, meta } = await this.ensureRuntimeHandle({ + cfg: params.cfg, + sessionKey, + meta: resolution.meta, + }); + const inferredPatch = inferRuntimeOptionPatchFromConfigOption(key, value); + const capabilities = await this.resolveRuntimeCapabilities({ runtime, handle }); + if ( + !capabilities.controls.includes("session/set_config_option") || + !runtime.setConfigOption + ) { + throw createUnsupportedControlError({ + backend: handle.backend || meta.backend, + control: "session/set_config_option", + }); + } + + const advertisedKeys = new Set( + (capabilities.configOptionKeys ?? 
[]) + .map((entry) => normalizeText(entry)) + .filter(Boolean) as string[], + ); + if (advertisedKeys.size > 0 && !advertisedKeys.has(key)) { + throw new AcpRuntimeError( + "ACP_BACKEND_UNSUPPORTED_CONTROL", + `ACP backend "${handle.backend || meta.backend}" does not accept config key "${key}".`, + ); + } + + await withAcpRuntimeErrorBoundary({ + run: async () => + await runtime.setConfigOption!({ + handle, + key, + value, + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not update ACP runtime config option.", + }); + + const nextOptions = mergeRuntimeOptions({ + current: resolveRuntimeOptionsFromMeta(meta), + patch: inferredPatch, + }); + await this.persistRuntimeOptions({ + cfg: params.cfg, + sessionKey, + options: nextOptions, + }); + return nextOptions; + }); + } + + async updateSessionRuntimeOptions(params: { + cfg: OpenClawConfig; + sessionKey: string; + patch: Partial; + }): Promise { + const sessionKey = normalizeSessionKey(params.sessionKey); + const validatedPatch = validateRuntimeOptionPatch(params.patch); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + + await this.evictIdleRuntimeHandles({ cfg: params.cfg }); + return await this.withSessionActor(sessionKey, async () => { + const resolution = this.resolveSession({ + cfg: params.cfg, + sessionKey, + }); + if (resolution.kind === "none") { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${sessionKey}`, + ); + } + if (resolution.kind === "stale") { + throw resolution.error; + } + const nextOptions = mergeRuntimeOptions({ + current: resolveRuntimeOptionsFromMeta(resolution.meta), + patch: validatedPatch, + }); + await this.persistRuntimeOptions({ + cfg: params.cfg, + sessionKey, + options: nextOptions, + }); + return nextOptions; + }); + } + + async resetSessionRuntimeOptions(params: { + cfg: OpenClawConfig; + sessionKey: string; + }): Promise { + const sessionKey = 
normalizeSessionKey(params.sessionKey); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + await this.evictIdleRuntimeHandles({ cfg: params.cfg }); + return await this.withSessionActor(sessionKey, async () => { + const resolution = this.resolveSession({ + cfg: params.cfg, + sessionKey, + }); + if (resolution.kind === "none") { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${sessionKey}`, + ); + } + if (resolution.kind === "stale") { + throw resolution.error; + } + const { runtime, handle } = await this.ensureRuntimeHandle({ + cfg: params.cfg, + sessionKey, + meta: resolution.meta, + }); + await withAcpRuntimeErrorBoundary({ + run: async () => + await runtime.close({ + handle, + reason: "reset-runtime-options", + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not reset ACP runtime options.", + }); + this.clearCachedRuntimeState(sessionKey); + await this.persistRuntimeOptions({ + cfg: params.cfg, + sessionKey, + options: {}, + }); + return {}; + }); + } + + async runTurn(input: AcpRunTurnInput): Promise { + const sessionKey = normalizeSessionKey(input.sessionKey); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + await this.evictIdleRuntimeHandles({ cfg: input.cfg }); + await this.withSessionActor(sessionKey, async () => { + const resolution = this.resolveSession({ + cfg: input.cfg, + sessionKey, + }); + if (resolution.kind === "none") { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${sessionKey}`, + ); + } + if (resolution.kind === "stale") { + throw resolution.error; + } + + const { + runtime, + handle: ensuredHandle, + meta: ensuredMeta, + } = await this.ensureRuntimeHandle({ + cfg: input.cfg, + sessionKey, + meta: resolution.meta, + }); + let handle = ensuredHandle; + const meta = ensuredMeta; + await this.applyRuntimeControls({ + 
sessionKey, + runtime, + handle, + meta, + }); + const turnStartedAt = Date.now(); + const actorKey = normalizeActorKey(sessionKey); + + await this.setSessionState({ + cfg: input.cfg, + sessionKey, + state: "running", + clearLastError: true, + }); + + const internalAbortController = new AbortController(); + const onCallerAbort = () => { + internalAbortController.abort(); + }; + if (input.signal?.aborted) { + internalAbortController.abort(); + } else if (input.signal) { + input.signal.addEventListener("abort", onCallerAbort, { once: true }); + } + + const activeTurn: ActiveTurnState = { + runtime, + handle, + abortController: internalAbortController, + }; + this.activeTurnBySession.set(actorKey, activeTurn); + + let streamError: AcpRuntimeError | null = null; + try { + const combinedSignal = + input.signal && typeof AbortSignal.any === "function" + ? AbortSignal.any([input.signal, internalAbortController.signal]) + : internalAbortController.signal; + for await (const event of runtime.runTurn({ + handle, + text: input.text, + mode: input.mode, + requestId: input.requestId, + signal: combinedSignal, + })) { + if (event.type === "error") { + streamError = new AcpRuntimeError( + normalizeAcpErrorCode(event.code), + event.message?.trim() || "ACP turn failed before completion.", + ); + } + if (input.onEvent) { + await input.onEvent(event); + } + } + if (streamError) { + throw streamError; + } + this.recordTurnCompletion({ + startedAt: turnStartedAt, + }); + await this.setSessionState({ + cfg: input.cfg, + sessionKey, + state: "idle", + clearLastError: true, + }); + } catch (error) { + const acpError = toAcpRuntimeError({ + error, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP turn failed before completion.", + }); + this.recordTurnCompletion({ + startedAt: turnStartedAt, + errorCode: acpError.code, + }); + await this.setSessionState({ + cfg: input.cfg, + sessionKey, + state: "error", + lastError: acpError.message, + }); + throw acpError; + } finally { + if 
(input.signal) { + input.signal.removeEventListener("abort", onCallerAbort); + } + if (this.activeTurnBySession.get(actorKey) === activeTurn) { + this.activeTurnBySession.delete(actorKey); + } + if (meta.mode !== "oneshot") { + ({ handle } = await this.reconcileRuntimeSessionIdentifiers({ + cfg: input.cfg, + sessionKey, + runtime, + handle, + meta, + failOnStatusError: false, + })); + } + if (meta.mode === "oneshot") { + try { + await runtime.close({ + handle, + reason: "oneshot-complete", + }); + } catch (error) { + logVerbose(`acp-manager: ACP oneshot close failed for ${sessionKey}: ${String(error)}`); + } finally { + this.clearCachedRuntimeState(sessionKey); + } + } + } + }); + } + + async cancelSession(params: { + cfg: OpenClawConfig; + sessionKey: string; + reason?: string; + }): Promise { + const sessionKey = normalizeSessionKey(params.sessionKey); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + await this.evictIdleRuntimeHandles({ cfg: params.cfg }); + const actorKey = normalizeActorKey(sessionKey); + const activeTurn = this.activeTurnBySession.get(actorKey); + if (activeTurn) { + activeTurn.abortController.abort(); + if (!activeTurn.cancelPromise) { + activeTurn.cancelPromise = activeTurn.runtime.cancel({ + handle: activeTurn.handle, + reason: params.reason, + }); + } + await withAcpRuntimeErrorBoundary({ + run: async () => await activeTurn.cancelPromise!, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP cancel failed before completion.", + }); + return; + } + + await this.withSessionActor(sessionKey, async () => { + const resolution = this.resolveSession({ + cfg: params.cfg, + sessionKey, + }); + if (resolution.kind === "none") { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${sessionKey}`, + ); + } + if (resolution.kind === "stale") { + throw resolution.error; + } + const { runtime, handle } = await this.ensureRuntimeHandle({ + cfg: 
params.cfg, + sessionKey, + meta: resolution.meta, + }); + try { + await withAcpRuntimeErrorBoundary({ + run: async () => + await runtime.cancel({ + handle, + reason: params.reason, + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP cancel failed before completion.", + }); + await this.setSessionState({ + cfg: params.cfg, + sessionKey, + state: "idle", + clearLastError: true, + }); + } catch (error) { + const acpError = toAcpRuntimeError({ + error, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP cancel failed before completion.", + }); + await this.setSessionState({ + cfg: params.cfg, + sessionKey, + state: "error", + lastError: acpError.message, + }); + throw acpError; + } + }); + } + + async closeSession(input: AcpCloseSessionInput): Promise { + const sessionKey = normalizeSessionKey(input.sessionKey); + if (!sessionKey) { + throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); + } + await this.evictIdleRuntimeHandles({ cfg: input.cfg }); + return await this.withSessionActor(sessionKey, async () => { + const resolution = this.resolveSession({ + cfg: input.cfg, + sessionKey, + }); + if (resolution.kind === "none") { + if (input.requireAcpSession ?? true) { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${sessionKey}`, + ); + } + return { + runtimeClosed: false, + metaCleared: false, + }; + } + if (resolution.kind === "stale") { + if (input.requireAcpSession ?? 
true) { + throw resolution.error; + } + return { + runtimeClosed: false, + metaCleared: false, + }; + } + + let runtimeClosed = false; + let runtimeNotice: string | undefined; + try { + const { runtime, handle } = await this.ensureRuntimeHandle({ + cfg: input.cfg, + sessionKey, + meta: resolution.meta, + }); + await withAcpRuntimeErrorBoundary({ + run: async () => + await runtime.close({ + handle, + reason: input.reason, + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP close failed before completion.", + }); + runtimeClosed = true; + this.clearCachedRuntimeState(sessionKey); + } catch (error) { + const acpError = toAcpRuntimeError({ + error, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP close failed before completion.", + }); + if ( + input.allowBackendUnavailable && + (acpError.code === "ACP_BACKEND_MISSING" || acpError.code === "ACP_BACKEND_UNAVAILABLE") + ) { + // Treat unavailable backends as terminal for this cached handle so it + // cannot continue counting against maxConcurrentSessions. + this.clearCachedRuntimeState(sessionKey); + runtimeNotice = acpError.message; + } else { + throw acpError; + } + } + + let metaCleared = false; + if (input.clearMeta) { + await this.writeSessionMeta({ + cfg: input.cfg, + sessionKey, + mutate: (_current, entry) => { + if (!entry) { + return null; + } + return null; + }, + failOnError: true, + }); + metaCleared = true; + } + + return { + runtimeClosed, + runtimeNotice, + metaCleared, + }; + }); + } + + private async ensureRuntimeHandle(params: { + cfg: OpenClawConfig; + sessionKey: string; + meta: SessionAcpMeta; + }): Promise<{ runtime: AcpRuntime; handle: AcpRuntimeHandle; meta: SessionAcpMeta }> { + const agent = + params.meta.agent?.trim() || resolveAcpAgentFromSessionKey(params.sessionKey, "main"); + const mode = params.meta.mode; + const runtimeOptions = resolveRuntimeOptionsFromMeta(params.meta); + const cwd = runtimeOptions.cwd ?? 
normalizeText(params.meta.cwd); + const configuredBackend = (params.meta.backend || params.cfg.acp?.backend || "").trim(); + const cached = this.getCachedRuntimeState(params.sessionKey); + if (cached) { + const backendMatches = !configuredBackend || cached.backend === configuredBackend; + const agentMatches = cached.agent === agent; + const modeMatches = cached.mode === mode; + const cwdMatches = (cached.cwd ?? "") === (cwd ?? ""); + if (backendMatches && agentMatches && modeMatches && cwdMatches) { + return { + runtime: cached.runtime, + handle: cached.handle, + meta: params.meta, + }; + } + this.clearCachedRuntimeState(params.sessionKey); + } + + this.enforceConcurrentSessionLimit({ + cfg: params.cfg, + sessionKey: params.sessionKey, + }); + + const backend = this.deps.requireRuntimeBackend(configuredBackend || undefined); + const runtime = backend.runtime; + const ensured = await withAcpRuntimeErrorBoundary({ + run: async () => + await runtime.ensureSession({ + sessionKey: params.sessionKey, + agent, + mode, + cwd, + }), + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: "Could not initialize ACP session runtime.", + }); + + const previousMeta = params.meta; + const previousIdentity = resolveSessionIdentityFromMeta(previousMeta); + const now = Date.now(); + const effectiveCwd = normalizeText(ensured.cwd) ?? cwd; + const nextRuntimeOptions = normalizeRuntimeOptions({ + ...runtimeOptions, + ...(effectiveCwd ? { cwd: effectiveCwd } : {}), + }); + const nextIdentity = + mergeSessionIdentity({ + current: previousIdentity, + incoming: createIdentityFromEnsure({ + handle: ensured, + now, + }), + now, + }) ?? previousIdentity; + const nextHandleIdentifiers = resolveRuntimeHandleIdentifiersFromIdentity(nextIdentity); + const nextHandle: AcpRuntimeHandle = { + ...ensured, + ...(nextHandleIdentifiers.backendSessionId + ? { backendSessionId: nextHandleIdentifiers.backendSessionId } + : {}), + ...(nextHandleIdentifiers.agentSessionId + ? 
{ agentSessionId: nextHandleIdentifiers.agentSessionId } + : {}), + }; + const nextMeta: SessionAcpMeta = { + backend: ensured.backend || backend.id, + agent, + runtimeSessionName: ensured.runtimeSessionName, + ...(nextIdentity ? { identity: nextIdentity } : {}), + mode: params.meta.mode, + ...(Object.keys(nextRuntimeOptions).length > 0 ? { runtimeOptions: nextRuntimeOptions } : {}), + ...(effectiveCwd ? { cwd: effectiveCwd } : {}), + state: previousMeta.state, + lastActivityAt: now, + ...(previousMeta.lastError ? { lastError: previousMeta.lastError } : {}), + }; + const shouldPersistMeta = + previousMeta.backend !== nextMeta.backend || + previousMeta.runtimeSessionName !== nextMeta.runtimeSessionName || + !identityEquals(previousIdentity, nextIdentity) || + previousMeta.agent !== nextMeta.agent || + previousMeta.cwd !== nextMeta.cwd || + !runtimeOptionsEqual(previousMeta.runtimeOptions, nextMeta.runtimeOptions) || + hasLegacyAcpIdentityProjection(previousMeta); + if (shouldPersistMeta) { + await this.writeSessionMeta({ + cfg: params.cfg, + sessionKey: params.sessionKey, + mutate: (_current, entry) => { + if (!entry) { + return null; + } + return nextMeta; + }, + }); + } + this.setCachedRuntimeState(params.sessionKey, { + runtime, + handle: nextHandle, + backend: ensured.backend || backend.id, + agent, + mode, + cwd: effectiveCwd, + appliedControlSignature: undefined, + }); + return { + runtime, + handle: nextHandle, + meta: nextMeta, + }; + } + + private async persistRuntimeOptions(params: { + cfg: OpenClawConfig; + sessionKey: string; + options: AcpSessionRuntimeOptions; + }): Promise { + const normalized = normalizeRuntimeOptions(params.options); + const hasOptions = Object.keys(normalized).length > 0; + await this.writeSessionMeta({ + cfg: params.cfg, + sessionKey: params.sessionKey, + mutate: (current, entry) => { + if (!entry) { + return null; + } + const base = current ?? 
entry.acp; + if (!base) { + return null; + } + return { + backend: base.backend, + agent: base.agent, + runtimeSessionName: base.runtimeSessionName, + ...(base.identity ? { identity: base.identity } : {}), + mode: base.mode, + runtimeOptions: hasOptions ? normalized : undefined, + cwd: normalized.cwd, + state: base.state, + lastActivityAt: Date.now(), + ...(base.lastError ? { lastError: base.lastError } : {}), + }; + }, + failOnError: true, + }); + + const cached = this.getCachedRuntimeState(params.sessionKey); + if (!cached) { + return; + } + if ((cached.cwd ?? "") !== (normalized.cwd ?? "")) { + this.clearCachedRuntimeState(params.sessionKey); + return; + } + // Persisting options does not guarantee this process pushed all controls to the runtime. + // Force the next turn to reconcile runtime controls from persisted metadata. + cached.appliedControlSignature = undefined; + } + + private enforceConcurrentSessionLimit(params: { cfg: OpenClawConfig; sessionKey: string }): void { + const configuredLimit = params.cfg.acp?.maxConcurrentSessions; + if (typeof configuredLimit !== "number" || !Number.isFinite(configuredLimit)) { + return; + } + const limit = Math.max(1, Math.floor(configuredLimit)); + const actorKey = normalizeActorKey(params.sessionKey); + if (this.runtimeCache.has(actorKey)) { + return; + } + const activeCount = this.runtimeCache.size(); + if (activeCount >= limit) { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `ACP max concurrent sessions reached (${activeCount}/${limit}).`, + ); + } + } + + private recordTurnCompletion(params: { startedAt: number; errorCode?: AcpRuntimeError["code"] }) { + const durationMs = Math.max(0, Date.now() - params.startedAt); + this.turnLatencyStats.totalMs += durationMs; + this.turnLatencyStats.maxMs = Math.max(this.turnLatencyStats.maxMs, durationMs); + if (params.errorCode) { + this.turnLatencyStats.failed += 1; + this.recordErrorCode(params.errorCode); + return; + } + this.turnLatencyStats.completed += 1; + 
} + + private recordErrorCode(code: string): void { + const normalized = normalizeAcpErrorCode(code); + this.errorCountsByCode.set(normalized, (this.errorCountsByCode.get(normalized) ?? 0) + 1); + } + + private async evictIdleRuntimeHandles(params: { cfg: OpenClawConfig }): Promise { + const idleTtlMs = resolveRuntimeIdleTtlMs(params.cfg); + if (idleTtlMs <= 0 || this.runtimeCache.size() === 0) { + return; + } + const now = Date.now(); + const candidates = this.runtimeCache.collectIdleCandidates({ + maxIdleMs: idleTtlMs, + now, + }); + if (candidates.length === 0) { + return; + } + + for (const candidate of candidates) { + await this.actorQueue.run(candidate.actorKey, async () => { + if (this.activeTurnBySession.has(candidate.actorKey)) { + return; + } + const lastTouchedAt = this.runtimeCache.getLastTouchedAt(candidate.actorKey); + if (lastTouchedAt == null || now - lastTouchedAt < idleTtlMs) { + return; + } + const cached = this.runtimeCache.peek(candidate.actorKey); + if (!cached) { + return; + } + this.runtimeCache.clear(candidate.actorKey); + this.evictedRuntimeCount += 1; + this.lastEvictedAt = Date.now(); + try { + await cached.runtime.close({ + handle: cached.handle, + reason: "idle-evicted", + }); + } catch (error) { + logVerbose( + `acp-manager: idle eviction close failed for ${candidate.state.handle.sessionKey}: ${String(error)}`, + ); + } + }); + } + } + + private async resolveRuntimeCapabilities(params: { + runtime: AcpRuntime; + handle: AcpRuntimeHandle; + }): Promise { + return await resolveManagerRuntimeCapabilities(params); + } + + private async applyRuntimeControls(params: { + sessionKey: string; + runtime: AcpRuntime; + handle: AcpRuntimeHandle; + meta: SessionAcpMeta; + }): Promise { + await applyManagerRuntimeControls({ + ...params, + getCachedRuntimeState: (sessionKey) => this.getCachedRuntimeState(sessionKey), + }); + } + + private async setSessionState(params: { + cfg: OpenClawConfig; + sessionKey: string; + state: SessionAcpMeta["state"]; + 
lastError?: string; + clearLastError?: boolean; + }): Promise { + await this.writeSessionMeta({ + cfg: params.cfg, + sessionKey: params.sessionKey, + mutate: (current, entry) => { + if (!entry) { + return null; + } + const base = current ?? entry.acp; + if (!base) { + return null; + } + const next: SessionAcpMeta = { + backend: base.backend, + agent: base.agent, + runtimeSessionName: base.runtimeSessionName, + ...(base.identity ? { identity: base.identity } : {}), + mode: base.mode, + ...(base.runtimeOptions ? { runtimeOptions: base.runtimeOptions } : {}), + ...(base.cwd ? { cwd: base.cwd } : {}), + state: params.state, + lastActivityAt: Date.now(), + ...(base.lastError ? { lastError: base.lastError } : {}), + }; + if (params.lastError?.trim()) { + next.lastError = params.lastError.trim(); + } else if (params.clearLastError) { + delete next.lastError; + } + return next; + }, + }); + } + + private async reconcileRuntimeSessionIdentifiers(params: { + cfg: OpenClawConfig; + sessionKey: string; + runtime: AcpRuntime; + handle: AcpRuntimeHandle; + meta: SessionAcpMeta; + runtimeStatus?: AcpRuntimeStatus; + failOnStatusError: boolean; + }): Promise<{ + handle: AcpRuntimeHandle; + meta: SessionAcpMeta; + runtimeStatus?: AcpRuntimeStatus; + }> { + return await reconcileManagerRuntimeSessionIdentifiers({ + ...params, + setCachedHandle: (sessionKey, handle) => { + const cached = this.getCachedRuntimeState(sessionKey); + if (cached) { + cached.handle = handle; + } + }, + writeSessionMeta: async (writeParams) => await this.writeSessionMeta(writeParams), + }); + } + + private async writeSessionMeta(params: { + cfg: OpenClawConfig; + sessionKey: string; + mutate: ( + current: SessionAcpMeta | undefined, + entry: SessionEntry | undefined, + ) => SessionAcpMeta | null | undefined; + failOnError?: boolean; + }): Promise { + try { + return await this.deps.upsertSessionMeta({ + cfg: params.cfg, + sessionKey: params.sessionKey, + mutate: params.mutate, + }); + } catch (error) { + if 
(params.failOnError) { + throw error; + } + logVerbose( + `acp-manager: failed persisting ACP metadata for ${params.sessionKey}: ${String(error)}`, + ); + return null; + } + } + + private async withSessionActor(sessionKey: string, op: () => Promise): Promise { + const actorKey = normalizeActorKey(sessionKey); + return await this.actorQueue.run(actorKey, op); + } + + private getCachedRuntimeState(sessionKey: string): CachedRuntimeState | null { + return this.runtimeCache.get(normalizeActorKey(sessionKey)); + } + + private setCachedRuntimeState(sessionKey: string, state: CachedRuntimeState): void { + this.runtimeCache.set(normalizeActorKey(sessionKey), state); + } + + private clearCachedRuntimeState(sessionKey: string): void { + this.runtimeCache.clear(normalizeActorKey(sessionKey)); + } +} diff --git a/src/acp/control-plane/manager.identity-reconcile.ts b/src/acp/control-plane/manager.identity-reconcile.ts new file mode 100644 index 00000000000..d78a22ea04f --- /dev/null +++ b/src/acp/control-plane/manager.identity-reconcile.ts @@ -0,0 +1,159 @@ +import type { OpenClawConfig } from "../../config/config.js"; +import { logVerbose } from "../../globals.js"; +import { withAcpRuntimeErrorBoundary } from "../runtime/errors.js"; +import { + createIdentityFromStatus, + identityEquals, + mergeSessionIdentity, + resolveRuntimeHandleIdentifiersFromIdentity, + resolveSessionIdentityFromMeta, +} from "../runtime/session-identity.js"; +import type { AcpRuntime, AcpRuntimeHandle, AcpRuntimeStatus } from "../runtime/types.js"; +import type { SessionAcpMeta, SessionEntry } from "./manager.types.js"; +import { hasLegacyAcpIdentityProjection } from "./manager.utils.js"; + +export async function reconcileManagerRuntimeSessionIdentifiers(params: { + cfg: OpenClawConfig; + sessionKey: string; + runtime: AcpRuntime; + handle: AcpRuntimeHandle; + meta: SessionAcpMeta; + runtimeStatus?: AcpRuntimeStatus; + failOnStatusError: boolean; + setCachedHandle: (sessionKey: string, handle: 
AcpRuntimeHandle) => void; + writeSessionMeta: (params: { + cfg: OpenClawConfig; + sessionKey: string; + mutate: ( + current: SessionAcpMeta | undefined, + entry: SessionEntry | undefined, + ) => SessionAcpMeta | null | undefined; + failOnError?: boolean; + }) => Promise; +}): Promise<{ + handle: AcpRuntimeHandle; + meta: SessionAcpMeta; + runtimeStatus?: AcpRuntimeStatus; +}> { + let runtimeStatus = params.runtimeStatus; + if (!runtimeStatus && params.runtime.getStatus) { + try { + runtimeStatus = await withAcpRuntimeErrorBoundary({ + run: async () => + await params.runtime.getStatus!({ + handle: params.handle, + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not read ACP runtime status.", + }); + } catch (error) { + if (params.failOnStatusError) { + throw error; + } + logVerbose( + `acp-manager: failed to refresh ACP runtime status for ${params.sessionKey}: ${String(error)}`, + ); + return { + handle: params.handle, + meta: params.meta, + runtimeStatus, + }; + } + } + + const now = Date.now(); + const currentIdentity = resolveSessionIdentityFromMeta(params.meta); + const nextIdentity = + mergeSessionIdentity({ + current: currentIdentity, + incoming: createIdentityFromStatus({ + status: runtimeStatus, + now, + }), + now, + }) ?? currentIdentity; + const handleIdentifiers = resolveRuntimeHandleIdentifiersFromIdentity(nextIdentity); + const handleChanged = + handleIdentifiers.backendSessionId !== params.handle.backendSessionId || + handleIdentifiers.agentSessionId !== params.handle.agentSessionId; + const nextHandle: AcpRuntimeHandle = handleChanged + ? { + ...params.handle, + ...(handleIdentifiers.backendSessionId + ? { backendSessionId: handleIdentifiers.backendSessionId } + : {}), + ...(handleIdentifiers.agentSessionId + ? 
{ agentSessionId: handleIdentifiers.agentSessionId } + : {}), + } + : params.handle; + if (handleChanged) { + params.setCachedHandle(params.sessionKey, nextHandle); + } + + const metaChanged = + !identityEquals(currentIdentity, nextIdentity) || hasLegacyAcpIdentityProjection(params.meta); + if (!metaChanged) { + return { + handle: nextHandle, + meta: params.meta, + runtimeStatus, + }; + } + const nextMeta: SessionAcpMeta = { + backend: params.meta.backend, + agent: params.meta.agent, + runtimeSessionName: params.meta.runtimeSessionName, + ...(nextIdentity ? { identity: nextIdentity } : {}), + mode: params.meta.mode, + ...(params.meta.runtimeOptions ? { runtimeOptions: params.meta.runtimeOptions } : {}), + ...(params.meta.cwd ? { cwd: params.meta.cwd } : {}), + lastActivityAt: now, + state: params.meta.state, + ...(params.meta.lastError ? { lastError: params.meta.lastError } : {}), + }; + if (!identityEquals(currentIdentity, nextIdentity)) { + const currentAgentSessionId = currentIdentity?.agentSessionId ?? ""; + const nextAgentSessionId = nextIdentity?.agentSessionId ?? ""; + const currentAcpxSessionId = currentIdentity?.acpxSessionId ?? ""; + const nextAcpxSessionId = nextIdentity?.acpxSessionId ?? ""; + const currentAcpxRecordId = currentIdentity?.acpxRecordId ?? ""; + const nextAcpxRecordId = nextIdentity?.acpxRecordId ?? ""; + logVerbose( + `acp-manager: session identity updated for ${params.sessionKey} ` + + `(agentSessionId ${currentAgentSessionId} -> ${nextAgentSessionId}, ` + + `acpxSessionId ${currentAcpxSessionId} -> ${nextAcpxSessionId}, ` + + `acpxRecordId ${currentAcpxRecordId} -> ${nextAcpxRecordId})`, + ); + } + await params.writeSessionMeta({ + cfg: params.cfg, + sessionKey: params.sessionKey, + mutate: (current, entry) => { + if (!entry) { + return null; + } + const base = current ?? 
entry.acp; + if (!base) { + return null; + } + return { + backend: base.backend, + agent: base.agent, + runtimeSessionName: base.runtimeSessionName, + ...(nextIdentity ? { identity: nextIdentity } : {}), + mode: base.mode, + ...(base.runtimeOptions ? { runtimeOptions: base.runtimeOptions } : {}), + ...(base.cwd ? { cwd: base.cwd } : {}), + state: base.state, + lastActivityAt: now, + ...(base.lastError ? { lastError: base.lastError } : {}), + }; + }, + }); + return { + handle: nextHandle, + meta: nextMeta, + runtimeStatus, + }; +} diff --git a/src/acp/control-plane/manager.runtime-controls.ts b/src/acp/control-plane/manager.runtime-controls.ts new file mode 100644 index 00000000000..6c2b9e0a267 --- /dev/null +++ b/src/acp/control-plane/manager.runtime-controls.ts @@ -0,0 +1,118 @@ +import { AcpRuntimeError, withAcpRuntimeErrorBoundary } from "../runtime/errors.js"; +import type { AcpRuntime, AcpRuntimeCapabilities, AcpRuntimeHandle } from "../runtime/types.js"; +import type { SessionAcpMeta } from "./manager.types.js"; +import { createUnsupportedControlError } from "./manager.utils.js"; +import type { CachedRuntimeState } from "./runtime-cache.js"; +import { + buildRuntimeConfigOptionPairs, + buildRuntimeControlSignature, + normalizeText, + resolveRuntimeOptionsFromMeta, +} from "./runtime-options.js"; + +export async function resolveManagerRuntimeCapabilities(params: { + runtime: AcpRuntime; + handle: AcpRuntimeHandle; +}): Promise { + let reported: AcpRuntimeCapabilities | undefined; + if (params.runtime.getCapabilities) { + reported = await withAcpRuntimeErrorBoundary({ + run: async () => await params.runtime.getCapabilities!({ handle: params.handle }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not read ACP runtime capabilities.", + }); + } + const controls = new Set(reported?.controls ?? 
[]); + if (params.runtime.setMode) { + controls.add("session/set_mode"); + } + if (params.runtime.setConfigOption) { + controls.add("session/set_config_option"); + } + if (params.runtime.getStatus) { + controls.add("session/status"); + } + const normalizedKeys = (reported?.configOptionKeys ?? []) + .map((entry) => normalizeText(entry)) + .filter(Boolean) as string[]; + return { + controls: [...controls].toSorted(), + ...(normalizedKeys.length > 0 ? { configOptionKeys: normalizedKeys } : {}), + }; +} + +export async function applyManagerRuntimeControls(params: { + sessionKey: string; + runtime: AcpRuntime; + handle: AcpRuntimeHandle; + meta: SessionAcpMeta; + getCachedRuntimeState: (sessionKey: string) => CachedRuntimeState | null; +}): Promise { + const options = resolveRuntimeOptionsFromMeta(params.meta); + const signature = buildRuntimeControlSignature(options); + const cached = params.getCachedRuntimeState(params.sessionKey); + if (cached?.appliedControlSignature === signature) { + return; + } + + const capabilities = await resolveManagerRuntimeCapabilities({ + runtime: params.runtime, + handle: params.handle, + }); + const backend = params.handle.backend || params.meta.backend; + const runtimeMode = normalizeText(options.runtimeMode); + const configOptions = buildRuntimeConfigOptionPairs(options); + const advertisedKeys = new Set( + (capabilities.configOptionKeys ?? 
[]) + .map((entry) => normalizeText(entry)) + .filter(Boolean) as string[], + ); + + await withAcpRuntimeErrorBoundary({ + run: async () => { + if (runtimeMode) { + if (!capabilities.controls.includes("session/set_mode") || !params.runtime.setMode) { + throw createUnsupportedControlError({ + backend, + control: "session/set_mode", + }); + } + await params.runtime.setMode({ + handle: params.handle, + mode: runtimeMode, + }); + } + + if (configOptions.length > 0) { + if ( + !capabilities.controls.includes("session/set_config_option") || + !params.runtime.setConfigOption + ) { + throw createUnsupportedControlError({ + backend, + control: "session/set_config_option", + }); + } + for (const [key, value] of configOptions) { + if (advertisedKeys.size > 0 && !advertisedKeys.has(key)) { + throw new AcpRuntimeError( + "ACP_BACKEND_UNSUPPORTED_CONTROL", + `ACP backend "${backend}" does not accept config key "${key}".`, + ); + } + await params.runtime.setConfigOption({ + handle: params.handle, + key, + value, + }); + } + } + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not apply ACP runtime options before turn execution.", + }); + + if (cached) { + cached.appliedControlSignature = signature; + } +} diff --git a/src/acp/control-plane/manager.test.ts b/src/acp/control-plane/manager.test.ts new file mode 100644 index 00000000000..ebdf356ca9f --- /dev/null +++ b/src/acp/control-plane/manager.test.ts @@ -0,0 +1,1250 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; +import type { AcpSessionRuntimeOptions, SessionAcpMeta } from "../../config/sessions/types.js"; +import { AcpRuntimeError } from "../runtime/errors.js"; +import type { AcpRuntime, AcpRuntimeCapabilities } from "../runtime/types.js"; + +const hoisted = vi.hoisted(() => { + const listAcpSessionEntriesMock = vi.fn(); + const readAcpSessionEntryMock = vi.fn(); + const upsertAcpSessionMetaMock = vi.fn(); + const 
requireAcpRuntimeBackendMock = vi.fn(); + return { + listAcpSessionEntriesMock, + readAcpSessionEntryMock, + upsertAcpSessionMetaMock, + requireAcpRuntimeBackendMock, + }; +}); + +vi.mock("../runtime/session-meta.js", () => ({ + listAcpSessionEntries: (params: unknown) => hoisted.listAcpSessionEntriesMock(params), + readAcpSessionEntry: (params: unknown) => hoisted.readAcpSessionEntryMock(params), + upsertAcpSessionMeta: (params: unknown) => hoisted.upsertAcpSessionMetaMock(params), +})); + +vi.mock("../runtime/registry.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + requireAcpRuntimeBackend: (backendId?: string) => + hoisted.requireAcpRuntimeBackendMock(backendId), + }; +}); + +const { AcpSessionManager } = await import("./manager.js"); + +const baseCfg = { + acp: { + enabled: true, + backend: "acpx", + dispatch: { enabled: true }, + }, +} as const; + +function createRuntime(): { + runtime: AcpRuntime; + ensureSession: ReturnType; + runTurn: ReturnType; + cancel: ReturnType; + close: ReturnType; + getCapabilities: ReturnType; + getStatus: ReturnType; + setMode: ReturnType; + setConfigOption: ReturnType; +} { + const ensureSession = vi.fn( + async (input: { sessionKey: string; agent: string; mode: "persistent" | "oneshot" }) => ({ + sessionKey: input.sessionKey, + backend: "acpx", + runtimeSessionName: `${input.sessionKey}:${input.mode}:runtime`, + }), + ); + const runTurn = vi.fn(async function* () { + yield { type: "done" as const }; + }); + const cancel = vi.fn(async () => {}); + const close = vi.fn(async () => {}); + const getCapabilities = vi.fn( + async (): Promise => ({ + controls: ["session/set_mode", "session/set_config_option", "session/status"], + }), + ); + const getStatus = vi.fn(async () => ({ + summary: "status=alive", + details: { status: "alive" }, + })); + const setMode = vi.fn(async () => {}); + const setConfigOption = vi.fn(async () => {}); + return { + runtime: { + ensureSession, + runTurn, + 
getCapabilities, + getStatus, + setMode, + setConfigOption, + cancel, + close, + }, + ensureSession, + runTurn, + cancel, + close, + getCapabilities, + getStatus, + setMode, + setConfigOption, + }; +} + +function readySessionMeta() { + return { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent" as const, + state: "idle" as const, + lastActivityAt: Date.now(), + }; +} + +function extractStatesFromUpserts(): SessionAcpMeta["state"][] { + const states: SessionAcpMeta["state"][] = []; + for (const [firstArg] of hoisted.upsertAcpSessionMetaMock.mock.calls) { + const payload = firstArg as { + mutate: ( + current: SessionAcpMeta | undefined, + entry: { acp?: SessionAcpMeta } | undefined, + ) => SessionAcpMeta | null | undefined; + }; + const current = readySessionMeta(); + const next = payload.mutate(current, { acp: current }); + if (next?.state) { + states.push(next.state); + } + } + return states; +} + +function extractRuntimeOptionsFromUpserts(): Array { + const options: Array = []; + for (const [firstArg] of hoisted.upsertAcpSessionMetaMock.mock.calls) { + const payload = firstArg as { + mutate: ( + current: SessionAcpMeta | undefined, + entry: { acp?: SessionAcpMeta } | undefined, + ) => SessionAcpMeta | null | undefined; + }; + const current = readySessionMeta(); + const next = payload.mutate(current, { acp: current }); + if (next) { + options.push(next.runtimeOptions); + } + } + return options; +} + +describe("AcpSessionManager", () => { + beforeEach(() => { + hoisted.listAcpSessionEntriesMock.mockReset().mockResolvedValue([]); + hoisted.readAcpSessionEntryMock.mockReset(); + hoisted.upsertAcpSessionMetaMock.mockReset().mockResolvedValue(null); + hoisted.requireAcpRuntimeBackendMock.mockReset(); + }); + + it("marks ACP-shaped sessions without metadata as stale", () => { + hoisted.readAcpSessionEntryMock.mockReturnValue(null); + const manager = new AcpSessionManager(); + + const resolved = manager.resolveSession({ + cfg: 
baseCfg, + sessionKey: "agent:codex:acp:session-1", + }); + + expect(resolved.kind).toBe("stale"); + if (resolved.kind !== "stale") { + return; + } + expect(resolved.error.code).toBe("ACP_SESSION_INIT_FAILED"); + expect(resolved.error.message).toContain("ACP metadata is missing"); + }); + + it("serializes concurrent turns for the same ACP session", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + + let inFlight = 0; + let maxInFlight = 0; + runtimeState.runTurn.mockImplementation(async function* (_input: { requestId: string }) { + inFlight += 1; + maxInFlight = Math.max(maxInFlight, inFlight); + try { + await new Promise((resolve) => setTimeout(resolve, 10)); + yield { type: "done" }; + } finally { + inFlight -= 1; + } + }); + + const manager = new AcpSessionManager(); + const first = manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "first", + mode: "prompt", + requestId: "r1", + }); + const second = manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "second", + mode: "prompt", + requestId: "r2", + }); + await Promise.all([first, second]); + + expect(maxInFlight).toBe(1); + expect(runtimeState.runTurn).toHaveBeenCalledTimes(2); + }); + + it("runs turns for different ACP sessions in parallel", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey ?? 
""; + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + runtimeSessionName: `runtime:${sessionKey}`, + }, + }; + }); + + let inFlight = 0; + let maxInFlight = 0; + runtimeState.runTurn.mockImplementation(async function* () { + inFlight += 1; + maxInFlight = Math.max(maxInFlight, inFlight); + try { + await new Promise((resolve) => setTimeout(resolve, 15)); + yield { type: "done" as const }; + } finally { + inFlight -= 1; + } + }); + + const manager = new AcpSessionManager(); + await Promise.all([ + manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-a", + text: "first", + mode: "prompt", + requestId: "r1", + }), + manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-b", + text: "second", + mode: "prompt", + requestId: "r2", + }), + ]); + + expect(maxInFlight).toBe(2); + }); + + it("reuses runtime session handles for repeat turns in the same manager process", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + + const manager = new AcpSessionManager(); + await manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "first", + mode: "prompt", + requestId: "r1", + }); + await manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "second", + mode: "prompt", + requestId: "r2", + }); + + expect(runtimeState.ensureSession).toHaveBeenCalledTimes(1); + expect(runtimeState.runTurn).toHaveBeenCalledTimes(2); + }); + + it("rehydrates runtime handles after a manager restart", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + 
hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + + const managerA = new AcpSessionManager(); + await managerA.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "before restart", + mode: "prompt", + requestId: "r1", + }); + const managerB = new AcpSessionManager(); + await managerB.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "after restart", + mode: "prompt", + requestId: "r2", + }); + + expect(runtimeState.ensureSession).toHaveBeenCalledTimes(2); + }); + + it("enforces acp.maxConcurrentSessions when opening new runtime handles", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey ?? 
""; + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + runtimeSessionName: `runtime:${sessionKey}`, + }, + }; + }); + const limitedCfg = { + acp: { + ...baseCfg.acp, + maxConcurrentSessions: 1, + }, + } as OpenClawConfig; + + const manager = new AcpSessionManager(); + await manager.runTurn({ + cfg: limitedCfg, + sessionKey: "agent:codex:acp:session-a", + text: "first", + mode: "prompt", + requestId: "r1", + }); + + await expect( + manager.runTurn({ + cfg: limitedCfg, + sessionKey: "agent:codex:acp:session-b", + text: "second", + mode: "prompt", + requestId: "r2", + }), + ).rejects.toMatchObject({ + code: "ACP_SESSION_INIT_FAILED", + message: expect.stringContaining("max concurrent sessions"), + }); + expect(runtimeState.ensureSession).toHaveBeenCalledTimes(1); + }); + + it("enforces acp.maxConcurrentSessions during initializeSession", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.upsertAcpSessionMetaMock.mockResolvedValue({ + sessionKey: "agent:codex:acp:session-a", + storeSessionKey: "agent:codex:acp:session-a", + acp: readySessionMeta(), + }); + const limitedCfg = { + acp: { + ...baseCfg.acp, + maxConcurrentSessions: 1, + }, + } as OpenClawConfig; + + const manager = new AcpSessionManager(); + await manager.initializeSession({ + cfg: limitedCfg, + sessionKey: "agent:codex:acp:session-a", + agent: "codex", + mode: "persistent", + }); + + await expect( + manager.initializeSession({ + cfg: limitedCfg, + sessionKey: "agent:codex:acp:session-b", + agent: "codex", + mode: "persistent", + }), + ).rejects.toMatchObject({ + code: "ACP_SESSION_INIT_FAILED", + message: expect.stringContaining("max concurrent sessions"), + }); + expect(runtimeState.ensureSession).toHaveBeenCalledTimes(1); + }); + + it("drops cached runtime handles when close tolerates backend-unavailable errors", async () => { + const 
runtimeState = createRuntime(); + runtimeState.close.mockRejectedValueOnce( + new AcpRuntimeError("ACP_BACKEND_UNAVAILABLE", "runtime temporarily unavailable"), + ); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey ?? ""; + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + runtimeSessionName: `runtime:${sessionKey}`, + }, + }; + }); + const limitedCfg = { + acp: { + ...baseCfg.acp, + maxConcurrentSessions: 1, + }, + } as OpenClawConfig; + + const manager = new AcpSessionManager(); + await manager.runTurn({ + cfg: limitedCfg, + sessionKey: "agent:codex:acp:session-a", + text: "first", + mode: "prompt", + requestId: "r1", + }); + + const closeResult = await manager.closeSession({ + cfg: limitedCfg, + sessionKey: "agent:codex:acp:session-a", + reason: "manual-close", + allowBackendUnavailable: true, + }); + expect(closeResult.runtimeClosed).toBe(false); + expect(closeResult.runtimeNotice).toContain("temporarily unavailable"); + + await expect( + manager.runTurn({ + cfg: limitedCfg, + sessionKey: "agent:codex:acp:session-b", + text: "second", + mode: "prompt", + requestId: "r2", + }), + ).resolves.toBeUndefined(); + expect(runtimeState.ensureSession).toHaveBeenCalledTimes(2); + }); + + it("evicts idle cached runtimes before enforcing max concurrent limits", async () => { + vi.useFakeTimers(); + try { + vi.setSystemTime(new Date("2026-02-23T00:00:00.000Z")); + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey ?? 
""; + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + runtimeSessionName: `runtime:${sessionKey}`, + }, + }; + }); + const cfg = { + acp: { + ...baseCfg.acp, + maxConcurrentSessions: 1, + runtime: { + ttlMinutes: 0.01, + }, + }, + } as OpenClawConfig; + + const manager = new AcpSessionManager(); + await manager.runTurn({ + cfg, + sessionKey: "agent:codex:acp:session-a", + text: "first", + mode: "prompt", + requestId: "r1", + }); + + vi.advanceTimersByTime(2_000); + await manager.runTurn({ + cfg, + sessionKey: "agent:codex:acp:session-b", + text: "second", + mode: "prompt", + requestId: "r2", + }); + + expect(runtimeState.ensureSession).toHaveBeenCalledTimes(2); + expect(runtimeState.close).toHaveBeenCalledWith( + expect.objectContaining({ + reason: "idle-evicted", + handle: expect.objectContaining({ + sessionKey: "agent:codex:acp:session-a", + }), + }), + ); + } finally { + vi.useRealTimers(); + } + }); + + it("tracks ACP turn latency and error-code observability", async () => { + const runtimeState = createRuntime(); + runtimeState.runTurn.mockImplementation(async function* (input: { requestId: string }) { + if (input.requestId === "fail") { + throw new Error("runtime exploded"); + } + yield { type: "done" as const }; + }); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey ?? 
""; + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + runtimeSessionName: `runtime:${sessionKey}`, + }, + }; + }); + + const manager = new AcpSessionManager(); + await manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "ok", + mode: "prompt", + requestId: "ok", + }); + await expect( + manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "boom", + mode: "prompt", + requestId: "fail", + }), + ).rejects.toMatchObject({ + code: "ACP_TURN_FAILED", + }); + + const snapshot = manager.getObservabilitySnapshot(baseCfg); + expect(snapshot.turns.completed).toBe(1); + expect(snapshot.turns.failed).toBe(1); + expect(snapshot.turns.active).toBe(0); + expect(snapshot.turns.queueDepth).toBe(0); + expect(snapshot.errorsByCode.ACP_TURN_FAILED).toBe(1); + }); + + it("rolls back ensured runtime sessions when metadata persistence fails", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.upsertAcpSessionMetaMock.mockRejectedValueOnce(new Error("disk full")); + + const manager = new AcpSessionManager(); + await expect( + manager.initializeSession({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + agent: "codex", + mode: "persistent", + }), + ).rejects.toThrow("disk full"); + expect(runtimeState.close).toHaveBeenCalledWith( + expect.objectContaining({ + reason: "init-meta-failed", + handle: expect.objectContaining({ + sessionKey: "agent:codex:acp:session-1", + }), + }), + ); + }); + + it("preempts an active turn on cancel and returns to idle state", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: 
"agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + + let enteredRun = false; + runtimeState.runTurn.mockImplementation(async function* (input: { signal?: AbortSignal }) { + enteredRun = true; + await new Promise((resolve) => { + if (input.signal?.aborted) { + resolve(); + return; + } + input.signal?.addEventListener("abort", () => resolve(), { once: true }); + }); + yield { type: "done" as const, stopReason: "cancel" }; + }); + + const manager = new AcpSessionManager(); + const runPromise = manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "long task", + mode: "prompt", + requestId: "run-1", + }); + await vi.waitFor(() => { + expect(enteredRun).toBe(true); + }); + + await manager.cancelSession({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + reason: "manual-cancel", + }); + await runPromise; + + expect(runtimeState.cancel).toHaveBeenCalledTimes(1); + expect(runtimeState.cancel).toHaveBeenCalledWith( + expect.objectContaining({ + reason: "manual-cancel", + }), + ); + const states = extractStatesFromUpserts(); + expect(states).toContain("running"); + expect(states).toContain("idle"); + expect(states).not.toContain("error"); + }); + + it("cleans actor-tail bookkeeping after session turns complete", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey ?? 
""; + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + runtimeSessionName: `runtime:${sessionKey}`, + }, + }; + }); + runtimeState.runTurn.mockImplementation(async function* () { + yield { type: "done" as const }; + }); + + const manager = new AcpSessionManager(); + await manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-a", + text: "first", + mode: "prompt", + requestId: "r1", + }); + await manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-b", + text: "second", + mode: "prompt", + requestId: "r2", + }); + + const internals = manager as unknown as { + actorTailBySession: Map>; + }; + expect(internals.actorTailBySession.size).toBe(0); + }); + + it("surfaces backend failures raised after a done event", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + runtimeState.runTurn.mockImplementation(async function* () { + yield { type: "done" as const }; + throw new Error("acpx exited with code 1"); + }); + + const manager = new AcpSessionManager(); + await expect( + manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "do work", + mode: "prompt", + requestId: "run-1", + }), + ).rejects.toMatchObject({ + code: "ACP_TURN_FAILED", + message: "acpx exited with code 1", + }); + + const states = extractStatesFromUpserts(); + expect(states).toContain("running"); + expect(states).toContain("error"); + expect(states.at(-1)).toBe("error"); + }); + + it("persists runtime mode changes through setSessionRuntimeMode", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + 
hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + + const manager = new AcpSessionManager(); + const options = await manager.setSessionRuntimeMode({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + runtimeMode: "plan", + }); + + expect(runtimeState.setMode).toHaveBeenCalledWith( + expect.objectContaining({ + mode: "plan", + }), + ); + expect(options.runtimeMode).toBe("plan"); + expect(extractRuntimeOptionsFromUpserts().some((entry) => entry?.runtimeMode === "plan")).toBe( + true, + ); + }); + + it("reapplies persisted controls on next turn after runtime option updates", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + + let currentMeta: SessionAcpMeta = { + ...readySessionMeta(), + runtimeOptions: { + runtimeMode: "plan", + }, + }; + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = + (paramsUnknown as { sessionKey?: string }).sessionKey ?? 
"agent:codex:acp:session-1"; + return { + sessionKey, + storeSessionKey: sessionKey, + acp: currentMeta, + }; + }); + hoisted.upsertAcpSessionMetaMock.mockImplementation(async (paramsUnknown: unknown) => { + const params = paramsUnknown as { + mutate: ( + current: SessionAcpMeta | undefined, + entry: { acp?: SessionAcpMeta } | undefined, + ) => SessionAcpMeta | null | undefined; + }; + const next = params.mutate(currentMeta, { acp: currentMeta }); + if (next) { + currentMeta = next; + } + return { + sessionId: "session-1", + updatedAt: Date.now(), + acp: currentMeta, + }; + }); + + const manager = new AcpSessionManager(); + await manager.setSessionConfigOption({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + key: "model", + value: "openai-codex/gpt-5.3-codex", + }); + expect(runtimeState.setMode).not.toHaveBeenCalled(); + + await manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "do work", + mode: "prompt", + requestId: "run-1", + }); + + expect(runtimeState.setMode).toHaveBeenCalledWith( + expect.objectContaining({ + mode: "plan", + }), + ); + }); + + it("reconciles persisted ACP session identifiers from runtime status after a turn", async () => { + const runtimeState = createRuntime(); + runtimeState.ensureSession.mockResolvedValue({ + sessionKey: "agent:codex:acp:session-1", + backend: "acpx", + runtimeSessionName: "runtime-1", + backendSessionId: "acpx-stale", + agentSessionId: "agent-stale", + }); + runtimeState.getStatus.mockResolvedValue({ + summary: "status=alive", + backendSessionId: "acpx-fresh", + agentSessionId: "agent-fresh", + details: { status: "alive" }, + }); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + + let currentMeta: SessionAcpMeta = { + ...readySessionMeta(), + identity: { + state: "resolved", + source: "status", + acpxSessionId: "acpx-stale", + agentSessionId: "agent-stale", + lastUpdatedAt: Date.now(), + }, + }; + 
hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = + (paramsUnknown as { sessionKey?: string }).sessionKey ?? "agent:codex:acp:session-1"; + return { + sessionKey, + storeSessionKey: sessionKey, + acp: currentMeta, + }; + }); + hoisted.upsertAcpSessionMetaMock.mockImplementation(async (paramsUnknown: unknown) => { + const params = paramsUnknown as { + mutate: ( + current: SessionAcpMeta | undefined, + entry: { acp?: SessionAcpMeta } | undefined, + ) => SessionAcpMeta | null | undefined; + }; + const next = params.mutate(currentMeta, { acp: currentMeta }); + if (next) { + currentMeta = next; + } + return { + sessionId: "session-1", + updatedAt: Date.now(), + acp: currentMeta, + }; + }); + + const manager = new AcpSessionManager(); + await manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "do work", + mode: "prompt", + requestId: "run-1", + }); + + expect(runtimeState.getStatus).toHaveBeenCalledTimes(1); + expect(currentMeta.identity?.acpxSessionId).toBe("acpx-fresh"); + expect(currentMeta.identity?.agentSessionId).toBe("agent-fresh"); + }); + + it("reconciles pending ACP identities during startup scan", async () => { + const runtimeState = createRuntime(); + runtimeState.getStatus.mockResolvedValue({ + summary: "status=alive", + acpxRecordId: "acpx-record-1", + backendSessionId: "acpx-session-1", + agentSessionId: "agent-session-1", + details: { status: "alive" }, + }); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + + let currentMeta: SessionAcpMeta = { + ...readySessionMeta(), + identity: { + state: "pending", + source: "ensure", + acpxSessionId: "acpx-stale", + lastUpdatedAt: Date.now(), + }, + }; + const sessionKey = "agent:codex:acp:session-1"; + hoisted.listAcpSessionEntriesMock.mockResolvedValue([ + { + cfg: baseCfg, + storePath: "/tmp/sessions-acp.json", + sessionKey, + storeSessionKey: sessionKey, + entry: { + 
sessionId: "session-1", + updatedAt: Date.now(), + acp: currentMeta, + }, + acp: currentMeta, + }, + ]); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const key = (paramsUnknown as { sessionKey?: string }).sessionKey ?? sessionKey; + return { + sessionKey: key, + storeSessionKey: key, + acp: currentMeta, + }; + }); + hoisted.upsertAcpSessionMetaMock.mockImplementation(async (paramsUnknown: unknown) => { + const params = paramsUnknown as { + mutate: ( + current: SessionAcpMeta | undefined, + entry: { acp?: SessionAcpMeta } | undefined, + ) => SessionAcpMeta | null | undefined; + }; + const next = params.mutate(currentMeta, { acp: currentMeta }); + if (next) { + currentMeta = next; + } + return { + sessionId: "session-1", + updatedAt: Date.now(), + acp: currentMeta, + }; + }); + + const manager = new AcpSessionManager(); + const result = await manager.reconcilePendingSessionIdentities({ cfg: baseCfg }); + + expect(result).toEqual({ checked: 1, resolved: 1, failed: 0 }); + expect(currentMeta.identity?.state).toBe("resolved"); + expect(currentMeta.identity?.acpxRecordId).toBe("acpx-record-1"); + expect(currentMeta.identity?.acpxSessionId).toBe("acpx-session-1"); + expect(currentMeta.identity?.agentSessionId).toBe("agent-session-1"); + }); + + it("skips startup identity reconciliation for already resolved sessions", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + const sessionKey = "agent:codex:acp:session-1"; + const resolvedMeta: SessionAcpMeta = { + ...readySessionMeta(), + identity: { + state: "resolved", + source: "status", + acpxSessionId: "acpx-sid-1", + agentSessionId: "agent-sid-1", + lastUpdatedAt: Date.now(), + }, + }; + hoisted.listAcpSessionEntriesMock.mockResolvedValue([ + { + cfg: baseCfg, + storePath: "/tmp/sessions-acp.json", + sessionKey, + storeSessionKey: sessionKey, + entry: { + sessionId: 
"session-1", + updatedAt: Date.now(), + acp: resolvedMeta, + }, + acp: resolvedMeta, + }, + ]); + + const manager = new AcpSessionManager(); + const result = await manager.reconcilePendingSessionIdentities({ cfg: baseCfg }); + + expect(result).toEqual({ checked: 0, resolved: 0, failed: 0 }); + expect(runtimeState.getStatus).not.toHaveBeenCalled(); + expect(runtimeState.ensureSession).not.toHaveBeenCalled(); + }); + + it("preserves existing ACP session identifiers when ensure returns none", async () => { + const runtimeState = createRuntime(); + runtimeState.ensureSession.mockResolvedValue({ + sessionKey: "agent:codex:acp:session-1", + backend: "acpx", + runtimeSessionName: "runtime-2", + }); + runtimeState.getStatus.mockResolvedValue({ + summary: "status=alive", + details: { status: "alive" }, + }); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: { + ...readySessionMeta(), + identity: { + state: "resolved", + source: "status", + acpxSessionId: "acpx-stable", + agentSessionId: "agent-stable", + lastUpdatedAt: Date.now(), + }, + }, + }); + + const manager = new AcpSessionManager(); + const status = await manager.getSessionStatus({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + }); + + expect(status.identity?.acpxSessionId).toBe("acpx-stable"); + expect(status.identity?.agentSessionId).toBe("agent-stable"); + }); + + it("applies persisted runtime options before running turns", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: { + ...readySessionMeta(), + runtimeOptions: { + runtimeMode: 
"plan", + model: "openai-codex/gpt-5.3-codex", + permissionProfile: "strict", + timeoutSeconds: 120, + }, + }, + }); + + const manager = new AcpSessionManager(); + await manager.runTurn({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + text: "do work", + mode: "prompt", + requestId: "run-1", + }); + + expect(runtimeState.setMode).toHaveBeenCalledWith( + expect.objectContaining({ + mode: "plan", + }), + ); + expect(runtimeState.setConfigOption).toHaveBeenCalledWith( + expect.objectContaining({ + key: "model", + value: "openai-codex/gpt-5.3-codex", + }), + ); + expect(runtimeState.setConfigOption).toHaveBeenCalledWith( + expect.objectContaining({ + key: "approval_policy", + value: "strict", + }), + ); + expect(runtimeState.setConfigOption).toHaveBeenCalledWith( + expect.objectContaining({ + key: "timeout", + value: "120", + }), + ); + }); + + it("returns unsupported-control error when backend does not support set_config_option", async () => { + const runtimeState = createRuntime(); + const unsupportedRuntime: AcpRuntime = { + ensureSession: runtimeState.ensureSession as AcpRuntime["ensureSession"], + runTurn: runtimeState.runTurn as AcpRuntime["runTurn"], + getCapabilities: vi.fn(async () => ({ controls: [] })), + cancel: runtimeState.cancel as AcpRuntime["cancel"], + close: runtimeState.close as AcpRuntime["close"], + }; + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: unsupportedRuntime, + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + + const manager = new AcpSessionManager(); + await expect( + manager.setSessionConfigOption({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + key: "model", + value: "gpt-5.3-codex", + }), + ).rejects.toMatchObject({ + code: "ACP_BACKEND_UNSUPPORTED_CONTROL", + }); + }); + + it("rejects invalid runtime option values before backend controls 
run", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + + const manager = new AcpSessionManager(); + await expect( + manager.setSessionConfigOption({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + key: "timeout", + value: "not-a-number", + }), + ).rejects.toMatchObject({ + code: "ACP_INVALID_RUNTIME_OPTION", + }); + expect(runtimeState.setConfigOption).not.toHaveBeenCalled(); + + await expect( + manager.updateSessionRuntimeOptions({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + patch: { cwd: "relative/path" }, + }), + ).rejects.toMatchObject({ + code: "ACP_INVALID_RUNTIME_OPTION", + }); + }); + + it("can close and clear metadata when backend is unavailable", async () => { + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + hoisted.requireAcpRuntimeBackendMock.mockImplementation(() => { + throw new AcpRuntimeError( + "ACP_BACKEND_MISSING", + "ACP runtime backend is not configured. 
Install and enable the acpx runtime plugin.", + ); + }); + + const manager = new AcpSessionManager(); + const result = await manager.closeSession({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + reason: "manual-close", + allowBackendUnavailable: true, + clearMeta: true, + }); + + expect(result.runtimeClosed).toBe(false); + expect(result.runtimeNotice).toContain("not configured"); + expect(result.metaCleared).toBe(true); + expect(hoisted.upsertAcpSessionMetaMock).toHaveBeenCalled(); + }); + + it("surfaces metadata clear errors during closeSession", async () => { + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:session-1", + storeSessionKey: "agent:codex:acp:session-1", + acp: readySessionMeta(), + }); + hoisted.requireAcpRuntimeBackendMock.mockImplementation(() => { + throw new AcpRuntimeError( + "ACP_BACKEND_MISSING", + "ACP runtime backend is not configured. Install and enable the acpx runtime plugin.", + ); + }); + hoisted.upsertAcpSessionMetaMock.mockRejectedValueOnce(new Error("disk locked")); + + const manager = new AcpSessionManager(); + await expect( + manager.closeSession({ + cfg: baseCfg, + sessionKey: "agent:codex:acp:session-1", + reason: "manual-close", + allowBackendUnavailable: true, + clearMeta: true, + }), + ).rejects.toThrow("disk locked"); + }); +}); diff --git a/src/acp/control-plane/manager.ts b/src/acp/control-plane/manager.ts new file mode 100644 index 00000000000..e15bf1ec9b7 --- /dev/null +++ b/src/acp/control-plane/manager.ts @@ -0,0 +1,29 @@ +import { AcpSessionManager } from "./manager.core.js"; + +export { AcpSessionManager } from "./manager.core.js"; +export type { + AcpCloseSessionInput, + AcpCloseSessionResult, + AcpInitializeSessionInput, + AcpManagerObservabilitySnapshot, + AcpRunTurnInput, + AcpSessionResolution, + AcpSessionRuntimeOptions, + AcpSessionStatus, + AcpStartupIdentityReconcileResult, +} from "./manager.types.js"; + +let ACP_SESSION_MANAGER_SINGLETON: AcpSessionManager | 
null = null; + +export function getAcpSessionManager(): AcpSessionManager { + if (!ACP_SESSION_MANAGER_SINGLETON) { + ACP_SESSION_MANAGER_SINGLETON = new AcpSessionManager(); + } + return ACP_SESSION_MANAGER_SINGLETON; +} + +export const __testing = { + resetAcpSessionManagerForTests() { + ACP_SESSION_MANAGER_SINGLETON = null; + }, +}; diff --git a/src/acp/control-plane/manager.types.ts b/src/acp/control-plane/manager.types.ts new file mode 100644 index 00000000000..7337e8063f9 --- /dev/null +++ b/src/acp/control-plane/manager.types.ts @@ -0,0 +1,141 @@ +import type { OpenClawConfig } from "../../config/config.js"; +import type { + SessionAcpIdentity, + AcpSessionRuntimeOptions, + SessionAcpMeta, + SessionEntry, +} from "../../config/sessions/types.js"; +import type { AcpRuntimeError } from "../runtime/errors.js"; +import { requireAcpRuntimeBackend } from "../runtime/registry.js"; +import { + listAcpSessionEntries, + readAcpSessionEntry, + upsertAcpSessionMeta, +} from "../runtime/session-meta.js"; +import type { + AcpRuntime, + AcpRuntimeCapabilities, + AcpRuntimeEvent, + AcpRuntimeHandle, + AcpRuntimePromptMode, + AcpRuntimeSessionMode, + AcpRuntimeStatus, +} from "../runtime/types.js"; + +export type AcpSessionResolution = + | { + kind: "none"; + sessionKey: string; + } + | { + kind: "stale"; + sessionKey: string; + error: AcpRuntimeError; + } + | { + kind: "ready"; + sessionKey: string; + meta: SessionAcpMeta; + }; + +export type AcpInitializeSessionInput = { + cfg: OpenClawConfig; + sessionKey: string; + agent: string; + mode: AcpRuntimeSessionMode; + cwd?: string; + backendId?: string; +}; + +export type AcpRunTurnInput = { + cfg: OpenClawConfig; + sessionKey: string; + text: string; + mode: AcpRuntimePromptMode; + requestId: string; + signal?: AbortSignal; + onEvent?: (event: AcpRuntimeEvent) => Promise | void; +}; + +export type AcpCloseSessionInput = { + cfg: OpenClawConfig; + sessionKey: string; + reason: string; + clearMeta?: boolean; + 
allowBackendUnavailable?: boolean; + requireAcpSession?: boolean; +}; + +export type AcpCloseSessionResult = { + runtimeClosed: boolean; + runtimeNotice?: string; + metaCleared: boolean; +}; + +export type AcpSessionStatus = { + sessionKey: string; + backend: string; + agent: string; + identity?: SessionAcpIdentity; + state: SessionAcpMeta["state"]; + mode: AcpRuntimeSessionMode; + runtimeOptions: AcpSessionRuntimeOptions; + capabilities: AcpRuntimeCapabilities; + runtimeStatus?: AcpRuntimeStatus; + lastActivityAt: number; + lastError?: string; +}; + +export type AcpManagerObservabilitySnapshot = { + runtimeCache: { + activeSessions: number; + idleTtlMs: number; + evictedTotal: number; + lastEvictedAt?: number; + }; + turns: { + active: number; + queueDepth: number; + completed: number; + failed: number; + averageLatencyMs: number; + maxLatencyMs: number; + }; + errorsByCode: Record; +}; + +export type AcpStartupIdentityReconcileResult = { + checked: number; + resolved: number; + failed: number; +}; + +export type ActiveTurnState = { + runtime: AcpRuntime; + handle: AcpRuntimeHandle; + abortController: AbortController; + cancelPromise?: Promise; +}; + +export type TurnLatencyStats = { + completed: number; + failed: number; + totalMs: number; + maxMs: number; +}; + +export type AcpSessionManagerDeps = { + listAcpSessions: typeof listAcpSessionEntries; + readSessionEntry: typeof readAcpSessionEntry; + upsertSessionMeta: typeof upsertAcpSessionMeta; + requireRuntimeBackend: typeof requireAcpRuntimeBackend; +}; + +export const DEFAULT_DEPS: AcpSessionManagerDeps = { + listAcpSessions: listAcpSessionEntries, + readSessionEntry: readAcpSessionEntry, + upsertSessionMeta: upsertAcpSessionMeta, + requireRuntimeBackend: requireAcpRuntimeBackend, +}; + +export type { AcpSessionRuntimeOptions, SessionAcpMeta, SessionEntry }; diff --git a/src/acp/control-plane/manager.utils.ts b/src/acp/control-plane/manager.utils.ts new file mode 100644 index 00000000000..3b6b2dacc45 --- 
/dev/null +++ b/src/acp/control-plane/manager.utils.ts @@ -0,0 +1,64 @@ +import type { OpenClawConfig } from "../../config/config.js"; +import type { SessionAcpMeta } from "../../config/sessions/types.js"; +import { normalizeAgentId, parseAgentSessionKey } from "../../routing/session-key.js"; +import { ACP_ERROR_CODES, AcpRuntimeError } from "../runtime/errors.js"; + +export function resolveAcpAgentFromSessionKey(sessionKey: string, fallback = "main"): string { + const parsed = parseAgentSessionKey(sessionKey); + return normalizeAgentId(parsed?.agentId ?? fallback); +} + +export function resolveMissingMetaError(sessionKey: string): AcpRuntimeError { + return new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `ACP metadata is missing for ${sessionKey}. Recreate this ACP session with /acp spawn and rebind the thread.`, + ); +} + +export function normalizeSessionKey(sessionKey: string): string { + return sessionKey.trim(); +} + +export function normalizeActorKey(sessionKey: string): string { + return sessionKey.trim().toLowerCase(); +} + +export function normalizeAcpErrorCode(code: string | undefined): AcpRuntimeError["code"] { + if (!code) { + return "ACP_TURN_FAILED"; + } + const normalized = code.trim().toUpperCase(); + for (const allowed of ACP_ERROR_CODES) { + if (allowed === normalized) { + return allowed; + } + } + return "ACP_TURN_FAILED"; +} + +export function createUnsupportedControlError(params: { + backend: string; + control: string; +}): AcpRuntimeError { + return new AcpRuntimeError( + "ACP_BACKEND_UNSUPPORTED_CONTROL", + `ACP backend "${params.backend}" does not support ${params.control}.`, + ); +} + +export function resolveRuntimeIdleTtlMs(cfg: OpenClawConfig): number { + const ttlMinutes = cfg.acp?.runtime?.ttlMinutes; + if (typeof ttlMinutes !== "number" || !Number.isFinite(ttlMinutes) || ttlMinutes <= 0) { + return 0; + } + return Math.round(ttlMinutes * 60 * 1000); +} + +export function hasLegacyAcpIdentityProjection(meta: SessionAcpMeta): boolean 
{ + const raw = meta as Record<string, unknown>; + return ( + Object.hasOwn(raw, "backendSessionId") || + Object.hasOwn(raw, "agentSessionId") || + Object.hasOwn(raw, "sessionIdsProvisional") + ); +} diff --git a/src/acp/control-plane/runtime-cache.test.ts b/src/acp/control-plane/runtime-cache.test.ts new file mode 100644 index 00000000000..ea0aa2f7124 --- /dev/null +++ b/src/acp/control-plane/runtime-cache.test.ts @@ -0,0 +1,62 @@ +import { describe, expect, it, vi } from "vitest"; +import type { AcpRuntime } from "../runtime/types.js"; +import type { AcpRuntimeHandle } from "../runtime/types.js"; +import type { CachedRuntimeState } from "./runtime-cache.js"; +import { RuntimeCache } from "./runtime-cache.js"; + +function mockState(sessionKey: string): CachedRuntimeState { + const runtime = { + ensureSession: vi.fn(async () => ({ + sessionKey, + backend: "acpx", + runtimeSessionName: `runtime:${sessionKey}`, + })), + runTurn: vi.fn(async function* () { + yield { type: "done" as const }; + }), + cancel: vi.fn(async () => {}), + close: vi.fn(async () => {}), + } as unknown as AcpRuntime; + return { + runtime, + handle: { + sessionKey, + backend: "acpx", + runtimeSessionName: `runtime:${sessionKey}`, + } as AcpRuntimeHandle, + backend: "acpx", + agent: "codex", + mode: "persistent", + }; +} + +describe("RuntimeCache", () => { + it("tracks idle candidates with touch-aware lookups", () => { + vi.useFakeTimers(); + try { + const cache = new RuntimeCache(); + const actor = "agent:codex:acp:s1"; + cache.set(actor, mockState(actor), { now: 1_000 }); + + expect(cache.collectIdleCandidates({ maxIdleMs: 1_000, now: 1_999 })).toHaveLength(0); + expect(cache.collectIdleCandidates({ maxIdleMs: 1_000, now: 2_000 })).toHaveLength(1); + + cache.get(actor, { now: 2_500 }); + expect(cache.collectIdleCandidates({ maxIdleMs: 1_000, now: 3_200 })).toHaveLength(0); + expect(cache.collectIdleCandidates({ maxIdleMs: 1_000, now: 3_500 })).toHaveLength(1); + } finally { + vi.useRealTimers(); + } + }); + + 
it("returns snapshot entries with idle durations", () => { + const cache = new RuntimeCache(); + cache.set("a", mockState("a"), { now: 10 }); + cache.set("b", mockState("b"), { now: 100 }); + + const snapshot = cache.snapshot({ now: 1_100 }); + const byActor = new Map(snapshot.map((entry) => [entry.actorKey, entry])); + expect(byActor.get("a")?.idleMs).toBe(1_090); + expect(byActor.get("b")?.idleMs).toBe(1_000); + }); +}); diff --git a/src/acp/control-plane/runtime-cache.ts b/src/acp/control-plane/runtime-cache.ts new file mode 100644 index 00000000000..ca00cc1331b --- /dev/null +++ b/src/acp/control-plane/runtime-cache.ts @@ -0,0 +1,99 @@ +import type { AcpRuntime, AcpRuntimeHandle, AcpRuntimeSessionMode } from "../runtime/types.js"; + +export type CachedRuntimeState = { + runtime: AcpRuntime; + handle: AcpRuntimeHandle; + backend: string; + agent: string; + mode: AcpRuntimeSessionMode; + cwd?: string; + appliedControlSignature?: string; +}; + +type RuntimeCacheEntry = { + state: CachedRuntimeState; + lastTouchedAt: number; +}; + +export type CachedRuntimeSnapshot = { + actorKey: string; + state: CachedRuntimeState; + lastTouchedAt: number; + idleMs: number; +}; + +export class RuntimeCache { + private readonly cache = new Map(); + + size(): number { + return this.cache.size; + } + + has(actorKey: string): boolean { + return this.cache.has(actorKey); + } + + get( + actorKey: string, + params: { + touch?: boolean; + now?: number; + } = {}, + ): CachedRuntimeState | null { + const entry = this.cache.get(actorKey); + if (!entry) { + return null; + } + if (params.touch !== false) { + entry.lastTouchedAt = params.now ?? Date.now(); + } + return entry.state; + } + + peek(actorKey: string): CachedRuntimeState | null { + return this.get(actorKey, { touch: false }); + } + + getLastTouchedAt(actorKey: string): number | null { + return this.cache.get(actorKey)?.lastTouchedAt ?? 
null; + } + + set( + actorKey: string, + state: CachedRuntimeState, + params: { + now?: number; + } = {}, + ): void { + this.cache.set(actorKey, { + state, + lastTouchedAt: params.now ?? Date.now(), + }); + } + + clear(actorKey: string): void { + this.cache.delete(actorKey); + } + + snapshot(params: { now?: number } = {}): CachedRuntimeSnapshot[] { + const now = params.now ?? Date.now(); + const entries: CachedRuntimeSnapshot[] = []; + for (const [actorKey, entry] of this.cache.entries()) { + entries.push({ + actorKey, + state: entry.state, + lastTouchedAt: entry.lastTouchedAt, + idleMs: Math.max(0, now - entry.lastTouchedAt), + }); + } + return entries; + } + + collectIdleCandidates(params: { maxIdleMs: number; now?: number }): CachedRuntimeSnapshot[] { + if (!Number.isFinite(params.maxIdleMs) || params.maxIdleMs <= 0) { + return []; + } + const now = params.now ?? Date.now(); + return this.snapshot({ now }).filter((entry) => entry.idleMs >= params.maxIdleMs); + } +} diff --git a/src/acp/control-plane/runtime-options.ts b/src/acp/control-plane/runtime-options.ts new file mode 100644 index 00000000000..5f3b77bf1c8 --- /dev/null +++ b/src/acp/control-plane/runtime-options.ts @@ -0,0 +1,349 @@ +import { isAbsolute } from "node:path"; +import type { AcpSessionRuntimeOptions, SessionAcpMeta } from "../../config/sessions/types.js"; +import { AcpRuntimeError } from "../runtime/errors.js"; + +const MAX_RUNTIME_MODE_LENGTH = 64; +const MAX_MODEL_LENGTH = 200; +const MAX_PERMISSION_PROFILE_LENGTH = 80; +const MAX_CWD_LENGTH = 4096; +const MIN_TIMEOUT_SECONDS = 1; +const MAX_TIMEOUT_SECONDS = 24 * 60 * 60; +const MAX_BACKEND_OPTION_KEY_LENGTH = 64; +const MAX_BACKEND_OPTION_VALUE_LENGTH = 512; +const MAX_BACKEND_EXTRAS = 32; + +const SAFE_OPTION_KEY_RE = /^[a-z0-9][a-z0-9._:-]*$/i; + +function failInvalidOption(message: string): never { + throw new AcpRuntimeError("ACP_INVALID_RUNTIME_OPTION", message); +} + +function validateNoControlChars(value: string, field: string): 
string { + for (let i = 0; i < value.length; i += 1) { + const code = value.charCodeAt(i); + if (code < 32 || code === 127) { + failInvalidOption(`${field} must not include control characters.`); + } + } + return value; +} + +function validateBoundedText(params: { value: unknown; field: string; maxLength: number }): string { + const normalized = normalizeText(params.value); + if (!normalized) { + failInvalidOption(`${params.field} must not be empty.`); + } + if (normalized.length > params.maxLength) { + failInvalidOption(`${params.field} must be at most ${params.maxLength} characters.`); + } + return validateNoControlChars(normalized, params.field); +} + +function validateBackendOptionKey(rawKey: unknown): string { + const key = validateBoundedText({ + value: rawKey, + field: "ACP config key", + maxLength: MAX_BACKEND_OPTION_KEY_LENGTH, + }); + if (!SAFE_OPTION_KEY_RE.test(key)) { + failInvalidOption( + "ACP config key must use letters, numbers, dots, colons, underscores, or dashes.", + ); + } + return key; +} + +function validateBackendOptionValue(rawValue: unknown): string { + return validateBoundedText({ + value: rawValue, + field: "ACP config value", + maxLength: MAX_BACKEND_OPTION_VALUE_LENGTH, + }); +} + +export function validateRuntimeModeInput(rawMode: unknown): string { + return validateBoundedText({ + value: rawMode, + field: "Runtime mode", + maxLength: MAX_RUNTIME_MODE_LENGTH, + }); +} + +export function validateRuntimeModelInput(rawModel: unknown): string { + return validateBoundedText({ + value: rawModel, + field: "Model id", + maxLength: MAX_MODEL_LENGTH, + }); +} + +export function validateRuntimePermissionProfileInput(rawProfile: unknown): string { + return validateBoundedText({ + value: rawProfile, + field: "Permission profile", + maxLength: MAX_PERMISSION_PROFILE_LENGTH, + }); +} + +export function validateRuntimeCwdInput(rawCwd: unknown): string { + const cwd = validateBoundedText({ + value: rawCwd, + field: "Working directory", + maxLength: 
MAX_CWD_LENGTH, + }); + if (!isAbsolute(cwd)) { + failInvalidOption(`Working directory must be an absolute path. Received "${cwd}".`); + } + return cwd; +} + +export function validateRuntimeTimeoutSecondsInput(rawTimeout: unknown): number { + if (typeof rawTimeout !== "number" || !Number.isFinite(rawTimeout)) { + failInvalidOption("Timeout must be a positive integer in seconds."); + } + const timeout = Math.round(rawTimeout); + if (timeout < MIN_TIMEOUT_SECONDS || timeout > MAX_TIMEOUT_SECONDS) { + failInvalidOption( + `Timeout must be between ${MIN_TIMEOUT_SECONDS} and ${MAX_TIMEOUT_SECONDS} seconds.`, + ); + } + return timeout; +} + +export function parseRuntimeTimeoutSecondsInput(rawTimeout: unknown): number { + const normalized = normalizeText(rawTimeout); + if (!normalized || !/^\d+$/.test(normalized)) { + failInvalidOption("Timeout must be a positive integer in seconds."); + } + return validateRuntimeTimeoutSecondsInput(Number.parseInt(normalized, 10)); +} + +export function validateRuntimeConfigOptionInput( + rawKey: unknown, + rawValue: unknown, +): { + key: string; + value: string; +} { + return { + key: validateBackendOptionKey(rawKey), + value: validateBackendOptionValue(rawValue), + }; +} + +export function validateRuntimeOptionPatch( + patch: Partial<AcpSessionRuntimeOptions> | undefined, +): Partial<AcpSessionRuntimeOptions> { + if (!patch) { + return {}; + } + const rawPatch = patch as Record<string, unknown>; + const allowedKeys = new Set([ + "runtimeMode", + "model", + "cwd", + "permissionProfile", + "timeoutSeconds", + "backendExtras", + ]); + for (const key of Object.keys(rawPatch)) { + if (!allowedKeys.has(key)) { + failInvalidOption(`Unknown runtime option "${key}".`); + } + } + + const next: Partial<AcpSessionRuntimeOptions> = {}; + if (Object.hasOwn(rawPatch, "runtimeMode")) { + if (rawPatch.runtimeMode === undefined) { + next.runtimeMode = undefined; + } else { + next.runtimeMode = validateRuntimeModeInput(rawPatch.runtimeMode); + } + } + if (Object.hasOwn(rawPatch, "model")) { + if (rawPatch.model === undefined) { + next.model = 
undefined; + } else { + next.model = validateRuntimeModelInput(rawPatch.model); + } + } + if (Object.hasOwn(rawPatch, "cwd")) { + if (rawPatch.cwd === undefined) { + next.cwd = undefined; + } else { + next.cwd = validateRuntimeCwdInput(rawPatch.cwd); + } + } + if (Object.hasOwn(rawPatch, "permissionProfile")) { + if (rawPatch.permissionProfile === undefined) { + next.permissionProfile = undefined; + } else { + next.permissionProfile = validateRuntimePermissionProfileInput(rawPatch.permissionProfile); + } + } + if (Object.hasOwn(rawPatch, "timeoutSeconds")) { + if (rawPatch.timeoutSeconds === undefined) { + next.timeoutSeconds = undefined; + } else { + next.timeoutSeconds = validateRuntimeTimeoutSecondsInput(rawPatch.timeoutSeconds); + } + } + if (Object.hasOwn(rawPatch, "backendExtras")) { + const rawExtras = rawPatch.backendExtras; + if (rawExtras === undefined) { + next.backendExtras = undefined; + } else if (!rawExtras || typeof rawExtras !== "object" || Array.isArray(rawExtras)) { + failInvalidOption("Backend extras must be a key/value object."); + } else { + const entries = Object.entries(rawExtras); + if (entries.length > MAX_BACKEND_EXTRAS) { + failInvalidOption(`Backend extras must include at most ${MAX_BACKEND_EXTRAS} entries.`); + } + const extras: Record<string, string> = {}; + for (const [entryKey, entryValue] of entries) { + const { key, value } = validateRuntimeConfigOptionInput(entryKey, entryValue); + extras[key] = value; + } + next.backendExtras = Object.keys(extras).length > 0 ? 
extras : undefined; + } + } + + return next; +} + +export function normalizeText(value: unknown): string | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed || undefined; +} + +export function normalizeRuntimeOptions( + options: AcpSessionRuntimeOptions | undefined, +): AcpSessionRuntimeOptions { + const runtimeMode = normalizeText(options?.runtimeMode); + const model = normalizeText(options?.model); + const cwd = normalizeText(options?.cwd); + const permissionProfile = normalizeText(options?.permissionProfile); + let timeoutSeconds: number | undefined; + if (typeof options?.timeoutSeconds === "number" && Number.isFinite(options.timeoutSeconds)) { + const rounded = Math.round(options.timeoutSeconds); + if (rounded > 0) { + timeoutSeconds = rounded; + } + } + const backendExtrasEntries = Object.entries(options?.backendExtras ?? {}) + .map(([key, value]) => [normalizeText(key), normalizeText(value)] as const) + .filter(([key, value]) => Boolean(key && value)) as Array<[string, string]>; + const backendExtras = + backendExtrasEntries.length > 0 ? Object.fromEntries(backendExtrasEntries) : undefined; + return { + ...(runtimeMode ? { runtimeMode } : {}), + ...(model ? { model } : {}), + ...(cwd ? { cwd } : {}), + ...(permissionProfile ? { permissionProfile } : {}), + ...(typeof timeoutSeconds === "number" ? { timeoutSeconds } : {}), + ...(backendExtras ? { backendExtras } : {}), + }; +} + +export function mergeRuntimeOptions(params: { + current?: AcpSessionRuntimeOptions; + patch?: Partial; +}): AcpSessionRuntimeOptions { + const current = normalizeRuntimeOptions(params.current); + const patch = normalizeRuntimeOptions(validateRuntimeOptionPatch(params.patch)); + const mergedExtras = { + ...current.backendExtras, + ...patch.backendExtras, + }; + return normalizeRuntimeOptions({ + ...current, + ...patch, + ...(Object.keys(mergedExtras).length > 0 ? 
{ backendExtras: mergedExtras } : {}), + }); +} + +export function resolveRuntimeOptionsFromMeta(meta: SessionAcpMeta): AcpSessionRuntimeOptions { + const normalized = normalizeRuntimeOptions(meta.runtimeOptions); + if (normalized.cwd || !meta.cwd) { + return normalized; + } + return normalizeRuntimeOptions({ + ...normalized, + cwd: meta.cwd, + }); +} + +export function runtimeOptionsEqual( + a: AcpSessionRuntimeOptions | undefined, + b: AcpSessionRuntimeOptions | undefined, +): boolean { + return JSON.stringify(normalizeRuntimeOptions(a)) === JSON.stringify(normalizeRuntimeOptions(b)); +} + +export function buildRuntimeControlSignature(options: AcpSessionRuntimeOptions): string { + const normalized = normalizeRuntimeOptions(options); + const extras = Object.entries(normalized.backendExtras ?? {}).toSorted(([a], [b]) => + a.localeCompare(b), + ); + return JSON.stringify({ + runtimeMode: normalized.runtimeMode ?? null, + model: normalized.model ?? null, + permissionProfile: normalized.permissionProfile ?? null, + timeoutSeconds: normalized.timeoutSeconds ?? null, + backendExtras: extras, + }); +} + +export function buildRuntimeConfigOptionPairs( + options: AcpSessionRuntimeOptions, +): Array<[string, string]> { + const normalized = normalizeRuntimeOptions(options); + const pairs = new Map(); + if (normalized.model) { + pairs.set("model", normalized.model); + } + if (normalized.permissionProfile) { + pairs.set("approval_policy", normalized.permissionProfile); + } + if (typeof normalized.timeoutSeconds === "number") { + pairs.set("timeout", String(normalized.timeoutSeconds)); + } + for (const [key, value] of Object.entries(normalized.backendExtras ?? 
{})) { + if (!pairs.has(key)) { + pairs.set(key, value); + } + } + return [...pairs.entries()]; +} + +export function inferRuntimeOptionPatchFromConfigOption( + key: string, + value: string, +): Partial<AcpSessionRuntimeOptions> { + const validated = validateRuntimeConfigOptionInput(key, value); + const normalizedKey = validated.key.toLowerCase(); + if (normalizedKey === "model") { + return { model: validateRuntimeModelInput(validated.value) }; + } + if ( + normalizedKey === "approval_policy" || + normalizedKey === "permission_profile" || + normalizedKey === "permissions" + ) { + return { permissionProfile: validateRuntimePermissionProfileInput(validated.value) }; + } + if (normalizedKey === "timeout" || normalizedKey === "timeout_seconds") { + return { timeoutSeconds: parseRuntimeTimeoutSecondsInput(validated.value) }; + } + if (normalizedKey === "cwd") { + return { cwd: validateRuntimeCwdInput(validated.value) }; + } + return { + backendExtras: { + [validated.key]: validated.value, + }, + }; +} diff --git a/src/acp/control-plane/session-actor-queue.ts b/src/acp/control-plane/session-actor-queue.ts new file mode 100644 index 00000000000..67dd6119a3b --- /dev/null +++ b/src/acp/control-plane/session-actor-queue.ts @@ -0,0 +1,53 @@ +export class SessionActorQueue { + private readonly tailBySession = new Map<string, Promise<void>>(); + private readonly pendingBySession = new Map<string, number>(); + + getTailMapForTesting(): Map<string, Promise<void>> { + return this.tailBySession; + } + + getTotalPendingCount(): number { + let total = 0; + for (const count of this.pendingBySession.values()) { + total += count; + } + return total; + } + + getPendingCountForSession(actorKey: string): number { + return this.pendingBySession.get(actorKey) ?? 0; + } + + async run<T>(actorKey: string, op: () => Promise<T>): Promise<T> { + const previous = this.tailBySession.get(actorKey) ?? Promise.resolve(); + this.pendingBySession.set(actorKey, (this.pendingBySession.get(actorKey) ?? 
0) + 1); + let release: () => void = () => {}; + const marker = new Promise<void>((resolve) => { + release = resolve; + }); + const queuedTail = previous + .catch(() => { + // Keep actor queue alive after an operation failure. + }) + .then(() => marker); + this.tailBySession.set(actorKey, queuedTail); + + await previous.catch(() => { + // Previous failures should not block newer commands. + }); + try { + return await op(); + } finally { + const pending = (this.pendingBySession.get(actorKey) ?? 1) - 1; + if (pending <= 0) { + this.pendingBySession.delete(actorKey); + } else { + this.pendingBySession.set(actorKey, pending); + } + release(); + if (this.tailBySession.get(actorKey) === queuedTail) { + this.tailBySession.delete(actorKey); + } + } + } +} diff --git a/src/acp/control-plane/spawn.ts b/src/acp/control-plane/spawn.ts new file mode 100644 index 00000000000..5d9790cb5e7 --- /dev/null +++ b/src/acp/control-plane/spawn.ts @@ -0,0 +1,77 @@ +import type { OpenClawConfig } from "../../config/config.js"; +import { callGateway } from "../../gateway/call.js"; +import { logVerbose } from "../../globals.js"; +import { getSessionBindingService } from "../../infra/outbound/session-binding-service.js"; +import { getAcpSessionManager } from "./manager.js"; + +export type AcpSpawnRuntimeCloseHandle = { + runtime: { + close: (params: { + handle: { sessionKey: string; backend: string; runtimeSessionName: string }; + reason: string; + }) => Promise<void>; + }; + handle: { sessionKey: string; backend: string; runtimeSessionName: string }; +}; + +export async function cleanupFailedAcpSpawn(params: { + cfg: OpenClawConfig; + sessionKey: string; + shouldDeleteSession: boolean; + deleteTranscript: boolean; + runtimeCloseHandle?: AcpSpawnRuntimeCloseHandle; +}): Promise<void> { + if (params.runtimeCloseHandle) { + await params.runtimeCloseHandle.runtime + .close({ + handle: params.runtimeCloseHandle.handle, + reason: "spawn-failed", + }) + .catch((err) => { + logVerbose( + `acp-spawn: runtime cleanup 
close failed for ${params.sessionKey}: ${String(err)}`, + ); + }); + } + + const acpManager = getAcpSessionManager(); + await acpManager + .closeSession({ + cfg: params.cfg, + sessionKey: params.sessionKey, + reason: "spawn-failed", + allowBackendUnavailable: true, + requireAcpSession: false, + }) + .catch((err) => { + logVerbose( + `acp-spawn: manager cleanup close failed for ${params.sessionKey}: ${String(err)}`, + ); + }); + + await getSessionBindingService() + .unbind({ + targetSessionKey: params.sessionKey, + reason: "spawn-failed", + }) + .catch((err) => { + logVerbose( + `acp-spawn: binding cleanup unbind failed for ${params.sessionKey}: ${String(err)}`, + ); + }); + + if (!params.shouldDeleteSession) { + return; + } + await callGateway({ + method: "sessions.delete", + params: { + key: params.sessionKey, + deleteTranscript: params.deleteTranscript, + emitLifecycleHooks: false, + }, + timeoutMs: 10_000, + }).catch(() => { + // Best-effort cleanup only. + }); +} diff --git a/src/acp/policy.test.ts b/src/acp/policy.test.ts new file mode 100644 index 00000000000..3a623373a7b --- /dev/null +++ b/src/acp/policy.test.ts @@ -0,0 +1,59 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { + isAcpAgentAllowedByPolicy, + isAcpDispatchEnabledByPolicy, + isAcpEnabledByPolicy, + resolveAcpAgentPolicyError, + resolveAcpDispatchPolicyError, + resolveAcpDispatchPolicyMessage, + resolveAcpDispatchPolicyState, +} from "./policy.js"; + +describe("acp policy", () => { + it("treats ACP as enabled by default", () => { + const cfg = {} satisfies OpenClawConfig; + expect(isAcpEnabledByPolicy(cfg)).toBe(true); + expect(isAcpDispatchEnabledByPolicy(cfg)).toBe(false); + expect(resolveAcpDispatchPolicyState(cfg)).toBe("dispatch_disabled"); + }); + + it("reports ACP disabled state when acp.enabled is false", () => { + const cfg = { + acp: { + enabled: false, + }, + } satisfies OpenClawConfig; + 
expect(isAcpEnabledByPolicy(cfg)).toBe(false); + expect(resolveAcpDispatchPolicyState(cfg)).toBe("acp_disabled"); + expect(resolveAcpDispatchPolicyMessage(cfg)).toContain("acp.enabled=false"); + expect(resolveAcpDispatchPolicyError(cfg)?.code).toBe("ACP_DISPATCH_DISABLED"); + }); + + it("reports dispatch-disabled state when dispatch gate is false", () => { + const cfg = { + acp: { + enabled: true, + dispatch: { + enabled: false, + }, + }, + } satisfies OpenClawConfig; + expect(isAcpDispatchEnabledByPolicy(cfg)).toBe(false); + expect(resolveAcpDispatchPolicyState(cfg)).toBe("dispatch_disabled"); + expect(resolveAcpDispatchPolicyMessage(cfg)).toContain("acp.dispatch.enabled=false"); + }); + + it("applies allowlist filtering for ACP agents", () => { + const cfg = { + acp: { + allowedAgents: ["Codex", "claude-code"], + }, + } satisfies OpenClawConfig; + expect(isAcpAgentAllowedByPolicy(cfg, "codex")).toBe(true); + expect(isAcpAgentAllowedByPolicy(cfg, "claude-code")).toBe(true); + expect(isAcpAgentAllowedByPolicy(cfg, "gemini")).toBe(false); + expect(resolveAcpAgentPolicyError(cfg, "gemini")?.code).toBe("ACP_SESSION_INIT_FAILED"); + expect(resolveAcpAgentPolicyError(cfg, "codex")).toBeNull(); + }); +}); diff --git a/src/acp/policy.ts b/src/acp/policy.ts new file mode 100644 index 00000000000..8297783b62d --- /dev/null +++ b/src/acp/policy.ts @@ -0,0 +1,69 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { normalizeAgentId } from "../routing/session-key.js"; +import { AcpRuntimeError } from "./runtime/errors.js"; + +const ACP_DISABLED_MESSAGE = "ACP is disabled by policy (`acp.enabled=false`)."; +const ACP_DISPATCH_DISABLED_MESSAGE = + "ACP dispatch is disabled by policy (`acp.dispatch.enabled=false`)."; + +export type AcpDispatchPolicyState = "enabled" | "acp_disabled" | "dispatch_disabled"; + +export function isAcpEnabledByPolicy(cfg: OpenClawConfig): boolean { + return cfg.acp?.enabled !== false; +} + +export function 
resolveAcpDispatchPolicyState(cfg: OpenClawConfig): AcpDispatchPolicyState { + if (!isAcpEnabledByPolicy(cfg)) { + return "acp_disabled"; + } + if (cfg.acp?.dispatch?.enabled !== true) { + return "dispatch_disabled"; + } + return "enabled"; +} + +export function isAcpDispatchEnabledByPolicy(cfg: OpenClawConfig): boolean { + return resolveAcpDispatchPolicyState(cfg) === "enabled"; +} + +export function resolveAcpDispatchPolicyMessage(cfg: OpenClawConfig): string | null { + const state = resolveAcpDispatchPolicyState(cfg); + if (state === "acp_disabled") { + return ACP_DISABLED_MESSAGE; + } + if (state === "dispatch_disabled") { + return ACP_DISPATCH_DISABLED_MESSAGE; + } + return null; +} + +export function resolveAcpDispatchPolicyError(cfg: OpenClawConfig): AcpRuntimeError | null { + const message = resolveAcpDispatchPolicyMessage(cfg); + if (!message) { + return null; + } + return new AcpRuntimeError("ACP_DISPATCH_DISABLED", message); +} + +export function isAcpAgentAllowedByPolicy(cfg: OpenClawConfig, agentId: string): boolean { + const allowed = (cfg.acp?.allowedAgents ?? 
[]) + .map((entry) => normalizeAgentId(entry)) + .filter(Boolean); + if (allowed.length === 0) { + return true; + } + return allowed.includes(normalizeAgentId(agentId)); +} + +export function resolveAcpAgentPolicyError( + cfg: OpenClawConfig, + agentId: string, +): AcpRuntimeError | null { + if (isAcpAgentAllowedByPolicy(cfg, agentId)) { + return null; + } + return new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `ACP agent "${normalizeAgentId(agentId)}" is not allowed by policy.`, + ); +} diff --git a/src/acp/runtime/adapter-contract.testkit.ts b/src/acp/runtime/adapter-contract.testkit.ts new file mode 100644 index 00000000000..3c715b4777f --- /dev/null +++ b/src/acp/runtime/adapter-contract.testkit.ts @@ -0,0 +1,114 @@ +import { randomUUID } from "node:crypto"; +import { expect } from "vitest"; +import { toAcpRuntimeError } from "./errors.js"; +import type { AcpRuntime, AcpRuntimeEvent } from "./types.js"; + +export type AcpRuntimeAdapterContractParams = { + createRuntime: () => Promise<AcpRuntime> | AcpRuntime; + agentId?: string; + successPrompt?: string; + errorPrompt?: string; + assertSuccessEvents?: (events: AcpRuntimeEvent[]) => void | Promise<void>; + assertErrorOutcome?: (params: { + events: AcpRuntimeEvent[]; + thrown: unknown; + }) => void | Promise<void>; +}; + +export async function runAcpRuntimeAdapterContract( + params: AcpRuntimeAdapterContractParams, +): Promise<void> { + const runtime = await params.createRuntime(); + const sessionKey = `agent:${params.agentId ?? "codex"}:acp:contract-${randomUUID()}`; + const agent = params.agentId ?? "codex"; + + const handle = await runtime.ensureSession({ + sessionKey, + agent, + mode: "persistent", + }); + expect(handle.sessionKey).toBe(sessionKey); + expect(handle.backend.trim()).not.toHaveLength(0); + expect(handle.runtimeSessionName.trim()).not.toHaveLength(0); + + const successEvents: AcpRuntimeEvent[] = []; + for await (const event of runtime.runTurn({ + handle, + text: params.successPrompt ?? 
"contract-success", + mode: "prompt", + requestId: `contract-success-${randomUUID()}`, + })) { + successEvents.push(event); + } + expect( + successEvents.some( + (event) => + event.type === "done" || + event.type === "text_delta" || + event.type === "status" || + event.type === "tool_call", + ), + ).toBe(true); + await params.assertSuccessEvents?.(successEvents); + + if (runtime.getStatus) { + const status = await runtime.getStatus({ handle }); + expect(status).toBeDefined(); + expect(typeof status).toBe("object"); + } + if (runtime.setMode) { + await runtime.setMode({ + handle, + mode: "contract", + }); + } + if (runtime.setConfigOption) { + await runtime.setConfigOption({ + handle, + key: "contract_key", + value: "contract_value", + }); + } + + let errorThrown: unknown = null; + const errorEvents: AcpRuntimeEvent[] = []; + const errorPrompt = params.errorPrompt?.trim(); + if (errorPrompt) { + try { + for await (const event of runtime.runTurn({ + handle, + text: errorPrompt, + mode: "prompt", + requestId: `contract-error-${randomUUID()}`, + })) { + errorEvents.push(event); + } + } catch (error) { + errorThrown = error; + } + const sawErrorEvent = errorEvents.some((event) => event.type === "error"); + expect(Boolean(errorThrown) || sawErrorEvent).toBe(true); + if (errorThrown) { + const acpError = toAcpRuntimeError({ + error: errorThrown, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP runtime contract expected an error turn failure.", + }); + expect(acpError.code.length).toBeGreaterThan(0); + expect(acpError.message.length).toBeGreaterThan(0); + } + } + await params.assertErrorOutcome?.({ + events: errorEvents, + thrown: errorThrown, + }); + + await runtime.cancel({ + handle, + reason: "contract-cancel", + }); + await runtime.close({ + handle, + reason: "contract-close", + }); +} diff --git a/src/acp/runtime/error-text.test.ts b/src/acp/runtime/error-text.test.ts new file mode 100644 index 00000000000..b58cd3ef4fb --- /dev/null +++ 
b/src/acp/runtime/error-text.test.ts @@ -0,0 +1,19 @@ +import { describe, expect, it } from "vitest"; +import { formatAcpRuntimeErrorText } from "./error-text.js"; +import { AcpRuntimeError } from "./errors.js"; + +describe("formatAcpRuntimeErrorText", () => { + it("adds actionable next steps for known ACP runtime error codes", () => { + const text = formatAcpRuntimeErrorText( + new AcpRuntimeError("ACP_BACKEND_MISSING", "backend missing"), + ); + expect(text).toContain("ACP error (ACP_BACKEND_MISSING): backend missing"); + expect(text).toContain("next:"); + }); + + it("returns consistent ACP error envelope for runtime failures", () => { + const text = formatAcpRuntimeErrorText(new AcpRuntimeError("ACP_TURN_FAILED", "turn failed")); + expect(text).toContain("ACP error (ACP_TURN_FAILED): turn failed"); + expect(text).toContain("next:"); + }); +}); diff --git a/src/acp/runtime/error-text.ts b/src/acp/runtime/error-text.ts new file mode 100644 index 00000000000..e4901e1c869 --- /dev/null +++ b/src/acp/runtime/error-text.ts @@ -0,0 +1,45 @@ +import { type AcpRuntimeErrorCode, AcpRuntimeError, toAcpRuntimeError } from "./errors.js"; + +function resolveAcpRuntimeErrorNextStep(error: AcpRuntimeError): string | undefined { + if (error.code === "ACP_BACKEND_MISSING" || error.code === "ACP_BACKEND_UNAVAILABLE") { + return "Run `/acp doctor`, install/enable the backend plugin, then retry."; + } + if (error.code === "ACP_DISPATCH_DISABLED") { + return "Enable `acp.dispatch.enabled=true` to allow thread-message ACP turns."; + } + if (error.code === "ACP_SESSION_INIT_FAILED") { + return "If this session is stale, recreate it with `/acp spawn` and rebind the thread."; + } + if (error.code === "ACP_INVALID_RUNTIME_OPTION") { + return "Use `/acp status` to inspect options and pass valid values."; + } + if (error.code === "ACP_BACKEND_UNSUPPORTED_CONTROL") { + return "This backend does not support that control; use a supported command."; + } + if (error.code === "ACP_TURN_FAILED") { 
+ return "Retry, or use `/acp cancel` and send the message again."; + } + return undefined; +} + +export function formatAcpRuntimeErrorText(error: AcpRuntimeError): string { + const next = resolveAcpRuntimeErrorNextStep(error); + if (!next) { + return `ACP error (${error.code}): ${error.message}`; + } + return `ACP error (${error.code}): ${error.message}\nnext: ${next}`; +} + +export function toAcpRuntimeErrorText(params: { + error: unknown; + fallbackCode: AcpRuntimeErrorCode; + fallbackMessage: string; +}): string { + return formatAcpRuntimeErrorText( + toAcpRuntimeError({ + error: params.error, + fallbackCode: params.fallbackCode, + fallbackMessage: params.fallbackMessage, + }), + ); +} diff --git a/src/acp/runtime/errors.test.ts b/src/acp/runtime/errors.test.ts new file mode 100644 index 00000000000..10ba3667d84 --- /dev/null +++ b/src/acp/runtime/errors.test.ts @@ -0,0 +1,33 @@ +import { describe, expect, it } from "vitest"; +import { AcpRuntimeError, withAcpRuntimeErrorBoundary } from "./errors.js"; + +describe("withAcpRuntimeErrorBoundary", () => { + it("wraps generic errors with fallback code and source message", async () => { + await expect( + withAcpRuntimeErrorBoundary({ + run: async () => { + throw new Error("boom"); + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "fallback", + }), + ).rejects.toMatchObject({ + name: "AcpRuntimeError", + code: "ACP_TURN_FAILED", + message: "boom", + }); + }); + + it("passes through existing ACP runtime errors", async () => { + const existing = new AcpRuntimeError("ACP_BACKEND_MISSING", "backend missing"); + await expect( + withAcpRuntimeErrorBoundary({ + run: async () => { + throw existing; + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "fallback", + }), + ).rejects.toBe(existing); + }); +}); diff --git a/src/acp/runtime/errors.ts b/src/acp/runtime/errors.ts new file mode 100644 index 00000000000..0ac56251f8e --- /dev/null +++ b/src/acp/runtime/errors.ts @@ -0,0 +1,61 @@ +export const 
ACP_ERROR_CODES = [ + "ACP_BACKEND_MISSING", + "ACP_BACKEND_UNAVAILABLE", + "ACP_BACKEND_UNSUPPORTED_CONTROL", + "ACP_DISPATCH_DISABLED", + "ACP_INVALID_RUNTIME_OPTION", + "ACP_SESSION_INIT_FAILED", + "ACP_TURN_FAILED", +] as const; + +export type AcpRuntimeErrorCode = (typeof ACP_ERROR_CODES)[number]; + +export class AcpRuntimeError extends Error { + readonly code: AcpRuntimeErrorCode; + override readonly cause?: unknown; + + constructor(code: AcpRuntimeErrorCode, message: string, options?: { cause?: unknown }) { + super(message); + this.name = "AcpRuntimeError"; + this.code = code; + this.cause = options?.cause; + } +} + +export function isAcpRuntimeError(value: unknown): value is AcpRuntimeError { + return value instanceof AcpRuntimeError; +} + +export function toAcpRuntimeError(params: { + error: unknown; + fallbackCode: AcpRuntimeErrorCode; + fallbackMessage: string; +}): AcpRuntimeError { + if (params.error instanceof AcpRuntimeError) { + return params.error; + } + if (params.error instanceof Error) { + return new AcpRuntimeError(params.fallbackCode, params.error.message, { + cause: params.error, + }); + } + return new AcpRuntimeError(params.fallbackCode, params.fallbackMessage, { + cause: params.error, + }); +} + +export async function withAcpRuntimeErrorBoundary<T>(params: { + run: () => Promise<T>; + fallbackCode: AcpRuntimeErrorCode; + fallbackMessage: string; +}): Promise<T> { + try { + return await params.run(); + } catch (error) { + throw toAcpRuntimeError({ + error, + fallbackCode: params.fallbackCode, + fallbackMessage: params.fallbackMessage, + }); + } +} diff --git a/src/acp/runtime/registry.test.ts b/src/acp/runtime/registry.test.ts new file mode 100644 index 00000000000..fab6a1b51e7 --- /dev/null +++ b/src/acp/runtime/registry.test.ts @@ -0,0 +1,99 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { AcpRuntimeError } from "./errors.js"; +import { + __testing, + getAcpRuntimeBackend, + registerAcpRuntimeBackend, + 
requireAcpRuntimeBackend, + unregisterAcpRuntimeBackend, +} from "./registry.js"; +import type { AcpRuntime } from "./types.js"; + +function createRuntimeStub(): AcpRuntime { + return { + ensureSession: vi.fn(async (input) => ({ + sessionKey: input.sessionKey, + backend: "stub", + runtimeSessionName: `${input.sessionKey}:runtime`, + })), + runTurn: vi.fn(async function* () { + // no-op stream + }), + cancel: vi.fn(async () => {}), + close: vi.fn(async () => {}), + }; +} + +describe("acp runtime registry", () => { + beforeEach(() => { + __testing.resetAcpRuntimeBackendsForTests(); + }); + + it("registers and resolves backends by id", () => { + const runtime = createRuntimeStub(); + registerAcpRuntimeBackend({ id: "acpx", runtime }); + + const backend = getAcpRuntimeBackend("acpx"); + expect(backend?.id).toBe("acpx"); + expect(backend?.runtime).toBe(runtime); + }); + + it("prefers a healthy backend when resolving without explicit id", () => { + const unhealthyRuntime = createRuntimeStub(); + const healthyRuntime = createRuntimeStub(); + + registerAcpRuntimeBackend({ + id: "unhealthy", + runtime: unhealthyRuntime, + healthy: () => false, + }); + registerAcpRuntimeBackend({ + id: "healthy", + runtime: healthyRuntime, + healthy: () => true, + }); + + const backend = getAcpRuntimeBackend(); + expect(backend?.id).toBe("healthy"); + }); + + it("throws a typed missing-backend error when no backend is registered", () => { + expect(() => requireAcpRuntimeBackend()).toThrowError(AcpRuntimeError); + expect(() => requireAcpRuntimeBackend()).toThrowError(/ACP runtime backend is not configured/i); + }); + + it("throws a typed unavailable error when the requested backend is unhealthy", () => { + registerAcpRuntimeBackend({ + id: "acpx", + runtime: createRuntimeStub(), + healthy: () => false, + }); + + try { + requireAcpRuntimeBackend("acpx"); + throw new Error("expected requireAcpRuntimeBackend to throw"); + } catch (err) { + expect(err).toBeInstanceOf(AcpRuntimeError); + 
expect((err as AcpRuntimeError).code).toBe("ACP_BACKEND_UNAVAILABLE");
+    }
+  });
+
+  it("unregisters a backend by id", () => {
+    registerAcpRuntimeBackend({ id: "acpx", runtime: createRuntimeStub() });
+    unregisterAcpRuntimeBackend("acpx");
+    expect(getAcpRuntimeBackend("acpx")).toBeNull();
+  });
+
+  it("keeps backend state on a global registry for cross-loader access", () => {
+    const runtime = createRuntimeStub();
+    const sharedState = __testing.getAcpRuntimeRegistryGlobalStateForTests();
+
+    sharedState.backendsById.set("acpx", {
+      id: "acpx",
+      runtime,
+    });
+
+    const backend = getAcpRuntimeBackend("acpx");
+    expect(backend?.runtime).toBe(runtime);
+  });
+});
diff --git a/src/acp/runtime/registry.ts b/src/acp/runtime/registry.ts
new file mode 100644
index 00000000000..4c0a3d73cd0
--- /dev/null
+++ b/src/acp/runtime/registry.ts
@@ -0,0 +1,118 @@
+import { AcpRuntimeError } from "./errors.js";
+import type { AcpRuntime } from "./types.js";
+
+export type AcpRuntimeBackend = {
+  id: string;
+  runtime: AcpRuntime;
+  healthy?: () => boolean;
+};
+
+type AcpRuntimeRegistryGlobalState = {
+  backendsById: Map<string, AcpRuntimeBackend>;
+};
+
+const ACP_RUNTIME_REGISTRY_STATE_KEY = Symbol.for("openclaw.acpRuntimeRegistryState");
+
+function createAcpRuntimeRegistryGlobalState(): AcpRuntimeRegistryGlobalState {
+  return {
+    backendsById: new Map(),
+  };
+}
+
+function resolveAcpRuntimeRegistryGlobalState(): AcpRuntimeRegistryGlobalState {
+  const runtimeGlobal = globalThis as typeof globalThis & {
+    [ACP_RUNTIME_REGISTRY_STATE_KEY]?: AcpRuntimeRegistryGlobalState;
+  };
+  if (!runtimeGlobal[ACP_RUNTIME_REGISTRY_STATE_KEY]) {
+    runtimeGlobal[ACP_RUNTIME_REGISTRY_STATE_KEY] = createAcpRuntimeRegistryGlobalState();
+  }
+  return runtimeGlobal[ACP_RUNTIME_REGISTRY_STATE_KEY];
+}
+
+const ACP_BACKENDS_BY_ID = resolveAcpRuntimeRegistryGlobalState().backendsById;
+
+function normalizeBackendId(id: string | undefined): string {
+  return id?.trim().toLowerCase() || "";
+}
+
+function
isBackendHealthy(backend: AcpRuntimeBackend): boolean { + if (!backend.healthy) { + return true; + } + try { + return backend.healthy(); + } catch { + return false; + } +} + +export function registerAcpRuntimeBackend(backend: AcpRuntimeBackend): void { + const id = normalizeBackendId(backend.id); + if (!id) { + throw new Error("ACP runtime backend id is required"); + } + if (!backend.runtime) { + throw new Error(`ACP runtime backend "${id}" is missing runtime implementation`); + } + ACP_BACKENDS_BY_ID.set(id, { + ...backend, + id, + }); +} + +export function unregisterAcpRuntimeBackend(id: string): void { + const normalized = normalizeBackendId(id); + if (!normalized) { + return; + } + ACP_BACKENDS_BY_ID.delete(normalized); +} + +export function getAcpRuntimeBackend(id?: string): AcpRuntimeBackend | null { + const normalized = normalizeBackendId(id); + if (normalized) { + return ACP_BACKENDS_BY_ID.get(normalized) ?? null; + } + if (ACP_BACKENDS_BY_ID.size === 0) { + return null; + } + for (const backend of ACP_BACKENDS_BY_ID.values()) { + if (isBackendHealthy(backend)) { + return backend; + } + } + return ACP_BACKENDS_BY_ID.values().next().value ?? null; +} + +export function requireAcpRuntimeBackend(id?: string): AcpRuntimeBackend { + const normalized = normalizeBackendId(id); + const backend = getAcpRuntimeBackend(normalized || undefined); + if (!backend) { + throw new AcpRuntimeError( + "ACP_BACKEND_MISSING", + "ACP runtime backend is not configured. Install and enable the acpx runtime plugin.", + ); + } + if (!isBackendHealthy(backend)) { + throw new AcpRuntimeError( + "ACP_BACKEND_UNAVAILABLE", + "ACP runtime backend is currently unavailable. 
Try again in a moment.", + ); + } + if (normalized && backend.id !== normalized) { + throw new AcpRuntimeError( + "ACP_BACKEND_MISSING", + `ACP runtime backend "${normalized}" is not registered.`, + ); + } + return backend; +} + +export const __testing = { + resetAcpRuntimeBackendsForTests() { + ACP_BACKENDS_BY_ID.clear(); + }, + getAcpRuntimeRegistryGlobalStateForTests() { + return resolveAcpRuntimeRegistryGlobalState(); + }, +}; diff --git a/src/acp/runtime/session-identifiers.test.ts b/src/acp/runtime/session-identifiers.test.ts new file mode 100644 index 00000000000..fe7b0d6c2bc --- /dev/null +++ b/src/acp/runtime/session-identifiers.test.ts @@ -0,0 +1,89 @@ +import { describe, expect, it } from "vitest"; +import { + resolveAcpSessionCwd, + resolveAcpSessionIdentifierLinesFromIdentity, + resolveAcpThreadSessionDetailLines, +} from "./session-identifiers.js"; + +describe("session identifier helpers", () => { + it("hides unresolved identifiers from thread intro details while pending", () => { + const lines = resolveAcpThreadSessionDetailLines({ + sessionKey: "agent:codex:acp:pending-1", + meta: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + identity: { + state: "pending", + source: "ensure", + lastUpdatedAt: Date.now(), + acpxSessionId: "acpx-123", + agentSessionId: "inner-123", + }, + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + + expect(lines).toEqual([]); + }); + + it("adds a Codex resume hint when agent identity is resolved", () => { + const lines = resolveAcpThreadSessionDetailLines({ + sessionKey: "agent:codex:acp:resolved-1", + meta: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + identity: { + state: "resolved", + source: "status", + lastUpdatedAt: Date.now(), + acpxSessionId: "acpx-123", + agentSessionId: "inner-123", + }, + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + + expect(lines).toContain("agent session id: inner-123"); + 
expect(lines).toContain("acpx session id: acpx-123"); + expect(lines).toContain( + "resume in Codex CLI: `codex resume inner-123` (continues this conversation).", + ); + }); + + it("shows pending identity text for status rendering", () => { + const lines = resolveAcpSessionIdentifierLinesFromIdentity({ + backend: "acpx", + mode: "status", + identity: { + state: "pending", + source: "status", + lastUpdatedAt: Date.now(), + agentSessionId: "inner-123", + }, + }); + + expect(lines).toEqual(["session ids: pending (available after the first reply)"]); + }); + + it("prefers runtimeOptions.cwd over legacy meta.cwd", () => { + const cwd = resolveAcpSessionCwd({ + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + runtimeOptions: { + cwd: "/repo/new", + }, + cwd: "/repo/old", + state: "idle", + lastActivityAt: Date.now(), + }); + expect(cwd).toBe("/repo/new"); + }); +}); diff --git a/src/acp/runtime/session-identifiers.ts b/src/acp/runtime/session-identifiers.ts new file mode 100644 index 00000000000..d342d8b02eb --- /dev/null +++ b/src/acp/runtime/session-identifiers.ts @@ -0,0 +1,131 @@ +import type { SessionAcpIdentity, SessionAcpMeta } from "../../config/sessions/types.js"; +import { isSessionIdentityPending, resolveSessionIdentityFromMeta } from "./session-identity.js"; + +export const ACP_SESSION_IDENTITY_RENDERER_VERSION = "v1"; +export type AcpSessionIdentifierRenderMode = "status" | "thread"; + +type SessionResumeHintResolver = (params: { agentSessionId: string }) => string; + +const ACP_AGENT_RESUME_HINT_BY_KEY = new Map([ + [ + "codex", + ({ agentSessionId }) => + `resume in Codex CLI: \`codex resume ${agentSessionId}\` (continues this conversation).`, + ], + [ + "openai-codex", + ({ agentSessionId }) => + `resume in Codex CLI: \`codex resume ${agentSessionId}\` (continues this conversation).`, + ], + [ + "codex-cli", + ({ agentSessionId }) => + `resume in Codex CLI: \`codex resume ${agentSessionId}\` (continues this 
conversation).`, + ], +]); + +function normalizeText(value: unknown): string | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed || undefined; +} + +function normalizeAgentHintKey(value: unknown): string | undefined { + const normalized = normalizeText(value); + if (!normalized) { + return undefined; + } + return normalized.toLowerCase().replace(/[\s_]+/g, "-"); +} + +function resolveAcpAgentResumeHintLine(params: { + agentId?: string; + agentSessionId?: string; +}): string | undefined { + const agentSessionId = normalizeText(params.agentSessionId); + const agentKey = normalizeAgentHintKey(params.agentId); + if (!agentSessionId || !agentKey) { + return undefined; + } + const resolver = ACP_AGENT_RESUME_HINT_BY_KEY.get(agentKey); + return resolver ? resolver({ agentSessionId }) : undefined; +} + +export function resolveAcpSessionIdentifierLines(params: { + sessionKey: string; + meta?: SessionAcpMeta; +}): string[] { + const backend = normalizeText(params.meta?.backend) ?? "backend"; + const identity = resolveSessionIdentityFromMeta(params.meta); + return resolveAcpSessionIdentifierLinesFromIdentity({ + backend, + identity, + mode: "status", + }); +} + +export function resolveAcpSessionIdentifierLinesFromIdentity(params: { + backend: string; + identity?: SessionAcpIdentity; + mode?: AcpSessionIdentifierRenderMode; +}): string[] { + const backend = normalizeText(params.backend) ?? "backend"; + const mode = params.mode ?? 
"status"; + const identity = params.identity; + const agentSessionId = normalizeText(identity?.agentSessionId); + const acpxSessionId = normalizeText(identity?.acpxSessionId); + const acpxRecordId = normalizeText(identity?.acpxRecordId); + const hasIdentifier = Boolean(agentSessionId || acpxSessionId || acpxRecordId); + if (isSessionIdentityPending(identity) && hasIdentifier) { + if (mode === "status") { + return ["session ids: pending (available after the first reply)"]; + } + return []; + } + const lines: string[] = []; + if (agentSessionId) { + lines.push(`agent session id: ${agentSessionId}`); + } + if (acpxSessionId) { + lines.push(`${backend} session id: ${acpxSessionId}`); + } + if (acpxRecordId) { + lines.push(`${backend} record id: ${acpxRecordId}`); + } + return lines; +} + +export function resolveAcpSessionCwd(meta?: SessionAcpMeta): string | undefined { + const runtimeCwd = normalizeText(meta?.runtimeOptions?.cwd); + if (runtimeCwd) { + return runtimeCwd; + } + return normalizeText(meta?.cwd); +} + +export function resolveAcpThreadSessionDetailLines(params: { + sessionKey: string; + meta?: SessionAcpMeta; +}): string[] { + const meta = params.meta; + const identity = resolveSessionIdentityFromMeta(meta); + const backend = normalizeText(meta?.backend) ?? 
"backend"; + const lines = resolveAcpSessionIdentifierLinesFromIdentity({ + backend, + identity, + mode: "thread", + }); + if (lines.length === 0) { + return lines; + } + const hint = resolveAcpAgentResumeHintLine({ + agentId: meta?.agent, + agentSessionId: identity?.agentSessionId, + }); + if (hint) { + lines.push(hint); + } + return lines; +} diff --git a/src/acp/runtime/session-identity.ts b/src/acp/runtime/session-identity.ts new file mode 100644 index 00000000000..066a3cb71e5 --- /dev/null +++ b/src/acp/runtime/session-identity.ts @@ -0,0 +1,210 @@ +import type { + SessionAcpIdentity, + SessionAcpIdentitySource, + SessionAcpMeta, +} from "../../config/sessions/types.js"; +import type { AcpRuntimeHandle, AcpRuntimeStatus } from "./types.js"; + +function normalizeText(value: unknown): string | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed || undefined; +} + +function normalizeIdentityState(value: unknown): SessionAcpIdentity["state"] | undefined { + if (value !== "pending" && value !== "resolved") { + return undefined; + } + return value; +} + +function normalizeIdentitySource(value: unknown): SessionAcpIdentitySource | undefined { + if (value !== "ensure" && value !== "status" && value !== "event") { + return undefined; + } + return value; +} + +function normalizeIdentity( + identity: SessionAcpIdentity | undefined, +): SessionAcpIdentity | undefined { + if (!identity) { + return undefined; + } + const state = normalizeIdentityState(identity.state); + const source = normalizeIdentitySource(identity.source); + const acpxRecordId = normalizeText(identity.acpxRecordId); + const acpxSessionId = normalizeText(identity.acpxSessionId); + const agentSessionId = normalizeText(identity.agentSessionId); + const lastUpdatedAt = + typeof identity.lastUpdatedAt === "number" && Number.isFinite(identity.lastUpdatedAt) + ? 
identity.lastUpdatedAt + : undefined; + const hasAnyId = Boolean(acpxRecordId || acpxSessionId || agentSessionId); + if (!state && !source && !hasAnyId && lastUpdatedAt === undefined) { + return undefined; + } + const resolved = Boolean(acpxSessionId || agentSessionId); + const normalizedState = state ?? (resolved ? "resolved" : "pending"); + return { + state: normalizedState, + ...(acpxRecordId ? { acpxRecordId } : {}), + ...(acpxSessionId ? { acpxSessionId } : {}), + ...(agentSessionId ? { agentSessionId } : {}), + source: source ?? "status", + lastUpdatedAt: lastUpdatedAt ?? Date.now(), + }; +} + +export function resolveSessionIdentityFromMeta( + meta: SessionAcpMeta | undefined, +): SessionAcpIdentity | undefined { + if (!meta) { + return undefined; + } + return normalizeIdentity(meta.identity); +} + +export function identityHasStableSessionId(identity: SessionAcpIdentity | undefined): boolean { + return Boolean(identity?.acpxSessionId || identity?.agentSessionId); +} + +export function isSessionIdentityPending(identity: SessionAcpIdentity | undefined): boolean { + if (!identity) { + return true; + } + return identity.state === "pending"; +} + +export function identityEquals( + left: SessionAcpIdentity | undefined, + right: SessionAcpIdentity | undefined, +): boolean { + const a = normalizeIdentity(left); + const b = normalizeIdentity(right); + if (!a && !b) { + return true; + } + if (!a || !b) { + return false; + } + return ( + a.state === b.state && + a.acpxRecordId === b.acpxRecordId && + a.acpxSessionId === b.acpxSessionId && + a.agentSessionId === b.agentSessionId && + a.source === b.source + ); +} + +export function mergeSessionIdentity(params: { + current: SessionAcpIdentity | undefined; + incoming: SessionAcpIdentity | undefined; + now: number; +}): SessionAcpIdentity | undefined { + const current = normalizeIdentity(params.current); + const incoming = normalizeIdentity(params.incoming); + if (!current) { + if (!incoming) { + return undefined; + } + 
return { ...incoming, lastUpdatedAt: params.now }; + } + if (!incoming) { + return current; + } + + const currentResolved = current.state === "resolved"; + const incomingResolved = incoming.state === "resolved"; + const allowIncomingValue = !currentResolved || incomingResolved; + const nextRecordId = + allowIncomingValue && incoming.acpxRecordId ? incoming.acpxRecordId : current.acpxRecordId; + const nextAcpxSessionId = + allowIncomingValue && incoming.acpxSessionId ? incoming.acpxSessionId : current.acpxSessionId; + const nextAgentSessionId = + allowIncomingValue && incoming.agentSessionId + ? incoming.agentSessionId + : current.agentSessionId; + + const nextResolved = Boolean(nextAcpxSessionId || nextAgentSessionId); + const nextState: SessionAcpIdentity["state"] = nextResolved + ? "resolved" + : currentResolved + ? "resolved" + : incoming.state; + const nextSource = allowIncomingValue ? incoming.source : current.source; + const next: SessionAcpIdentity = { + state: nextState, + ...(nextRecordId ? { acpxRecordId: nextRecordId } : {}), + ...(nextAcpxSessionId ? { acpxSessionId: nextAcpxSessionId } : {}), + ...(nextAgentSessionId ? { agentSessionId: nextAgentSessionId } : {}), + source: nextSource, + lastUpdatedAt: params.now, + }; + return next; +} + +export function createIdentityFromEnsure(params: { + handle: AcpRuntimeHandle; + now: number; +}): SessionAcpIdentity | undefined { + const acpxRecordId = normalizeText((params.handle as { acpxRecordId?: unknown }).acpxRecordId); + const acpxSessionId = normalizeText(params.handle.backendSessionId); + const agentSessionId = normalizeText(params.handle.agentSessionId); + if (!acpxRecordId && !acpxSessionId && !agentSessionId) { + return undefined; + } + return { + state: "pending", + ...(acpxRecordId ? { acpxRecordId } : {}), + ...(acpxSessionId ? { acpxSessionId } : {}), + ...(agentSessionId ? 
{ agentSessionId } : {}), + source: "ensure", + lastUpdatedAt: params.now, + }; +} + +export function createIdentityFromStatus(params: { + status: AcpRuntimeStatus | undefined; + now: number; +}): SessionAcpIdentity | undefined { + if (!params.status) { + return undefined; + } + const details = params.status.details; + const acpxRecordId = + normalizeText((params.status as { acpxRecordId?: unknown }).acpxRecordId) ?? + normalizeText(details?.acpxRecordId); + const acpxSessionId = + normalizeText(params.status.backendSessionId) ?? + normalizeText(details?.backendSessionId) ?? + normalizeText(details?.acpxSessionId); + const agentSessionId = + normalizeText(params.status.agentSessionId) ?? normalizeText(details?.agentSessionId); + if (!acpxRecordId && !acpxSessionId && !agentSessionId) { + return undefined; + } + const resolved = Boolean(acpxSessionId || agentSessionId); + return { + state: resolved ? "resolved" : "pending", + ...(acpxRecordId ? { acpxRecordId } : {}), + ...(acpxSessionId ? { acpxSessionId } : {}), + ...(agentSessionId ? { agentSessionId } : {}), + source: "status", + lastUpdatedAt: params.now, + }; +} + +export function resolveRuntimeHandleIdentifiersFromIdentity( + identity: SessionAcpIdentity | undefined, +): { backendSessionId?: string; agentSessionId?: string } { + if (!identity) { + return {}; + } + return { + ...(identity.acpxSessionId ? { backendSessionId: identity.acpxSessionId } : {}), + ...(identity.agentSessionId ? 
{ agentSessionId: identity.agentSessionId } : {}), + }; +} diff --git a/src/acp/runtime/session-meta.ts b/src/acp/runtime/session-meta.ts new file mode 100644 index 00000000000..fd4a5813f9b --- /dev/null +++ b/src/acp/runtime/session-meta.ts @@ -0,0 +1,165 @@ +import path from "node:path"; +import { resolveAgentSessionDirs } from "../../agents/session-dirs.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import { loadConfig } from "../../config/config.js"; +import { resolveStateDir } from "../../config/paths.js"; +import { loadSessionStore, resolveStorePath, updateSessionStore } from "../../config/sessions.js"; +import { + mergeSessionEntry, + type SessionAcpMeta, + type SessionEntry, +} from "../../config/sessions/types.js"; +import { parseAgentSessionKey } from "../../routing/session-key.js"; + +export type AcpSessionStoreEntry = { + cfg: OpenClawConfig; + storePath: string; + sessionKey: string; + storeSessionKey: string; + entry?: SessionEntry; + acp?: SessionAcpMeta; + storeReadFailed?: boolean; +}; + +function resolveStoreSessionKey(store: Record, sessionKey: string): string { + const normalized = sessionKey.trim(); + if (!normalized) { + return ""; + } + if (store[normalized]) { + return normalized; + } + const lower = normalized.toLowerCase(); + if (store[lower]) { + return lower; + } + for (const key of Object.keys(store)) { + if (key.toLowerCase() === lower) { + return key; + } + } + return lower; +} + +export function resolveSessionStorePathForAcp(params: { + sessionKey: string; + cfg?: OpenClawConfig; +}): { cfg: OpenClawConfig; storePath: string } { + const cfg = params.cfg ?? 
loadConfig(); + const parsed = parseAgentSessionKey(params.sessionKey); + const storePath = resolveStorePath(cfg.session?.store, { + agentId: parsed?.agentId, + }); + return { cfg, storePath }; +} + +export function readAcpSessionEntry(params: { + sessionKey: string; + cfg?: OpenClawConfig; +}): AcpSessionStoreEntry | null { + const sessionKey = params.sessionKey.trim(); + if (!sessionKey) { + return null; + } + const { cfg, storePath } = resolveSessionStorePathForAcp({ + sessionKey, + cfg: params.cfg, + }); + let store: Record; + let storeReadFailed = false; + try { + store = loadSessionStore(storePath); + } catch { + storeReadFailed = true; + store = {}; + } + const storeSessionKey = resolveStoreSessionKey(store, sessionKey); + const entry = store[storeSessionKey]; + return { + cfg, + storePath, + sessionKey, + storeSessionKey, + entry, + acp: entry?.acp, + storeReadFailed, + }; +} + +export async function listAcpSessionEntries(params: { + cfg?: OpenClawConfig; +}): Promise { + const cfg = params.cfg ?? 
loadConfig(); + const stateDir = resolveStateDir(process.env); + const sessionDirs = await resolveAgentSessionDirs(stateDir); + const entries: AcpSessionStoreEntry[] = []; + + for (const sessionsDir of sessionDirs) { + const storePath = path.join(sessionsDir, "sessions.json"); + let store: Record; + try { + store = loadSessionStore(storePath); + } catch { + continue; + } + for (const [sessionKey, entry] of Object.entries(store)) { + if (!entry?.acp) { + continue; + } + entries.push({ + cfg, + storePath, + sessionKey, + storeSessionKey: sessionKey, + entry, + acp: entry.acp, + }); + } + } + + return entries; +} + +export async function upsertAcpSessionMeta(params: { + sessionKey: string; + cfg?: OpenClawConfig; + mutate: ( + current: SessionAcpMeta | undefined, + entry: SessionEntry | undefined, + ) => SessionAcpMeta | null | undefined; +}): Promise { + const sessionKey = params.sessionKey.trim(); + if (!sessionKey) { + return null; + } + const { storePath } = resolveSessionStorePathForAcp({ + sessionKey, + cfg: params.cfg, + }); + return await updateSessionStore( + storePath, + (store) => { + const storeSessionKey = resolveStoreSessionKey(store, sessionKey); + const currentEntry = store[storeSessionKey]; + const nextMeta = params.mutate(currentEntry?.acp, currentEntry); + if (nextMeta === undefined) { + return currentEntry ?? null; + } + if (nextMeta === null && !currentEntry) { + return null; + } + + const nextEntry = mergeSessionEntry(currentEntry, { + acp: nextMeta ?? 
undefined, + }); + if (nextMeta === null) { + delete nextEntry.acp; + } + store[storeSessionKey] = nextEntry; + return nextEntry; + }, + { + activeSessionKey: sessionKey.toLowerCase(), + }, + ); +} diff --git a/src/acp/runtime/types.ts b/src/acp/runtime/types.ts new file mode 100644 index 00000000000..4e479eb8c8c --- /dev/null +++ b/src/acp/runtime/types.ts @@ -0,0 +1,110 @@ +export type AcpRuntimePromptMode = "prompt" | "steer"; + +export type AcpRuntimeSessionMode = "persistent" | "oneshot"; + +export type AcpRuntimeControl = "session/set_mode" | "session/set_config_option" | "session/status"; + +export type AcpRuntimeHandle = { + sessionKey: string; + backend: string; + runtimeSessionName: string; + /** Effective runtime working directory for this ACP session, if exposed by adapter/runtime. */ + cwd?: string; + /** Backend-local record identifier, if exposed by adapter/runtime (for example acpx record id). */ + acpxRecordId?: string; + /** Backend-level ACP session identifier, if exposed by adapter/runtime. */ + backendSessionId?: string; + /** Upstream harness session identifier, if exposed by adapter/runtime. */ + agentSessionId?: string; +}; + +export type AcpRuntimeEnsureInput = { + sessionKey: string; + agent: string; + mode: AcpRuntimeSessionMode; + cwd?: string; + env?: Record; +}; + +export type AcpRuntimeTurnInput = { + handle: AcpRuntimeHandle; + text: string; + mode: AcpRuntimePromptMode; + requestId: string; + signal?: AbortSignal; +}; + +export type AcpRuntimeCapabilities = { + controls: AcpRuntimeControl[]; + /** + * Optional backend-advertised option keys for session/set_config_option. + * Empty/undefined means "backend accepts keys, but did not advertise a strict list". + */ + configOptionKeys?: string[]; +}; + +export type AcpRuntimeStatus = { + summary?: string; + /** Backend-local record identifier, if exposed by adapter/runtime. */ + acpxRecordId?: string; + /** Backend-level ACP session identifier, if known at status time. 
*/ + backendSessionId?: string; + /** Upstream harness session identifier, if known at status time. */ + agentSessionId?: string; + details?: Record; +}; + +export type AcpRuntimeDoctorReport = { + ok: boolean; + code?: string; + message: string; + installCommand?: string; + details?: string[]; +}; + +export type AcpRuntimeEvent = + | { + type: "text_delta"; + text: string; + stream?: "output" | "thought"; + } + | { + type: "status"; + text: string; + } + | { + type: "tool_call"; + text: string; + } + | { + type: "done"; + stopReason?: string; + } + | { + type: "error"; + message: string; + code?: string; + retryable?: boolean; + }; + +export interface AcpRuntime { + ensureSession(input: AcpRuntimeEnsureInput): Promise; + + runTurn(input: AcpRuntimeTurnInput): AsyncIterable; + + getCapabilities?(input: { + handle?: AcpRuntimeHandle; + }): Promise | AcpRuntimeCapabilities; + + getStatus?(input: { handle: AcpRuntimeHandle }): Promise; + + setMode?(input: { handle: AcpRuntimeHandle; mode: string }): Promise; + + setConfigOption?(input: { handle: AcpRuntimeHandle; key: string; value: string }): Promise; + + doctor?(): Promise; + + cancel(input: { handle: AcpRuntimeHandle; reason?: string }): Promise; + + close(input: { handle: AcpRuntimeHandle; reason: string }): Promise; +} diff --git a/src/agents/acp-binding-architecture.guardrail.test.ts b/src/agents/acp-binding-architecture.guardrail.test.ts new file mode 100644 index 00000000000..ab8f04a2166 --- /dev/null +++ b/src/agents/acp-binding-architecture.guardrail.test.ts @@ -0,0 +1,42 @@ +import { readFileSync } from "node:fs"; +import { dirname, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; +import { describe, expect, it } from "vitest"; + +const ROOT_DIR = resolve(dirname(fileURLToPath(import.meta.url)), ".."); + +type GuardedSource = { + path: string; + forbiddenPatterns: RegExp[]; +}; + +const GUARDED_SOURCES: GuardedSource[] = [ + { + path: "agents/acp-spawn.ts", + forbiddenPatterns: 
[/\bgetThreadBindingManager\b/, /\bparseDiscordTarget\b/], + }, + { + path: "auto-reply/reply/commands-acp/lifecycle.ts", + forbiddenPatterns: [/\bgetThreadBindingManager\b/, /\bunbindThreadBindingsBySessionKey\b/], + }, + { + path: "auto-reply/reply/commands-acp/targets.ts", + forbiddenPatterns: [/\bgetThreadBindingManager\b/], + }, + { + path: "auto-reply/reply/commands-subagents/action-focus.ts", + forbiddenPatterns: [/\bgetThreadBindingManager\b/], + }, +]; + +describe("ACP/session binding architecture guardrails", () => { + it("keeps ACP/focus flows off Discord thread-binding manager APIs", () => { + for (const source of GUARDED_SOURCES) { + const absolutePath = resolve(ROOT_DIR, source.path); + const text = readFileSync(absolutePath, "utf8"); + for (const pattern of source.forbiddenPatterns) { + expect(text).not.toMatch(pattern); + } + } + }); +}); diff --git a/src/agents/acp-spawn.test.ts b/src/agents/acp-spawn.test.ts new file mode 100644 index 00000000000..f722451d0c6 --- /dev/null +++ b/src/agents/acp-spawn.test.ts @@ -0,0 +1,373 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import type { SessionBindingRecord } from "../infra/outbound/session-binding-service.js"; + +const hoisted = vi.hoisted(() => { + const callGatewayMock = vi.fn(); + const sessionBindingCapabilitiesMock = vi.fn(); + const sessionBindingBindMock = vi.fn(); + const sessionBindingUnbindMock = vi.fn(); + const sessionBindingResolveByConversationMock = vi.fn(); + const sessionBindingListBySessionMock = vi.fn(); + const closeSessionMock = vi.fn(); + const initializeSessionMock = vi.fn(); + const state = { + cfg: { + acp: { + enabled: true, + backend: "acpx", + allowedAgents: ["codex"], + }, + session: { + mainKey: "main", + scope: "per-sender", + }, + channels: { + discord: { + threadBindings: { + enabled: true, + spawnAcpSessions: true, + }, + }, + }, + } as OpenClawConfig, + }; + return { + callGatewayMock, + 
sessionBindingCapabilitiesMock, + sessionBindingBindMock, + sessionBindingUnbindMock, + sessionBindingResolveByConversationMock, + sessionBindingListBySessionMock, + closeSessionMock, + initializeSessionMock, + state, + }; +}); + +vi.mock("../config/config.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + loadConfig: () => hoisted.state.cfg, + }; +}); + +vi.mock("../gateway/call.js", () => ({ + callGateway: (opts: unknown) => hoisted.callGatewayMock(opts), +})); + +vi.mock("../acp/control-plane/manager.js", () => { + return { + getAcpSessionManager: () => ({ + initializeSession: (params: unknown) => hoisted.initializeSessionMock(params), + closeSession: (params: unknown) => hoisted.closeSessionMock(params), + }), + }; +}); + +vi.mock("../infra/outbound/session-binding-service.js", async (importOriginal) => { + const actual = + await importOriginal(); + return { + ...actual, + getSessionBindingService: () => ({ + bind: (input: unknown) => hoisted.sessionBindingBindMock(input), + getCapabilities: (params: unknown) => hoisted.sessionBindingCapabilitiesMock(params), + listBySession: (targetSessionKey: string) => + hoisted.sessionBindingListBySessionMock(targetSessionKey), + resolveByConversation: (ref: unknown) => hoisted.sessionBindingResolveByConversationMock(ref), + touch: vi.fn(), + unbind: (input: unknown) => hoisted.sessionBindingUnbindMock(input), + }), + }; +}); + +const { spawnAcpDirect } = await import("./acp-spawn.js"); + +function createSessionBinding(overrides?: Partial): SessionBindingRecord { + return { + bindingId: "default:child-thread", + targetSessionKey: "agent:codex:acp:s1", + targetKind: "session", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "child-thread", + parentConversationId: "parent-channel", + }, + status: "active", + boundAt: Date.now(), + metadata: { + agentId: "codex", + boundBy: "system", + }, + ...overrides, + }; +} + +describe("spawnAcpDirect", () 
=> { + beforeEach(() => { + hoisted.state.cfg = { + acp: { + enabled: true, + backend: "acpx", + allowedAgents: ["codex"], + }, + session: { + mainKey: "main", + scope: "per-sender", + }, + channels: { + discord: { + threadBindings: { + enabled: true, + spawnAcpSessions: true, + }, + }, + }, + } satisfies OpenClawConfig; + + hoisted.callGatewayMock.mockReset().mockImplementation(async (argsUnknown: unknown) => { + const args = argsUnknown as { method?: string }; + if (args.method === "sessions.patch") { + return { ok: true }; + } + if (args.method === "agent") { + return { runId: "run-1" }; + } + if (args.method === "sessions.delete") { + return { ok: true }; + } + return {}; + }); + + hoisted.closeSessionMock.mockReset().mockResolvedValue({ + runtimeClosed: true, + metaCleared: false, + }); + hoisted.initializeSessionMock.mockReset().mockImplementation(async (argsUnknown: unknown) => { + const args = argsUnknown as { + sessionKey: string; + agent: string; + mode: "persistent" | "oneshot"; + cwd?: string; + }; + const runtimeSessionName = `${args.sessionKey}:runtime`; + const cwd = typeof args.cwd === "string" ? args.cwd : undefined; + return { + runtime: { + close: vi.fn().mockResolvedValue(undefined), + }, + handle: { + sessionKey: args.sessionKey, + backend: "acpx", + runtimeSessionName, + ...(cwd ? { cwd } : {}), + agentSessionId: "codex-inner-1", + backendSessionId: "acpx-1", + }, + meta: { + backend: "acpx", + agent: args.agent, + runtimeSessionName, + ...(cwd ? 
{ runtimeOptions: { cwd }, cwd } : {}), + identity: { + state: "pending", + source: "ensure", + acpxSessionId: "acpx-1", + agentSessionId: "codex-inner-1", + lastUpdatedAt: Date.now(), + }, + mode: args.mode, + state: "idle", + lastActivityAt: Date.now(), + }, + }; + }); + + hoisted.sessionBindingCapabilitiesMock.mockReset().mockReturnValue({ + adapterAvailable: true, + bindSupported: true, + unbindSupported: true, + placements: ["current", "child"], + }); + hoisted.sessionBindingBindMock + .mockReset() + .mockImplementation( + async (input: { + targetSessionKey: string; + conversation: { accountId: string }; + metadata?: Record; + }) => + createSessionBinding({ + targetSessionKey: input.targetSessionKey, + conversation: { + channel: "discord", + accountId: input.conversation.accountId, + conversationId: "child-thread", + parentConversationId: "parent-channel", + }, + metadata: { + boundBy: + typeof input.metadata?.boundBy === "string" ? input.metadata.boundBy : "system", + agentId: "codex", + webhookId: "wh-1", + }, + }), + ); + hoisted.sessionBindingResolveByConversationMock.mockReset().mockReturnValue(null); + hoisted.sessionBindingListBySessionMock.mockReset().mockReturnValue([]); + hoisted.sessionBindingUnbindMock.mockReset().mockResolvedValue([]); + }); + + it("spawns ACP session, binds a new thread, and dispatches initial task", async () => { + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + mode: "session", + thread: true, + }, + { + agentSessionKey: "agent:main:main", + agentChannel: "discord", + agentAccountId: "default", + agentTo: "channel:parent-channel", + agentThreadId: "requester-thread", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.childSessionKey).toMatch(/^agent:codex:acp:/); + expect(result.runId).toBe("run-1"); + expect(result.mode).toBe("session"); + expect(hoisted.sessionBindingBindMock).toHaveBeenCalledWith( + expect.objectContaining({ + targetKind: "session", + 
placement: "child", + }), + ); + expect(hoisted.sessionBindingBindMock).toHaveBeenCalledWith( + expect.objectContaining({ + metadata: expect.objectContaining({ + introText: expect.not.stringContaining( + "session ids: pending (available after the first reply)", + ), + }), + }), + ); + + const agentCall = hoisted.callGatewayMock.mock.calls + .map((call: unknown[]) => call[0] as { method?: string; params?: Record }) + .find((request) => request.method === "agent"); + expect(agentCall?.params?.sessionKey).toMatch(/^agent:codex:acp:/); + expect(agentCall?.params?.to).toBe("channel:child-thread"); + expect(agentCall?.params?.threadId).toBe("child-thread"); + expect(agentCall?.params?.deliver).toBe(true); + expect(hoisted.initializeSessionMock).toHaveBeenCalledWith( + expect.objectContaining({ + sessionKey: expect.stringMatching(/^agent:codex:acp:/), + agent: "codex", + mode: "persistent", + }), + ); + }); + + it("includes cwd in ACP thread intro banner when provided at spawn time", async () => { + const result = await spawnAcpDirect( + { + task: "Check workspace", + agentId: "codex", + cwd: "/home/bob/clawd", + mode: "session", + thread: true, + }, + { + agentSessionKey: "agent:main:main", + agentChannel: "discord", + agentAccountId: "default", + agentTo: "channel:parent-channel", + }, + ); + + expect(result.status).toBe("accepted"); + expect(hoisted.sessionBindingBindMock).toHaveBeenCalledWith( + expect.objectContaining({ + metadata: expect.objectContaining({ + introText: expect.stringContaining("cwd: /home/bob/clawd"), + }), + }), + ); + }); + + it("rejects disallowed ACP agents", async () => { + hoisted.state.cfg = { + ...hoisted.state.cfg, + acp: { + enabled: true, + backend: "acpx", + allowedAgents: ["claudecode"], + }, + }; + + const result = await spawnAcpDirect( + { + task: "hello", + agentId: "codex", + }, + { + agentSessionKey: "agent:main:main", + }, + ); + + expect(result).toMatchObject({ + status: "forbidden", + }); + }); + + it("requires an explicit ACP 
agent when no config default exists", async () => { + const result = await spawnAcpDirect( + { + task: "hello", + }, + { + agentSessionKey: "agent:main:main", + }, + ); + + expect(result.status).toBe("error"); + expect(result.error).toContain("set `acp.defaultAgent`"); + }); + + it("fails fast when Discord ACP thread spawn is disabled", async () => { + hoisted.state.cfg = { + ...hoisted.state.cfg, + channels: { + discord: { + threadBindings: { + enabled: true, + spawnAcpSessions: false, + }, + }, + }, + }; + + const result = await spawnAcpDirect( + { + task: "hello", + agentId: "codex", + thread: true, + mode: "session", + }, + { + agentChannel: "discord", + agentAccountId: "default", + agentTo: "channel:parent-channel", + }, + ); + + expect(result.status).toBe("error"); + expect(result.error).toContain("spawnAcpSessions=true"); + }); +}); diff --git a/src/agents/acp-spawn.ts b/src/agents/acp-spawn.ts new file mode 100644 index 00000000000..1ebd7b9d856 --- /dev/null +++ b/src/agents/acp-spawn.ts @@ -0,0 +1,424 @@ +import crypto from "node:crypto"; +import { getAcpSessionManager } from "../acp/control-plane/manager.js"; +import { + cleanupFailedAcpSpawn, + type AcpSpawnRuntimeCloseHandle, +} from "../acp/control-plane/spawn.js"; +import { isAcpEnabledByPolicy, resolveAcpAgentPolicyError } from "../acp/policy.js"; +import { + resolveAcpSessionCwd, + resolveAcpThreadSessionDetailLines, +} from "../acp/runtime/session-identifiers.js"; +import type { AcpRuntimeSessionMode } from "../acp/runtime/types.js"; +import { + resolveThreadBindingIntroText, + resolveThreadBindingThreadName, +} from "../channels/thread-bindings-messages.js"; +import { + formatThreadBindingDisabledError, + formatThreadBindingSpawnDisabledError, + resolveThreadBindingSessionTtlMsForChannel, + resolveThreadBindingSpawnPolicy, +} from "../channels/thread-bindings-policy.js"; +import { loadConfig } from "../config/config.js"; +import type { OpenClawConfig } from "../config/config.js"; +import { 
callGateway } from "../gateway/call.js"; +import { resolveConversationIdFromTargets } from "../infra/outbound/conversation-id.js"; +import { + getSessionBindingService, + isSessionBindingError, + type SessionBindingRecord, +} from "../infra/outbound/session-binding-service.js"; +import { normalizeAgentId } from "../routing/session-key.js"; +import { normalizeDeliveryContext } from "../utils/delivery-context.js"; + +export const ACP_SPAWN_MODES = ["run", "session"] as const; +export type SpawnAcpMode = (typeof ACP_SPAWN_MODES)[number]; + +export type SpawnAcpParams = { + task: string; + label?: string; + agentId?: string; + cwd?: string; + mode?: SpawnAcpMode; + thread?: boolean; +}; + +export type SpawnAcpContext = { + agentSessionKey?: string; + agentChannel?: string; + agentAccountId?: string; + agentTo?: string; + agentThreadId?: string | number; +}; + +export type SpawnAcpResult = { + status: "accepted" | "forbidden" | "error"; + childSessionKey?: string; + runId?: string; + mode?: SpawnAcpMode; + note?: string; + error?: string; +}; + +export const ACP_SPAWN_ACCEPTED_NOTE = + "initial ACP task queued in isolated session; follow-ups continue in the bound thread."; +export const ACP_SPAWN_SESSION_ACCEPTED_NOTE = + "thread-bound ACP session stays active after this task; continue in-thread for follow-ups."; + +type PreparedAcpThreadBinding = { + channel: string; + accountId: string; + conversationId: string; +}; + +function resolveSpawnMode(params: { + requestedMode?: SpawnAcpMode; + threadRequested: boolean; +}): SpawnAcpMode { + if (params.requestedMode === "run" || params.requestedMode === "session") { + return params.requestedMode; + } + // Thread-bound spawns should default to persistent sessions. + return params.threadRequested ? "session" : "run"; +} + +function resolveAcpSessionMode(mode: SpawnAcpMode): AcpRuntimeSessionMode { + return mode === "session" ? 
"persistent" : "oneshot"; +} + +function resolveTargetAcpAgentId(params: { + requestedAgentId?: string; + cfg: OpenClawConfig; +}): { ok: true; agentId: string } | { ok: false; error: string } { + const requested = normalizeOptionalAgentId(params.requestedAgentId); + if (requested) { + return { ok: true, agentId: requested }; + } + + const configuredDefault = normalizeOptionalAgentId(params.cfg.acp?.defaultAgent); + if (configuredDefault) { + return { ok: true, agentId: configuredDefault }; + } + + return { + ok: false, + error: + "ACP target agent is not configured. Pass `agentId` in `sessions_spawn` or set `acp.defaultAgent` in config.", + }; +} + +function normalizeOptionalAgentId(value: string | undefined | null): string | undefined { + const trimmed = (value ?? "").trim(); + if (!trimmed) { + return undefined; + } + return normalizeAgentId(trimmed); +} + +function summarizeError(err: unknown): string { + if (err instanceof Error) { + return err.message; + } + if (typeof err === "string") { + return err; + } + return "error"; +} + +function resolveConversationIdForThreadBinding(params: { + to?: string; + threadId?: string | number; +}): string | undefined { + return resolveConversationIdFromTargets({ + threadId: params.threadId, + targets: [params.to], + }); +} + +function prepareAcpThreadBinding(params: { + cfg: OpenClawConfig; + channel?: string; + accountId?: string; + to?: string; + threadId?: string | number; +}): { ok: true; binding: PreparedAcpThreadBinding } | { ok: false; error: string } { + const channel = params.channel?.trim().toLowerCase(); + if (!channel) { + return { + ok: false, + error: "thread=true for ACP sessions requires a channel context.", + }; + } + + const accountId = params.accountId?.trim() || "default"; + const policy = resolveThreadBindingSpawnPolicy({ + cfg: params.cfg, + channel, + accountId, + kind: "acp", + }); + if (!policy.enabled) { + return { + ok: false, + error: formatThreadBindingDisabledError({ + channel: policy.channel, 
+ accountId: policy.accountId, + kind: "acp", + }), + }; + } + if (!policy.spawnEnabled) { + return { + ok: false, + error: formatThreadBindingSpawnDisabledError({ + channel: policy.channel, + accountId: policy.accountId, + kind: "acp", + }), + }; + } + const bindingService = getSessionBindingService(); + const capabilities = bindingService.getCapabilities({ + channel: policy.channel, + accountId: policy.accountId, + }); + if (!capabilities.adapterAvailable) { + return { + ok: false, + error: `Thread bindings are unavailable for ${policy.channel}.`, + }; + } + if (!capabilities.bindSupported || !capabilities.placements.includes("child")) { + return { + ok: false, + error: `Thread bindings do not support ACP thread spawn for ${policy.channel}.`, + }; + } + const conversationId = resolveConversationIdForThreadBinding({ + to: params.to, + threadId: params.threadId, + }); + if (!conversationId) { + return { + ok: false, + error: `Could not resolve a ${policy.channel} conversation for ACP thread spawn.`, + }; + } + + return { + ok: true, + binding: { + channel: policy.channel, + accountId: policy.accountId, + conversationId, + }, + }; +} + +export async function spawnAcpDirect( + params: SpawnAcpParams, + ctx: SpawnAcpContext, +): Promise { + const cfg = loadConfig(); + if (!isAcpEnabledByPolicy(cfg)) { + return { + status: "forbidden", + error: "ACP is disabled by policy (`acp.enabled=false`).", + }; + } + + const requestThreadBinding = params.thread === true; + const spawnMode = resolveSpawnMode({ + requestedMode: params.mode, + threadRequested: requestThreadBinding, + }); + if (spawnMode === "session" && !requestThreadBinding) { + return { + status: "error", + error: 'mode="session" requires thread=true so the ACP session can stay bound to a thread.', + }; + } + + const targetAgentResult = resolveTargetAcpAgentId({ + requestedAgentId: params.agentId, + cfg, + }); + if (!targetAgentResult.ok) { + return { + status: "error", + error: targetAgentResult.error, + }; + } + 
const targetAgentId = targetAgentResult.agentId; + const agentPolicyError = resolveAcpAgentPolicyError(cfg, targetAgentId); + if (agentPolicyError) { + return { + status: "forbidden", + error: agentPolicyError.message, + }; + } + + const sessionKey = `agent:${targetAgentId}:acp:${crypto.randomUUID()}`; + const runtimeMode = resolveAcpSessionMode(spawnMode); + + let preparedBinding: PreparedAcpThreadBinding | null = null; + if (requestThreadBinding) { + const prepared = prepareAcpThreadBinding({ + cfg, + channel: ctx.agentChannel, + accountId: ctx.agentAccountId, + to: ctx.agentTo, + threadId: ctx.agentThreadId, + }); + if (!prepared.ok) { + return { + status: "error", + error: prepared.error, + }; + } + preparedBinding = prepared.binding; + } + + const acpManager = getAcpSessionManager(); + const bindingService = getSessionBindingService(); + let binding: SessionBindingRecord | null = null; + let sessionCreated = false; + let initializedRuntime: AcpSpawnRuntimeCloseHandle | undefined; + try { + await callGateway({ + method: "sessions.patch", + params: { + key: sessionKey, + ...(params.label ? 
{ label: params.label } : {}), + }, + timeoutMs: 10_000, + }); + sessionCreated = true; + const initialized = await acpManager.initializeSession({ + cfg, + sessionKey, + agent: targetAgentId, + mode: runtimeMode, + cwd: params.cwd, + backendId: cfg.acp?.backend, + }); + initializedRuntime = { + runtime: initialized.runtime, + handle: initialized.handle, + }; + + if (preparedBinding) { + binding = await bindingService.bind({ + targetSessionKey: sessionKey, + targetKind: "session", + conversation: { + channel: preparedBinding.channel, + accountId: preparedBinding.accountId, + conversationId: preparedBinding.conversationId, + }, + placement: "child", + metadata: { + threadName: resolveThreadBindingThreadName({ + agentId: targetAgentId, + label: params.label || targetAgentId, + }), + agentId: targetAgentId, + label: params.label || undefined, + boundBy: "system", + introText: resolveThreadBindingIntroText({ + agentId: targetAgentId, + label: params.label || undefined, + sessionTtlMs: resolveThreadBindingSessionTtlMsForChannel({ + cfg, + channel: preparedBinding.channel, + accountId: preparedBinding.accountId, + }), + sessionCwd: resolveAcpSessionCwd(initialized.meta), + sessionDetails: resolveAcpThreadSessionDetailLines({ + sessionKey, + meta: initialized.meta, + }), + }), + }, + }); + if (!binding?.conversation.conversationId) { + throw new Error( + `Failed to create and bind a ${preparedBinding.channel} thread for this ACP session.`, + ); + } + } + } catch (err) { + await cleanupFailedAcpSpawn({ + cfg, + sessionKey, + shouldDeleteSession: sessionCreated, + deleteTranscript: true, + runtimeCloseHandle: initializedRuntime, + }); + return { + status: "error", + error: isSessionBindingError(err) ? 
err.message : summarizeError(err), + }; + } + + const requesterOrigin = normalizeDeliveryContext({ + channel: ctx.agentChannel, + accountId: ctx.agentAccountId, + to: ctx.agentTo, + threadId: ctx.agentThreadId, + }); + // For thread-bound ACP spawns, force bootstrap delivery to the new child thread. + const boundThreadIdRaw = binding?.conversation.conversationId; + const boundThreadId = boundThreadIdRaw ? String(boundThreadIdRaw).trim() || undefined : undefined; + const fallbackThreadIdRaw = requesterOrigin?.threadId; + const fallbackThreadId = + fallbackThreadIdRaw != null ? String(fallbackThreadIdRaw).trim() || undefined : undefined; + const deliveryThreadId = boundThreadId ?? fallbackThreadId; + const inferredDeliveryTo = boundThreadId + ? `channel:${boundThreadId}` + : requesterOrigin?.to?.trim() || (deliveryThreadId ? `channel:${deliveryThreadId}` : undefined); + const hasDeliveryTarget = Boolean(requesterOrigin?.channel && inferredDeliveryTo); + const childIdem = crypto.randomUUID(); + let childRunId: string = childIdem; + try { + const response = await callGateway<{ runId?: string }>({ + method: "agent", + params: { + message: params.task, + sessionKey, + channel: hasDeliveryTarget ? requesterOrigin?.channel : undefined, + to: hasDeliveryTarget ? inferredDeliveryTo : undefined, + accountId: hasDeliveryTarget ? (requesterOrigin?.accountId ?? undefined) : undefined, + threadId: hasDeliveryTarget ? 
deliveryThreadId : undefined, + idempotencyKey: childIdem, + deliver: hasDeliveryTarget, + label: params.label || undefined, + }, + timeoutMs: 10_000, + }); + if (typeof response?.runId === "string" && response.runId.trim()) { + childRunId = response.runId.trim(); + } + } catch (err) { + await cleanupFailedAcpSpawn({ + cfg, + sessionKey, + shouldDeleteSession: true, + deleteTranscript: true, + }); + return { + status: "error", + error: summarizeError(err), + childSessionKey: sessionKey, + }; + } + + return { + status: "accepted", + childSessionKey: sessionKey, + runId: childRunId, + mode: spawnMode, + note: spawnMode === "session" ? ACP_SPAWN_SESSION_ACCEPTED_NOTE : ACP_SPAWN_ACCEPTED_NOTE, + }; +} diff --git a/src/agents/apply-patch.test.ts b/src/agents/apply-patch.test.ts index 5a2dae87e75..575f3f21d87 100644 --- a/src/agents/apply-patch.test.ts +++ b/src/agents/apply-patch.test.ts @@ -13,6 +13,15 @@ async function withTempDir(fn: (dir: string) => Promise) { } } +async function withWorkspaceTempDir(fn: (dir: string) => Promise) { + const dir = await fs.mkdtemp(path.join(process.cwd(), "openclaw-patch-workspace-")); + try { + return await fn(dir); + } finally { + await fs.rm(dir, { recursive: true, force: true }); + } +} + function buildAddFilePatch(targetPath: string): string { return `*** Begin Patch *** Add File: ${targetPath} @@ -159,6 +168,69 @@ describe("applyPatch", () => { }); }); + it("rejects broken final symlink targets outside cwd by default", async () => { + if (process.platform === "win32") { + return; + } + await withWorkspaceTempDir(async (dir) => { + const outsideDir = path.join(path.dirname(dir), `outside-broken-link-${Date.now()}`); + const outsideFile = path.join(outsideDir, "owned.txt"); + const linkPath = path.join(dir, "jump"); + await fs.mkdir(outsideDir, { recursive: true }); + await fs.symlink(outsideFile, linkPath); + + const patch = `*** Begin Patch +*** Add File: jump ++pwned +*** End Patch`; + + try { + await expect(applyPatch(patch, 
{ cwd: dir })).rejects.toThrow( + /Symlink escapes sandbox root/, + ); + await expect(fs.readFile(outsideFile, "utf8")).rejects.toBeDefined(); + } finally { + await fs.rm(outsideDir, { recursive: true, force: true }); + } + }); + }); + + it("rejects hardlink alias escapes by default", async () => { + if (process.platform === "win32") { + return; + } + await withTempDir(async (dir) => { + const outside = path.join( + path.dirname(dir), + `outside-hardlink-${process.pid}-${Date.now()}.txt`, + ); + const linkPath = path.join(dir, "hardlink.txt"); + await fs.writeFile(outside, "initial\n", "utf8"); + try { + try { + await fs.link(outside, linkPath); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + const patch = `*** Begin Patch +*** Update File: hardlink.txt +@@ +-initial ++pwned +*** End Patch`; + await expect(applyPatch(patch, { cwd: dir })).rejects.toThrow(/hardlink|sandbox/i); + const outsideContents = await fs.readFile(outside, "utf8"); + expect(outsideContents).toBe("initial\n"); + } finally { + await fs.rm(linkPath, { force: true }); + await fs.rm(outside, { force: true }); + } + }); + }); + it("allows symlinks that resolve within cwd by default", async () => { await withTempDir(async (dir) => { const target = path.join(dir, "target.txt"); diff --git a/src/agents/apply-patch.ts b/src/agents/apply-patch.ts index fecf4cf03bc..cc3bf7df07c 100644 --- a/src/agents/apply-patch.ts +++ b/src/agents/apply-patch.ts @@ -1,7 +1,11 @@ +import syncFs from "node:fs"; import fs from "node:fs/promises"; import path from "node:path"; import type { AgentTool } from "@mariozechner/pi-agent-core"; import { Type } from "@sinclair/typebox"; +import { openBoundaryFile, type BoundaryFileOpenResult } from "../infra/boundary-file-read.js"; +import { writeFileWithinRoot } from "../infra/fs-safe.js"; +import { PATH_ALIAS_POLICIES, type PathAliasPolicy } from "../infra/path-alias-guards.js"; import { applyUpdateHunk } from 
"./apply-patch-update.js"; import { assertSandboxPath, resolveSandboxInputPath } from "./sandbox-paths.js"; import type { SandboxFsBridge } from "./sandbox/fs-bridge.js"; @@ -154,7 +158,7 @@ export async function applyPatch( } if (hunk.kind === "delete") { - const target = await resolvePatchPath(hunk.path, options, "unlink"); + const target = await resolvePatchPath(hunk.path, options, PATH_ALIAS_POLICIES.unlinkTarget); await fileOps.remove(target.resolved); recordSummary(summary, seen, "deleted", target.display); continue; @@ -234,9 +238,37 @@ function resolvePatchFileOps(options: ApplyPatchOptions): PatchFileOps { mkdirp: (dir) => bridge.mkdirp({ filePath: dir, cwd: root }), }; } + const workspaceOnly = options.workspaceOnly !== false; return { - readFile: (filePath) => fs.readFile(filePath, "utf8"), - writeFile: (filePath, content) => fs.writeFile(filePath, content, "utf8"), + readFile: async (filePath) => { + if (!workspaceOnly) { + return await fs.readFile(filePath, "utf8"); + } + const opened = await openBoundaryFile({ + absolutePath: filePath, + rootPath: options.cwd, + boundaryLabel: "workspace root", + }); + assertBoundaryRead(opened, filePath); + try { + return syncFs.readFileSync(opened.fd, "utf8"); + } finally { + syncFs.closeSync(opened.fd); + } + }, + writeFile: async (filePath, content) => { + if (!workspaceOnly) { + await fs.writeFile(filePath, content, "utf8"); + return; + } + const relative = toRelativeWorkspacePath(options.cwd, filePath); + await writeFileWithinRoot({ + rootDir: options.cwd, + relativePath: relative, + data: content, + encoding: "utf8", + }); + }, remove: (filePath) => fs.rm(filePath), mkdirp: (dir) => fs.mkdir(dir, { recursive: true }).then(() => {}), }; @@ -253,7 +285,7 @@ async function ensureDir(filePath: string, ops: PatchFileOps) { async function resolvePatchPath( filePath: string, options: ApplyPatchOptions, - purpose: "readWrite" | "unlink" = "readWrite", + aliasPolicy: PathAliasPolicy = PATH_ALIAS_POLICIES.strict, ): 
Promise<{ resolved: string; display: string }> { if (options.sandbox) { const resolved = options.sandbox.bridge.resolvePath({ @@ -265,7 +297,8 @@ async function resolvePatchPath( filePath: resolved.hostPath, cwd: options.cwd, root: options.cwd, - allowFinalSymlink: purpose === "unlink", + allowFinalSymlinkForUnlink: aliasPolicy.allowFinalSymlinkForUnlink, + allowFinalHardlinkForUnlink: aliasPolicy.allowFinalHardlinkForUnlink, }); } return { @@ -281,7 +314,8 @@ async function resolvePatchPath( filePath, cwd: options.cwd, root: options.cwd, - allowFinalSymlink: purpose === "unlink", + allowFinalSymlinkForUnlink: aliasPolicy.allowFinalSymlinkForUnlink, + allowFinalHardlinkForUnlink: aliasPolicy.allowFinalHardlinkForUnlink, }) ).resolved : resolvePathFromCwd(filePath, options.cwd); @@ -295,6 +329,27 @@ function resolvePathFromCwd(filePath: string, cwd: string): string { return path.normalize(resolveSandboxInputPath(filePath, cwd)); } +function toRelativeWorkspacePath(workspaceRoot: string, absolutePath: string): string { + const rootResolved = path.resolve(workspaceRoot); + const resolved = path.resolve(absolutePath); + const relative = path.relative(rootResolved, resolved); + if (!relative || relative === "." || relative.startsWith("..") || path.isAbsolute(relative)) { + throw new Error(`Path escapes sandbox root (${workspaceRoot}): ${absolutePath}`); + } + return relative; +} + +function assertBoundaryRead( + opened: BoundaryFileOpenResult, + targetPath: string, +): asserts opened is Extract { + if (opened.ok) { + return; + } + const reason = opened.reason === "validation" ? 
"unsafe path" : "path not found"; + throw new Error(`Failed boundary read for ${targetPath} (${reason})`); +} + function toDisplayPath(resolved: string, cwd: string): string { const relative = path.relative(cwd, resolved); if (!relative || relative === "") { diff --git a/src/agents/auth-profiles.ensureauthprofilestore.test.ts b/src/agents/auth-profiles.ensureauthprofilestore.test.ts index e106a2391e7..537cb9512d4 100644 --- a/src/agents/auth-profiles.ensureauthprofilestore.test.ts +++ b/src/agents/auth-profiles.ensureauthprofilestore.test.ts @@ -1,9 +1,9 @@ import fs from "node:fs"; import os from "node:os"; import path from "node:path"; -import { describe, expect, it } from "vitest"; +import { describe, expect, it, vi } from "vitest"; import { ensureAuthProfileStore } from "./auth-profiles.js"; -import { AUTH_STORE_VERSION } from "./auth-profiles/constants.js"; +import { AUTH_STORE_VERSION, log } from "./auth-profiles/constants.js"; describe("ensureAuthProfileStore", () => { it("migrates legacy auth.json and deletes it (PR #368)", () => { @@ -122,4 +122,156 @@ describe("ensureAuthProfileStore", () => { fs.rmSync(root, { recursive: true, force: true }); } }); + + it("normalizes auth-profiles credential aliases with canonical-field precedence", () => { + const cases = [ + { + name: "mode/apiKey aliases map to type/key", + profile: { + provider: "anthropic", + mode: "api_key", + apiKey: "sk-ant-alias", + }, + expected: { + type: "api_key", + key: "sk-ant-alias", + }, + }, + { + name: "canonical type overrides conflicting mode alias", + profile: { + provider: "anthropic", + type: "api_key", + mode: "token", + key: "sk-ant-canonical", + }, + expected: { + type: "api_key", + key: "sk-ant-canonical", + }, + }, + { + name: "canonical key overrides conflicting apiKey alias", + profile: { + provider: "anthropic", + type: "api_key", + key: "sk-ant-canonical", + apiKey: "sk-ant-alias", + }, + expected: { + type: "api_key", + key: "sk-ant-canonical", + }, + }, + { + name: 
"canonical profile shape remains unchanged", + profile: { + provider: "anthropic", + type: "api_key", + key: "sk-ant-direct", + }, + expected: { + type: "api_key", + key: "sk-ant-direct", + }, + }, + ] as const; + + for (const testCase of cases) { + const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-auth-alias-")); + try { + const storeData = { + version: AUTH_STORE_VERSION, + profiles: { + "anthropic:work": testCase.profile, + }, + }; + fs.writeFileSync( + path.join(agentDir, "auth-profiles.json"), + `${JSON.stringify(storeData, null, 2)}\n`, + "utf8", + ); + + const store = ensureAuthProfileStore(agentDir); + expect(store.profiles["anthropic:work"], testCase.name).toMatchObject(testCase.expected); + } finally { + fs.rmSync(agentDir, { recursive: true, force: true }); + } + } + }); + + it("normalizes mode/apiKey aliases while migrating legacy auth.json", () => { + const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-auth-legacy-alias-")); + try { + fs.writeFileSync( + path.join(agentDir, "auth.json"), + `${JSON.stringify( + { + anthropic: { + provider: "anthropic", + mode: "api_key", + apiKey: "sk-ant-legacy", + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + + const store = ensureAuthProfileStore(agentDir); + expect(store.profiles["anthropic:default"]).toMatchObject({ + type: "api_key", + provider: "anthropic", + key: "sk-ant-legacy", + }); + } finally { + fs.rmSync(agentDir, { recursive: true, force: true }); + } + }); + + it("logs one warning with aggregated reasons for rejected auth-profiles entries", () => { + const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-auth-invalid-")); + const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined); + try { + const invalidStore = { + version: AUTH_STORE_VERSION, + profiles: { + "anthropic:missing-type": { + provider: "anthropic", + }, + "openai:missing-provider": { + type: "api_key", + key: "sk-openai", + }, + "qwen:not-object": "broken", + }, + }; + 
fs.writeFileSync( + path.join(agentDir, "auth-profiles.json"), + `${JSON.stringify(invalidStore, null, 2)}\n`, + "utf8", + ); + + const store = ensureAuthProfileStore(agentDir); + expect(store.profiles).toEqual({}); + expect(warnSpy).toHaveBeenCalledTimes(1); + expect(warnSpy).toHaveBeenCalledWith( + "ignored invalid auth profile entries during store load", + { + source: "auth-profiles.json", + dropped: 3, + reasons: { + invalid_type: 1, + missing_provider: 1, + non_object: 1, + }, + keys: ["anthropic:missing-type", "openai:missing-provider", "qwen:not-object"], + }, + ); + } finally { + warnSpy.mockRestore(); + fs.rmSync(agentDir, { recursive: true, force: true }); + } + }); }); diff --git a/src/agents/auth-profiles.markauthprofilefailure.test.ts b/src/agents/auth-profiles.markauthprofilefailure.test.ts index 1a30d8a9119..865fbf87816 100644 --- a/src/agents/auth-profiles.markauthprofilefailure.test.ts +++ b/src/agents/auth-profiles.markauthprofilefailure.test.ts @@ -114,6 +114,22 @@ describe("markAuthProfileFailure", () => { expect(reloaded.usageStats?.["anthropic:default"]?.cooldownUntil).toBe(firstCooldownUntil); }); }); + it("disables auth_permanent failures via disabledUntil (like billing)", async () => { + await withAuthProfileStore(async ({ agentDir, store }) => { + await markAuthProfileFailure({ + store, + profileId: "anthropic:default", + reason: "auth_permanent", + agentDir, + }); + + const stats = store.usageStats?.["anthropic:default"]; + expect(typeof stats?.disabledUntil).toBe("number"); + expect(stats?.disabledReason).toBe("auth_permanent"); + // Should NOT set cooldownUntil (that's for transient errors) + expect(stats?.cooldownUntil).toBeUndefined(); + }); + }); it("resets backoff counters outside the failure window", async () => { const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-auth-")); try { diff --git a/src/agents/auth-profiles.readonly-sync.test.ts b/src/agents/auth-profiles.readonly-sync.test.ts new file mode 100644 index 
00000000000..2ef1c40d2f8 --- /dev/null +++ b/src/agents/auth-profiles.readonly-sync.test.ts @@ -0,0 +1,67 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { AUTH_STORE_VERSION } from "./auth-profiles/constants.js"; +import type { AuthProfileStore } from "./auth-profiles/types.js"; + +const mocks = vi.hoisted(() => ({ + syncExternalCliCredentials: vi.fn((store: AuthProfileStore) => { + store.profiles["qwen-portal:default"] = { + type: "oauth", + provider: "qwen-portal", + access: "access-token", + refresh: "refresh-token", + expires: Date.now() + 60_000, + }; + return true; + }), +})); + +vi.mock("./auth-profiles/external-cli-sync.js", () => ({ + syncExternalCliCredentials: mocks.syncExternalCliCredentials, +})); + +const { loadAuthProfileStoreForRuntime } = await import("./auth-profiles.js"); + +describe("auth profiles read-only external CLI sync", () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + it("syncs external CLI credentials in-memory without writing auth-profiles.json in read-only mode", () => { + const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-auth-readonly-sync-")); + try { + const authPath = path.join(agentDir, "auth-profiles.json"); + const baseline: AuthProfileStore = { + version: AUTH_STORE_VERSION, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + key: "sk-test", + }, + }, + }; + fs.writeFileSync(authPath, `${JSON.stringify(baseline, null, 2)}\n`, "utf8"); + + const loaded = loadAuthProfileStoreForRuntime(agentDir, { readOnly: true }); + + expect(mocks.syncExternalCliCredentials).toHaveBeenCalled(); + expect(loaded.profiles["qwen-portal:default"]).toMatchObject({ + type: "oauth", + provider: "qwen-portal", + }); + + const persisted = JSON.parse(fs.readFileSync(authPath, "utf8")) as AuthProfileStore; + expect(persisted.profiles["qwen-portal:default"]).toBeUndefined(); + 
expect(persisted.profiles["openai:default"]).toMatchObject({ + type: "api_key", + provider: "openai", + key: "sk-test", + }); + } finally { + fs.rmSync(agentDir, { recursive: true, force: true }); + } + }); +}); diff --git a/src/agents/auth-profiles.runtime-snapshot-save.test.ts b/src/agents/auth-profiles.runtime-snapshot-save.test.ts new file mode 100644 index 00000000000..3cb3d238975 --- /dev/null +++ b/src/agents/auth-profiles.runtime-snapshot-save.test.ts @@ -0,0 +1,72 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { + activateSecretsRuntimeSnapshot, + clearSecretsRuntimeSnapshot, + prepareSecretsRuntimeSnapshot, +} from "../secrets/runtime.js"; +import { ensureAuthProfileStore, markAuthProfileUsed } from "./auth-profiles.js"; + +describe("auth profile runtime snapshot persistence", () => { + it("does not write resolved plaintext keys during usage updates", async () => { + const stateDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-auth-runtime-save-")); + const agentDir = path.join(stateDir, "agents", "main", "agent"); + const authPath = path.join(agentDir, "auth-profiles.json"); + try { + await fs.mkdir(agentDir, { recursive: true }); + await fs.writeFile( + authPath, + `${JSON.stringify( + { + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + + const snapshot = await prepareSecretsRuntimeSnapshot({ + config: {}, + env: { OPENAI_API_KEY: "sk-runtime-openai" }, + agentDirs: [agentDir], + }); + activateSecretsRuntimeSnapshot(snapshot); + + const runtimeStore = ensureAuthProfileStore(agentDir); + expect(runtimeStore.profiles["openai:default"]).toMatchObject({ + type: "api_key", + key: "sk-runtime-openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }); + + 
await markAuthProfileUsed({ + store: runtimeStore, + profileId: "openai:default", + agentDir, + }); + + const persisted = JSON.parse(await fs.readFile(authPath, "utf8")) as { + profiles: Record; + }; + expect(persisted.profiles["openai:default"]?.key).toBeUndefined(); + expect(persisted.profiles["openai:default"]?.keyRef).toEqual({ + source: "env", + provider: "default", + id: "OPENAI_API_KEY", + }); + } finally { + clearSecretsRuntimeSnapshot(); + await fs.rm(stateDir, { recursive: true, force: true }); + } + }); +}); diff --git a/src/agents/auth-profiles.store.save.test.ts b/src/agents/auth-profiles.store.save.test.ts new file mode 100644 index 00000000000..292921feaf1 --- /dev/null +++ b/src/agents/auth-profiles.store.save.test.ts @@ -0,0 +1,64 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { resolveAuthStorePath } from "./auth-profiles/paths.js"; +import { saveAuthProfileStore } from "./auth-profiles/store.js"; +import type { AuthProfileStore } from "./auth-profiles/types.js"; + +describe("saveAuthProfileStore", () => { + it("strips plaintext when keyRef/tokenRef are present", async () => { + const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-auth-save-")); + try { + const store: AuthProfileStore = { + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + key: "sk-runtime-value", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + "github-copilot:default": { + type: "token", + provider: "github-copilot", + token: "gh-runtime-token", + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, + }, + "anthropic:default": { + type: "api_key", + provider: "anthropic", + key: "sk-anthropic-plain", + }, + }, + }; + + saveAuthProfileStore(store, agentDir); + + const parsed = JSON.parse(await fs.readFile(resolveAuthStorePath(agentDir), "utf8")) as { + profiles: Record< + 
string, + { key?: string; keyRef?: unknown; token?: string; tokenRef?: unknown } + >; + }; + + expect(parsed.profiles["openai:default"]?.key).toBeUndefined(); + expect(parsed.profiles["openai:default"]?.keyRef).toEqual({ + source: "env", + provider: "default", + id: "OPENAI_API_KEY", + }); + + expect(parsed.profiles["github-copilot:default"]?.token).toBeUndefined(); + expect(parsed.profiles["github-copilot:default"]?.tokenRef).toEqual({ + source: "env", + provider: "default", + id: "GITHUB_TOKEN", + }); + + expect(parsed.profiles["anthropic:default"]?.key).toBe("sk-anthropic-plain"); + } finally { + await fs.rm(agentDir, { recursive: true, force: true }); + } + }); +}); diff --git a/src/agents/auth-profiles.ts b/src/agents/auth-profiles.ts index 42941e6b1c8..7bf01847e55 100644 --- a/src/agents/auth-profiles.ts +++ b/src/agents/auth-profiles.ts @@ -17,7 +17,11 @@ export { suggestOAuthProfileIdForLegacyDefault, } from "./auth-profiles/repair.js"; export { + clearRuntimeAuthProfileStoreSnapshots, ensureAuthProfileStore, + loadAuthProfileStoreForSecretsRuntime, + loadAuthProfileStoreForRuntime, + replaceRuntimeAuthProfileStoreSnapshots, loadAuthProfileStore, saveAuthProfileStore, } from "./auth-profiles/store.js"; diff --git a/src/agents/auth-profiles/oauth.test.ts b/src/agents/auth-profiles/oauth.test.ts index a91d3e4a5b7..e4c8c536c76 100644 --- a/src/agents/auth-profiles/oauth.test.ts +++ b/src/agents/auth-profiles/oauth.test.ts @@ -168,3 +168,138 @@ describe("resolveApiKeyForProfile token expiry handling", () => { }); }); }); + +describe("resolveApiKeyForProfile secret refs", () => { + it("resolves api_key keyRef from env", async () => { + const profileId = "openai:default"; + const previous = process.env.OPENAI_API_KEY; + process.env.OPENAI_API_KEY = "sk-openai-ref"; + try { + const result = await resolveApiKeyForProfile({ + cfg: cfgFor(profileId, "openai", "api_key"), + store: { + version: 1, + profiles: { + [profileId]: { + type: "api_key", + provider: "openai", 
+ keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + }, + }, + profileId, + }); + expect(result).toEqual({ + apiKey: "sk-openai-ref", + provider: "openai", + email: undefined, + }); + } finally { + if (previous === undefined) { + delete process.env.OPENAI_API_KEY; + } else { + process.env.OPENAI_API_KEY = previous; + } + } + }); + + it("resolves token tokenRef from env", async () => { + const profileId = "github-copilot:default"; + const previous = process.env.GITHUB_TOKEN; + process.env.GITHUB_TOKEN = "gh-ref-token"; + try { + const result = await resolveApiKeyForProfile({ + cfg: cfgFor(profileId, "github-copilot", "token"), + store: { + version: 1, + profiles: { + [profileId]: { + type: "token", + provider: "github-copilot", + token: "", + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, + }, + }, + }, + profileId, + }); + expect(result).toEqual({ + apiKey: "gh-ref-token", + provider: "github-copilot", + email: undefined, + }); + } finally { + if (previous === undefined) { + delete process.env.GITHUB_TOKEN; + } else { + process.env.GITHUB_TOKEN = previous; + } + } + }); + + it("resolves inline ${ENV} api_key values", async () => { + const profileId = "openai:inline-env"; + const previous = process.env.OPENAI_API_KEY; + process.env.OPENAI_API_KEY = "sk-openai-inline"; + try { + const result = await resolveApiKeyForProfile({ + cfg: cfgFor(profileId, "openai", "api_key"), + store: { + version: 1, + profiles: { + [profileId]: { + type: "api_key", + provider: "openai", + key: "${OPENAI_API_KEY}", + }, + }, + }, + profileId, + }); + expect(result).toEqual({ + apiKey: "sk-openai-inline", + provider: "openai", + email: undefined, + }); + } finally { + if (previous === undefined) { + delete process.env.OPENAI_API_KEY; + } else { + process.env.OPENAI_API_KEY = previous; + } + } + }); + + it("resolves inline ${ENV} token values", async () => { + const profileId = "github-copilot:inline-env"; + const previous = 
process.env.GITHUB_TOKEN; + process.env.GITHUB_TOKEN = "gh-inline-token"; + try { + const result = await resolveApiKeyForProfile({ + cfg: cfgFor(profileId, "github-copilot", "token"), + store: { + version: 1, + profiles: { + [profileId]: { + type: "token", + provider: "github-copilot", + token: "${GITHUB_TOKEN}", + }, + }, + }, + profileId, + }); + expect(result).toEqual({ + apiKey: "gh-inline-token", + provider: "github-copilot", + email: undefined, + }); + } finally { + if (previous === undefined) { + delete process.env.GITHUB_TOKEN; + } else { + process.env.GITHUB_TOKEN = previous; + } + } + }); +}); diff --git a/src/agents/auth-profiles/oauth.ts b/src/agents/auth-profiles/oauth.ts index a4f10b6a587..7303a2ec0e0 100644 --- a/src/agents/auth-profiles/oauth.ts +++ b/src/agents/auth-profiles/oauth.ts @@ -4,9 +4,11 @@ import { type OAuthCredentials, type OAuthProvider, } from "@mariozechner/pi-ai"; -import type { OpenClawConfig } from "../../config/config.js"; +import { loadConfig, type OpenClawConfig } from "../../config/config.js"; +import { coerceSecretRef } from "../../config/types.secrets.js"; import { withFileLock } from "../../infra/file-lock.js"; import { refreshQwenPortalCredentials } from "../../providers/qwen-portal-oauth.js"; +import { resolveSecretRefString, type SecretRefResolveCache } from "../../secrets/resolve.js"; import { refreshChutesTokens } from "../chutes-oauth.js"; import { AUTH_STORE_LOCK_OPTIONS, log } from "./constants.js"; import { formatAuthDoctorHint } from "./doctor.js"; @@ -97,6 +99,8 @@ type ResolveApiKeyForProfileParams = { agentDir?: string; }; +type SecretDefaults = NonNullable["defaults"]; + function adoptNewerMainOAuthCredential(params: { store: AuthProfileStore; profileId: string; @@ -234,6 +238,57 @@ async function tryResolveOAuthProfile( }); } +async function resolveProfileSecretString(params: { + profileId: string; + provider: string; + value: string | undefined; + valueRef: unknown; + refDefaults: SecretDefaults | 
undefined; + configForRefResolution: OpenClawConfig; + cache: SecretRefResolveCache; + inlineFailureMessage: string; + refFailureMessage: string; +}): Promise { + let resolvedValue = params.value?.trim(); + if (resolvedValue) { + const inlineRef = coerceSecretRef(resolvedValue, params.refDefaults); + if (inlineRef) { + try { + resolvedValue = await resolveSecretRefString(inlineRef, { + config: params.configForRefResolution, + env: process.env, + cache: params.cache, + }); + } catch (err) { + log.debug(params.inlineFailureMessage, { + profileId: params.profileId, + provider: params.provider, + error: err instanceof Error ? err.message : String(err), + }); + } + } + } + + const explicitRef = coerceSecretRef(params.valueRef, params.refDefaults); + if (!resolvedValue && explicitRef) { + try { + resolvedValue = await resolveSecretRefString(explicitRef, { + config: params.configForRefResolution, + env: process.env, + cache: params.cache, + }); + } catch (err) { + log.debug(params.refFailureMessage, { + profileId: params.profileId, + provider: params.provider, + error: err instanceof Error ? err.message : String(err), + }); + } + } + + return resolvedValue; +} + export async function resolveApiKeyForProfile( params: ResolveApiKeyForProfileParams, ): Promise<{ apiKey: string; provider: string; email?: string } | null> { @@ -255,15 +310,39 @@ export async function resolveApiKeyForProfile( return null; } + const refResolveCache: SecretRefResolveCache = {}; + const configForRefResolution = cfg ?? 
loadConfig(); + const refDefaults = configForRefResolution.secrets?.defaults; + if (cred.type === "api_key") { - const key = cred.key?.trim(); + const key = await resolveProfileSecretString({ + profileId, + provider: cred.provider, + value: cred.key, + valueRef: cred.keyRef, + refDefaults, + configForRefResolution, + cache: refResolveCache, + inlineFailureMessage: "failed to resolve inline auth profile api_key ref", + refFailureMessage: "failed to resolve auth profile api_key ref", + }); if (!key) { return null; } return buildApiKeyProfileResult({ apiKey: key, provider: cred.provider, email: cred.email }); } if (cred.type === "token") { - const token = cred.token?.trim(); + const token = await resolveProfileSecretString({ + profileId, + provider: cred.provider, + value: cred.token, + valueRef: cred.tokenRef, + refDefaults, + configForRefResolution, + cache: refResolveCache, + inlineFailureMessage: "failed to resolve inline auth profile token ref", + refFailureMessage: "failed to resolve auth profile token ref", + }); if (!token) { return null; } diff --git a/src/agents/auth-profiles/store.ts b/src/agents/auth-profiles/store.ts index 4e6b1f91bf6..0fa050e55ec 100644 --- a/src/agents/auth-profiles/store.ts +++ b/src/agents/auth-profiles/store.ts @@ -9,13 +9,72 @@ import { ensureAuthStoreFile, resolveAuthStorePath, resolveLegacyAuthStorePath } import type { AuthProfileCredential, AuthProfileStore, ProfileUsageStats } from "./types.js"; type LegacyAuthStore = Record; +type CredentialRejectReason = "non_object" | "invalid_type" | "missing_provider"; +type RejectedCredentialEntry = { key: string; reason: CredentialRejectReason }; +type LoadAuthProfileStoreOptions = { + allowKeychainPrompt?: boolean; + readOnly?: boolean; +}; -function _syncAuthProfileStore(target: AuthProfileStore, source: AuthProfileStore): void { - target.version = source.version; - target.profiles = source.profiles; - target.order = source.order; - target.lastGood = source.lastGood; - target.usageStats 
= source.usageStats; +const AUTH_PROFILE_TYPES = new Set(["api_key", "oauth", "token"]); + +const runtimeAuthStoreSnapshots = new Map(); + +function resolveRuntimeStoreKey(agentDir?: string): string { + return resolveAuthStorePath(agentDir); +} + +function cloneAuthProfileStore(store: AuthProfileStore): AuthProfileStore { + return structuredClone(store); +} + +function resolveRuntimeAuthProfileStore(agentDir?: string): AuthProfileStore | null { + if (runtimeAuthStoreSnapshots.size === 0) { + return null; + } + + const mainKey = resolveRuntimeStoreKey(undefined); + const requestedKey = resolveRuntimeStoreKey(agentDir); + const mainStore = runtimeAuthStoreSnapshots.get(mainKey); + const requestedStore = runtimeAuthStoreSnapshots.get(requestedKey); + + if (!agentDir || requestedKey === mainKey) { + if (!mainStore) { + return null; + } + return cloneAuthProfileStore(mainStore); + } + + if (mainStore && requestedStore) { + return mergeAuthProfileStores( + cloneAuthProfileStore(mainStore), + cloneAuthProfileStore(requestedStore), + ); + } + if (requestedStore) { + return cloneAuthProfileStore(requestedStore); + } + if (mainStore) { + return cloneAuthProfileStore(mainStore); + } + + return null; +} + +export function replaceRuntimeAuthProfileStoreSnapshots( + entries: Array<{ agentDir?: string; store: AuthProfileStore }>, +): void { + runtimeAuthStoreSnapshots.clear(); + for (const entry of entries) { + runtimeAuthStoreSnapshots.set( + resolveRuntimeStoreKey(entry.agentDir), + cloneAuthProfileStore(entry.store), + ); + } +} + +export function clearRuntimeAuthProfileStoreSnapshots(): void { + runtimeAuthStoreSnapshots.clear(); } export async function updateAuthProfileStoreWithLock(params: { @@ -39,6 +98,71 @@ export async function updateAuthProfileStoreWithLock(params: { } } +/** + * Normalise a raw auth-profiles.json credential entry. + * + * The official format uses `type` and (for api_key credentials) `key`. 
+ * A common mistake — caused by the similarity with the `openclaw.json` + * `auth.profiles` section which uses `mode` — is to write `mode` instead of + * `type` and `apiKey` instead of `key`. Accept both spellings so users don't + * silently lose their credentials. + */ +function normalizeRawCredentialEntry(raw: Record): Partial { + const entry = { ...raw } as Record; + // mode → type alias (openclaw.json uses "mode"; auth-profiles.json uses "type") + if (!("type" in entry) && typeof entry["mode"] === "string") { + entry["type"] = entry["mode"]; + } + // apiKey → key alias for ApiKeyCredential + if (!("key" in entry) && typeof entry["apiKey"] === "string") { + entry["key"] = entry["apiKey"]; + } + return entry as Partial; +} + +function parseCredentialEntry( + raw: unknown, + fallbackProvider?: string, +): { ok: true; credential: AuthProfileCredential } | { ok: false; reason: CredentialRejectReason } { + if (!raw || typeof raw !== "object") { + return { ok: false, reason: "non_object" }; + } + const typed = normalizeRawCredentialEntry(raw as Record); + if (!AUTH_PROFILE_TYPES.has(typed.type as AuthProfileCredential["type"])) { + return { ok: false, reason: "invalid_type" }; + } + const provider = typed.provider ?? fallbackProvider; + if (typeof provider !== "string" || provider.trim().length === 0) { + return { ok: false, reason: "missing_provider" }; + } + return { + ok: true, + credential: { + ...typed, + provider, + } as AuthProfileCredential, + }; +} + +function warnRejectedCredentialEntries(source: string, rejected: RejectedCredentialEntry[]): void { + if (rejected.length === 0) { + return; + } + const reasons = rejected.reduce( + (acc, current) => { + acc[current.reason] = (acc[current.reason] ?? 
0) + 1; + return acc; + }, + {} as Partial>, + ); + log.warn("ignored invalid auth profile entries during store load", { + source, + dropped: rejected.length, + reasons, + keys: rejected.slice(0, 10).map((entry) => entry.key), + }); +} + function coerceLegacyStore(raw: unknown): LegacyAuthStore | null { if (!raw || typeof raw !== "object") { return null; @@ -48,19 +172,16 @@ function coerceLegacyStore(raw: unknown): LegacyAuthStore | null { return null; } const entries: LegacyAuthStore = {}; + const rejected: RejectedCredentialEntry[] = []; for (const [key, value] of Object.entries(record)) { - if (!value || typeof value !== "object") { + const parsed = parseCredentialEntry(value, key); + if (!parsed.ok) { + rejected.push({ key, reason: parsed.reason }); continue; } - const typed = value as Partial; - if (typed.type !== "api_key" && typed.type !== "oauth" && typed.type !== "token") { - continue; - } - entries[key] = { - ...typed, - provider: String(typed.provider ?? key), - } as AuthProfileCredential; + entries[key] = parsed.credential; } + warnRejectedCredentialEntries("auth.json", rejected); return Object.keys(entries).length > 0 ? 
entries : null; } @@ -74,19 +195,16 @@ function coerceAuthStore(raw: unknown): AuthProfileStore | null { } const profiles = record.profiles as Record; const normalized: Record = {}; + const rejected: RejectedCredentialEntry[] = []; for (const [key, value] of Object.entries(profiles)) { - if (!value || typeof value !== "object") { + const parsed = parseCredentialEntry(value); + if (!parsed.ok) { + rejected.push({ key, reason: parsed.reason }); continue; } - const typed = value as Partial; - if (typed.type !== "api_key" && typed.type !== "oauth" && typed.type !== "token") { - continue; - } - if (!typed.provider) { - continue; - } - normalized[key] = typed as AuthProfileCredential; + normalized[key] = parsed.credential; } + warnRejectedCredentialEntries("auth-profiles.json", rejected); const order = record.order && typeof record.order === "object" ? Object.entries(record.order as Record).reduce( @@ -220,19 +338,22 @@ function applyLegacyStore(store: AuthProfileStore, legacy: LegacyAuthStore): voi } } +function loadCoercedStore(authPath: string): AuthProfileStore | null { + const raw = loadJsonFile(authPath); + return coerceAuthStore(raw); +} + export function loadAuthProfileStore(): AuthProfileStore { const authPath = resolveAuthStorePath(); - const raw = loadJsonFile(authPath); - const asStore = coerceAuthStore(raw); + const asStore = loadCoercedStore(authPath); if (asStore) { - // Sync from external CLI tools on every load + // Sync from external CLI tools on every load. 
const synced = syncExternalCliCredentials(asStore); if (synced) { saveJsonFile(authPath, asStore); } return asStore; } - const legacyRaw = loadJsonFile(resolveLegacyAuthStorePath()); const legacy = coerceLegacyStore(legacyRaw); if (legacy) { @@ -252,22 +373,23 @@ export function loadAuthProfileStore(): AuthProfileStore { function loadAuthProfileStoreForAgent( agentDir?: string, - _options?: { allowKeychainPrompt?: boolean }, + options?: LoadAuthProfileStoreOptions, ): AuthProfileStore { + const readOnly = options?.readOnly === true; const authPath = resolveAuthStorePath(agentDir); - const raw = loadJsonFile(authPath); - const asStore = coerceAuthStore(raw); + const asStore = loadCoercedStore(authPath); if (asStore) { - // Sync from external CLI tools on every load + // Runtime secret activation must remain read-only: + // sync external CLI credentials in-memory, but never persist while readOnly. const synced = syncExternalCliCredentials(asStore); - if (synced) { + if (synced && !readOnly) { saveJsonFile(authPath, asStore); } return asStore; } // Fallback: inherit auth-profiles from main agent if subagent has none - if (agentDir) { + if (agentDir && !readOnly) { const mainAuthPath = resolveAuthStorePath(); // without agentDir = main const mainRaw = loadJsonFile(mainAuthPath); const mainStore = coerceAuthStore(mainRaw); @@ -290,8 +412,10 @@ function loadAuthProfileStoreForAgent( } const mergedOAuth = mergeOAuthFileIntoStore(store); + // Keep external CLI credentials visible in runtime even during read-only loads. 
const syncedCli = syncExternalCliCredentials(store); - const shouldWrite = legacy !== null || mergedOAuth || syncedCli; + const forceReadOnly = process.env.OPENCLAW_AUTH_STORE_READONLY === "1"; + const shouldWrite = !readOnly && !forceReadOnly && (legacy !== null || mergedOAuth || syncedCli); if (shouldWrite) { saveJsonFile(authPath, store); } @@ -316,10 +440,34 @@ function loadAuthProfileStoreForAgent( return store; } +export function loadAuthProfileStoreForRuntime( + agentDir?: string, + options?: LoadAuthProfileStoreOptions, +): AuthProfileStore { + const store = loadAuthProfileStoreForAgent(agentDir, options); + const authPath = resolveAuthStorePath(agentDir); + const mainAuthPath = resolveAuthStorePath(); + if (!agentDir || authPath === mainAuthPath) { + return store; + } + + const mainStore = loadAuthProfileStoreForAgent(undefined, options); + return mergeAuthProfileStores(mainStore, store); +} + +export function loadAuthProfileStoreForSecretsRuntime(agentDir?: string): AuthProfileStore { + return loadAuthProfileStoreForRuntime(agentDir, { readOnly: true, allowKeychainPrompt: false }); +} + export function ensureAuthProfileStore( agentDir?: string, options?: { allowKeychainPrompt?: boolean }, ): AuthProfileStore { + const runtimeStore = resolveRuntimeAuthProfileStore(agentDir); + if (runtimeStore) { + return runtimeStore; + } + const store = loadAuthProfileStoreForAgent(agentDir, options); const authPath = resolveAuthStorePath(agentDir); const mainAuthPath = resolveAuthStorePath(); @@ -335,9 +483,24 @@ export function ensureAuthProfileStore( export function saveAuthProfileStore(store: AuthProfileStore, agentDir?: string): void { const authPath = resolveAuthStorePath(agentDir); + const profiles = Object.fromEntries( + Object.entries(store.profiles).map(([profileId, credential]) => { + if (credential.type === "api_key" && credential.keyRef && credential.key !== undefined) { + const sanitized = { ...credential } as Record; + delete sanitized.key; + return 
[profileId, sanitized]; + } + if (credential.type === "token" && credential.tokenRef && credential.token !== undefined) { + const sanitized = { ...credential } as Record; + delete sanitized.token; + return [profileId, sanitized]; + } + return [profileId, credential]; + }), + ) as AuthProfileStore["profiles"]; const payload = { version: AUTH_STORE_VERSION, - profiles: store.profiles, + profiles, order: store.order ?? undefined, lastGood: store.lastGood ?? undefined, usageStats: store.usageStats ?? undefined, diff --git a/src/agents/auth-profiles/types.ts b/src/agents/auth-profiles/types.ts index 7332d304812..f4e56f59d68 100644 --- a/src/agents/auth-profiles/types.ts +++ b/src/agents/auth-profiles/types.ts @@ -1,10 +1,12 @@ import type { OAuthCredentials } from "@mariozechner/pi-ai"; import type { OpenClawConfig } from "../../config/config.js"; +import type { SecretRef } from "../../config/types.secrets.js"; export type ApiKeyCredential = { type: "api_key"; provider: string; key?: string; + keyRef?: SecretRef; email?: string; /** Optional provider-specific metadata (e.g., account IDs, gateway IDs). */ metadata?: Record; @@ -18,6 +20,7 @@ export type TokenCredential = { type: "token"; provider: string; token: string; + tokenRef?: SecretRef; /** Optional expiry timestamp (ms since epoch). 
*/ expires?: number; email?: string; @@ -34,6 +37,7 @@ export type AuthProfileCredential = ApiKeyCredential | TokenCredential | OAuthCr export type AuthProfileFailureReason = | "auth" + | "auth_permanent" | "format" | "rate_limit" | "billing" diff --git a/src/agents/auth-profiles/usage.test.ts b/src/agents/auth-profiles/usage.test.ts index 0025007f729..8c499654b49 100644 --- a/src/agents/auth-profiles/usage.test.ts +++ b/src/agents/auth-profiles/usage.test.ts @@ -141,6 +141,24 @@ describe("resolveProfilesUnavailableReason", () => { ).toBe("billing"); }); + it("returns auth_permanent for active permanent auth disables", () => { + const now = Date.now(); + const store = makeStore({ + "anthropic:default": { + disabledUntil: now + 60_000, + disabledReason: "auth_permanent", + }, + }); + + expect( + resolveProfilesUnavailableReason({ + store, + profileIds: ["anthropic:default"], + now, + }), + ).toBe("auth_permanent"); + }); + it("uses recorded non-rate-limit failure counts for active cooldown windows", () => { const now = Date.now(); const store = makeStore({ @@ -490,7 +508,7 @@ describe("markAuthProfileFailure — active windows do not extend on retry", () async function markFailureAt(params: { store: ReturnType; now: number; - reason: "rate_limit" | "billing"; + reason: "rate_limit" | "billing" | "auth_permanent"; }): Promise { vi.useFakeTimers(); vi.setSystemTime(params.now); @@ -528,6 +546,18 @@ describe("markAuthProfileFailure — active windows do not extend on retry", () }), readUntil: (stats: WindowStats | undefined) => stats?.disabledUntil, }, + { + label: "disabledUntil(auth_permanent)", + reason: "auth_permanent" as const, + buildUsageStats: (now: number): WindowStats => ({ + disabledUntil: now + 20 * 60 * 60 * 1000, + disabledReason: "auth_permanent", + errorCount: 5, + failureCounts: { auth_permanent: 5 }, + lastFailureAt: now - 60_000, + }), + readUntil: (stats: WindowStats | undefined) => stats?.disabledUntil, + }, ]; for (const testCase of 
activeWindowCases) { @@ -573,6 +603,19 @@ describe("markAuthProfileFailure — active windows do not extend on retry", () expectedUntil: (now: number) => now + 20 * 60 * 60 * 1000, readUntil: (stats: WindowStats | undefined) => stats?.disabledUntil, }, + { + label: "disabledUntil(auth_permanent)", + reason: "auth_permanent" as const, + buildUsageStats: (now: number): WindowStats => ({ + disabledUntil: now - 60_000, + disabledReason: "auth_permanent", + errorCount: 5, + failureCounts: { auth_permanent: 2 }, + lastFailureAt: now - 60_000, + }), + expectedUntil: (now: number) => now + 20 * 60 * 60 * 1000, + readUntil: (stats: WindowStats | undefined) => stats?.disabledUntil, + }, ]; for (const testCase of expiredWindowCases) { diff --git a/src/agents/auth-profiles/usage.ts b/src/agents/auth-profiles/usage.ts index 958e3ae127e..60c43c9c3c8 100644 --- a/src/agents/auth-profiles/usage.ts +++ b/src/agents/auth-profiles/usage.ts @@ -4,6 +4,7 @@ import { saveAuthProfileStore, updateAuthProfileStoreWithLock } from "./store.js import type { AuthProfileFailureReason, AuthProfileStore, ProfileUsageStats } from "./types.js"; const FAILURE_REASON_PRIORITY: AuthProfileFailureReason[] = [ + "auth_permanent", "auth", "billing", "format", @@ -394,8 +395,8 @@ function computeNextProfileUsageStats(params: { lastFailureAt: params.now, }; - if (params.reason === "billing") { - const billingCount = failureCounts.billing ?? 1; + if (params.reason === "billing" || params.reason === "auth_permanent") { + const billingCount = failureCounts[params.reason] ?? 
1; const backoffMs = calculateAuthProfileBillingDisableMsWithConfig({ errorCount: billingCount, baseMs: params.cfgResolved.billingBackoffMs, @@ -408,7 +409,7 @@ function computeNextProfileUsageStats(params: { now: params.now, recomputedUntil: params.now + backoffMs, }); - updatedStats.disabledReason = "billing"; + updatedStats.disabledReason = params.reason; } else { const backoffMs = calculateAuthProfileCooldownMs(nextErrorCount); // Keep active cooldown windows immutable so retries within the window @@ -424,8 +425,9 @@ function computeNextProfileUsageStats(params: { } /** - * Mark a profile as failed for a specific reason. Billing failures are treated - * as "disabled" (longer backoff) vs the regular cooldown window. + * Mark a profile as failed for a specific reason. Billing and permanent-auth + * failures are treated as "disabled" (longer backoff) vs the regular cooldown + * window. */ export async function markAuthProfileFailure(params: { store: AuthProfileStore; diff --git a/src/agents/bash-tools.exec-approval-request.test.ts b/src/agents/bash-tools.exec-approval-request.test.ts index c14a3f62b91..7911b9bdf2b 100644 --- a/src/agents/bash-tools.exec-approval-request.test.ts +++ b/src/agents/bash-tools.exec-approval-request.test.ts @@ -40,6 +40,10 @@ describe("requestExecApprovalDecision", () => { agentId: "main", resolvedPath: "/usr/bin/echo", sessionKey: "session", + turnSourceChannel: "whatsapp", + turnSourceTo: "+15555550123", + turnSourceAccountId: "work", + turnSourceThreadId: "1739201675.123", }); expect(result).toBe("allow-once"); @@ -57,6 +61,10 @@ describe("requestExecApprovalDecision", () => { agentId: "main", resolvedPath: "/usr/bin/echo", sessionKey: "session", + turnSourceChannel: "whatsapp", + turnSourceTo: "+15555550123", + turnSourceAccountId: "work", + turnSourceThreadId: "1739201675.123", timeoutMs: DEFAULT_APPROVAL_TIMEOUT_MS, twoPhase: true, }, diff --git a/src/agents/bash-tools.exec-approval-request.ts 
b/src/agents/bash-tools.exec-approval-request.ts index 83323845c0c..842fcc1dcf4 100644 --- a/src/agents/bash-tools.exec-approval-request.ts +++ b/src/agents/bash-tools.exec-approval-request.ts @@ -8,6 +8,8 @@ import { callGatewayTool } from "./tools/gateway.js"; export type RequestExecApprovalDecisionParams = { id: string; command: string; + commandArgv?: string[]; + env?: Record; cwd: string; nodeId?: string; host: "gateway" | "node"; @@ -16,8 +18,58 @@ export type RequestExecApprovalDecisionParams = { agentId?: string; resolvedPath?: string; sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; }; +type ExecApprovalRequestToolParams = { + id: string; + command: string; + commandArgv?: string[]; + env?: Record; + cwd: string; + nodeId?: string; + host: "gateway" | "node"; + security: ExecSecurity; + ask: ExecAsk; + agentId?: string; + resolvedPath?: string; + sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; + timeoutMs: number; + twoPhase: true; +}; + +function buildExecApprovalRequestToolParams( + params: RequestExecApprovalDecisionParams, +): ExecApprovalRequestToolParams { + return { + id: params.id, + command: params.command, + commandArgv: params.commandArgv, + env: params.env, + cwd: params.cwd, + nodeId: params.nodeId, + host: params.host, + security: params.security, + ask: params.ask, + agentId: params.agentId, + resolvedPath: params.resolvedPath, + sessionKey: params.sessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, + timeoutMs: DEFAULT_APPROVAL_TIMEOUT_MS, + twoPhase: true, + }; +} + type ParsedDecision = { present: boolean; value: string | null }; function parseDecision(value: unknown): ParsedDecision { @@ -59,20 +111,7 
@@ export async function registerExecApprovalRequest( }>( "exec.approval.request", { timeoutMs: DEFAULT_APPROVAL_REQUEST_TIMEOUT_MS }, - { - id: params.id, - command: params.command, - cwd: params.cwd, - nodeId: params.nodeId, - host: params.host, - security: params.security, - ask: params.ask, - agentId: params.agentId, - resolvedPath: params.resolvedPath, - sessionKey: params.sessionKey, - timeoutMs: DEFAULT_APPROVAL_TIMEOUT_MS, - twoPhase: true, - }, + buildExecApprovalRequestToolParams(params), { expectFinal: false }, ); const decision = parseDecision(registrationResult); @@ -116,6 +155,8 @@ export async function requestExecApprovalDecision( export async function requestExecApprovalDecisionForHost(params: { approvalId: string; command: string; + commandArgv?: string[]; + env?: Record; workdir: string; host: "gateway" | "node"; nodeId?: string; @@ -124,10 +165,16 @@ export async function requestExecApprovalDecisionForHost(params: { agentId?: string; resolvedPath?: string; sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; }): Promise { return await requestExecApprovalDecision({ id: params.approvalId, command: params.command, + commandArgv: params.commandArgv, + env: params.env, cwd: params.workdir, nodeId: params.nodeId, host: params.host, @@ -136,12 +183,18 @@ export async function requestExecApprovalDecisionForHost(params: { agentId: params.agentId, resolvedPath: params.resolvedPath, sessionKey: params.sessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, }); } export async function registerExecApprovalRequestForHost(params: { approvalId: string; command: string; + commandArgv?: string[]; + env?: Record; workdir: string; host: "gateway" | "node"; nodeId?: string; @@ -150,10 +203,16 @@ export async function 
registerExecApprovalRequestForHost(params: { agentId?: string; resolvedPath?: string; sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; }): Promise { return await registerExecApprovalRequest({ id: params.approvalId, command: params.command, + commandArgv: params.commandArgv, + env: params.env, cwd: params.workdir, nodeId: params.nodeId, host: params.host, @@ -162,5 +221,9 @@ export async function registerExecApprovalRequestForHost(params: { agentId: params.agentId, resolvedPath: params.resolvedPath, sessionKey: params.sessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, }); } diff --git a/src/agents/bash-tools.exec-host-gateway.ts b/src/agents/bash-tools.exec-host-gateway.ts index 60711910975..9ce27e077cb 100644 --- a/src/agents/bash-tools.exec-host-gateway.ts +++ b/src/agents/bash-tools.exec-host-gateway.ts @@ -44,6 +44,10 @@ export type ProcessGatewayAllowlistParams = { safeBinProfiles: Readonly>; agentId?: string; sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; scopeKey?: string; warnings: string[]; notifySessionKey?: string; @@ -159,6 +163,10 @@ export async function processGatewayAllowlist( agentId: params.agentId, resolvedPath, sessionKey: params.sessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, }); expiresAtMs = registration.expiresAtMs; preResolvedDecision = registration.finalDecision; diff --git a/src/agents/bash-tools.exec-host-node.ts b/src/agents/bash-tools.exec-host-node.ts index 5a45c869292..1c210ef7b88 100644 --- a/src/agents/bash-tools.exec-host-node.ts +++ 
b/src/agents/bash-tools.exec-host-node.ts @@ -35,6 +35,10 @@ export type ExecuteNodeHostCommandParams = { requestedNode?: string; boundNode?: string; sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; agentId?: string; security: ExecSecurity; ask: ExecAsk; @@ -194,6 +198,8 @@ export async function executeNodeHostCommand( const registration = await registerExecApprovalRequestForHost({ approvalId, command: params.command, + commandArgv: argv, + env: nodeEnv, workdir: params.workdir, host: "node", nodeId, @@ -201,6 +207,10 @@ export async function executeNodeHostCommand( ask: hostAsk, agentId: params.agentId, sessionKey: params.sessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, }); expiresAtMs = registration.expiresAtMs; preResolvedDecision = registration.finalDecision; diff --git a/src/agents/bash-tools.exec-types.ts b/src/agents/bash-tools.exec-types.ts index 24227a134c4..bef8ea4bff1 100644 --- a/src/agents/bash-tools.exec-types.ts +++ b/src/agents/bash-tools.exec-types.ts @@ -21,6 +21,9 @@ export type ExecToolDefaults = { scopeKey?: string; sessionKey?: string; messageProvider?: string; + currentChannelId?: string; + currentThreadTs?: string; + accountId?: string; notifyOnExit?: boolean; notifyOnExitEmptySuccess?: boolean; cwd?: string; diff --git a/src/agents/bash-tools.exec.ts b/src/agents/bash-tools.exec.ts index fac68eb823f..105815cf3d8 100644 --- a/src/agents/bash-tools.exec.ts +++ b/src/agents/bash-tools.exec.ts @@ -407,6 +407,10 @@ export function createExecTool( requestedNode: params.node?.trim(), boundNode: defaults?.node?.trim(), sessionKey: defaults?.sessionKey, + turnSourceChannel: defaults?.messageProvider, + turnSourceTo: defaults?.currentChannelId, + turnSourceAccountId: defaults?.accountId, + turnSourceThreadId: 
defaults?.currentThreadTs, agentId, security, ask, @@ -433,6 +437,10 @@ export function createExecTool( safeBinProfiles, agentId, sessionKey: defaults?.sessionKey, + turnSourceChannel: defaults?.messageProvider, + turnSourceTo: defaults?.currentChannelId, + turnSourceAccountId: defaults?.accountId, + turnSourceThreadId: defaults?.currentThreadTs, scopeKey: defaults?.scopeKey, warnings, notifySessionKey, diff --git a/src/agents/cli-runner/helpers.ts b/src/agents/cli-runner/helpers.ts index e211e3df49c..dbabca75faa 100644 --- a/src/agents/cli-runner/helpers.ts +++ b/src/agents/cli-runner/helpers.ts @@ -93,6 +93,7 @@ export function buildSystemPrompt(params: { reasoningTagHint: false, heartbeatPrompt: params.heartbeatPrompt, docsPath: params.docsPath, + acpEnabled: params.config?.acp?.enabled !== false, runtimeInfo, toolNames: params.tools.map((tool) => tool.name), modelAliasLines: buildModelAliasLines(params.config), diff --git a/src/agents/failover-error.test.ts b/src/agents/failover-error.test.ts index d7c1edccbe1..8b2cb846298 100644 --- a/src/agents/failover-error.test.ts +++ b/src/agents/failover-error.test.ts @@ -4,6 +4,7 @@ import { describeFailoverError, isTimeoutError, resolveFailoverReasonFromError, + resolveFailoverStatus, } from "./failover-error.js"; describe("failover-error", () => { @@ -69,6 +70,36 @@ describe("failover-error", () => { expect(err?.status).toBe(400); }); + it("401/403 with generic message still returns auth (backward compat)", () => { + expect(resolveFailoverReasonFromError({ status: 401, message: "Unauthorized" })).toBe("auth"); + expect(resolveFailoverReasonFromError({ status: 403, message: "Forbidden" })).toBe("auth"); + }); + + it("401 with permanent auth message returns auth_permanent", () => { + expect(resolveFailoverReasonFromError({ status: 401, message: "invalid_api_key" })).toBe( + "auth_permanent", + ); + }); + + it("403 with revoked key message returns auth_permanent", () => { + expect(resolveFailoverReasonFromError({ status: 
403, message: "api key revoked" })).toBe( + "auth_permanent", + ); + }); + + it("resolveFailoverStatus maps auth_permanent to 403", () => { + expect(resolveFailoverStatus("auth_permanent")).toBe(403); + }); + + it("coerces permanent auth error with correct reason", () => { + const err = coerceToFailoverError( + { status: 401, message: "invalid_api_key" }, + { provider: "anthropic", model: "claude-opus-4-6" }, + ); + expect(err?.reason).toBe("auth_permanent"); + expect(err?.provider).toBe("anthropic"); + }); + it("describes non-Error values consistently", () => { const described = describeFailoverError(123); expect(described.message).toBe("123"); diff --git a/src/agents/failover-error.ts b/src/agents/failover-error.ts index 4de2babde4d..708af55e322 100644 --- a/src/agents/failover-error.ts +++ b/src/agents/failover-error.ts @@ -1,4 +1,8 @@ -import { classifyFailoverReason, type FailoverReason } from "./pi-embedded-helpers.js"; +import { + classifyFailoverReason, + isAuthPermanentErrorMessage, + type FailoverReason, +} from "./pi-embedded-helpers.js"; const TIMEOUT_HINT_RE = /timeout|timed out|deadline exceeded|context deadline exceeded|stop reason:\s*abort|reason:\s*abort|unhandled stop reason:\s*abort/i; @@ -47,6 +51,8 @@ export function resolveFailoverStatus(reason: FailoverReason): number | undefine return 429; case "auth": return 401; + case "auth_permanent": + return 403; case "timeout": return 408; case "format": @@ -158,6 +164,10 @@ export function resolveFailoverReasonFromError(err: unknown): FailoverReason | n return "rate_limit"; } if (status === 401 || status === 403) { + const msg = getErrorMessage(err); + if (msg && isAuthPermanentErrorMessage(msg)) { + return "auth_permanent"; + } return "auth"; } if (status === 408) { diff --git a/src/agents/model-auth-label.test.ts b/src/agents/model-auth-label.test.ts new file mode 100644 index 00000000000..adcb6ce49b6 --- /dev/null +++ b/src/agents/model-auth-label.test.ts @@ -0,0 +1,76 @@ +import { beforeEach, 
describe, expect, it, vi } from "vitest"; + +const ensureAuthProfileStoreMock = vi.hoisted(() => vi.fn()); +const resolveAuthProfileOrderMock = vi.hoisted(() => vi.fn()); +const resolveAuthProfileDisplayLabelMock = vi.hoisted(() => vi.fn()); + +vi.mock("./auth-profiles.js", () => ({ + ensureAuthProfileStore: (...args: unknown[]) => ensureAuthProfileStoreMock(...args), + resolveAuthProfileOrder: (...args: unknown[]) => resolveAuthProfileOrderMock(...args), + resolveAuthProfileDisplayLabel: (...args: unknown[]) => + resolveAuthProfileDisplayLabelMock(...args), +})); + +vi.mock("./model-auth.js", () => ({ + getCustomProviderApiKey: () => undefined, + resolveEnvApiKey: () => null, +})); + +const { resolveModelAuthLabel } = await import("./model-auth-label.js"); + +describe("resolveModelAuthLabel", () => { + beforeEach(() => { + ensureAuthProfileStoreMock.mockReset(); + resolveAuthProfileOrderMock.mockReset(); + resolveAuthProfileDisplayLabelMock.mockReset(); + }); + + it("does not throw when token profile only has tokenRef", () => { + ensureAuthProfileStoreMock.mockReturnValue({ + version: 1, + profiles: { + "github-copilot:default": { + type: "token", + provider: "github-copilot", + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, + }, + }, + } as never); + resolveAuthProfileOrderMock.mockReturnValue(["github-copilot:default"]); + resolveAuthProfileDisplayLabelMock.mockReturnValue("github-copilot:default"); + + const label = resolveModelAuthLabel({ + provider: "github-copilot", + cfg: {}, + sessionEntry: { authProfileOverride: "github-copilot:default" } as never, + }); + + expect(label).toContain("token ref(env:GITHUB_TOKEN)"); + }); + + it("masks short api-key profile values", () => { + const shortSecret = "abc123"; + ensureAuthProfileStoreMock.mockReturnValue({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + key: shortSecret, + }, + }, + } as never); + 
resolveAuthProfileOrderMock.mockReturnValue(["openai:default"]); + resolveAuthProfileDisplayLabelMock.mockReturnValue("openai:default"); + + const label = resolveModelAuthLabel({ + provider: "openai", + cfg: {}, + sessionEntry: { authProfileOverride: "openai:default" } as never, + }); + + expect(label).toContain("api-key"); + expect(label).toContain("..."); + expect(label).not.toContain(shortSecret); + }); +}); diff --git a/src/agents/model-auth-label.ts b/src/agents/model-auth-label.ts index 9781791574b..4538cc1c872 100644 --- a/src/agents/model-auth-label.ts +++ b/src/agents/model-auth-label.ts @@ -1,5 +1,6 @@ import type { OpenClawConfig } from "../config/config.js"; import type { SessionEntry } from "../config/sessions.js"; +import { maskApiKey } from "../utils/mask-api-key.js"; import { ensureAuthProfileStore, resolveAuthProfileDisplayLabel, @@ -13,10 +14,21 @@ function formatApiKeySnippet(apiKey: string): string { if (!compact) { return "unknown"; } - const edge = compact.length >= 12 ? 6 : 4; - const head = compact.slice(0, edge); - const tail = compact.slice(-edge); - return `${head}…${tail}`; + return maskApiKey(compact); +} + +function formatCredentialSnippet(params: { + value: string | undefined; + ref: { source: string; id: string } | undefined; +}): string { + const value = typeof params.value === "string" ? params.value.trim() : ""; + if (value) { + return formatApiKeySnippet(value); + } + if (params.ref) { + return `ref(${params.ref.source}:${params.ref.id})`; + } + return "unknown"; } export function resolveModelAuthLabel(params: { @@ -57,9 +69,13 @@ export function resolveModelAuthLabel(params: { return `oauth${label ? ` (${label})` : ""}`; } if (profile.type === "token") { - return `token ${formatApiKeySnippet(profile.token)}${label ? ` (${label})` : ""}`; + return `token ${formatCredentialSnippet({ value: profile.token, ref: profile.tokenRef })}${ + label ? ` (${label})` : "" + }`; } - return `api-key ${formatApiKeySnippet(profile.key ?? 
"")}${label ? ` (${label})` : ""}`; + return `api-key ${formatCredentialSnippet({ value: profile.key, ref: profile.keyRef })}${ + label ? ` (${label})` : "" + }`; } const envKey = resolveEnvApiKey(providerKey); diff --git a/src/agents/model-catalog.test-harness.ts b/src/agents/model-catalog.test-harness.ts index 26b8bb10736..0c4633d6748 100644 --- a/src/agents/model-catalog.test-harness.ts +++ b/src/agents/model-catalog.test-harness.ts @@ -31,6 +31,7 @@ export function mockCatalogImportFailThenRecover() { throw new Error("boom"); } return { + discoverAuthStorage: () => ({}), AuthStorage: class {}, ModelRegistry: class { getAll() { diff --git a/src/agents/model-catalog.test.ts b/src/agents/model-catalog.test.ts index ada47c86126..8641b8b6c4d 100644 --- a/src/agents/model-catalog.test.ts +++ b/src/agents/model-catalog.test.ts @@ -38,6 +38,7 @@ describe("loadModelCatalog", () => { __setModelCatalogImportForTest( async () => ({ + discoverAuthStorage: () => ({}), AuthStorage: class {}, ModelRegistry: class { getAll() { @@ -69,6 +70,7 @@ describe("loadModelCatalog", () => { __setModelCatalogImportForTest( async () => ({ + discoverAuthStorage: () => ({}), AuthStorage: class {}, ModelRegistry: class { getAll() { @@ -108,6 +110,7 @@ describe("loadModelCatalog", () => { __setModelCatalogImportForTest( async () => ({ + discoverAuthStorage: () => ({}), AuthStorage: class {}, ModelRegistry: class { getAll() { @@ -154,6 +157,7 @@ describe("loadModelCatalog", () => { __setModelCatalogImportForTest( async () => ({ + discoverAuthStorage: () => ({}), AuthStorage: class {}, ModelRegistry: class { getAll() { @@ -196,6 +200,7 @@ describe("loadModelCatalog", () => { __setModelCatalogImportForTest( async () => ({ + discoverAuthStorage: () => ({}), AuthStorage: class {}, ModelRegistry: class { getAll() { diff --git a/src/agents/model-catalog.ts b/src/agents/model-catalog.ts index 82ca5686493..ccae3baa18a 100644 --- a/src/agents/model-catalog.ts +++ b/src/agents/model-catalog.ts @@ -154,14 
+154,6 @@ export function __setModelCatalogImportForTest(loader?: () => Promise unknown }; - if (typeof withFactory.create === "function") { - return withFactory.create(path); - } - return new (AuthStorageLike as { new (path: string): unknown })(path); -} - export async function loadModelCatalog(params?: { config?: OpenClawConfig; useCache?: boolean; @@ -186,9 +178,6 @@ export async function loadModelCatalog(params?: { try { const cfg = params?.config ?? loadConfig(); await ensureOpenClawModelsJson(cfg); - await ( - await import("./pi-auth-json.js") - ).ensurePiAuthJsonFromAuthProfiles(resolveOpenClawAgentDir()); // IMPORTANT: keep the dynamic import *inside* the try/catch. // If this fails once (e.g. during a pnpm install that temporarily swaps node_modules), // we must not poison the cache with a rejected promise (otherwise all channel handlers @@ -196,7 +185,7 @@ export async function loadModelCatalog(params?: { const piSdk = await importPiSdk(); const agentDir = resolveOpenClawAgentDir(); const { join } = await import("node:path"); - const authStorage = createAuthStorage(piSdk.AuthStorage, join(agentDir, "auth.json")); + const authStorage = piSdk.discoverAuthStorage(agentDir); const registry = new (piSdk.ModelRegistry as unknown as { new ( authStorage: unknown, diff --git a/src/agents/model-compat.test.ts b/src/agents/model-compat.test.ts index 1e11b12437f..0aed752e7a6 100644 --- a/src/agents/model-compat.test.ts +++ b/src/agents/model-compat.test.ts @@ -1,9 +1,9 @@ import type { Api, Model } from "@mariozechner/pi-ai"; +import type { ModelRegistry } from "@mariozechner/pi-coding-agent"; import { describe, expect, it } from "vitest"; import { isModernModelRef } from "./live-model-filter.js"; import { normalizeModelCompat } from "./model-compat.js"; import { resolveForwardCompatModel } from "./model-forward-compat.js"; -import type { ModelRegistry } from "./pi-model-discovery.js"; const baseModel = (): Model => ({ diff --git 
a/src/agents/model-fallback.probe.test.ts b/src/agents/model-fallback.probe.test.ts index 0c222ec2115..3e36366c4ad 100644 --- a/src/agents/model-fallback.probe.test.ts +++ b/src/agents/model-fallback.probe.test.ts @@ -163,7 +163,7 @@ describe("runWithModelFallback – probe logic", () => { expectPrimaryProbeSuccess(result, run, "recovered"); }); - it("does NOT probe non-primary candidates during cooldown", async () => { + it("attempts non-primary fallbacks during rate-limit cooldown after primary probe failure", async () => { const cfg = makeCfg({ agents: { defaults: { @@ -182,25 +182,23 @@ describe("runWithModelFallback – probe logic", () => { const almostExpired = NOW + 30 * 1000; // 30s remaining mockedGetSoonestCooldownExpiry.mockReturnValue(almostExpired); - // Primary probe fails with 429 + // Primary probe fails with 429; fallback should still be attempted for rate_limit cooldowns. const run = vi .fn() .mockRejectedValueOnce(Object.assign(new Error("rate limited"), { status: 429 })) - .mockResolvedValue("should-not-reach"); + .mockResolvedValue("fallback-ok"); - try { - await runWithModelFallback({ - cfg, - provider: "openai", - model: "gpt-4.1-mini", - run, - }); - expect.unreachable("should have thrown since all candidates exhausted"); - } catch { - // Primary was probed (i === 0 + within margin), non-primary were skipped - expect(run).toHaveBeenCalledTimes(1); // only primary was actually called - expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini"); - } + const result = await runWithModelFallback({ + cfg, + provider: "openai", + model: "gpt-4.1-mini", + run, + }); + + expect(result.result).toBe("fallback-ok"); + expect(run).toHaveBeenCalledTimes(2); + expect(run).toHaveBeenNthCalledWith(1, "openai", "gpt-4.1-mini"); + expect(run).toHaveBeenNthCalledWith(2, "anthropic", "claude-haiku-3-5"); }); it("throttles probe when called within 30s interval", async () => { diff --git a/src/agents/model-fallback.test.ts b/src/agents/model-fallback.test.ts index 
16592cdb456..cd0217faafc 100644 --- a/src/agents/model-fallback.test.ts +++ b/src/agents/model-fallback.test.ts @@ -143,10 +143,22 @@ async function expectSkippedUnavailableProvider(params: { }) { const provider = `${params.providerPrefix}-${crypto.randomUUID()}`; const cfg = makeProviderFallbackCfg(provider); - const store = makeSingleProviderStore({ + const primaryStore = makeSingleProviderStore({ provider, usageStat: params.usageStat, }); + // Include fallback provider profile so the fallback is attempted (not skipped as no-profile). + const store: AuthProfileStore = { + ...primaryStore, + profiles: { + ...primaryStore.profiles, + "fallback:default": { + type: "api_key", + provider: "fallback", + key: "test-key", + }, + }, + }; const run = createFallbackOnlyRun(); const result = await runWithStoredAuth({ @@ -436,11 +448,11 @@ describe("runWithModelFallback", () => { run, }); - // Override model failed with model_not_found → falls back to configured primary. + // Override model failed with model_not_found → tries fallbacks first (same provider). 
expect(result.result).toBe("ok"); expect(run).toHaveBeenCalledTimes(2); - expect(run.mock.calls[1]?.[0]).toBe("openai"); - expect(run.mock.calls[1]?.[1]).toBe("gpt-4.1-mini"); + expect(run.mock.calls[1]?.[0]).toBe("anthropic"); + expect(run.mock.calls[1]?.[1]).toBe("claude-haiku-3-5"); }); it("skips providers when all profiles are in cooldown", async () => { @@ -794,6 +806,296 @@ describe("runWithModelFallback", () => { expect(result.provider).toBe("openai"); expect(result.model).toBe("gpt-4.1-mini"); }); + + // Tests for Bug A fix: Model fallback with session overrides + describe("fallback behavior with session model overrides", () => { + it("allows fallbacks when session model differs from config within same provider", async () => { + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: ["anthropic/claude-sonnet-4-5", "google/gemini-2.5-flash"], + }, + }, + }, + }); + + const run = vi + .fn() + .mockRejectedValueOnce(new Error("Rate limit exceeded")) // Session model fails + .mockResolvedValueOnce("fallback success"); // First fallback succeeds + + const result = await runWithModelFallback({ + cfg, + provider: "anthropic", + model: "claude-sonnet-4-20250514", // Different from config primary + run, + }); + + expect(result.result).toBe("fallback success"); + expect(run).toHaveBeenCalledTimes(2); + expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-20250514"); + expect(run).toHaveBeenNthCalledWith(2, "anthropic", "claude-sonnet-4-5"); // Fallback tried + }); + + it("allows fallbacks with model version differences within same provider", async () => { + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: ["groq/llama-3.3-70b-versatile"], + }, + }, + }, + }); + + const run = vi + .fn() + .mockRejectedValueOnce(new Error("Weekly quota exceeded")) + .mockResolvedValueOnce("groq success"); + + const result = await runWithModelFallback({ 
+ cfg, + provider: "anthropic", + model: "claude-opus-4-5", // Version difference from config + run, + }); + + expect(result.result).toBe("groq success"); + expect(run).toHaveBeenCalledTimes(2); + expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile"); + }); + + it("still skips fallbacks when using different provider than config", async () => { + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: [], // Empty fallbacks to match working pattern + }, + }, + }, + }); + + const run = vi + .fn() + .mockRejectedValueOnce(new Error('No credentials found for profile "openai:default".')) + .mockResolvedValueOnce("config primary worked"); + + const result = await runWithModelFallback({ + cfg, + provider: "openai", // Different provider + model: "gpt-4.1-mini", + run, + }); + + // Cross-provider requests should skip configured fallbacks but still try configured primary + expect(result.result).toBe("config primary worked"); + expect(run).toHaveBeenCalledTimes(2); + expect(run).toHaveBeenNthCalledWith(1, "openai", "gpt-4.1-mini"); // Original request + expect(run).toHaveBeenNthCalledWith(2, "anthropic", "claude-opus-4-6"); // Config primary as final fallback + }); + + it("uses fallbacks when session model exactly matches config primary", async () => { + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: ["groq/llama-3.3-70b-versatile"], + }, + }, + }, + }); + + const run = vi + .fn() + .mockRejectedValueOnce(new Error("Quota exceeded")) + .mockResolvedValueOnce("fallback worked"); + + const result = await runWithModelFallback({ + cfg, + provider: "anthropic", + model: "claude-opus-4-6", // Exact match + run, + }); + + expect(result.result).toBe("fallback worked"); + expect(run).toHaveBeenCalledTimes(2); + expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile"); + }); + }); + + // Tests for Bug B fix: Rate limit vs 
auth/billing cooldown distinction + describe("fallback behavior with provider cooldowns", () => { + async function makeAuthStoreWithCooldown( + provider: string, + reason: "rate_limit" | "auth" | "billing", + ): Promise<{ store: AuthProfileStore; dir: string }> { + const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-test-")); + const now = Date.now(); + const store: AuthProfileStore = { + version: AUTH_STORE_VERSION, + profiles: { + [`${provider}:default`]: { type: "api_key", provider, key: "test-key" }, + }, + usageStats: { + [`${provider}:default`]: + reason === "rate_limit" + ? { + // Real rate-limit cooldowns are tracked through cooldownUntil + // and failureCounts, not disabledReason. + cooldownUntil: now + 300000, + failureCounts: { rate_limit: 1 }, + } + : { + // Auth/billing issues use disabledUntil + disabledUntil: now + 300000, + disabledReason: reason, + }, + }, + }; + saveAuthProfileStore(store, tmpDir); + return { store, dir: tmpDir }; + } + + it("attempts same-provider fallbacks during rate limit cooldown", async () => { + const { dir } = await makeAuthStoreWithCooldown("anthropic", "rate_limit"); + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"], + }, + }, + }, + }); + + const run = vi.fn().mockResolvedValueOnce("sonnet success"); // Fallback succeeds + + const result = await runWithModelFallback({ + cfg, + provider: "anthropic", + model: "claude-opus-4-6", + run, + agentDir: dir, + }); + + expect(result.result).toBe("sonnet success"); + expect(run).toHaveBeenCalledTimes(1); // Primary skipped, fallback attempted + expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5"); + }); + + it("skips same-provider models on auth cooldown but still tries no-profile fallback providers", async () => { + const { dir } = await makeAuthStoreWithCooldown("anthropic", "auth"); + const cfg = makeCfg({ + agents: { 
+ defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"], + }, + }, + }, + }); + + const run = vi.fn().mockResolvedValueOnce("groq success"); + + const result = await runWithModelFallback({ + cfg, + provider: "anthropic", + model: "claude-opus-4-6", + run, + agentDir: dir, + }); + + expect(result.result).toBe("groq success"); + expect(run).toHaveBeenCalledTimes(1); + expect(run).toHaveBeenNthCalledWith(1, "groq", "llama-3.3-70b-versatile"); + }); + + it("skips same-provider models on billing cooldown but still tries no-profile fallback providers", async () => { + const { dir } = await makeAuthStoreWithCooldown("anthropic", "billing"); + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"], + }, + }, + }, + }); + + const run = vi.fn().mockResolvedValueOnce("groq success"); + + const result = await runWithModelFallback({ + cfg, + provider: "anthropic", + model: "claude-opus-4-6", + run, + agentDir: dir, + }); + + expect(result.result).toBe("groq success"); + expect(run).toHaveBeenCalledTimes(1); + expect(run).toHaveBeenNthCalledWith(1, "groq", "llama-3.3-70b-versatile"); + }); + + it("tries cross-provider fallbacks when same provider has rate limit", async () => { + // Anthropic in rate limit cooldown, Groq available + const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-test-")); + const store: AuthProfileStore = { + version: AUTH_STORE_VERSION, + profiles: { + "anthropic:default": { type: "api_key", provider: "anthropic", key: "test-key" }, + "groq:default": { type: "api_key", provider: "groq", key: "test-key" }, + }, + usageStats: { + "anthropic:default": { + // Rate-limit reason is inferred from failureCounts for cooldown windows. 
+ cooldownUntil: Date.now() + 300000, + failureCounts: { rate_limit: 2 }, + }, + // Groq not in cooldown + }, + }; + saveAuthProfileStore(store, tmpDir); + + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: ["anthropic/claude-sonnet-4-5", "groq/llama-3.3-70b-versatile"], + }, + }, + }, + }); + + const run = vi + .fn() + .mockRejectedValueOnce(new Error("Still rate limited")) // Sonnet still fails + .mockResolvedValueOnce("groq success"); // Groq works + + const result = await runWithModelFallback({ + cfg, + provider: "anthropic", + model: "claude-opus-4-6", + run, + agentDir: tmpDir, + }); + + expect(result.result).toBe("groq success"); + expect(run).toHaveBeenCalledTimes(2); + expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5"); // Rate limit allows attempt + expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile"); // Cross-provider works + }); + }); }); describe("runWithImageModelFallback", () => { diff --git a/src/agents/model-fallback.ts b/src/agents/model-fallback.ts index e59d9e9357c..da03d88d847 100644 --- a/src/agents/model-fallback.ts +++ b/src/agents/model-fallback.ts @@ -224,21 +224,21 @@ function resolveFallbackCandidates(params: { const configuredFallbacks = resolveAgentModelFallbackValues( params.cfg?.agents?.defaults?.model, ); - if (sameModelCandidate(normalizedPrimary, configuredPrimary)) { - return configuredFallbacks; - } - // Preserve resilience after failover: when current model is one of the - // configured fallback refs, keep traversing the configured fallback chain. - const isConfiguredFallback = configuredFallbacks.some((raw) => { - const resolved = resolveModelRefFromString({ - raw: String(raw ?? ""), - defaultProvider, - aliasIndex, + // When user runs a different provider than config, only use configured fallbacks + // if the current model is already in that chain (e.g. session on first fallback). 
+ if (normalizedPrimary.provider !== configuredPrimary.provider) { + const isConfiguredFallback = configuredFallbacks.some((raw) => { + const resolved = resolveModelRefFromString({ + raw: String(raw ?? ""), + defaultProvider, + aliasIndex, + }); + return resolved ? sameModelCandidate(resolved.ref, normalizedPrimary) : false; }); - return resolved ? sameModelCandidate(resolved.ref, normalizedPrimary) : false; - }); - // Keep legacy override behavior for ad-hoc models outside configured chain. - return isConfiguredFallback ? configuredFallbacks : []; + return isConfiguredFallback ? configuredFallbacks : []; + } + // Same provider: always use full fallback chain (model version differences within provider). + return configuredFallbacks; })(); for (const raw of modelFallbacks) { @@ -306,6 +306,76 @@ export const _probeThrottleInternals = { resolveProbeThrottleKey, } as const; +type CooldownDecision = + | { + type: "skip"; + reason: FailoverReason; + error: string; + } + | { + type: "attempt"; + reason: FailoverReason; + markProbe: boolean; + }; + +function resolveCooldownDecision(params: { + candidate: ModelCandidate; + isPrimary: boolean; + requestedModel: boolean; + hasFallbackCandidates: boolean; + now: number; + probeThrottleKey: string; + authStore: ReturnType; + profileIds: string[]; +}): CooldownDecision { + const shouldProbe = shouldProbePrimaryDuringCooldown({ + isPrimary: params.isPrimary, + hasFallbackCandidates: params.hasFallbackCandidates, + now: params.now, + throttleKey: params.probeThrottleKey, + authStore: params.authStore, + profileIds: params.profileIds, + }); + + const inferredReason = + resolveProfilesUnavailableReason({ + store: params.authStore, + profileIds: params.profileIds, + now: params.now, + }) ?? 
"rate_limit"; + const isPersistentIssue = + inferredReason === "auth" || + inferredReason === "auth_permanent" || + inferredReason === "billing"; + if (isPersistentIssue) { + return { + type: "skip", + reason: inferredReason, + error: `Provider ${params.candidate.provider} has ${inferredReason} issue (skipping all models)`, + }; + } + + // For primary: try when requested model or when probe allows. + // For same-provider fallbacks: only relax cooldown on rate_limit, which + // is commonly model-scoped and can recover on a sibling model. + const shouldAttemptDespiteCooldown = + (params.isPrimary && (!params.requestedModel || shouldProbe)) || + (!params.isPrimary && inferredReason === "rate_limit"); + if (!shouldAttemptDespiteCooldown) { + return { + type: "skip", + reason: inferredReason, + error: `Provider ${params.candidate.provider} is in cooldown (all profiles unavailable)`, + }; + } + + return { + type: "attempt", + reason: inferredReason, + markProbe: params.isPrimary && shouldProbe, + }; +} + export async function runWithModelFallback(params: { cfg: OpenClawConfig | undefined; provider: string; @@ -342,41 +412,38 @@ export async function runWithModelFallback(params: { if (profileIds.length > 0 && !isAnyProfileAvailable) { // All profiles for this provider are in cooldown. - // For the primary model (i === 0), probe it if the soonest cooldown - // expiry is close or already past. This avoids staying on a fallback - // model long after the real rate-limit window clears. 
+ const isPrimary = i === 0; + const requestedModel = + params.provider === candidate.provider && params.model === candidate.model; const now = Date.now(); const probeThrottleKey = resolveProbeThrottleKey(candidate.provider, params.agentDir); - const shouldProbe = shouldProbePrimaryDuringCooldown({ - isPrimary: i === 0, + const decision = resolveCooldownDecision({ + candidate, + isPrimary, + requestedModel, hasFallbackCandidates, now, - throttleKey: probeThrottleKey, + probeThrottleKey, authStore, profileIds, }); - if (!shouldProbe) { - const inferredReason = - resolveProfilesUnavailableReason({ - store: authStore, - profileIds, - now, - }) ?? "rate_limit"; - // Skip without attempting + + if (decision.type === "skip") { attempts.push({ provider: candidate.provider, model: candidate.model, - error: `Provider ${candidate.provider} is in cooldown (all profiles unavailable)`, - reason: inferredReason, + error: decision.error, + reason: decision.reason, }); continue; } - // Primary model probe: attempt it despite cooldown to detect recovery. - // If it fails, the error is caught below and we fall through to the - // next candidate as usual. 
- lastProbeAttempt.set(probeThrottleKey, now); + + if (decision.markProbe) { + lastProbeAttempt.set(probeThrottleKey, now); + } } } + try { const result = await params.run(candidate.provider, candidate.model); return { diff --git a/src/agents/model-forward-compat.ts b/src/agents/model-forward-compat.ts index a160302f7eb..d99dc8ca4b3 100644 --- a/src/agents/model-forward-compat.ts +++ b/src/agents/model-forward-compat.ts @@ -1,8 +1,8 @@ import type { Api, Model } from "@mariozechner/pi-ai"; +import type { ModelRegistry } from "@mariozechner/pi-coding-agent"; import { DEFAULT_CONTEXT_TOKENS } from "./defaults.js"; import { normalizeModelCompat } from "./model-compat.js"; import { normalizeProviderId } from "./model-selection.js"; -import type { ModelRegistry } from "./pi-model-discovery.js"; const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex"; const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const; @@ -17,6 +17,14 @@ const ANTHROPIC_SONNET_TEMPLATE_MODEL_IDS = ["claude-sonnet-4-5", "claude-sonnet const ZAI_GLM5_MODEL_ID = "glm-5"; const ZAI_GLM5_TEMPLATE_MODEL_IDS = ["glm-4.7"] as const; +// gemini-3.1-pro-preview / gemini-3.1-flash-preview are not yet in pi-ai's built-in +// google-gemini-cli catalog. Clone the gemini-3-pro/flash-preview template so users +// don't get "Unknown model" errors when Google releases a new minor version. 
+const GEMINI_3_1_PRO_PREFIX = "gemini-3.1-pro"; +const GEMINI_3_1_FLASH_PREFIX = "gemini-3.1-flash"; +const GEMINI_3_1_PRO_TEMPLATE_IDS = ["gemini-3-pro-preview"] as const; +const GEMINI_3_1_FLASH_TEMPLATE_IDS = ["gemini-3-flash-preview"] as const; + function cloneFirstTemplateModel(params: { normalizedProvider: string; trimmedModelId: string; @@ -40,6 +48,8 @@ function cloneFirstTemplateModel(params: { return undefined; } +const CODEX_GPT53_ELIGIBLE_PROVIDERS = new Set(["openai-codex", "github-copilot"]); + function resolveOpenAICodexGpt53FallbackModel( provider: string, modelId: string, @@ -47,7 +57,7 @@ function resolveOpenAICodexGpt53FallbackModel( ): Model | undefined { const normalizedProvider = normalizeProviderId(provider); const trimmedModelId = modelId.trim(); - if (normalizedProvider !== "openai-codex") { + if (!CODEX_GPT53_ELIGIBLE_PROVIDERS.has(normalizedProvider)) { return undefined; } if (trimmedModelId.toLowerCase() !== OPENAI_CODEX_GPT_53_MODEL_ID) { @@ -158,6 +168,38 @@ function resolveAnthropicSonnet46ForwardCompatModel( }); } +// gemini-3.1-pro-preview / gemini-3.1-flash-preview are not present in pi-ai's built-in +// google-gemini-cli catalog yet. Clone the nearest gemini-3 template so users don't get +// "Unknown model" errors when Google Gemini CLI gains new minor-version models. 
+function resolveGoogleGeminiCli31ForwardCompatModel( + provider: string, + modelId: string, + modelRegistry: ModelRegistry, +): Model | undefined { + if (normalizeProviderId(provider) !== "google-gemini-cli") { + return undefined; + } + const trimmed = modelId.trim(); + const lower = trimmed.toLowerCase(); + + let templateIds: readonly string[]; + if (lower.startsWith(GEMINI_3_1_PRO_PREFIX)) { + templateIds = GEMINI_3_1_PRO_TEMPLATE_IDS; + } else if (lower.startsWith(GEMINI_3_1_FLASH_PREFIX)) { + templateIds = GEMINI_3_1_FLASH_TEMPLATE_IDS; + } else { + return undefined; + } + + return cloneFirstTemplateModel({ + normalizedProvider: "google-gemini-cli", + trimmedModelId: trimmed, + templateIds: [...templateIds], + modelRegistry, + patch: { reasoning: true }, + }); +} + // Z.ai's GLM-5 may not be present in pi-ai's built-in model catalog yet. // When a user configures zai/glm-5 without a models.json entry, clone glm-4.7 as a forward-compat fallback. function resolveZaiGlm5ForwardCompatModel( @@ -209,6 +251,7 @@ export function resolveForwardCompatModel( resolveOpenAICodexGpt53FallbackModel(provider, modelId, modelRegistry) ?? resolveAnthropicOpus46ForwardCompatModel(provider, modelId, modelRegistry) ?? resolveAnthropicSonnet46ForwardCompatModel(provider, modelId, modelRegistry) ?? - resolveZaiGlm5ForwardCompatModel(provider, modelId, modelRegistry) + resolveZaiGlm5ForwardCompatModel(provider, modelId, modelRegistry) ?? 
+ resolveGoogleGeminiCli31ForwardCompatModel(provider, modelId, modelRegistry) ); } diff --git a/src/agents/model-ref-profile.test.ts b/src/agents/model-ref-profile.test.ts new file mode 100644 index 00000000000..68ba917c2c1 --- /dev/null +++ b/src/agents/model-ref-profile.test.ts @@ -0,0 +1,49 @@ +import { describe, expect, it } from "vitest"; +import { splitTrailingAuthProfile } from "./model-ref-profile.js"; + +describe("splitTrailingAuthProfile", () => { + it("returns trimmed model when no profile suffix exists", () => { + expect(splitTrailingAuthProfile(" openai/gpt-5 ")).toEqual({ + model: "openai/gpt-5", + }); + }); + + it("splits trailing @profile suffix", () => { + expect(splitTrailingAuthProfile("openai/gpt-5@work")).toEqual({ + model: "openai/gpt-5", + profile: "work", + }); + }); + + it("keeps @-prefixed path segments in model ids", () => { + expect(splitTrailingAuthProfile("openai/@cf/openai/gpt-oss-20b")).toEqual({ + model: "openai/@cf/openai/gpt-oss-20b", + }); + }); + + it("supports trailing profile override after @-prefixed path segments", () => { + expect(splitTrailingAuthProfile("openai/@cf/openai/gpt-oss-20b@cf:default")).toEqual({ + model: "openai/@cf/openai/gpt-oss-20b", + profile: "cf:default", + }); + }); + + it("keeps openrouter preset paths without profile override", () => { + expect(splitTrailingAuthProfile("openrouter/@preset/kimi-2-5")).toEqual({ + model: "openrouter/@preset/kimi-2-5", + }); + }); + + it("supports openrouter preset profile overrides", () => { + expect(splitTrailingAuthProfile("openrouter/@preset/kimi-2-5@work")).toEqual({ + model: "openrouter/@preset/kimi-2-5", + profile: "work", + }); + }); + + it("does not split when suffix after @ contains slash", () => { + expect(splitTrailingAuthProfile("provider/foo@bar/baz")).toEqual({ + model: "provider/foo@bar/baz", + }); + }); +}); diff --git a/src/agents/model-ref-profile.ts b/src/agents/model-ref-profile.ts new file mode 100644 index 00000000000..76f8108ddf2 --- /dev/null 
+++ b/src/agents/model-ref-profile.ts @@ -0,0 +1,23 @@ +export function splitTrailingAuthProfile(raw: string): { + model: string; + profile?: string; +} { + const trimmed = raw.trim(); + if (!trimmed) { + return { model: "" }; + } + + const profileDelimiter = trimmed.lastIndexOf("@"); + const lastSlash = trimmed.lastIndexOf("/"); + if (profileDelimiter <= 0 || profileDelimiter <= lastSlash) { + return { model: trimmed }; + } + + const model = trimmed.slice(0, profileDelimiter).trim(); + const profile = trimmed.slice(profileDelimiter + 1).trim(); + if (!model || !profile) { + return { model: trimmed }; + } + + return { model, profile }; +} diff --git a/src/agents/model-selection.test.ts b/src/agents/model-selection.test.ts index 8a80768c0db..3e99cbe3330 100644 --- a/src/agents/model-selection.test.ts +++ b/src/agents/model-selection.test.ts @@ -304,6 +304,30 @@ describe("model-selection", () => { ref: { provider: "anthropic", model: "claude-sonnet-4-6" }, }); }); + + it("strips trailing auth profile suffix before allowlist matching", () => { + const cfg: OpenClawConfig = { + agents: { + defaults: { + models: { + "openai/@cf/openai/gpt-oss-20b": {}, + }, + }, + }, + } as OpenClawConfig; + + const result = resolveAllowedModelRef({ + cfg, + catalog: [], + raw: "openai/@cf/openai/gpt-oss-20b@cf:default", + defaultProvider: "anthropic", + }); + + expect(result).toEqual({ + key: "openai/@cf/openai/gpt-oss-20b", + ref: { provider: "openai", model: "@cf/openai/gpt-oss-20b" }, + }); + }); }); describe("resolveModelRefFromString", () => { @@ -332,6 +356,78 @@ describe("model-selection", () => { }); expect(resolved?.ref).toEqual({ provider: "openai", model: "gpt-4" }); }); + + it("strips trailing profile suffix for simple model refs", () => { + const resolved = resolveModelRefFromString({ + raw: "gpt-5@myprofile", + defaultProvider: "openai", + }); + expect(resolved?.ref).toEqual({ provider: "openai", model: "gpt-5" }); + }); + + it("strips trailing profile suffix for 
provider/model refs", () => { + const resolved = resolveModelRefFromString({ + raw: "google/gemini-flash-latest@google:bevfresh", + defaultProvider: "anthropic", + }); + expect(resolved?.ref).toEqual({ + provider: "google", + model: "gemini-flash-latest", + }); + }); + + it("preserves Cloudflare @cf model segments", () => { + const resolved = resolveModelRefFromString({ + raw: "openai/@cf/openai/gpt-oss-20b", + defaultProvider: "anthropic", + }); + expect(resolved?.ref).toEqual({ + provider: "openai", + model: "@cf/openai/gpt-oss-20b", + }); + }); + + it("preserves OpenRouter @preset model segments", () => { + const resolved = resolveModelRefFromString({ + raw: "openrouter/@preset/kimi-2-5", + defaultProvider: "anthropic", + }); + expect(resolved?.ref).toEqual({ + provider: "openrouter", + model: "@preset/kimi-2-5", + }); + }); + + it("splits trailing profile suffix after OpenRouter preset paths", () => { + const resolved = resolveModelRefFromString({ + raw: "openrouter/@preset/kimi-2-5@work", + defaultProvider: "anthropic", + }); + expect(resolved?.ref).toEqual({ + provider: "openrouter", + model: "@preset/kimi-2-5", + }); + }); + + it("strips profile suffix before alias resolution", () => { + const index = { + byAlias: new Map([ + ["kimi", { alias: "kimi", ref: { provider: "nvidia", model: "moonshotai/kimi-k2.5" } }], + ]), + byKey: new Map(), + }; + + const resolved = resolveModelRefFromString({ + raw: "kimi@nvidia:default", + defaultProvider: "openai", + aliasIndex: index, + }); + expect(resolved?.ref).toEqual({ + provider: "nvidia", + model: "moonshotai/kimi-k2.5", + }); + expect(resolved?.alias).toBe("kimi"); + }); }); describe("resolveConfiguredModelRef", () => { diff --git a/src/agents/model-selection.ts b/src/agents/model-selection.ts index ac45200039f..a094e7657d0 100644 --- a/src/agents/model-selection.ts +++ b/src/agents/model-selection.ts @@ -4,6 +4,7 @@ import { createSubsystemLogger } from "../logging/subsystem.js"; import { resolveAgentConfig, 
resolveAgentEffectiveModelPrimary } from "./agent-scope.js"; import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "./defaults.js"; import type { ModelCatalogEntry } from "./model-catalog.js"; +import { splitTrailingAuthProfile } from "./model-ref-profile.js"; import { normalizeGoogleModelId } from "./models-config.providers.js"; const log = createSubsystemLogger("model-selection"); @@ -283,18 +284,18 @@ export function resolveModelRefFromString(params: { defaultProvider: string; aliasIndex?: ModelAliasIndex; }): { ref: ModelRef; alias?: string } | null { - const trimmed = params.raw.trim(); - if (!trimmed) { + const { model } = splitTrailingAuthProfile(params.raw); + if (!model) { return null; } - if (!trimmed.includes("/")) { - const aliasKey = normalizeAliasKey(trimmed); + if (!model.includes("/")) { + const aliasKey = normalizeAliasKey(model); const aliasMatch = params.aliasIndex?.byAlias.get(aliasKey); if (aliasMatch) { return { ref: aliasMatch.ref, alias: aliasMatch.alias }; } } - const parsed = parseModelRef(trimmed, params.defaultProvider); + const parsed = parseModelRef(model, params.defaultProvider); if (!parsed) { return null; } diff --git a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts index c26142158e8..4abfa4f1ab4 100644 --- a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts +++ b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts @@ -134,6 +134,116 @@ describe("models-config", () => { }); }); + it("preserves non-empty agent apiKey/baseUrl for matching providers in merge mode", async () => { + await withTempHome(async () => { + const agentDir = resolveOpenClawAgentDir(); + await fs.mkdir(agentDir, { recursive: true }); + await fs.writeFile( + path.join(agentDir, "models.json"), + JSON.stringify( + { + providers: { + custom: { + baseUrl: "https://agent.example/v1", + apiKey: "AGENT_KEY", + api: 
"openai-responses", + models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], + }, + }, + }, + null, + 2, + ), + "utf8", + ); + + await ensureOpenClawModelsJson({ + models: { + mode: "merge", + providers: { + custom: { + baseUrl: "https://config.example/v1", + apiKey: "CONFIG_KEY", + api: "openai-responses", + models: [ + { + id: "config-model", + name: "Config model", + input: ["text"], + reasoning: false, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 2048, + }, + ], + }, + }, + }, + }); + + const parsed = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(parsed.providers.custom?.apiKey).toBe("AGENT_KEY"); + expect(parsed.providers.custom?.baseUrl).toBe("https://agent.example/v1"); + }); + }); + + it("uses config apiKey/baseUrl when existing agent values are empty", async () => { + await withTempHome(async () => { + const agentDir = resolveOpenClawAgentDir(); + await fs.mkdir(agentDir, { recursive: true }); + await fs.writeFile( + path.join(agentDir, "models.json"), + JSON.stringify( + { + providers: { + custom: { + baseUrl: "", + apiKey: "", + api: "openai-responses", + models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], + }, + }, + }, + null, + 2, + ), + "utf8", + ); + + await ensureOpenClawModelsJson({ + models: { + mode: "merge", + providers: { + custom: { + baseUrl: "https://config.example/v1", + apiKey: "CONFIG_KEY", + api: "openai-responses", + models: [ + { + id: "config-model", + name: "Config model", + input: ["text"], + reasoning: false, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 2048, + }, + ], + }, + }, + }, + }); + + const parsed = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(parsed.providers.custom?.apiKey).toBe("CONFIG_KEY"); + expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + }); + }); + it("refreshes stale explicit moonshot model 
capabilities from implicit catalog", async () => { await withTempHome(async () => { const prevKey = process.env.MOONSHOT_API_KEY; diff --git a/src/agents/models-config.providers.google-antigravity.test.ts b/src/agents/models-config.providers.google-antigravity.test.ts new file mode 100644 index 00000000000..51fe5fb32e0 --- /dev/null +++ b/src/agents/models-config.providers.google-antigravity.test.ts @@ -0,0 +1,87 @@ +import { mkdtempSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { describe, expect, it } from "vitest"; +import { + normalizeAntigravityModelId, + normalizeProviders, + type ProviderConfig, +} from "./models-config.providers.js"; + +function buildModel(id: string): NonNullable[number] { + return { + id, + name: id, + reasoning: true, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1, + maxTokens: 1, + }; +} + +function buildProvider(modelIds: string[]): ProviderConfig { + return { + baseUrl: "https://example.invalid/v1", + api: "openai-completions", + apiKey: "EXAMPLE_KEY", + models: modelIds.map((id) => buildModel(id)), + }; +} + +describe("normalizeAntigravityModelId", () => { + it.each(["gemini-3-pro", "gemini-3.1-pro", "gemini-3-1-pro"])( + "adds default -low suffix to bare pro id: %s", + (id) => { + expect(normalizeAntigravityModelId(id)).toBe(`${id}-low`); + }, + ); + + it.each([ + "gemini-3-pro-low", + "gemini-3-pro-high", + "gemini-3.1-flash", + "claude-opus-4-6-thinking", + ])("keeps already-tiered and non-pro ids unchanged: %s", (id) => { + expect(normalizeAntigravityModelId(id)).toBe(id); + }); +}); + +describe("google-antigravity provider normalization", () => { + it("normalizes bare gemini pro IDs only for google-antigravity providers", () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const providers = { + "google-antigravity": buildProvider([ + "gemini-3-pro", + "gemini-3.1-pro", + "gemini-3-1-pro", + 
"gemini-3-pro-high", + "claude-opus-4-6-thinking", + ]), + openai: buildProvider(["gpt-5"]), + }; + + const normalized = normalizeProviders({ providers, agentDir }); + + expect(normalized).not.toBe(providers); + expect(normalized?.["google-antigravity"]?.models.map((model) => model.id)).toEqual([ + "gemini-3-pro-low", + "gemini-3.1-pro-low", + "gemini-3-1-pro-low", + "gemini-3-pro-high", + "claude-opus-4-6-thinking", + ]); + expect(normalized?.openai).toBe(providers.openai); + }); + + it("returns original providers object when no antigravity IDs need normalization", () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const providers = { + "google-antigravity": buildProvider(["gemini-3-pro-low", "claude-opus-4-6-thinking"]), + }; + + const normalized = normalizeProviders({ providers, agentDir }); + + expect(normalized).toBe(providers); + }); +}); diff --git a/src/agents/models-config.providers.nvidia.test.ts b/src/agents/models-config.providers.nvidia.test.ts index 17025cb86da..02086283c84 100644 --- a/src/agents/models-config.providers.nvidia.test.ts +++ b/src/agents/models-config.providers.nvidia.test.ts @@ -1,4 +1,5 @@ import { mkdtempSync } from "node:fs"; +import { writeFile } from "node:fs/promises"; import { tmpdir } from "node:os"; import { join } from "node:path"; import { describe, expect, it } from "vitest"; @@ -54,9 +55,38 @@ describe("MiniMax implicit provider (#15275)", () => { const providers = await resolveImplicitProviders({ agentDir }); expect(providers?.minimax).toBeDefined(); expect(providers?.minimax?.api).toBe("anthropic-messages"); + expect(providers?.minimax?.authHeader).toBe(true); expect(providers?.minimax?.baseUrl).toBe("https://api.minimax.io/anthropic"); }); }); + + it("should set authHeader for minimax portal provider", async () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + await writeFile( + join(agentDir, "auth-profiles.json"), + JSON.stringify( + { + version: 1, + profiles: { + 
"minimax-portal:default": { + type: "oauth", + provider: "minimax-portal", + oauth: { + access: "token", + expires: Date.now() + 60_000, + }, + }, + }, + }, + null, + 2, + ), + "utf8", + ); + + const providers = await resolveImplicitProviders({ agentDir }); + expect(providers?.["minimax-portal"]?.authHeader).toBe(true); + }); }); describe("vLLM provider", () => { diff --git a/src/agents/models-config.providers.ts b/src/agents/models-config.providers.ts index 4f921b6dd81..584b340ea11 100644 --- a/src/agents/models-config.providers.ts +++ b/src/agents/models-config.providers.ts @@ -1,5 +1,6 @@ import type { OpenClawConfig } from "../config/config.js"; import type { ModelDefinitionConfig } from "../config/types.models.js"; +import { coerceSecretRef } from "../config/types.secrets.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; import { DEFAULT_COPILOT_API_BASE_URL, @@ -12,6 +13,7 @@ import { KILOCODE_DEFAULT_MAX_TOKENS, KILOCODE_MODEL_CATALOG, } from "../providers/kilocode-shared.js"; +import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js"; import { ensureAuthProfileStore, listProfilesForProvider } from "./auth-profiles.js"; import { discoverBedrockModels } from "./bedrock-discovery.js"; import { @@ -356,10 +358,24 @@ function resolveApiKeyFromProfiles(params: { continue; } if (cred.type === "api_key") { - return cred.key; + if (cred.key?.trim()) { + return cred.key; + } + const keyRef = coerceSecretRef(cred.keyRef); + if (keyRef?.source === "env" && keyRef.id.trim()) { + return keyRef.id.trim(); + } + continue; } if (cred.type === "token") { - return cred.token; + if (cred.token?.trim()) { + return cred.token; + } + const tokenRef = coerceSecretRef(cred.tokenRef); + if (tokenRef?.source === "env" && tokenRef.id.trim()) { + return tokenRef.id.trim(); + } + continue; } } return undefined; @@ -375,10 +391,22 @@ export function normalizeGoogleModelId(id: string): string { return id; } -function 
normalizeGoogleProvider(provider: ProviderConfig): ProviderConfig { +const ANTIGRAVITY_BARE_PRO_IDS = new Set(["gemini-3-pro", "gemini-3.1-pro", "gemini-3-1-pro"]); + +export function normalizeAntigravityModelId(id: string): string { + if (ANTIGRAVITY_BARE_PRO_IDS.has(id)) { + return `${id}-low`; + } + return id; +} + +function normalizeProviderModels( + provider: ProviderConfig, + normalizeId: (id: string) => string, +): ProviderConfig { let mutated = false; const models = provider.models.map((model) => { - const nextId = normalizeGoogleModelId(model.id); + const nextId = normalizeId(model.id); if (nextId === model.id) { return model; } @@ -388,6 +416,14 @@ function normalizeGoogleProvider(provider: ProviderConfig): ProviderConfig { return mutated ? { ...provider, models } : provider; } +function normalizeGoogleProvider(provider: ProviderConfig): ProviderConfig { + return normalizeProviderModels(provider, normalizeGoogleModelId); +} + +function normalizeAntigravityProvider(provider: ProviderConfig): ProviderConfig { + return normalizeProviderModels(provider, normalizeAntigravityModelId); +} + export function normalizeProviders(params: { providers: ModelsConfig["providers"]; agentDir: string; @@ -405,16 +441,17 @@ export function normalizeProviders(params: { for (const [key, provider] of Object.entries(providers)) { const normalizedKey = key.trim(); let normalizedProvider = provider; + const configuredApiKey = normalizedProvider.apiKey; // Fix common misconfig: apiKey set to "${ENV_VAR}" instead of "ENV_VAR". 
if ( - normalizedProvider.apiKey && - normalizeApiKeyConfig(normalizedProvider.apiKey) !== normalizedProvider.apiKey + typeof configuredApiKey === "string" && + normalizeApiKeyConfig(configuredApiKey) !== configuredApiKey ) { mutated = true; normalizedProvider = { ...normalizedProvider, - apiKey: normalizeApiKeyConfig(normalizedProvider.apiKey), + apiKey: normalizeApiKeyConfig(configuredApiKey), }; } @@ -422,7 +459,9 @@ export function normalizeProviders(params: { // Fill it from the environment or auth profiles when possible. const hasModels = Array.isArray(normalizedProvider.models) && normalizedProvider.models.length > 0; - if (hasModels && !normalizedProvider.apiKey?.trim()) { + const normalizedApiKey = normalizeOptionalSecretInput(normalizedProvider.apiKey); + const hasConfiguredApiKey = Boolean(normalizedApiKey || normalizedProvider.apiKey); + if (hasModels && !hasConfiguredApiKey) { const authMode = normalizedProvider.auth ?? (normalizedKey === "amazon-bedrock" ? "aws-sdk" : undefined); if (authMode === "aws-sdk") { @@ -451,6 +490,14 @@ export function normalizeProviders(params: { normalizedProvider = googleNormalized; } + if (normalizedKey === "google-antigravity") { + const antigravityNormalized = normalizeAntigravityProvider(normalizedProvider); + if (antigravityNormalized !== normalizedProvider) { + mutated = true; + } + normalizedProvider = antigravityNormalized; + } + next[key] = normalizedProvider; } @@ -461,6 +508,7 @@ function buildMinimaxProvider(): ProviderConfig { return { baseUrl: MINIMAX_PORTAL_BASE_URL, api: "anthropic-messages", + authHeader: true, models: [ buildMinimaxTextModel({ id: MINIMAX_DEFAULT_MODEL_ID, @@ -496,6 +544,7 @@ function buildMinimaxPortalProvider(): ProviderConfig { return { baseUrl: MINIMAX_PORTAL_BASE_URL, api: "anthropic-messages", + authHeader: true, models: [ buildMinimaxTextModel({ id: MINIMAX_DEFAULT_MODEL_ID, @@ -1013,7 +1062,13 @@ export async function resolveImplicitCopilotProvider(params: { const profileId = 
listProfilesForProvider(authStore, "github-copilot")[0]; const profile = profileId ? authStore.profiles[profileId] : undefined; if (profile && profile.type === "token") { - selectedGithubToken = profile.token; + selectedGithubToken = profile.token?.trim() ?? ""; + if (!selectedGithubToken) { + const tokenRef = coerceSecretRef(profile.tokenRef); + if (tokenRef?.source === "env" && tokenRef.id.trim()) { + selectedGithubToken = (env[tokenRef.id] ?? process.env[tokenRef.id] ?? "").trim(); + } + } } } @@ -1030,17 +1085,8 @@ export async function resolveImplicitCopilotProvider(params: { } } - // pi-coding-agent's ModelRegistry marks a model "available" only if its - // `AuthStorage` has auth configured for that provider (via auth.json/env/etc). - // Our Copilot auth lives in OpenClaw's auth-profiles store instead, so we also - // write a runtime-only auth.json entry for pi-coding-agent to pick up. - // - // This is safe because it's (1) within OpenClaw's agent dir, (2) contains the - // GitHub token (not the exchanged Copilot token), and (3) matches existing - // patterns for OAuth-like providers in pi-coding-agent. - // Note: we deliberately do not write pi-coding-agent's `auth.json` here. - // OpenClaw uses its own auth store and exchanges tokens at runtime. - // `models list` uses OpenClaw's auth heuristics for availability. + // We deliberately do not write pi-coding-agent auth.json here. + // OpenClaw keeps auth in auth-profiles and resolves runtime availability from that store. // We intentionally do NOT define custom models for Copilot in models.json. // pi-coding-agent treats providers with models as replacements requiring apiKey. 
diff --git a/src/agents/models-config.providers.volcengine-byteplus.test.ts b/src/agents/models-config.providers.volcengine-byteplus.test.ts index 9ce3ad8922d..00dd65e38f0 100644 --- a/src/agents/models-config.providers.volcengine-byteplus.test.ts +++ b/src/agents/models-config.providers.volcengine-byteplus.test.ts @@ -3,6 +3,7 @@ import { tmpdir } from "node:os"; import { join } from "node:path"; import { describe, expect, it } from "vitest"; import { captureEnv } from "../test-utils/env.js"; +import { upsertAuthProfile } from "./auth-profiles.js"; import { resolveImplicitProviders } from "./models-config.providers.js"; describe("Volcengine and BytePlus providers", () => { @@ -37,4 +38,40 @@ describe("Volcengine and BytePlus providers", () => { envSnapshot.restore(); } }); + + it("includes providers when auth profiles are env keyRef-only", async () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const envSnapshot = captureEnv(["VOLCANO_ENGINE_API_KEY", "BYTEPLUS_API_KEY"]); + delete process.env.VOLCANO_ENGINE_API_KEY; + delete process.env.BYTEPLUS_API_KEY; + + upsertAuthProfile({ + profileId: "volcengine:default", + credential: { + type: "api_key", + provider: "volcengine", + keyRef: { source: "env", provider: "default", id: "VOLCANO_ENGINE_API_KEY" }, + }, + agentDir, + }); + upsertAuthProfile({ + profileId: "byteplus:default", + credential: { + type: "api_key", + provider: "byteplus", + keyRef: { source: "env", provider: "default", id: "BYTEPLUS_API_KEY" }, + }, + agentDir, + }); + + try { + const providers = await resolveImplicitProviders({ agentDir }); + expect(providers?.volcengine?.apiKey).toBe("VOLCANO_ENGINE_API_KEY"); + expect(providers?.["volcengine-plan"]?.apiKey).toBe("VOLCANO_ENGINE_API_KEY"); + expect(providers?.byteplus?.apiKey).toBe("BYTEPLUS_API_KEY"); + expect(providers?.["byteplus-plan"]?.apiKey).toBe("BYTEPLUS_API_KEY"); + } finally { + envSnapshot.restore(); + } + }); }); diff --git a/src/agents/models-config.ts 
b/src/agents/models-config.ts index 4b38b824398..3b02737eb4c 100644 --- a/src/agents/models-config.ts +++ b/src/agents/models-config.ts @@ -142,7 +142,30 @@ export async function ensureOpenClawModelsJson( string, NonNullable[string] >; - mergedProviders = { ...existingProviders, ...providers }; + mergedProviders = {}; + for (const [key, entry] of Object.entries(existingProviders)) { + mergedProviders[key] = entry; + } + for (const [key, newEntry] of Object.entries(providers)) { + const existing = existingProviders[key] as + | (NonNullable[string] & { + apiKey?: string; + baseUrl?: string; + }) + | undefined; + if (existing) { + const preserved: Record = {}; + if (typeof existing.apiKey === "string" && existing.apiKey) { + preserved.apiKey = existing.apiKey; + } + if (typeof existing.baseUrl === "string" && existing.baseUrl) { + preserved.baseUrl = existing.baseUrl; + } + mergedProviders[key] = { ...newEntry, ...preserved }; + } else { + mergedProviders[key] = newEntry; + } + } } } diff --git a/src/agents/models-config.uses-first-github-copilot-profile-env-tokens.test.ts b/src/agents/models-config.uses-first-github-copilot-profile-env-tokens.test.ts index 50b80f2eb0e..2ea2c25da04 100644 --- a/src/agents/models-config.uses-first-github-copilot-profile-env-tokens.test.ts +++ b/src/agents/models-config.uses-first-github-copilot-profile-env-tokens.test.ts @@ -76,4 +76,38 @@ describe("models-config", () => { }); }); }); + + it("uses tokenRef env var when github-copilot profile omits plaintext token", async () => { + await withTempHome(async (home) => { + await withUnsetCopilotTokenEnv(async () => { + const fetchMock = mockCopilotTokenExchangeSuccess(); + const agentDir = path.join(home, "agent-profiles"); + await fs.mkdir(agentDir, { recursive: true }); + process.env.COPILOT_REF_TOKEN = "token-from-ref-env"; + await fs.writeFile( + path.join(agentDir, "auth-profiles.json"), + JSON.stringify( + { + version: 1, + profiles: { + "github-copilot:default": { + type: "token", + 
provider: "github-copilot", + tokenRef: { source: "env", provider: "default", id: "COPILOT_REF_TOKEN" }, + }, + }, + }, + null, + 2, + ), + ); + + await ensureOpenClawModelsJson({ models: { providers: {} } }, agentDir); + + const [, opts] = fetchMock.mock.calls[0] as [string, { headers?: Record }]; + expect(opts?.headers?.Authorization).toBe("Bearer token-from-ref-env"); + delete process.env.COPILOT_REF_TOKEN; + }); + }); + }); }); diff --git a/src/agents/models.profiles.live.test.ts b/src/agents/models.profiles.live.test.ts index 7def3441ab6..c257c24f100 100644 --- a/src/agents/models.profiles.live.test.ts +++ b/src/agents/models.profiles.live.test.ts @@ -496,7 +496,10 @@ describeLive("live models (profile keys)", () => { throw new Error(msg || "model returned error with no message"); } - if (ok.text.length === 0 && model.provider === "google") { + if ( + ok.text.length === 0 && + (model.provider === "google" || model.provider === "google-gemini-cli") + ) { skipped.push({ model: id, reason: "no text returned (likely unavailable model id)", diff --git a/src/agents/openclaw-tools.camera.test.ts b/src/agents/openclaw-tools.camera.test.ts index 3082c849609..96be774b297 100644 --- a/src/agents/openclaw-tools.camera.test.ts +++ b/src/agents/openclaw-tools.camera.test.ts @@ -43,6 +43,41 @@ beforeEach(() => { }); describe("nodes camera_snap", () => { + it("uses front/high-quality defaults when params are omitted", async () => { + callGateway.mockImplementation(async ({ method, params }) => { + if (method === "node.list") { + return mockNodeList(); + } + if (method === "node.invoke") { + expect(params).toMatchObject({ + command: "camera.snap", + params: { + facing: "front", + maxWidth: 1600, + quality: 0.95, + }, + }); + return { + payload: { + format: "jpg", + base64: "aGVsbG8=", + width: 1, + height: 1, + }, + }; + } + return unexpectedGatewayMethod(method); + }); + + const result = await executeNodes({ + action: "camera_snap", + node: NODE_ID, + }); + + const images = 
(result.content ?? []).filter((block) => block.type === "image"); + expect(images).toHaveLength(1); + }); + it("maps jpg payloads to image/jpeg", async () => { callGateway.mockImplementation(async ({ method }) => { if (method === "node.list") { @@ -103,6 +138,42 @@ describe("nodes camera_snap", () => { }); }); +describe("nodes notifications_list", () => { + it("invokes notifications.list and returns payload", async () => { + callGateway.mockImplementation(async ({ method, params }) => { + if (method === "node.list") { + return mockNodeList(["notifications.list"]); + } + if (method === "node.invoke") { + expect(params).toMatchObject({ + nodeId: NODE_ID, + command: "notifications.list", + params: {}, + }); + return { + payload: { + enabled: true, + connected: true, + count: 1, + notifications: [{ key: "n1", packageName: "com.example.app" }], + }, + }; + } + return unexpectedGatewayMethod(method); + }); + + const result = await executeNodes({ + action: "notifications_list", + node: NODE_ID, + }); + + expect(result.content?.[0]).toMatchObject({ + type: "text", + text: expect.stringContaining('"notifications"'), + }); + }); +}); + describe("nodes run", () => { it("passes invoke and command timeouts", async () => { callGateway.mockImplementation(async ({ method, params }) => { diff --git a/src/agents/openclaw-tools.sessions.test.ts b/src/agents/openclaw-tools.sessions.test.ts index 42a3210fa80..753426a4c51 100644 --- a/src/agents/openclaw-tools.sessions.test.ts +++ b/src/agents/openclaw-tools.sessions.test.ts @@ -91,6 +91,8 @@ describe("sessions tools", () => { expect(schemaProp("sessions_spawn", "runTimeoutSeconds").type).toBe("number"); expect(schemaProp("sessions_spawn", "thread").type).toBe("boolean"); expect(schemaProp("sessions_spawn", "mode").type).toBe("string"); + expect(schemaProp("sessions_spawn", "runtime").type).toBe("string"); + expect(schemaProp("sessions_spawn", "cwd").type).toBe("string"); expect(schemaProp("subagents", 
"recentMinutes").type).toBe("number"); }); diff --git a/src/agents/openclaw-tools.subagents.sessions-spawn.cron-note.test.ts b/src/agents/openclaw-tools.subagents.sessions-spawn.cron-note.test.ts new file mode 100644 index 00000000000..6e25419cf04 --- /dev/null +++ b/src/agents/openclaw-tools.subagents.sessions-spawn.cron-note.test.ts @@ -0,0 +1,65 @@ +import { beforeEach, describe, expect, it } from "vitest"; +import "./test-helpers/fast-core-tools.js"; +import { + getCallGatewayMock, + getSessionsSpawnTool, + resetSessionsSpawnConfigOverride, + setupSessionsSpawnGatewayMock, +} from "./openclaw-tools.subagents.sessions-spawn.test-harness.js"; +import { resetSubagentRegistryForTests } from "./subagent-registry.js"; +import { SUBAGENT_SPAWN_ACCEPTED_NOTE } from "./subagent-spawn.js"; + +const callGatewayMock = getCallGatewayMock(); + +type SpawnResult = { status?: string; note?: string }; + +describe("sessions_spawn: cron isolated session note suppression", () => { + beforeEach(() => { + callGatewayMock.mockReset(); + resetSubagentRegistryForTests(); + resetSessionsSpawnConfigOverride(); + }); + + it("suppresses ACCEPTED_NOTE for cron isolated sessions (mode=run)", async () => { + setupSessionsSpawnGatewayMock({}); + const tool = await getSessionsSpawnTool({ + agentSessionKey: "agent:main:cron:dd871818:run:cf959c9f", + }); + const result = await tool.execute("call-cron-run", { task: "test task", mode: "run" }); + const details = result.details as SpawnResult; + expect(details.note).toBeUndefined(); + expect(details.status).toBe("accepted"); + }); + + it("preserves ACCEPTED_NOTE for regular sessions (mode=run)", async () => { + setupSessionsSpawnGatewayMock({}); + const tool = await getSessionsSpawnTool({ + agentSessionKey: "agent:main:telegram:63448508", + }); + const result = await tool.execute("call-regular-run", { task: "test task", mode: "run" }); + const details = result.details as SpawnResult; + expect(details.note).toBe(SUBAGENT_SPAWN_ACCEPTED_NOTE); + 
expect(details.status).toBe("accepted"); + }); + + it("does not suppress ACCEPTED_NOTE for non-canonical cron-like keys", async () => { + setupSessionsSpawnGatewayMock({}); + const tool = await getSessionsSpawnTool({ + agentSessionKey: "agent:main:slack:cron:job:run:uuid", + }); + const result = await tool.execute("call-cron-like-noncanonical", { + task: "test task", + mode: "run", + }); + expect((result.details as SpawnResult).note).toBe(SUBAGENT_SPAWN_ACCEPTED_NOTE); + }); + + it("does not suppress note when agentSessionKey is undefined", async () => { + setupSessionsSpawnGatewayMock({}); + const tool = await getSessionsSpawnTool({ + agentSessionKey: undefined, + }); + const result = await tool.execute("call-no-key", { task: "test task", mode: "run" }); + expect((result.details as SpawnResult).note).toBe(SUBAGENT_SPAWN_ACCEPTED_NOTE); + }); +}); diff --git a/src/agents/openclaw-tools.ts b/src/agents/openclaw-tools.ts index d07f1d06d7f..22140d167a9 100644 --- a/src/agents/openclaw-tools.ts +++ b/src/agents/openclaw-tools.ts @@ -116,6 +116,10 @@ export function createOpenClawTools(options?: { createCanvasTool({ config: options?.config }), createNodesTool({ agentSessionKey: options?.agentSessionKey, + agentChannel: options?.agentChannel, + agentAccountId: options?.agentAccountId, + currentChannelId: options?.currentChannelId, + currentThreadTs: options?.currentThreadTs, config: options?.config, }), createCronTool({ diff --git a/src/agents/pi-auth-credentials.ts b/src/agents/pi-auth-credentials.ts new file mode 100644 index 00000000000..bf35328843d --- /dev/null +++ b/src/agents/pi-auth-credentials.ts @@ -0,0 +1,88 @@ +import type { AuthProfileCredential, AuthProfileStore } from "./auth-profiles.js"; +import { normalizeProviderId } from "./model-selection.js"; + +export type PiApiKeyCredential = { type: "api_key"; key: string }; +export type PiOAuthCredential = { + type: "oauth"; + access: string; + refresh: string; + expires: number; +}; + +export type PiCredential 
= PiApiKeyCredential | PiOAuthCredential; +export type PiCredentialMap = Record; + +export function convertAuthProfileCredentialToPi(cred: AuthProfileCredential): PiCredential | null { + if (cred.type === "api_key") { + const key = typeof cred.key === "string" ? cred.key.trim() : ""; + if (!key) { + return null; + } + return { type: "api_key", key }; + } + + if (cred.type === "token") { + const token = typeof cred.token === "string" ? cred.token.trim() : ""; + if (!token) { + return null; + } + if ( + typeof cred.expires === "number" && + Number.isFinite(cred.expires) && + Date.now() >= cred.expires + ) { + return null; + } + return { type: "api_key", key: token }; + } + + if (cred.type === "oauth") { + const access = typeof cred.access === "string" ? cred.access.trim() : ""; + const refresh = typeof cred.refresh === "string" ? cred.refresh.trim() : ""; + if (!access || !refresh || !Number.isFinite(cred.expires) || cred.expires <= 0) { + return null; + } + return { + type: "oauth", + access, + refresh, + expires: cred.expires, + }; + } + + return null; +} + +export function resolvePiCredentialMapFromStore(store: AuthProfileStore): PiCredentialMap { + const credentials: PiCredentialMap = {}; + for (const credential of Object.values(store.profiles)) { + const provider = normalizeProviderId(String(credential.provider ?? 
"")).trim(); + if (!provider || credentials[provider]) { + continue; + } + const converted = convertAuthProfileCredentialToPi(credential); + if (converted) { + credentials[provider] = converted; + } + } + return credentials; +} + +export function piCredentialsEqual(a: PiCredential | undefined, b: PiCredential): boolean { + if (!a || typeof a !== "object") { + return false; + } + if (a.type !== b.type) { + return false; + } + + if (a.type === "api_key" && b.type === "api_key") { + return a.key === b.key; + } + + if (a.type === "oauth" && b.type === "oauth") { + return a.access === b.access && a.refresh === b.refresh && a.expires === b.expires; + } + + return false; +} diff --git a/src/agents/pi-auth-json.ts b/src/agents/pi-auth-json.ts index 122efb7b9f6..5b0b2519e8f 100644 --- a/src/agents/pi-auth-json.ts +++ b/src/agents/pi-auth-json.ts @@ -1,21 +1,17 @@ import fs from "node:fs/promises"; import path from "node:path"; import { ensureAuthProfileStore } from "./auth-profiles.js"; -import type { AuthProfileCredential } from "./auth-profiles/types.js"; -import { normalizeProviderId } from "./model-selection.js"; +import { + piCredentialsEqual, + resolvePiCredentialMapFromStore, + type PiCredential, +} from "./pi-auth-credentials.js"; -type AuthJsonCredential = - | { - type: "api_key"; - key: string; - } - | { - type: "oauth"; - access: string; - refresh: string; - expires: number; - [key: string]: unknown; - }; +/** + * @deprecated Legacy bridge for older flows that still expect `agentDir/auth.json`. + * Runtime auth resolution uses auth-profiles directly and should not depend on this module. + */ +type AuthJsonCredential = PiCredential; type AuthJsonShape = Record; @@ -32,75 +28,6 @@ async function readAuthJson(filePath: string): Promise { } } -/** - * Convert an OpenClaw auth-profiles credential to pi-coding-agent auth.json format. - * Returns null if the credential cannot be converted. 
- */ -function convertCredential(cred: AuthProfileCredential): AuthJsonCredential | null { - if (cred.type === "api_key") { - const key = typeof cred.key === "string" ? cred.key.trim() : ""; - if (!key) { - return null; - } - return { type: "api_key", key }; - } - - if (cred.type === "token") { - // pi-coding-agent treats static tokens as api_key type - const token = typeof cred.token === "string" ? cred.token.trim() : ""; - if (!token) { - return null; - } - const expires = - typeof (cred as { expires?: unknown }).expires === "number" - ? (cred as { expires: number }).expires - : Number.NaN; - if (Number.isFinite(expires) && expires > 0 && Date.now() >= expires) { - return null; - } - return { type: "api_key", key: token }; - } - - if (cred.type === "oauth") { - const accessRaw = (cred as { access?: unknown }).access; - const refreshRaw = (cred as { refresh?: unknown }).refresh; - const expiresRaw = (cred as { expires?: unknown }).expires; - - const access = typeof accessRaw === "string" ? accessRaw.trim() : ""; - const refresh = typeof refreshRaw === "string" ? refreshRaw.trim() : ""; - const expires = typeof expiresRaw === "number" ? expiresRaw : Number.NaN; - - if (!access || !refresh || !Number.isFinite(expires) || expires <= 0) { - return null; - } - return { type: "oauth", access, refresh, expires }; - } - - return null; -} - -/** - * Check if two auth.json credentials are equivalent. - */ -function credentialsEqual(a: AuthJsonCredential | undefined, b: AuthJsonCredential): boolean { - if (!a || typeof a !== "object") { - return false; - } - if (a.type !== b.type) { - return false; - } - - if (a.type === "api_key" && b.type === "api_key") { - return a.key === b.key; - } - - if (a.type === "oauth" && b.type === "oauth") { - return a.access === b.access && a.refresh === b.refresh && a.expires === b.expires; - } - - return false; -} - /** * pi-coding-agent's ModelRegistry/AuthStorage expects credentials in auth.json. 
* @@ -110,6 +37,8 @@ function credentialsEqual(a: AuthJsonCredential | undefined, b: AuthJsonCredenti * registry/catalog output. * * Syncs all credential types: api_key, token (as api_key), and oauth. + * + * @deprecated Runtime auth now comes from OpenClaw auth-profiles snapshots. */ export async function ensurePiAuthJsonFromAuthProfiles(agentDir: string): Promise<{ wrote: boolean; @@ -117,31 +46,16 @@ export async function ensurePiAuthJsonFromAuthProfiles(agentDir: string): Promis }> { const store = ensureAuthProfileStore(agentDir, { allowKeychainPrompt: false }); const authPath = path.join(agentDir, "auth.json"); - - // Group profiles by provider, taking the first valid profile for each - const providerCredentials = new Map(); - - for (const [, cred] of Object.entries(store.profiles)) { - const provider = normalizeProviderId(String(cred.provider ?? "")).trim(); - if (!provider || providerCredentials.has(provider)) { - continue; - } - - const converted = convertCredential(cred); - if (converted) { - providerCredentials.set(provider, converted); - } - } - - if (providerCredentials.size === 0) { + const providerCredentials = resolvePiCredentialMapFromStore(store); + if (Object.keys(providerCredentials).length === 0) { return { wrote: false, authPath }; } const existing = await readAuthJson(authPath); let changed = false; - for (const [provider, cred] of providerCredentials) { - if (!credentialsEqual(existing[provider], cred)) { + for (const [provider, cred] of Object.entries(providerCredentials)) { + if (!piCredentialsEqual(existing[provider], cred)) { existing[provider] = cred; changed = true; } diff --git a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts index 638b6c24bb8..a109af6d89f 100644 --- a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts +++ b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts @@ -2,6 +2,7 @@ import { describe, expect, it } from "vitest"; 
import { classifyFailoverReason, isAuthErrorMessage, + isAuthPermanentErrorMessage, isBillingErrorMessage, isCloudCodeAssistFormatError, isCloudflareOrHtmlErrorPage, @@ -16,6 +17,39 @@ import { parseImageSizeError, } from "./pi-embedded-helpers.js"; +describe("isAuthPermanentErrorMessage", () => { + it("matches permanent auth failure patterns", () => { + const samples = [ + "invalid_api_key", + "api key revoked", + "api key deactivated", + "key has been disabled", + "key has been revoked", + "account has been deactivated", + "could not authenticate api key", + "could not validate credentials", + "API_KEY_REVOKED", + "api_key_deleted", + ]; + for (const sample of samples) { + expect(isAuthPermanentErrorMessage(sample)).toBe(true); + } + }); + it("does not match transient auth errors", () => { + const samples = [ + "unauthorized", + "invalid token", + "authentication failed", + "forbidden", + "access denied", + "token has expired", + ]; + for (const sample of samples) { + expect(isAuthPermanentErrorMessage(sample)).toBe(false); + } + }); +}); + describe("isAuthErrorMessage", () => { it("matches credential validation errors", () => { const samples = [ @@ -480,6 +514,12 @@ describe("classifyFailoverReason", () => { ), ).toBe("rate_limit"); }); + it("classifies permanent auth errors as auth_permanent", () => { + expect(classifyFailoverReason("invalid_api_key")).toBe("auth_permanent"); + expect(classifyFailoverReason("Your api key has been revoked")).toBe("auth_permanent"); + expect(classifyFailoverReason("key has been disabled")).toBe("auth_permanent"); + expect(classifyFailoverReason("account has been deactivated")).toBe("auth_permanent"); + }); it("classifies JSON api_error internal server failures as timeout", () => { expect( classifyFailoverReason( diff --git a/src/agents/pi-embedded-helpers.ts b/src/agents/pi-embedded-helpers.ts index 06bf2b1938b..dd10fdca3d1 100644 --- a/src/agents/pi-embedded-helpers.ts +++ b/src/agents/pi-embedded-helpers.ts @@ -16,6 +16,7 @@ 
export { getApiErrorPayloadFingerprint, isAuthAssistantError, isAuthErrorMessage, + isAuthPermanentErrorMessage, isModelNotFoundErrorMessage, isBillingAssistantError, parseApiErrorInfo, diff --git a/src/agents/pi-embedded-helpers/errors.ts b/src/agents/pi-embedded-helpers/errors.ts index 6eea521ede1..246f6c0ad24 100644 --- a/src/agents/pi-embedded-helpers/errors.ts +++ b/src/agents/pi-embedded-helpers/errors.ts @@ -649,6 +649,14 @@ const ERROR_PATTERNS = { "plans & billing", "insufficient balance", ], + authPermanent: [ + /api[_ ]?key[_ ]?(?:revoked|invalid|deactivated|deleted)/i, + "invalid_api_key", + "key has been disabled", + "key has been revoked", + "account has been deactivated", + /could not (?:authenticate|validate).*(?:api[_ ]?key|credentials)/i, + ], auth: [ /invalid[_ ]?api[_ ]?key/, "incorrect api key", @@ -755,6 +763,10 @@ export function isBillingAssistantError(msg: AssistantMessage | undefined): bool return isBillingErrorMessage(msg.errorMessage ?? ""); } +export function isAuthPermanentErrorMessage(raw: string): boolean { + return matchesErrorPatterns(raw, ERROR_PATTERNS.authPermanent); +} + export function isAuthErrorMessage(raw: string): boolean { return matchesErrorPatterns(raw, ERROR_PATTERNS.auth); } @@ -899,6 +911,9 @@ export function classifyFailoverReason(raw: string): FailoverReason | null { if (isTimeoutErrorMessage(raw)) { return "timeout"; } + if (isAuthPermanentErrorMessage(raw)) { + return "auth_permanent"; + } if (isAuthErrorMessage(raw)) { return "auth"; } diff --git a/src/agents/pi-embedded-helpers/types.ts b/src/agents/pi-embedded-helpers/types.ts index 2753e979eb2..2440473d9f6 100644 --- a/src/agents/pi-embedded-helpers/types.ts +++ b/src/agents/pi-embedded-helpers/types.ts @@ -2,6 +2,7 @@ export type EmbeddedContextFile = { path: string; content: string }; export type FailoverReason = | "auth" + | "auth_permanent" | "format" | "rate_limit" | "billing" diff --git a/src/agents/pi-embedded-runner-extraparams.test.ts 
b/src/agents/pi-embedded-runner-extraparams.test.ts index 404d4439da4..3b717d3ab96 100644 --- a/src/agents/pi-embedded-runner-extraparams.test.ts +++ b/src/agents/pi-embedded-runner-extraparams.test.ts @@ -490,6 +490,160 @@ describe("applyExtraParamsToAgent", () => { }); }); + it("passes configured websocket transport through stream options", () => { + const { calls, agent } = createOptionsCaptureAgent(); + const cfg = { + agents: { + defaults: { + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "websocket", + }, + }, + }, + }, + }, + }; + + applyExtraParamsToAgent(agent, cfg, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("websocket"); + }); + + it("defaults Codex transport to auto (WebSocket-first)", () => { + const { calls, agent } = createOptionsCaptureAgent(); + + applyExtraParamsToAgent(agent, undefined, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("auto"); + }); + + it("does not set transport defaults for non-Codex providers", () => { + const { calls, agent } = createOptionsCaptureAgent(); + + applyExtraParamsToAgent(agent, undefined, "openai", "gpt-5"); + + const model = { + api: "openai-responses", + provider: "openai", + id: "gpt-5", + } as Model<"openai-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBeUndefined(); + }); + + it("allows 
forcing Codex transport to SSE", () => { + const { calls, agent } = createOptionsCaptureAgent(); + const cfg = { + agents: { + defaults: { + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "sse", + }, + }, + }, + }, + }, + }; + + applyExtraParamsToAgent(agent, cfg, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("sse"); + }); + + it("lets runtime options override configured transport", () => { + const { calls, agent } = createOptionsCaptureAgent(); + const cfg = { + agents: { + defaults: { + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "websocket", + }, + }, + }, + }, + }, + }; + + applyExtraParamsToAgent(agent, cfg, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, { transport: "sse" }); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("sse"); + }); + + it("falls back to Codex default transport when configured value is invalid", () => { + const { calls, agent } = createOptionsCaptureAgent(); + const cfg = { + agents: { + defaults: { + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "udp", + }, + }, + }, + }, + }, + }; + + applyExtraParamsToAgent(agent, cfg, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + 
expect(calls[0]?.transport).toBe("auto"); + }); + it("disables prompt caching for non-Anthropic Bedrock models", () => { const { calls, agent } = createOptionsCaptureAgent(); diff --git a/src/agents/pi-embedded-runner.sanitize-session-history.test.ts b/src/agents/pi-embedded-runner.sanitize-session-history.test.ts index 6e401b92e0a..fc1a2cec801 100644 --- a/src/agents/pi-embedded-runner.sanitize-session-history.test.ts +++ b/src/agents/pi-embedded-runner.sanitize-session-history.test.ts @@ -14,6 +14,7 @@ import { sanitizeWithOpenAIResponses, TEST_SESSION_ID, } from "./pi-embedded-runner.sanitize-session-history.test-harness.js"; +import { makeZeroUsageSnapshot } from "./usage.js"; vi.mock("./pi-embedded-helpers.js", async () => ({ ...(await vi.importActual("./pi-embedded-helpers.js")), @@ -210,7 +211,7 @@ describe("sanitizeSessionHistory", () => { | (AgentMessage & { usage?: unknown }) | undefined; expect(staleAssistant).toBeDefined(); - expect(staleAssistant?.usage).toBeUndefined(); + expect(staleAssistant?.usage).toEqual(makeZeroUsageSnapshot()); }); it("preserves fresh assistant usage snapshots created after latest compaction summary", async () => { @@ -264,10 +265,114 @@ describe("sanitizeSessionHistory", () => { AgentMessage & { usage?: unknown } >; expect(assistants).toHaveLength(2); - expect(assistants[0]?.usage).toBeUndefined(); + expect(assistants[0]?.usage).toEqual(makeZeroUsageSnapshot()); expect(assistants[1]?.usage).toBeDefined(); }); + it("drops stale usage when compaction summary appears before kept assistant messages", async () => { + vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); + + const compactionTs = Date.parse("2026-02-26T12:00:00.000Z"); + const messages = [ + { + role: "compactionSummary", + summary: "compressed", + tokensBefore: 191_919, + timestamp: new Date(compactionTs).toISOString(), + }, + { + role: "assistant", + content: [{ type: "text", text: "kept pre-compaction answer" }], + stopReason: "stop", + timestamp: 
compactionTs - 1_000, + usage: { + input: 191_919, + output: 2_000, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 193_919, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + }, + ] as unknown as AgentMessage[]; + + const result = await sanitizeSessionHistory({ + messages, + modelApi: "openai-responses", + provider: "openai", + sessionManager: mockSessionManager, + sessionId: TEST_SESSION_ID, + }); + + const assistant = result.find((message) => message.role === "assistant") as + | (AgentMessage & { usage?: unknown }) + | undefined; + expect(assistant?.usage).toEqual(makeZeroUsageSnapshot()); + }); + + it("keeps fresh usage after compaction timestamp in summary-first ordering", async () => { + vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); + + const compactionTs = Date.parse("2026-02-26T12:00:00.000Z"); + const messages = [ + { + role: "compactionSummary", + summary: "compressed", + tokensBefore: 123_000, + timestamp: new Date(compactionTs).toISOString(), + }, + { + role: "assistant", + content: [{ type: "text", text: "kept pre-compaction answer" }], + stopReason: "stop", + timestamp: compactionTs - 2_000, + usage: { + input: 120_000, + output: 3_000, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 123_000, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + }, + { role: "user", content: "new question", timestamp: compactionTs + 1_000 }, + { + role: "assistant", + content: [{ type: "text", text: "fresh answer" }], + stopReason: "stop", + timestamp: compactionTs + 2_000, + usage: { + input: 1_000, + output: 250, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 1_250, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + }, + ] as unknown as AgentMessage[]; + + const result = await sanitizeSessionHistory({ + messages, + modelApi: "openai-responses", + provider: "openai", + sessionManager: mockSessionManager, + sessionId: TEST_SESSION_ID, + }); + + const assistants = 
result.filter((message) => message.role === "assistant") as Array< + AgentMessage & { usage?: unknown; content?: unknown } + >; + const keptAssistant = assistants.find((message) => + JSON.stringify(message.content).includes("kept pre-compaction answer"), + ); + const freshAssistant = assistants.find((message) => + JSON.stringify(message.content).includes("fresh answer"), + ); + expect(keptAssistant?.usage).toEqual(makeZeroUsageSnapshot()); + expect(freshAssistant?.usage).toBeDefined(); + }); + it("keeps reasoning-only assistant messages for openai-responses", async () => { setNonGoogleModelApi(); diff --git a/src/agents/pi-embedded-runner/compact.ts b/src/agents/pi-embedded-runner/compact.ts index 9734c73be45..4bcdf1db66f 100644 --- a/src/agents/pi-embedded-runner/compact.ts +++ b/src/agents/pi-embedded-runner/compact.ts @@ -6,7 +6,6 @@ import { DefaultResourceLoader, estimateTokens, SessionManager, - SettingsManager, } from "@mariozechner/pi-coding-agent"; import { resolveHeartbeatPrompt } from "../../auto-reply/heartbeat.js"; import type { ReasoningLevel, ThinkLevel } from "../../auto-reply/thinking.js"; @@ -40,7 +39,7 @@ import { validateAnthropicTurns, validateGeminiTurns, } from "../pi-embedded-helpers.js"; -import { applyPiCompactionSettingsFromConfig } from "../pi-settings.js"; +import { createPreparedEmbeddedPiSettingsManager } from "../pi-project-settings.js"; import { createOpenClawCodingTools } from "../pi-tools.js"; import { resolveSandboxContext } from "../sandbox.js"; import { repairSessionFileIfNeeded } from "../session-file-repair.js"; @@ -499,6 +498,7 @@ export async function compactEmbeddedPiSessionDirect( docsPath: docsPath ?? 
undefined, ttsHint, promptMode, + acpEnabled: params.config?.acp?.enabled !== false, runtimeInfo, reactionGuidance, messageToolHints, @@ -537,9 +537,9 @@ export async function compactEmbeddedPiSessionDirect( allowedToolNames, }); trackSessionManagerAccess(params.sessionFile); - const settingsManager = SettingsManager.create(effectiveWorkspace, agentDir); - applyPiCompactionSettingsFromConfig({ - settingsManager, + const settingsManager = createPreparedEmbeddedPiSettingsManager({ + cwd: effectiveWorkspace, + agentDir, cfg: params.config, }); // Sets compaction/pruning runtime state and returns extension factories diff --git a/src/agents/pi-embedded-runner/extra-params.ts b/src/agents/pi-embedded-runner/extra-params.ts index 2e87dcee608..70662760235 100644 --- a/src/agents/pi-embedded-runner/extra-params.ts +++ b/src/agents/pi-embedded-runner/extra-params.ts @@ -14,7 +14,7 @@ const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as cons // NOTE: We only force `store=true` for *direct* OpenAI Responses. // Codex responses (chatgpt.com/backend-api/codex/responses) require `store=false`. const OPENAI_RESPONSES_APIS = new Set(["openai-responses"]); -const OPENAI_RESPONSES_PROVIDERS = new Set(["openai"]); +const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai-responses"]); /** * Resolve provider-specific extra params from model config. @@ -117,6 +117,13 @@ function createStreamFnWithExtraParams( if (typeof extraParams.maxTokens === "number") { streamParams.maxTokens = extraParams.maxTokens; } + const transport = extraParams.transport; + if (transport === "sse" || transport === "websocket" || transport === "auto") { + streamParams.transport = transport; + } else if (transport != null) { + const transportSummary = typeof transport === "string" ? 
transport : typeof transport; + log.warn(`ignoring invalid transport param: ${transportSummary}`); + } const cacheRetention = resolveCacheRetention(extraParams, provider); if (cacheRetention) { streamParams.cacheRetention = cacheRetention; @@ -184,10 +191,16 @@ function isDirectOpenAIBaseUrl(baseUrl: unknown): boolean { try { const host = new URL(baseUrl).hostname.toLowerCase(); - return host === "api.openai.com" || host === "chatgpt.com"; + return ( + host === "api.openai.com" || host === "chatgpt.com" || host.endsWith(".openai.azure.com") + ); } catch { const normalized = baseUrl.toLowerCase(); - return normalized.includes("api.openai.com") || normalized.includes("chatgpt.com"); + return ( + normalized.includes("api.openai.com") || + normalized.includes("chatgpt.com") || + normalized.includes(".openai.azure.com") + ); } } @@ -228,6 +241,15 @@ function createOpenAIResponsesStoreWrapper(baseStreamFn: StreamFn | undefined): }; } +function createCodexDefaultTransportWrapper(baseStreamFn: StreamFn | undefined): StreamFn { + const underlying = baseStreamFn ?? streamSimple; + return (model, context, options) => + underlying(model, context, { + ...options, + transport: options?.transport ?? "auto", + }); +} + function isAnthropic1MModel(modelId: string): boolean { const normalized = modelId.trim().toLowerCase(); return ANTHROPIC_1M_MODEL_PREFIXES.some((prefix) => normalized.startsWith(prefix)); @@ -646,6 +668,10 @@ export function applyExtraParamsToAgent( modelId, agentId, }); + if (provider === "openai-codex") { + // Default Codex to WebSocket-first when nothing else specifies transport. + agent.streamFn = createCodexDefaultTransportWrapper(agent.streamFn); + } const override = extraParamsOverride && Object.keys(extraParamsOverride).length > 0 ? 
Object.fromEntries( diff --git a/src/agents/pi-embedded-runner/google.ts b/src/agents/pi-embedded-runner/google.ts index 42970ea4ef6..429c1ddd9d9 100644 --- a/src/agents/pi-embedded-runner/google.ts +++ b/src/agents/pi-embedded-runner/google.ts @@ -24,6 +24,7 @@ import { } from "../session-transcript-repair.js"; import type { TranscriptPolicy } from "../transcript-policy.js"; import { resolveTranscriptPolicy } from "../transcript-policy.js"; +import { makeZeroUsageSnapshot } from "../usage.js"; import { log } from "./logger.js"; import { dropThinkingBlocks } from "./thinking.js"; import { describeUnknownError } from "./utils.js"; @@ -133,30 +134,66 @@ function annotateInterSessionUserMessages(messages: AgentMessage[]): AgentMessag return touched ? out : messages; } -function stripStaleAssistantUsageBeforeLatestCompaction(messages: AgentMessage[]): AgentMessage[] { - let latestCompactionSummaryIndex = -1; - for (let i = 0; i < messages.length; i += 1) { - if (messages[i]?.role === "compactionSummary") { - latestCompactionSummaryIndex = i; +function parseMessageTimestamp(value: unknown): number | null { + if (typeof value === "number" && Number.isFinite(value)) { + return value; + } + if (typeof value === "string") { + const parsed = Date.parse(value); + if (Number.isFinite(parsed)) { + return parsed; } } - if (latestCompactionSummaryIndex <= 0) { + return null; +} + +function stripStaleAssistantUsageBeforeLatestCompaction(messages: AgentMessage[]): AgentMessage[] { + let latestCompactionSummaryIndex = -1; + let latestCompactionTimestamp: number | null = null; + for (let i = 0; i < messages.length; i += 1) { + const entry = messages[i]; + if (entry?.role !== "compactionSummary") { + continue; + } + latestCompactionSummaryIndex = i; + latestCompactionTimestamp = parseMessageTimestamp( + (entry as { timestamp?: unknown }).timestamp ?? 
null, + ); + } + if (latestCompactionSummaryIndex === -1) { return messages; } const out = [...messages]; let touched = false; - for (let i = 0; i < latestCompactionSummaryIndex; i += 1) { - const candidate = out[i] as (AgentMessage & { usage?: unknown }) | undefined; + for (let i = 0; i < out.length; i += 1) { + const candidate = out[i] as + | (AgentMessage & { usage?: unknown; timestamp?: unknown }) + | undefined; if (!candidate || candidate.role !== "assistant") { continue; } if (!candidate.usage || typeof candidate.usage !== "object") { continue; } + + const messageTimestamp = parseMessageTimestamp(candidate.timestamp); + const staleByTimestamp = + latestCompactionTimestamp !== null && + messageTimestamp !== null && + messageTimestamp <= latestCompactionTimestamp; + const staleByLegacyOrdering = i < latestCompactionSummaryIndex; + if (!staleByTimestamp && !staleByLegacyOrdering) { + continue; + } + + // pi-coding-agent expects assistant usage to always be present during context + // accounting. Keep stale snapshots structurally valid, but zeroed out. const candidateRecord = candidate as unknown as Record; - const { usage: _droppedUsage, ...rest } = candidateRecord; - out[i] = rest as unknown as AgentMessage; + out[i] = { + ...candidateRecord, + usage: makeZeroUsageSnapshot(), + } as unknown as AgentMessage; touched = true; } return touched ? 
out : messages; diff --git a/src/agents/pi-embedded-runner/model.forward-compat.test.ts b/src/agents/pi-embedded-runner/model.forward-compat.test.ts index bd86c255a86..07b96a1cae9 100644 --- a/src/agents/pi-embedded-runner/model.forward-compat.test.ts +++ b/src/agents/pi-embedded-runner/model.forward-compat.test.ts @@ -8,7 +8,11 @@ vi.mock("../pi-model-discovery.js", () => ({ import { buildInlineProviderModels, resolveModel } from "./model.js"; import { buildOpenAICodexForwardCompatExpectation, + GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL, + GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL, makeModel, + mockGoogleGeminiCliFlashTemplateModel, + mockGoogleGeminiCliProTemplateModel, mockOpenAICodexTemplateModel, resetMockDiscoverModels, } from "./model.test-harness.js"; @@ -50,4 +54,36 @@ describe("pi embedded model e2e smoke", () => { expect(result.model).toBeUndefined(); expect(result.error).toBe("Unknown model: openai-codex/gpt-4.1-mini"); }); + + it("builds a google-gemini-cli forward-compat fallback for gemini-3.1-pro-preview", () => { + mockGoogleGeminiCliProTemplateModel(); + + const result = resolveModel("google-gemini-cli", "gemini-3.1-pro-preview", "/tmp/agent"); + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + ...GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL, + id: "gemini-3.1-pro-preview", + name: "gemini-3.1-pro-preview", + reasoning: true, + }); + }); + + it("builds a google-gemini-cli forward-compat fallback for gemini-3.1-flash-preview", () => { + mockGoogleGeminiCliFlashTemplateModel(); + + const result = resolveModel("google-gemini-cli", "gemini-3.1-flash-preview", "/tmp/agent"); + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + ...GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL, + id: "gemini-3.1-flash-preview", + name: "gemini-3.1-flash-preview", + reasoning: true, + }); + }); + + it("keeps unknown-model errors for unrecognized google-gemini-cli model IDs", () => { + const result = resolveModel("google-gemini-cli", 
"gemini-4-unknown", "/tmp/agent"); + expect(result.model).toBeUndefined(); + expect(result.error).toBe("Unknown model: google-gemini-cli/gemini-4-unknown"); + }); }); diff --git a/src/agents/pi-embedded-runner/model.test-harness.ts b/src/agents/pi-embedded-runner/model.test-harness.ts index 410d3a8e756..c28210b1921 100644 --- a/src/agents/pi-embedded-runner/model.test-harness.ts +++ b/src/agents/pi-embedded-runner/model.test-harness.ts @@ -47,6 +47,48 @@ export function buildOpenAICodexForwardCompatExpectation( }; } +export const GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL = { + id: "gemini-3-pro-preview", + name: "Gemini 3 Pro Preview (Cloud Code Assist)", + provider: "google-gemini-cli", + api: "google-gemini-cli", + baseUrl: "https://cloudcode-pa.googleapis.com", + reasoning: true, + input: ["text", "image"] as const, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 200000, + maxTokens: 64000, +}; + +export const GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL = { + id: "gemini-3-flash-preview", + name: "Gemini 3 Flash Preview (Cloud Code Assist)", + provider: "google-gemini-cli", + api: "google-gemini-cli", + baseUrl: "https://cloudcode-pa.googleapis.com", + reasoning: false, + input: ["text", "image"] as const, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 200000, + maxTokens: 64000, +}; + +export function mockGoogleGeminiCliProTemplateModel(): void { + mockDiscoveredModel({ + provider: "google-gemini-cli", + modelId: "gemini-3-pro-preview", + templateModel: GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL, + }); +} + +export function mockGoogleGeminiCliFlashTemplateModel(): void { + mockDiscoveredModel({ + provider: "google-gemini-cli", + modelId: "gemini-3-flash-preview", + templateModel: GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL, + }); +} + export function resetMockDiscoverModels(): void { vi.mocked(discoverModels).mockReturnValue({ find: vi.fn(() => null), diff --git a/src/agents/pi-embedded-runner/model.ts 
b/src/agents/pi-embedded-runner/model.ts index f9e95023d5e..16aea8b4c82 100644 --- a/src/agents/pi-embedded-runner/model.ts +++ b/src/agents/pi-embedded-runner/model.ts @@ -1,4 +1,5 @@ import type { Api, Model } from "@mariozechner/pi-ai"; +import type { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent"; import type { OpenClawConfig } from "../../config/config.js"; import type { ModelDefinitionConfig } from "../../config/types.js"; import { resolveOpenClawAgentDir } from "../agent-paths.js"; @@ -7,12 +8,7 @@ import { buildModelAliasLines } from "../model-alias-lines.js"; import { normalizeModelCompat } from "../model-compat.js"; import { resolveForwardCompatModel } from "../model-forward-compat.js"; import { normalizeProviderId } from "../model-selection.js"; -import { - discoverAuthStorage, - discoverModels, - type AuthStorage, - type ModelRegistry, -} from "../pi-model-discovery.js"; +import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js"; type InlineModelEntry = ModelDefinitionConfig & { provider: string; diff --git a/src/agents/pi-embedded-runner/run/attempt.test.ts b/src/agents/pi-embedded-runner/run/attempt.test.ts index 97a881cf849..f314353513b 100644 --- a/src/agents/pi-embedded-runner/run/attempt.test.ts +++ b/src/agents/pi-embedded-runner/run/attempt.test.ts @@ -1,70 +1,11 @@ -import type { AgentMessage } from "@mariozechner/pi-agent-core"; -import type { ImageContent } from "@mariozechner/pi-ai"; import { describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../../../config/config.js"; import { - injectHistoryImagesIntoMessages, resolveAttemptFsWorkspaceOnly, resolvePromptBuildHookResult, resolvePromptModeForSession, } from "./attempt.js"; -describe("injectHistoryImagesIntoMessages", () => { - const image: ImageContent = { type: "image", data: "abc", mimeType: "image/png" }; - - it("injects history images and converts string content", () => { - const messages: AgentMessage[] = [ - { - role: 
"user", - content: "See /tmp/photo.png", - } as AgentMessage, - ]; - - const didMutate = injectHistoryImagesIntoMessages(messages, new Map([[0, [image]]])); - - expect(didMutate).toBe(true); - const firstUser = messages[0] as Extract | undefined; - expect(Array.isArray(firstUser?.content)).toBe(true); - const content = firstUser?.content as Array<{ type: string; text?: string; data?: string }>; - expect(content).toHaveLength(2); - expect(content[0]?.type).toBe("text"); - expect(content[1]).toMatchObject({ type: "image", data: "abc" }); - }); - - it("avoids duplicating existing image content", () => { - const messages: AgentMessage[] = [ - { - role: "user", - content: [{ type: "text", text: "See /tmp/photo.png" }, { ...image }], - } as AgentMessage, - ]; - - const didMutate = injectHistoryImagesIntoMessages(messages, new Map([[0, [image]]])); - - expect(didMutate).toBe(false); - const first = messages[0] as Extract | undefined; - if (!first || !Array.isArray(first.content)) { - throw new Error("expected array content"); - } - expect(first.content).toHaveLength(2); - }); - - it("ignores non-user messages and out-of-range indices", () => { - const messages: AgentMessage[] = [ - { - role: "assistant", - content: "noop", - } as unknown as AgentMessage, - ]; - - const didMutate = injectHistoryImagesIntoMessages(messages, new Map([[1, [image]]])); - - expect(didMutate).toBe(false); - const firstAssistant = messages[0] as Extract | undefined; - expect(firstAssistant?.content).toBe("noop"); - }); -}); - describe("resolvePromptBuildHookResult", () => { function createLegacyOnlyHookRunner() { return { diff --git a/src/agents/pi-embedded-runner/run/attempt.ts b/src/agents/pi-embedded-runner/run/attempt.ts index 25d8528fc48..060c53e306a 100644 --- a/src/agents/pi-embedded-runner/run/attempt.ts +++ b/src/agents/pi-embedded-runner/run/attempt.ts @@ -1,13 +1,11 @@ import fs from "node:fs/promises"; import os from "node:os"; import type { AgentMessage } from 
"@mariozechner/pi-agent-core"; -import type { ImageContent } from "@mariozechner/pi-ai"; import { streamSimple } from "@mariozechner/pi-ai"; import { createAgentSession, DefaultResourceLoader, SessionManager, - SettingsManager, } from "@mariozechner/pi-coding-agent"; import { resolveHeartbeatPrompt } from "../../../auto-reply/heartbeat.js"; import { resolveChannelCapabilities } from "../../../config/channel-capabilities.js"; @@ -53,7 +51,7 @@ import { validateGeminiTurns, } from "../../pi-embedded-helpers.js"; import { subscribeEmbeddedPiSession } from "../../pi-embedded-subscribe.js"; -import { applyPiCompactionSettingsFromConfig } from "../../pi-settings.js"; +import { createPreparedEmbeddedPiSettingsManager } from "../../pi-project-settings.js"; import { toClientToolDefinitions } from "../../pi-tool-definition-adapter.js"; import { createOpenClawCodingTools, resolveToolLoopDetectionConfig } from "../../pi-tools.js"; import { resolveSandboxContext } from "../../sandbox.js"; @@ -113,6 +111,7 @@ import { selectCompactionTimeoutSnapshot, shouldFlagCompactionTimeout, } from "./compaction-timeout.js"; +import { pruneProcessedHistoryImages } from "./history-image-prune.js"; import { detectAndLoadPromptImages } from "./images.js"; import type { EmbeddedRunAttemptParams, EmbeddedRunAttemptResult } from "./types.js"; @@ -128,54 +127,6 @@ type PromptBuildHookRunner = { ) => Promise; }; -export function injectHistoryImagesIntoMessages( - messages: AgentMessage[], - historyImagesByIndex: Map, -): boolean { - if (historyImagesByIndex.size === 0) { - return false; - } - let didMutate = false; - - for (const [msgIndex, images] of historyImagesByIndex) { - // Bounds check: ensure index is valid before accessing - if (msgIndex < 0 || msgIndex >= messages.length) { - continue; - } - const msg = messages[msgIndex]; - if (msg && msg.role === "user") { - // Convert string content to array format if needed - if (typeof msg.content === "string") { - msg.content = [{ type: "text", text: 
msg.content }]; - didMutate = true; - } - if (Array.isArray(msg.content)) { - // Check for existing image content to avoid duplicates across turns - const existingImageData = new Set( - msg.content - .filter( - (c): c is ImageContent => - c != null && - typeof c === "object" && - c.type === "image" && - typeof c.data === "string", - ) - .map((c) => c.data), - ); - for (const img of images) { - // Only add if this image isn't already in the message - if (!existingImageData.has(img.data)) { - msg.content.push(img); - didMutate = true; - } - } - } - } - } - - return didMutate; -} - export async function resolvePromptBuildHookResult(params: { prompt: string; messages: unknown[]; @@ -545,6 +496,7 @@ export async function runEmbeddedAttempt( workspaceNotes, reactionGuidance, promptMode, + acpEnabled: params.config?.acp?.enabled !== false, runtimeInfo, messageToolHints, sandboxInfo, @@ -626,9 +578,9 @@ export async function runEmbeddedAttempt( cwd: effectiveWorkspace, }); - const settingsManager = SettingsManager.create(effectiveWorkspace, agentDir); - applyPiCompactionSettingsFromConfig({ - settingsManager, + const settingsManager = createPreparedEmbeddedPiSettingsManager({ + cwd: effectiveWorkspace, + agentDir, cfg: params.config, }); @@ -1091,16 +1043,20 @@ export async function runEmbeddedAttempt( } try { + // Idempotent cleanup for legacy sessions with persisted image payloads. + // Called each run; only mutates already-answered user turns that still carry image blocks. + const didPruneImages = pruneProcessedHistoryImages(activeSession.messages); + if (didPruneImages) { + activeSession.agent.replaceMessages(activeSession.messages); + } + // Detect and load images referenced in the prompt for vision-capable models. - // This eliminates the need for an explicit "view" tool call by injecting - // images directly into the prompt when the model supports it. - // Also scans conversation history to enable follow-up questions about earlier images. 
+ // Images are prompt-local only (pi-like behavior). const imageResult = await detectAndLoadPromptImages({ prompt: effectivePrompt, workspaceDir: effectiveWorkspace, model: params.model, existingImages: params.images, - historyMessages: activeSession.messages, maxBytes: MAX_IMAGE_BYTES, maxDimensionPx: resolveImageSanitizationLimits(params.config).maxDimensionPx, workspaceOnly: effectiveFsWorkspaceOnly, @@ -1111,21 +1067,10 @@ export async function runEmbeddedAttempt( : undefined, }); - // Inject history images into their original message positions. - // This ensures the model sees images in context (e.g., "compare to the first image"). - const didMutate = injectHistoryImagesIntoMessages( - activeSession.messages, - imageResult.historyImagesByIndex, - ); - if (didMutate) { - // Persist message mutations (e.g., injected history images) so we don't re-scan/reload. - activeSession.agent.replaceMessages(activeSession.messages); - } - cacheTrace?.recordStage("prompt:images", { prompt: effectivePrompt, messages: activeSession.messages, - note: `images: prompt=${imageResult.images.length} history=${imageResult.historyImagesByIndex.size}`, + note: `images: prompt=${imageResult.images.length}`, }); // Diagnostic: log context sizes before prompt to help debug early overflow errors. @@ -1142,7 +1087,6 @@ export async function runEmbeddedAttempt( `historyImageBlocks=${sessionSummary.totalImageBlocks} ` + `systemPromptChars=${systemLen} promptChars=${promptLen} ` + `promptImages=${imageResult.images.length} ` + - `historyImageMessages=${imageResult.historyImagesByIndex.size} ` + `provider=${params.provider}/${params.modelId} sessionFile=${params.sessionFile}`, ); } @@ -1217,13 +1161,15 @@ export async function runEmbeddedAttempt( } } + const compactionOccurredThisAttempt = getCompactionCount() > 0; + // Append cache-TTL timestamp AFTER prompt + compaction retry completes. 
// Previously this was before the prompt, which caused a custom entry to be // inserted between compaction and the next prompt — breaking the // prepareCompaction() guard that checks the last entry type, leading to // double-compaction. See: https://github.com/openclaw/openclaw/issues/9282 // Skip when timed out during compaction — session state may be inconsistent. - if (!timedOutDuringCompaction) { + if (!timedOutDuringCompaction && !compactionOccurredThisAttempt) { const shouldTrackCacheTtl = params.config?.agents?.defaults?.contextPruning?.mode === "cache-ttl" && isCacheTtlEligibleProvider(params.provider, params.modelId); @@ -1255,7 +1201,7 @@ export async function runEmbeddedAttempt( messagesSnapshot = snapshotSelection.messagesSnapshot; sessionIdUsed = snapshotSelection.sessionIdUsed; - if (promptError && promptErrorSource === "prompt") { + if (promptError && promptErrorSource === "prompt" && !compactionOccurredThisAttempt) { try { sessionManager.appendCustomEntry("openclaw:prompt-error", { timestamp: Date.now(), diff --git a/src/agents/pi-embedded-runner/run/history-image-prune.test.ts b/src/agents/pi-embedded-runner/run/history-image-prune.test.ts new file mode 100644 index 00000000000..0e171352e58 --- /dev/null +++ b/src/agents/pi-embedded-runner/run/history-image-prune.test.ts @@ -0,0 +1,65 @@ +import type { AgentMessage } from "@mariozechner/pi-agent-core"; +import type { ImageContent } from "@mariozechner/pi-ai"; +import { describe, expect, it } from "vitest"; +import { PRUNED_HISTORY_IMAGE_MARKER, pruneProcessedHistoryImages } from "./history-image-prune.js"; + +describe("pruneProcessedHistoryImages", () => { + const image: ImageContent = { type: "image", data: "abc", mimeType: "image/png" }; + + it("prunes image blocks from user messages that already have assistant replies", () => { + const messages: AgentMessage[] = [ + { + role: "user", + content: [{ type: "text", text: "See /tmp/photo.png" }, { ...image }], + } as AgentMessage, + { + role: 
"assistant", + content: "got it", + } as unknown as AgentMessage, + ]; + + const didMutate = pruneProcessedHistoryImages(messages); + + expect(didMutate).toBe(true); + const firstUser = messages[0] as Extract | undefined; + expect(Array.isArray(firstUser?.content)).toBe(true); + const content = firstUser?.content as Array<{ type: string; text?: string; data?: string }>; + expect(content).toHaveLength(2); + expect(content[0]?.type).toBe("text"); + expect(content[1]).toMatchObject({ type: "text", text: PRUNED_HISTORY_IMAGE_MARKER }); + }); + + it("does not prune latest user message when no assistant response exists yet", () => { + const messages: AgentMessage[] = [ + { + role: "user", + content: [{ type: "text", text: "See /tmp/photo.png" }, { ...image }], + } as AgentMessage, + ]; + + const didMutate = pruneProcessedHistoryImages(messages); + + expect(didMutate).toBe(false); + const first = messages[0] as Extract | undefined; + if (!first || !Array.isArray(first.content)) { + throw new Error("expected array content"); + } + expect(first.content).toHaveLength(2); + expect(first.content[1]).toMatchObject({ type: "image", data: "abc" }); + }); + + it("does not change messages when no assistant turn exists", () => { + const messages: AgentMessage[] = [ + { + role: "user", + content: "noop", + } as AgentMessage, + ]; + + const didMutate = pruneProcessedHistoryImages(messages); + + expect(didMutate).toBe(false); + const firstUser = messages[0] as Extract | undefined; + expect(firstUser?.content).toBe("noop"); + }); +}); diff --git a/src/agents/pi-embedded-runner/run/history-image-prune.ts b/src/agents/pi-embedded-runner/run/history-image-prune.ts new file mode 100644 index 00000000000..d7dbea5de38 --- /dev/null +++ b/src/agents/pi-embedded-runner/run/history-image-prune.ts @@ -0,0 +1,44 @@ +import type { AgentMessage } from "@mariozechner/pi-agent-core"; + +export const PRUNED_HISTORY_IMAGE_MARKER = "[image data removed - already processed by model]"; + +/** + * 
Idempotent cleanup for legacy sessions that persisted image blocks in history. + * Called each run; mutates only user turns that already have an assistant reply. + */ +export function pruneProcessedHistoryImages(messages: AgentMessage[]): boolean { + let lastAssistantIndex = -1; + for (let i = messages.length - 1; i >= 0; i--) { + if (messages[i]?.role === "assistant") { + lastAssistantIndex = i; + break; + } + } + if (lastAssistantIndex < 0) { + return false; + } + + let didMutate = false; + for (let i = 0; i < lastAssistantIndex; i++) { + const message = messages[i]; + if (!message || message.role !== "user" || !Array.isArray(message.content)) { + continue; + } + for (let j = 0; j < message.content.length; j++) { + const block = message.content[j]; + if (!block || typeof block !== "object") { + continue; + } + if ((block as { type?: string }).type !== "image") { + continue; + } + message.content[j] = { + type: "text", + text: PRUNED_HISTORY_IMAGE_MARKER, + } as (typeof message.content)[number]; + didMutate = true; + } + } + + return didMutate; +} diff --git a/src/agents/pi-embedded-runner/run/images.test.ts b/src/agents/pi-embedded-runner/run/images.test.ts index f9cb846da40..8a879a1bb36 100644 --- a/src/agents/pi-embedded-runner/run/images.test.ts +++ b/src/agents/pi-embedded-runner/run/images.test.ts @@ -63,7 +63,6 @@ describe("detectImageReferences", () => { expect(refs).toHaveLength(1); expect(refs.some((r) => r.type === "path")).toBe(true); - expect(refs.some((r) => r.type === "url")).toBe(false); }); it("handles various image extensions", () => { @@ -83,6 +82,17 @@ describe("detectImageReferences", () => { expect(refs).toHaveLength(1); }); + it("dedupe casing follows host filesystem conventions", () => { + const prompt = "Look at /tmp/Image.png and /tmp/image.png"; + const refs = detectImageReferences(prompt); + + if (process.platform === "win32") { + expect(refs).toHaveLength(1); + return; + } + expect(refs).toHaveLength(2); + }); + it("returns empty array 
when no images found", () => { const prompt = "Just some text without any image references"; const refs = detectImageReferences(prompt); @@ -256,25 +266,15 @@ describe("detectAndLoadPromptImages", () => { expect(result.detectedRefs).toHaveLength(0); }); - it("skips history messages that already include image content", async () => { + it("returns no detected refs when prompt has no image references", async () => { const result = await detectAndLoadPromptImages({ prompt: "no images here", workspaceDir: "/tmp", model: { input: ["text", "image"] }, - historyMessages: [ - { - role: "user", - content: [ - { type: "text", text: "See /tmp/should-not-load.png" }, - { type: "image", data: "abc", mimeType: "image/png" }, - ], - }, - ], }); expect(result.detectedRefs).toHaveLength(0); expect(result.images).toHaveLength(0); - expect(result.historyImagesByIndex.size).toBe(0); }); it("blocks prompt image refs outside workspace when sandbox workspaceOnly is enabled", async () => { @@ -309,43 +309,4 @@ describe("detectAndLoadPromptImages", () => { await fs.rm(stateDir, { recursive: true, force: true }); } }); - - it("blocks history image refs outside workspace when sandbox workspaceOnly is enabled", async () => { - const stateDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-native-image-sandbox-")); - const sandboxRoot = path.join(stateDir, "sandbox"); - const agentRoot = path.join(stateDir, "agent"); - await fs.mkdir(sandboxRoot, { recursive: true }); - await fs.mkdir(agentRoot, { recursive: true }); - const pngB64 = - "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/woAAn8B9FD5fHAAAAAASUVORK5CYII="; - await fs.writeFile(path.join(agentRoot, "secret.png"), Buffer.from(pngB64, "base64")); - const sandbox = createUnsafeMountedSandbox({ sandboxRoot, agentRoot }); - const bridge = sandbox.fsBridge; - if (!bridge) { - throw new Error("sandbox fs bridge missing"); - } - - try { - const result = await detectAndLoadPromptImages({ - prompt: "No inline image in this 
turn.", - workspaceDir: sandboxRoot, - model: { input: ["text", "image"] }, - workspaceOnly: true, - historyMessages: [ - { - role: "user", - content: [{ type: "text", text: "Previous image /agent/secret.png" }], - }, - ], - sandbox: { root: sandbox.workspaceDir, bridge }, - }); - - expect(result.detectedRefs).toHaveLength(1); - expect(result.loadedCount).toBe(0); - expect(result.skippedCount).toBe(1); - expect(result.historyImagesByIndex.size).toBe(0); - } finally { - await fs.rm(stateDir, { recursive: true, force: true }); - } - }); }); diff --git a/src/agents/pi-embedded-runner/run/images.ts b/src/agents/pi-embedded-runner/run/images.ts index 897e8ca16e2..bcd25e724c5 100644 --- a/src/agents/pi-embedded-runner/run/images.ts +++ b/src/agents/pi-embedded-runner/run/images.ts @@ -13,18 +13,27 @@ import { log } from "../logger.js"; /** * Common image file extensions for detection. */ -const IMAGE_EXTENSIONS = new Set([ - ".png", - ".jpg", - ".jpeg", - ".gif", - ".webp", - ".bmp", - ".tiff", - ".tif", - ".heic", - ".heif", -]); +const IMAGE_EXTENSION_NAMES = [ + "png", + "jpg", + "jpeg", + "gif", + "webp", + "bmp", + "tiff", + "tif", + "heic", + "heif", +] as const; +const IMAGE_EXTENSIONS = new Set(IMAGE_EXTENSION_NAMES.map((ext) => `.${ext}`)); +const IMAGE_EXTENSION_PATTERN = IMAGE_EXTENSION_NAMES.join("|"); +const MEDIA_ATTACHED_PATH_REGEX_SOURCE = + "^\\s*(.+?\\.(?:" + IMAGE_EXTENSION_PATTERN + "))\\s*(?:\\(|$|\\|)"; +const MESSAGE_IMAGE_REGEX_SOURCE = + "\\[Image:\\s*source:\\s*([^\\]]+\\.(?:" + IMAGE_EXTENSION_PATTERN + "))\\]"; +const FILE_URL_REGEX_SOURCE = "file://[^\\s<>\"'`\\]]+\\.(?:" + IMAGE_EXTENSION_PATTERN + ")"; +const PATH_REGEX_SOURCE = + "(?:^|\\s|[\"'`(])((\\.\\.?/|[~/])[^\\s\"'`()\\[\\]]*\\.(?:" + IMAGE_EXTENSION_PATTERN + "))"; /** * Result of detecting an image reference in text. 
@@ -32,12 +41,10 @@ const IMAGE_EXTENSIONS = new Set([ export interface DetectedImageRef { /** The raw matched string from the prompt */ raw: string; - /** The type of reference (path or url) */ - type: "path" | "url"; - /** The resolved/normalized path or URL */ + /** The type of reference */ + type: "path"; + /** The resolved/normalized path */ resolved: string; - /** Index of the message this ref was found in (for history images) */ - messageIndex?: number; } /** @@ -48,6 +55,10 @@ function isImageExtension(filePath: string): boolean { return IMAGE_EXTENSIONS.has(ext); } +function normalizeRefForDedupe(raw: string): string { + return process.platform === "win32" ? raw.toLowerCase() : raw; +} + async function sanitizeImagesWithLog( images: ImageContent[], label: string, @@ -84,7 +95,8 @@ export function detectImageReferences(prompt: string): DetectedImageRef[] { // Helper to add a path ref const addPathRef = (raw: string) => { const trimmed = raw.trim(); - if (!trimmed || seen.has(trimmed.toLowerCase())) { + const dedupeKey = normalizeRefForDedupe(trimmed); + if (!trimmed || seen.has(dedupeKey)) { return; } if (trimmed.startsWith("http://") || trimmed.startsWith("https://")) { @@ -93,7 +105,7 @@ export function detectImageReferences(prompt: string): DetectedImageRef[] { if (!isImageExtension(trimmed)) { return; } - seen.add(trimmed.toLowerCase()); + seen.add(dedupeKey); const resolved = trimmed.startsWith("~") ? resolveUserPath(trimmed) : trimmed; refs.push({ raw: trimmed, type: "path", resolved }); }; @@ -102,6 +114,10 @@ export function detectImageReferences(prompt: string): DetectedImageRef[] { // Each bracket = ONE file. The | separates path from URL, not multiple files. // Multi-file format uses separate brackets on separate lines. 
const mediaAttachedPattern = /\[media attached(?:\s+\d+\/\d+)?:\s*([^\]]+)\]/gi; + const mediaAttachedPathPattern = new RegExp(MEDIA_ATTACHED_PATH_REGEX_SOURCE, "i"); + const messageImagePattern = new RegExp(MESSAGE_IMAGE_REGEX_SOURCE, "gi"); + const fileUrlPattern = new RegExp(FILE_URL_REGEX_SOURCE, "gi"); + const pathPattern = new RegExp(PATH_REGEX_SOURCE, "gi"); let match: RegExpExecArray | null; while ((match = mediaAttachedPattern.exec(prompt)) !== null) { const content = match[1]; @@ -115,17 +131,13 @@ export function detectImageReferences(prompt: string): DetectedImageRef[] { // Format is: path (type) | url OR just: path (type) // Path may contain spaces (e.g., "ChatGPT Image Apr 21.png") // Use non-greedy .+? to stop at first image extension - const pathMatch = content.match( - /^\s*(.+?\.(?:png|jpe?g|gif|webp|bmp|tiff?|heic|heif))\s*(?:\(|$|\|)/i, - ); + const pathMatch = content.match(mediaAttachedPathPattern); if (pathMatch?.[1]) { addPathRef(pathMatch[1].trim()); } } // Pattern for [Image: source: /path/...] format from messaging systems - const messageImagePattern = - /\[Image:\s*source:\s*([^\]]+\.(?:png|jpe?g|gif|webp|bmp|tiff?|heic|heif))\]/gi; while ((match = messageImagePattern.exec(prompt)) !== null) { const raw = match[1]?.trim(); if (raw) { @@ -136,13 +148,13 @@ export function detectImageReferences(prompt: string): DetectedImageRef[] { // Remote HTTP(S) URLs are intentionally ignored. Native image injection is local-only. 
// Pattern for file:// URLs - treat as paths since loadWebMedia handles them - const fileUrlPattern = /file:\/\/[^\s<>"'`\]]+\.(?:png|jpe?g|gif|webp|bmp|tiff?|heic|heif)/gi; while ((match = fileUrlPattern.exec(prompt)) !== null) { const raw = match[0]; - if (seen.has(raw.toLowerCase())) { + const dedupeKey = normalizeRefForDedupe(raw); + if (seen.has(dedupeKey)) { continue; } - seen.add(raw.toLowerCase()); + seen.add(dedupeKey); // Use fileURLToPath for proper handling (e.g., file://localhost/path) try { const resolved = fileURLToPath(raw); @@ -158,8 +170,6 @@ export function detectImageReferences(prompt: string): DetectedImageRef[] { // - ./relative/path.ext // - ../parent/path.ext // - ~/home/path.ext - const pathPattern = - /(?:^|\s|["'`(])((\.\.?\/|[~/])[^\s"'`()[\]]*\.(?:png|jpe?g|gif|webp|bmp|tiff?|heic|heif))/gi; while ((match = pathPattern.exec(prompt)) !== null) { // Use capture group 1 (the path without delimiter prefix); skip if undefined if (match[1]) { @@ -171,7 +181,7 @@ export function detectImageReferences(prompt: string): DetectedImageRef[] { } /** - * Loads an image from a file path or URL and returns it as ImageContent. + * Loads an image from a file path and returns it as ImageContent. * * @param ref The detected image reference * @param workspaceDir The current workspace directory for resolving relative paths @@ -190,42 +200,34 @@ export async function loadImageFromRef( try { let targetPath = ref.resolved; - // Remote URL loading is disabled (local-only). 
- if (ref.type === "url") { - log.debug(`Native image: rejecting remote URL (local-only): ${ref.resolved}`); - return null; - } - // Resolve paths relative to sandbox or workspace as needed - if (ref.type === "path") { - if (options?.sandbox) { - try { - const resolved = await resolveSandboxedBridgeMediaPath({ - sandbox: { - root: options.sandbox.root, - bridge: options.sandbox.bridge, - workspaceOnly: options.workspaceOnly, - }, - mediaPath: targetPath, - }); - targetPath = resolved.resolved; - } catch (err) { - log.debug( - `Native image: sandbox validation failed for ${ref.resolved}: ${err instanceof Error ? err.message : String(err)}`, - ); - return null; - } - } else if (!path.isAbsolute(targetPath)) { - targetPath = path.resolve(workspaceDir, targetPath); - } - if (options?.workspaceOnly && !options?.sandbox) { - const root = options?.sandbox?.root ?? workspaceDir; - await assertSandboxPath({ - filePath: targetPath, - cwd: root, - root, + if (options?.sandbox) { + try { + const resolved = await resolveSandboxedBridgeMediaPath({ + sandbox: { + root: options.sandbox.root, + bridge: options.sandbox.bridge, + workspaceOnly: options.workspaceOnly, + }, + mediaPath: targetPath, }); + targetPath = resolved.resolved; + } catch (err) { + log.debug( + `Native image: sandbox validation failed for ${ref.resolved}: ${err instanceof Error ? err.message : String(err)}`, + ); + return null; } + } else if (!path.isAbsolute(targetPath)) { + targetPath = path.resolve(workspaceDir, targetPath); + } + if (options?.workspaceOnly && !options?.sandbox) { + const root = options?.sandbox?.root ?? workspaceDir; + await assertSandboxPath({ + filePath: targetPath, + cwd: root, + root, + }); } // loadWebMedia handles local file paths (including file:// URLs) @@ -268,93 +270,6 @@ export function modelSupportsImages(model: { input?: string[] }): boolean { return model.input?.includes("image") ?? 
false; } -function extractTextFromMessage(message: unknown): string { - if (!message || typeof message !== "object") { - return ""; - } - const content = (message as { content?: unknown }).content; - if (typeof content === "string") { - return content; - } - if (!Array.isArray(content)) { - return ""; - } - const textParts: string[] = []; - for (const part of content) { - if (!part || typeof part !== "object") { - continue; - } - const record = part as Record; - if (record.type === "text" && typeof record.text === "string") { - textParts.push(record.text); - } - } - return textParts.join("\n").trim(); -} - -/** - * Extracts image references from conversation history messages. - * Scans user messages for image paths/URLs that can be loaded. - * Each ref includes the messageIndex so images can be injected at their original location. - * - * Note: Global deduplication is intentional - if the same image appears in multiple - * messages, we only inject it at the FIRST occurrence. This is sufficient because: - * 1. The model sees all message content including the image - * 2. Later references to "the image" or "that picture" will work since it's in context - * 3. 
Injecting duplicates would waste tokens and potentially hit size limits - */ -function detectImagesFromHistory(messages: unknown[]): DetectedImageRef[] { - const allRefs: DetectedImageRef[] = []; - const seen = new Set(); - - const messageHasImageContent = (msg: unknown): boolean => { - if (!msg || typeof msg !== "object") { - return false; - } - const content = (msg as { content?: unknown }).content; - if (!Array.isArray(content)) { - return false; - } - return content.some( - (part) => - part != null && typeof part === "object" && (part as { type?: string }).type === "image", - ); - }; - - for (let i = 0; i < messages.length; i++) { - const msg = messages[i]; - if (!msg || typeof msg !== "object") { - continue; - } - const message = msg as { role?: string }; - // Only scan user messages for image references - if (message.role !== "user") { - continue; - } - // Skip if message already has image content (prevents reloading each turn) - if (messageHasImageContent(msg)) { - continue; - } - - const text = extractTextFromMessage(msg); - if (!text) { - continue; - } - - const refs = detectImageReferences(text); - for (const ref of refs) { - const key = ref.resolved.toLowerCase(); - if (seen.has(key)) { - continue; - } - seen.add(key); - allRefs.push({ ...ref, messageIndex: i }); - } - } - - return allRefs; -} - /** * Detects and loads images referenced in a prompt for models with vision capability. * @@ -362,18 +277,14 @@ function detectImagesFromHistory(messages: unknown[]): DetectedImageRef[] { * loads them, and returns them as ImageContent array ready to be passed to * the model's prompt method. * - * Also scans conversation history for images from previous turns and returns - * them mapped by message index so they can be injected at their original location. 
- * * @param params Configuration for image detection and loading - * @returns Object with loaded images for current prompt and history images by message index + * @returns Object with loaded images for current prompt only */ export async function detectAndLoadPromptImages(params: { prompt: string; workspaceDir: string; model: { input?: string[] }; existingImages?: ImageContent[]; - historyMessages?: unknown[]; maxBytes?: number; maxDimensionPx?: number; workspaceOnly?: boolean; @@ -381,8 +292,6 @@ export async function detectAndLoadPromptImages(params: { }): Promise<{ /** Images for the current prompt (existingImages + detected in current prompt) */ images: ImageContent[]; - /** Images from history messages, keyed by message index */ - historyImagesByIndex: Map; detectedRefs: DetectedImageRef[]; loadedCount: number; skippedCount: number; @@ -391,7 +300,6 @@ export async function detectAndLoadPromptImages(params: { if (!modelSupportsImages(params.model)) { return { images: [], - historyImagesByIndex: new Map(), detectedRefs: [], loadedCount: 0, skippedCount: 0, @@ -399,38 +307,20 @@ export async function detectAndLoadPromptImages(params: { } // Detect images from current prompt - const promptRefs = detectImageReferences(params.prompt); - - // Detect images from conversation history (with message indices) - const historyRefs = params.historyMessages ? detectImagesFromHistory(params.historyMessages) : []; - - // Deduplicate: if an image is in the current prompt, don't also load it from history. - // Current prompt images are passed via the `images` parameter to prompt(), while history - // images are injected into their original message positions. We don't want the same - // image loaded and sent twice (wasting tokens and potentially causing confusion). 
- const seenPaths = new Set(promptRefs.map((r) => r.resolved.toLowerCase())); - const uniqueHistoryRefs = historyRefs.filter((r) => !seenPaths.has(r.resolved.toLowerCase())); - - const allRefs = [...promptRefs, ...uniqueHistoryRefs]; + const allRefs = detectImageReferences(params.prompt); if (allRefs.length === 0) { return { images: params.existingImages ?? [], - historyImagesByIndex: new Map(), detectedRefs: [], loadedCount: 0, skippedCount: 0, }; } - log.debug( - `Native image: detected ${allRefs.length} image refs (${promptRefs.length} in prompt, ${uniqueHistoryRefs.length} in history)`, - ); + log.debug(`Native image: detected ${allRefs.length} image refs in prompt`); - // Load images for current prompt const promptImages: ImageContent[] = [...(params.existingImages ?? [])]; - // Load images for history, grouped by message index - const historyImagesByIndex = new Map(); let loadedCount = 0; let skippedCount = 0; @@ -442,18 +332,7 @@ export async function detectAndLoadPromptImages(params: { sandbox: params.sandbox, }); if (image) { - if (ref.messageIndex !== undefined) { - // History image - add to the appropriate message index - const existing = historyImagesByIndex.get(ref.messageIndex); - if (existing) { - existing.push(image); - } else { - historyImagesByIndex.set(ref.messageIndex, [image]); - } - } else { - // Current prompt image - promptImages.push(image); - } + promptImages.push(image); loadedCount++; log.debug(`Native image: loaded ${ref.type} ${ref.resolved}`); } else { @@ -469,21 +348,9 @@ export async function detectAndLoadPromptImages(params: { "prompt:images", imageSanitization, ); - const sanitizedHistoryImagesByIndex = new Map(); - for (const [index, images] of historyImagesByIndex) { - const sanitized = await sanitizeImagesWithLog( - images, - `history:images:${index}`, - imageSanitization, - ); - if (sanitized.length > 0) { - sanitizedHistoryImagesByIndex.set(index, sanitized); - } - } return { images: sanitizedPromptImages, - 
historyImagesByIndex: sanitizedHistoryImagesByIndex, detectedRefs: allRefs, loadedCount, skippedCount, diff --git a/src/agents/pi-embedded-runner/run/types.ts b/src/agents/pi-embedded-runner/run/types.ts index e908dadeb87..469ff8bb33a 100644 --- a/src/agents/pi-embedded-runner/run/types.ts +++ b/src/agents/pi-embedded-runner/run/types.ts @@ -1,10 +1,10 @@ import type { AgentMessage } from "@mariozechner/pi-agent-core"; import type { Api, AssistantMessage, Model } from "@mariozechner/pi-ai"; +import type { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent"; import type { ThinkLevel } from "../../../auto-reply/thinking.js"; import type { SessionSystemPromptReport } from "../../../config/sessions/types.js"; import type { PluginHookBeforeAgentStartResult } from "../../../plugins/types.js"; import type { MessagingToolSend } from "../../pi-embedded-messaging.js"; -import type { AuthStorage, ModelRegistry } from "../../pi-model-discovery.js"; import type { NormalizedUsage } from "../../usage.js"; import type { RunEmbeddedPiAgentParams } from "./params.js"; diff --git a/src/agents/pi-embedded-runner/system-prompt.ts b/src/agents/pi-embedded-runner/system-prompt.ts index 67df4493695..ef246d1af23 100644 --- a/src/agents/pi-embedded-runner/system-prompt.ts +++ b/src/agents/pi-embedded-runner/system-prompt.ts @@ -28,6 +28,8 @@ export function buildEmbeddedSystemPrompt(params: { workspaceNotes?: string[]; /** Controls which hardcoded sections to include. Defaults to "full". */ promptMode?: PromptMode; + /** Whether ACP-specific routing guidance should be included. Defaults to true. 
*/ + acpEnabled?: boolean; runtimeInfo: { agentId?: string; host: string; @@ -67,6 +69,7 @@ export function buildEmbeddedSystemPrompt(params: { workspaceNotes: params.workspaceNotes, reactionGuidance: params.reactionGuidance, promptMode: params.promptMode, + acpEnabled: params.acpEnabled, runtimeInfo: params.runtimeInfo, messageToolHints: params.messageToolHints, sandboxInfo: params.sandboxInfo, diff --git a/src/agents/pi-embedded-subscribe.handlers.compaction.ts b/src/agents/pi-embedded-subscribe.handlers.compaction.ts index a8072bf2e1a..f25d05f0065 100644 --- a/src/agents/pi-embedded-subscribe.handlers.compaction.ts +++ b/src/agents/pi-embedded-subscribe.handlers.compaction.ts @@ -2,6 +2,7 @@ import type { AgentEvent } from "@mariozechner/pi-agent-core"; import { emitAgentEvent } from "../infra/agent-events.js"; import { getGlobalHookRunner } from "../plugins/hook-runner-global.js"; import type { EmbeddedPiSubscribeContext } from "./pi-embedded-subscribe.handlers.types.js"; +import { makeZeroUsageSnapshot } from "./usage.js"; export function handleAutoCompactionStart(ctx: EmbeddedPiSubscribeContext) { ctx.state.compactionInFlight = true; @@ -52,6 +53,7 @@ export function handleAutoCompactionEnd( ctx.log.debug(`embedded run compaction retry: runId=${ctx.params.runId}`); } else { ctx.maybeResolveCompactionWait(); + clearStaleAssistantUsageOnSessionMessages(ctx); } emitAgentEvent({ runId: ctx.params.runId, @@ -81,3 +83,22 @@ export function handleAutoCompactionEnd( } } } + +function clearStaleAssistantUsageOnSessionMessages(ctx: EmbeddedPiSubscribeContext): void { + const messages = ctx.params.session.messages; + if (!Array.isArray(messages)) { + return; + } + for (const message of messages) { + if (!message || typeof message !== "object") { + continue; + } + const candidate = message as { role?: unknown; usage?: unknown }; + if (candidate.role !== "assistant") { + continue; + } + // pi-coding-agent expects assistant usage to exist when computing context usage. 
+ // Reset stale snapshots to zeros instead of deleting the field. + candidate.usage = makeZeroUsageSnapshot(); + } +} diff --git a/src/agents/pi-embedded-subscribe.subscribe-embedded-pi-session.splits-long-single-line-fenced-blocks-reopen.test.ts b/src/agents/pi-embedded-subscribe.subscribe-embedded-pi-session.splits-long-single-line-fenced-blocks-reopen.test.ts index bbc2a019286..bff7046cc80 100644 --- a/src/agents/pi-embedded-subscribe.subscribe-embedded-pi-session.splits-long-single-line-fenced-blocks-reopen.test.ts +++ b/src/agents/pi-embedded-subscribe.subscribe-embedded-pi-session.splits-long-single-line-fenced-blocks-reopen.test.ts @@ -6,6 +6,7 @@ import { expectFencedChunks, } from "./pi-embedded-subscribe.e2e-harness.js"; import { subscribeEmbeddedPiSession } from "./pi-embedded-subscribe.js"; +import { makeZeroUsageSnapshot } from "./usage.js"; type SessionEventHandler = (evt: unknown) => void; @@ -115,4 +116,40 @@ describe("subscribeEmbeddedPiSession", () => { expect(resolved).toBe(true); expect(subscription.isCompacting()).toBe(false); }); + + it("resets assistant usage to a zero snapshot after compaction without retry", () => { + const listeners: SessionEventHandler[] = []; + const session = { + messages: [ + { + role: "assistant", + content: [{ type: "text", text: "old" }], + usage: { + input: 120, + output: 30, + cacheRead: 5, + cacheWrite: 0, + totalTokens: 155, + cost: { input: 0.001, output: 0.002, cacheRead: 0, cacheWrite: 0, total: 0.003 }, + }, + }, + ], + subscribe: (listener: SessionEventHandler) => { + listeners.push(listener); + return () => {}; + }, + } as unknown as Parameters[0]["session"]; + + subscribeEmbeddedPiSession({ + session, + runId: "run-3", + }); + + for (const listener of listeners) { + listener({ type: "auto_compaction_end", willRetry: false }); + } + + const usage = (session.messages?.[0] as { usage?: unknown } | undefined)?.usage; + expect(usage).toEqual(makeZeroUsageSnapshot()); + }); }); diff --git 
a/src/agents/pi-extensions/compaction-safeguard.test.ts b/src/agents/pi-extensions/compaction-safeguard.test.ts index 1c75139df97..60d3858c5d0 100644 --- a/src/agents/pi-extensions/compaction-safeguard.test.ts +++ b/src/agents/pi-extensions/compaction-safeguard.test.ts @@ -428,3 +428,59 @@ describe("compaction-safeguard extension model fallback", () => { expect(getApiKeyMock).not.toHaveBeenCalled(); }); }); + +describe("compaction-safeguard double-compaction guard", () => { + it("cancels compaction when there are no real messages to summarize", async () => { + const sessionManager = stubSessionManager(); + const model = createAnthropicModelFixture(); + setCompactionSafeguardRuntime(sessionManager, { model }); + + const compactionHandler = createCompactionHandler(); + const mockEvent = { + preparation: { + messagesToSummarize: [] as AgentMessage[], + turnPrefixMessages: [] as AgentMessage[], + firstKeptEntryId: "entry-1", + tokensBefore: 1500, + fileOps: { read: [], edited: [], written: [] }, + }, + customInstructions: "", + signal: new AbortController().signal, + }; + + const getApiKeyMock = vi.fn().mockResolvedValue("sk-test"); + const mockContext = createCompactionContext({ + sessionManager, + getApiKeyMock, + }); + + const result = (await compactionHandler(mockEvent, mockContext)) as { + cancel?: boolean; + }; + expect(result).toEqual({ cancel: true }); + expect(getApiKeyMock).not.toHaveBeenCalled(); + }); + + it("continues when messages include real conversation content", async () => { + const sessionManager = stubSessionManager(); + const model = createAnthropicModelFixture(); + setCompactionSafeguardRuntime(sessionManager, { model }); + + const compactionHandler = createCompactionHandler(); + const mockEvent = createCompactionEvent({ + messageText: "real message", + tokensBefore: 1500, + }); + const getApiKeyMock = vi.fn().mockResolvedValue(null); + const mockContext = createCompactionContext({ + sessionManager, + getApiKeyMock, + }); + + const result = 
(await compactionHandler(mockEvent, mockContext)) as { + cancel?: boolean; + }; + expect(result).toEqual({ cancel: true }); + expect(getApiKeyMock).toHaveBeenCalled(); + }); +}); diff --git a/src/agents/pi-extensions/compaction-safeguard.ts b/src/agents/pi-extensions/compaction-safeguard.ts index b7c15d50397..fbcf82b2003 100644 --- a/src/agents/pi-extensions/compaction-safeguard.ts +++ b/src/agents/pi-extensions/compaction-safeguard.ts @@ -130,6 +130,10 @@ function formatToolFailuresSection(failures: ToolFailure[]): string { return `\n\n## Tool Failures\n${lines.join("\n")}`; } +function isRealConversationMessage(message: AgentMessage): boolean { + return message.role === "user" || message.role === "assistant" || message.role === "toolResult"; +} + function computeFileLists(fileOps: FileOperations): { readFiles: string[]; modifiedFiles: string[]; @@ -191,6 +195,12 @@ async function readWorkspaceContextForSummary(): Promise { export default function compactionSafeguardExtension(api: ExtensionAPI): void { api.on("session_before_compact", async (event, ctx) => { const { preparation, customInstructions, signal } = event; + if (!preparation.messagesToSummarize.some(isRealConversationMessage)) { + log.warn( + "Compaction safeguard: cancelling compaction with no real conversation messages to summarize.", + ); + return { cancel: true }; + } const { readFiles, modifiedFiles } = computeFileLists(preparation.fileOps); const fileOpsSummary = formatFileOperations(readFiles, modifiedFiles); const toolFailures = collectToolFailures([ diff --git a/src/agents/pi-model-discovery.auth.test.ts b/src/agents/pi-model-discovery.auth.test.ts new file mode 100644 index 00000000000..0804ed42312 --- /dev/null +++ b/src/agents/pi-model-discovery.auth.test.ts @@ -0,0 +1,161 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { saveAuthProfileStore } from "./auth-profiles.js"; +import { 
discoverAuthStorage } from "./pi-model-discovery.js"; + +async function createAgentDir(): Promise { + return await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-pi-auth-storage-")); +} + +async function pathExists(pathname: string): Promise { + try { + await fs.stat(pathname); + return true; + } catch { + return false; + } +} + +describe("discoverAuthStorage", () => { + it("loads runtime credentials from auth-profiles without writing auth.json", async () => { + const agentDir = await createAgentDir(); + try { + saveAuthProfileStore( + { + version: 1, + profiles: { + "openrouter:default": { + type: "api_key", + provider: "openrouter", + key: "sk-or-v1-runtime", + }, + "anthropic:default": { + type: "token", + provider: "anthropic", + token: "sk-ant-runtime", + }, + "openai-codex:default": { + type: "oauth", + provider: "openai-codex", + access: "oauth-access", + refresh: "oauth-refresh", + expires: Date.now() + 60_000, + }, + }, + }, + agentDir, + ); + + const authStorage = discoverAuthStorage(agentDir); + + expect(authStorage.hasAuth("openrouter")).toBe(true); + expect(authStorage.hasAuth("anthropic")).toBe(true); + expect(authStorage.hasAuth("openai-codex")).toBe(true); + await expect(authStorage.getApiKey("openrouter")).resolves.toBe("sk-or-v1-runtime"); + await expect(authStorage.getApiKey("anthropic")).resolves.toBe("sk-ant-runtime"); + expect(authStorage.get("openai-codex")).toMatchObject({ + type: "oauth", + access: "oauth-access", + }); + + expect(await pathExists(path.join(agentDir, "auth.json"))).toBe(false); + } finally { + await fs.rm(agentDir, { recursive: true, force: true }); + } + }); + + it("scrubs static api_key entries from legacy auth.json and keeps oauth entries", async () => { + const agentDir = await createAgentDir(); + try { + saveAuthProfileStore( + { + version: 1, + profiles: { + "openrouter:default": { + type: "api_key", + provider: "openrouter", + key: "sk-or-v1-runtime", + }, + }, + }, + agentDir, + ); + await fs.writeFile( + 
path.join(agentDir, "auth.json"), + JSON.stringify( + { + openrouter: { type: "api_key", key: "legacy-static-key" }, + "openai-codex": { + type: "oauth", + access: "oauth-access", + refresh: "oauth-refresh", + expires: Date.now() + 60_000, + }, + }, + null, + 2, + ), + ); + + discoverAuthStorage(agentDir); + + const parsed = JSON.parse(await fs.readFile(path.join(agentDir, "auth.json"), "utf8")) as { + [key: string]: unknown; + }; + expect(parsed.openrouter).toBeUndefined(); + expect(parsed["openai-codex"]).toMatchObject({ + type: "oauth", + access: "oauth-access", + }); + } finally { + await fs.rm(agentDir, { recursive: true, force: true }); + } + }); + + it("preserves legacy auth.json when auth store is forced read-only", async () => { + const agentDir = await createAgentDir(); + const previous = process.env.OPENCLAW_AUTH_STORE_READONLY; + process.env.OPENCLAW_AUTH_STORE_READONLY = "1"; + try { + saveAuthProfileStore( + { + version: 1, + profiles: { + "openrouter:default": { + type: "api_key", + provider: "openrouter", + key: "sk-or-v1-runtime", + }, + }, + }, + agentDir, + ); + await fs.writeFile( + path.join(agentDir, "auth.json"), + JSON.stringify( + { + openrouter: { type: "api_key", key: "legacy-static-key" }, + }, + null, + 2, + ), + ); + + discoverAuthStorage(agentDir); + + const parsed = JSON.parse(await fs.readFile(path.join(agentDir, "auth.json"), "utf8")) as { + [key: string]: unknown; + }; + expect(parsed.openrouter).toMatchObject({ type: "api_key", key: "legacy-static-key" }); + } finally { + if (previous === undefined) { + delete process.env.OPENCLAW_AUTH_STORE_READONLY; + } else { + process.env.OPENCLAW_AUTH_STORE_READONLY = previous; + } + await fs.rm(agentDir, { recursive: true, force: true }); + } + }); +}); diff --git a/src/agents/pi-model-discovery.compat.test.ts b/src/agents/pi-model-discovery.compat.test.ts new file mode 100644 index 00000000000..dcba11e7cd0 --- /dev/null +++ b/src/agents/pi-model-discovery.compat.test.ts @@ -0,0 +1,26 @@ 
+import { afterEach, describe, expect, it, vi } from "vitest"; + +describe("pi-model-discovery module compatibility", () => { + afterEach(() => { + vi.resetModules(); + vi.doUnmock("@mariozechner/pi-coding-agent"); + }); + + it("loads when InMemoryAuthStorageBackend is not exported", async () => { + vi.resetModules(); + vi.doMock("@mariozechner/pi-coding-agent", () => { + class MockAuthStorage {} + class MockModelRegistry {} + + return { + AuthStorage: MockAuthStorage, + ModelRegistry: MockModelRegistry, + }; + }); + + await expect(import("./pi-model-discovery.js")).resolves.toMatchObject({ + discoverAuthStorage: expect.any(Function), + discoverModels: expect.any(Function), + }); + }); +}); diff --git a/src/agents/pi-model-discovery.ts b/src/agents/pi-model-discovery.ts index 51ac1aeb8e5..c283a653310 100644 --- a/src/agents/pi-model-discovery.ts +++ b/src/agents/pi-model-discovery.ts @@ -1,21 +1,151 @@ +import fs from "node:fs"; import path from "node:path"; -import { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent"; +import * as PiCodingAgent from "@mariozechner/pi-coding-agent"; +import type { + AuthStorage as PiAuthStorage, + ModelRegistry as PiModelRegistry, +} from "@mariozechner/pi-coding-agent"; +import { ensureAuthProfileStore } from "./auth-profiles.js"; +import { resolvePiCredentialMapFromStore, type PiCredentialMap } from "./pi-auth-credentials.js"; -export { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent"; +const PiAuthStorageClass = PiCodingAgent.AuthStorage; +const PiModelRegistryClass = PiCodingAgent.ModelRegistry; -function createAuthStorage(AuthStorageLike: unknown, path: string) { - const withFactory = AuthStorageLike as { create?: (path: string) => unknown }; - if (typeof withFactory.create === "function") { - return withFactory.create(path) as AuthStorage; +export { PiAuthStorageClass as AuthStorage, PiModelRegistryClass as ModelRegistry }; + +type InMemoryAuthStorageBackendLike = { + withLock( + update: 
(current: string) => { + result: T; + next?: string; + }, + ): T; +}; + +function createInMemoryAuthStorageBackend( + initialData: PiCredentialMap, +): InMemoryAuthStorageBackendLike { + let snapshot = JSON.stringify(initialData, null, 2); + return { + withLock( + update: (current: string) => { + result: T; + next?: string; + }, + ): T { + const { result, next } = update(snapshot); + if (typeof next === "string") { + snapshot = next; + } + return result; + }, + }; +} + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null && !Array.isArray(value); +} + +function scrubLegacyStaticAuthJsonEntries(pathname: string): void { + if (process.env.OPENCLAW_AUTH_STORE_READONLY === "1") { + return; } - return new (AuthStorageLike as { new (path: string): unknown })(path) as AuthStorage; + if (!fs.existsSync(pathname)) { + return; + } + + let parsed: unknown; + try { + parsed = JSON.parse(fs.readFileSync(pathname, "utf8")) as unknown; + } catch { + return; + } + if (!isRecord(parsed)) { + return; + } + + let changed = false; + for (const [provider, value] of Object.entries(parsed)) { + if (!isRecord(value)) { + continue; + } + if (value.type !== "api_key") { + continue; + } + delete parsed[provider]; + changed = true; + } + + if (!changed) { + return; + } + + if (Object.keys(parsed).length === 0) { + fs.rmSync(pathname, { force: true }); + return; + } + + fs.writeFileSync(pathname, `${JSON.stringify(parsed, null, 2)}\n`, "utf8"); + fs.chmodSync(pathname, 0o600); +} + +function createAuthStorage(AuthStorageLike: unknown, path: string, creds: PiCredentialMap) { + const withInMemory = AuthStorageLike as { inMemory?: (data?: unknown) => unknown }; + if (typeof withInMemory.inMemory === "function") { + return withInMemory.inMemory(creds) as PiAuthStorage; + } + + const withFromStorage = AuthStorageLike as { + fromStorage?: (storage: unknown) => unknown; + }; + if (typeof withFromStorage.fromStorage === "function") { + const 
backendCtor = ( + PiCodingAgent as { InMemoryAuthStorageBackend?: new () => InMemoryAuthStorageBackendLike } + ).InMemoryAuthStorageBackend; + const backend = + typeof backendCtor === "function" + ? new backendCtor() + : createInMemoryAuthStorageBackend(creds); + backend.withLock(() => ({ + result: undefined, + next: JSON.stringify(creds, null, 2), + })); + return withFromStorage.fromStorage(backend) as PiAuthStorage; + } + + const withFactory = AuthStorageLike as { create?: (path: string) => unknown }; + const withRuntimeOverride = ( + typeof withFactory.create === "function" + ? withFactory.create(path) + : new (AuthStorageLike as { new (path: string): unknown })(path) + ) as PiAuthStorage & { + setRuntimeApiKey?: (provider: string, apiKey: string) => void; + }; + if (typeof withRuntimeOverride.setRuntimeApiKey === "function") { + for (const [provider, credential] of Object.entries(creds)) { + if (credential.type === "api_key") { + withRuntimeOverride.setRuntimeApiKey(provider, credential.key); + continue; + } + withRuntimeOverride.setRuntimeApiKey(provider, credential.access); + } + } + return withRuntimeOverride; +} + +function resolvePiCredentials(agentDir: string): PiCredentialMap { + const store = ensureAuthProfileStore(agentDir, { allowKeychainPrompt: false }); + return resolvePiCredentialMapFromStore(store); } // Compatibility helpers for pi-coding-agent 0.50+ (discover* helpers removed). 
-export function discoverAuthStorage(agentDir: string): AuthStorage { - return createAuthStorage(AuthStorage, path.join(agentDir, "auth.json")); +export function discoverAuthStorage(agentDir: string): PiAuthStorage { + const credentials = resolvePiCredentials(agentDir); + const authPath = path.join(agentDir, "auth.json"); + scrubLegacyStaticAuthJsonEntries(authPath); + return createAuthStorage(PiAuthStorageClass, authPath, credentials); } -export function discoverModels(authStorage: AuthStorage, agentDir: string): ModelRegistry { - return new ModelRegistry(authStorage, path.join(agentDir, "models.json")); +export function discoverModels(authStorage: PiAuthStorage, agentDir: string): PiModelRegistry { + return new PiModelRegistryClass(authStorage, path.join(agentDir, "models.json")); } diff --git a/src/agents/pi-project-settings.test.ts b/src/agents/pi-project-settings.test.ts new file mode 100644 index 00000000000..07f86421f84 --- /dev/null +++ b/src/agents/pi-project-settings.test.ts @@ -0,0 +1,76 @@ +import { describe, expect, it } from "vitest"; +import { + buildEmbeddedPiSettingsSnapshot, + DEFAULT_EMBEDDED_PI_PROJECT_SETTINGS_POLICY, + resolveEmbeddedPiProjectSettingsPolicy, +} from "./pi-project-settings.js"; + +describe("resolveEmbeddedPiProjectSettingsPolicy", () => { + it("defaults to sanitize", () => { + expect(resolveEmbeddedPiProjectSettingsPolicy()).toBe( + DEFAULT_EMBEDDED_PI_PROJECT_SETTINGS_POLICY, + ); + }); + + it("accepts trusted and ignore modes", () => { + expect( + resolveEmbeddedPiProjectSettingsPolicy({ + agents: { defaults: { embeddedPi: { projectSettingsPolicy: "trusted" } } }, + }), + ).toBe("trusted"); + expect( + resolveEmbeddedPiProjectSettingsPolicy({ + agents: { defaults: { embeddedPi: { projectSettingsPolicy: "ignore" } } }, + }), + ).toBe("ignore"); + }); +}); + +describe("buildEmbeddedPiSettingsSnapshot", () => { + const globalSettings = { + shellPath: "/bin/zsh", + compaction: { reserveTokens: 20_000, keepRecentTokens: 20_000 }, 
+ }; + const projectSettings = { + shellPath: "/tmp/evil-shell", + shellCommandPrefix: "echo hacked &&", + compaction: { reserveTokens: 32_000 }, + hideThinkingBlock: true, + }; + + it("sanitize mode strips shell path + prefix but keeps other project settings", () => { + const snapshot = buildEmbeddedPiSettingsSnapshot({ + globalSettings, + projectSettings, + policy: "sanitize", + }); + expect(snapshot.shellPath).toBe("/bin/zsh"); + expect(snapshot.shellCommandPrefix).toBeUndefined(); + expect(snapshot.compaction?.reserveTokens).toBe(32_000); + expect(snapshot.hideThinkingBlock).toBe(true); + }); + + it("ignore mode drops all project settings", () => { + const snapshot = buildEmbeddedPiSettingsSnapshot({ + globalSettings, + projectSettings, + policy: "ignore", + }); + expect(snapshot.shellPath).toBe("/bin/zsh"); + expect(snapshot.shellCommandPrefix).toBeUndefined(); + expect(snapshot.compaction?.reserveTokens).toBe(20_000); + expect(snapshot.hideThinkingBlock).toBeUndefined(); + }); + + it("trusted mode keeps project settings as-is", () => { + const snapshot = buildEmbeddedPiSettingsSnapshot({ + globalSettings, + projectSettings, + policy: "trusted", + }); + expect(snapshot.shellPath).toBe("/tmp/evil-shell"); + expect(snapshot.shellCommandPrefix).toBe("echo hacked &&"); + expect(snapshot.compaction?.reserveTokens).toBe(32_000); + expect(snapshot.hideThinkingBlock).toBe(true); + }); +}); diff --git a/src/agents/pi-project-settings.ts b/src/agents/pi-project-settings.ts new file mode 100644 index 00000000000..7ddd9b6a1e9 --- /dev/null +++ b/src/agents/pi-project-settings.ts @@ -0,0 +1,75 @@ +import { SettingsManager } from "@mariozechner/pi-coding-agent"; +import type { OpenClawConfig } from "../config/config.js"; +import { applyMergePatch } from "../config/merge-patch.js"; +import { applyPiCompactionSettingsFromConfig } from "./pi-settings.js"; + +export const DEFAULT_EMBEDDED_PI_PROJECT_SETTINGS_POLICY = "sanitize"; +export const SANITIZED_PROJECT_PI_KEYS = 
["shellPath", "shellCommandPrefix"] as const; + +export type EmbeddedPiProjectSettingsPolicy = "trusted" | "sanitize" | "ignore"; + +type PiSettingsSnapshot = ReturnType; + +function sanitizeProjectSettings(settings: PiSettingsSnapshot): PiSettingsSnapshot { + const sanitized = { ...settings }; + // Never allow workspace-local settings to override shell execution behavior. + for (const key of SANITIZED_PROJECT_PI_KEYS) { + delete sanitized[key]; + } + return sanitized; +} + +export function resolveEmbeddedPiProjectSettingsPolicy( + cfg?: OpenClawConfig, +): EmbeddedPiProjectSettingsPolicy { + const raw = cfg?.agents?.defaults?.embeddedPi?.projectSettingsPolicy; + if (raw === "trusted" || raw === "sanitize" || raw === "ignore") { + return raw; + } + return DEFAULT_EMBEDDED_PI_PROJECT_SETTINGS_POLICY; +} + +export function buildEmbeddedPiSettingsSnapshot(params: { + globalSettings: PiSettingsSnapshot; + projectSettings: PiSettingsSnapshot; + policy: EmbeddedPiProjectSettingsPolicy; +}): PiSettingsSnapshot { + const effectiveProjectSettings = + params.policy === "ignore" + ? {} + : params.policy === "sanitize" + ? 
sanitizeProjectSettings(params.projectSettings) + : params.projectSettings; + return applyMergePatch(params.globalSettings, effectiveProjectSettings) as PiSettingsSnapshot; +} + +export function createEmbeddedPiSettingsManager(params: { + cwd: string; + agentDir: string; + cfg?: OpenClawConfig; +}): SettingsManager { + const fileSettingsManager = SettingsManager.create(params.cwd, params.agentDir); + const policy = resolveEmbeddedPiProjectSettingsPolicy(params.cfg); + if (policy === "trusted") { + return fileSettingsManager; + } + const settings = buildEmbeddedPiSettingsSnapshot({ + globalSettings: fileSettingsManager.getGlobalSettings(), + projectSettings: fileSettingsManager.getProjectSettings(), + policy, + }); + return SettingsManager.inMemory(settings); +} + +export function createPreparedEmbeddedPiSettingsManager(params: { + cwd: string; + agentDir: string; + cfg?: OpenClawConfig; +}): SettingsManager { + const settingsManager = createEmbeddedPiSettingsManager(params); + applyPiCompactionSettingsFromConfig({ + settingsManager, + cfg: params.cfg, + }); + return settingsManager; +} diff --git a/src/agents/pi-tool-definition-adapter.test.ts b/src/agents/pi-tool-definition-adapter.test.ts index 1b11bbf49be..6def07167cb 100644 --- a/src/agents/pi-tool-definition-adapter.test.ts +++ b/src/agents/pi-tool-definition-adapter.test.ts @@ -25,6 +25,15 @@ async function executeThrowingTool(name: string, callId: string) { return await def.execute(callId, {}, undefined, undefined, extensionContext); } +async function executeTool(tool: AgentTool, callId: string) { + const defs = toToolDefinitions([tool]); + const def = defs[0]; + if (!def) { + throw new Error("missing tool definition"); + } + return await def.execute(callId, {}, undefined, undefined, extensionContext); +} + describe("pi tool definition adapter", () => { it("wraps tool errors into a tool result", async () => { const result = await executeThrowingTool("boom", "call1"); @@ -46,4 +55,46 @@ describe("pi tool 
definition adapter", () => { error: "nope", }); }); + + it("coerces details-only tool results to include content", async () => { + const tool = { + name: "memory_query", + label: "Memory Query", + description: "returns details only", + parameters: Type.Object({}), + execute: (async () => ({ + details: { + hits: [{ id: "a1", score: 0.9 }], + }, + })) as unknown as AgentTool["execute"], + } satisfies AgentTool; + + const result = await executeTool(tool, "call3"); + expect(result.details).toEqual({ + hits: [{ id: "a1", score: 0.9 }], + }); + expect(result.content[0]).toMatchObject({ type: "text" }); + expect((result.content[0] as { text?: string }).text).toContain('"hits"'); + }); + + it("coerces non-standard object results to include content", async () => { + const tool = { + name: "memory_query_raw", + label: "Memory Query Raw", + description: "returns plain object", + parameters: Type.Object({}), + execute: (async () => ({ + count: 2, + ids: ["m1", "m2"], + })) as unknown as AgentTool["execute"], + } satisfies AgentTool; + + const result = await executeTool(tool, "call4"); + expect(result.details).toEqual({ + count: 2, + ids: ["m1", "m2"], + }); + expect(result.content[0]).toMatchObject({ type: "text" }); + expect((result.content[0] as { text?: string }).text).toContain('"count"'); + }); }); diff --git a/src/agents/pi-tool-definition-adapter.ts b/src/agents/pi-tool-definition-adapter.ts index f3963600c80..a6221586242 100644 --- a/src/agents/pi-tool-definition-adapter.ts +++ b/src/agents/pi-tool-definition-adapter.ts @@ -62,6 +62,56 @@ function describeToolExecutionError(err: unknown): { return { message: String(err) }; } +function stringifyToolPayload(payload: unknown): string { + if (typeof payload === "string") { + return payload; + } + try { + const encoded = JSON.stringify(payload, null, 2); + if (typeof encoded === "string") { + return encoded; + } + } catch { + // Fall through to String(payload) for non-serializable values. 
+ } + return String(payload); +} + +function normalizeToolExecutionResult(params: { + toolName: string; + result: unknown; +}): AgentToolResult { + const { toolName, result } = params; + if (result && typeof result === "object") { + const record = result as Record; + if (Array.isArray(record.content)) { + return result as AgentToolResult; + } + logDebug(`tools: ${toolName} returned non-standard result (missing content[]); coercing`); + const details = "details" in record ? record.details : record; + const safeDetails = details ?? { status: "ok", tool: toolName }; + return { + content: [ + { + type: "text", + text: stringifyToolPayload(safeDetails), + }, + ], + details: safeDetails, + }; + } + const safeDetails = result ?? { status: "ok", tool: toolName }; + return { + content: [ + { + type: "text", + text: stringifyToolPayload(safeDetails), + }, + ], + details: safeDetails, + }; +} + function splitToolExecuteArgs(args: ToolExecuteArgsAny): { toolCallId: string; params: unknown; @@ -111,7 +161,11 @@ export function toToolDefinitions(tools: AnyAgentTool[]): ToolDefinition[] { } executeParams = hookOutcome.params; } - const result = await tool.execute(toolCallId, executeParams, signal, onUpdate); + const rawResult = await tool.execute(toolCallId, executeParams, signal, onUpdate); + const result = normalizeToolExecutionResult({ + toolName: normalizedName, + result: rawResult, + }); const afterParams = beforeHookWrapped ? (consumeAdjustedParamsForToolCall(toolCallId) ?? 
executeParams) : executeParams; diff --git a/src/agents/pi-tools.message-provider-policy.test.ts b/src/agents/pi-tools.message-provider-policy.test.ts new file mode 100644 index 00000000000..0bcdd5144f0 --- /dev/null +++ b/src/agents/pi-tools.message-provider-policy.test.ts @@ -0,0 +1,19 @@ +import { describe, expect, it } from "vitest"; +import { createOpenClawCodingTools } from "./pi-tools.js"; + +describe("createOpenClawCodingTools message provider policy", () => { + it.each(["voice", "VOICE", " Voice "])( + "does not expose tts tool for normalized voice provider: %s", + (messageProvider) => { + const tools = createOpenClawCodingTools({ messageProvider }); + const names = new Set(tools.map((tool) => tool.name)); + expect(names.has("tts")).toBe(false); + }, + ); + + it("keeps tts tool for non-voice providers", () => { + const tools = createOpenClawCodingTools({ messageProvider: "discord" }); + const names = new Set(tools.map((tool) => tool.name)); + expect(names.has("tts")).toBe(true); + }); +}); diff --git a/src/agents/pi-tools.read.ts b/src/agents/pi-tools.read.ts index 4fe53c3317c..923be3aa7af 100644 --- a/src/agents/pi-tools.read.ts +++ b/src/agents/pi-tools.read.ts @@ -1,7 +1,9 @@ +import fs from "node:fs/promises"; import path from "node:path"; import { fileURLToPath } from "node:url"; import type { AgentToolResult } from "@mariozechner/pi-agent-core"; import { createEditTool, createReadTool, createWriteTool } from "@mariozechner/pi-coding-agent"; +import { SafeOpenError, openFileWithinRoot, writeFileWithinRoot } from "../infra/fs-safe.js"; import { detectMime } from "../media/mime.js"; import { sniffMimeFromBase64 } from "../media/sniff-mime-from-base64.js"; import type { ImageSanitizationLimits } from "./image-sanitization.js"; @@ -665,6 +667,20 @@ export function createSandboxedEditTool(params: SandboxToolParams) { return wrapToolParamNormalization(base, CLAUDE_PARAM_GROUPS.edit); } +export function createHostWorkspaceWriteTool(root: string) { + const 
base = createWriteTool(root, { + operations: createHostWriteOperations(root), + }) as unknown as AnyAgentTool; + return wrapToolParamNormalization(base, CLAUDE_PARAM_GROUPS.write); +} + +export function createHostWorkspaceEditTool(root: string) { + const base = createEditTool(root, { + operations: createHostEditOperations(root), + }) as unknown as AnyAgentTool; + return wrapToolParamNormalization(base, CLAUDE_PARAM_GROUPS.edit); +} + export function createOpenClawReadTool( base: AnyAgentTool, options?: OpenClawReadToolOptions, @@ -741,6 +757,87 @@ function createSandboxEditOperations(params: SandboxToolParams) { } as const; } +function createHostWriteOperations(root: string) { + return { + mkdir: async (dir: string) => { + const relative = toRelativePathInRoot(root, dir, { allowRoot: true }); + const resolved = relative ? path.resolve(root, relative) : path.resolve(root); + await assertSandboxPath({ filePath: resolved, cwd: root, root }); + await fs.mkdir(resolved, { recursive: true }); + }, + writeFile: async (absolutePath: string, content: string) => { + const relative = toRelativePathInRoot(root, absolutePath); + await writeFileWithinRoot({ + rootDir: root, + relativePath: relative, + data: content, + mkdir: true, + }); + }, + } as const; +} + +function createHostEditOperations(root: string) { + return { + readFile: async (absolutePath: string) => { + const relative = toRelativePathInRoot(root, absolutePath); + const opened = await openFileWithinRoot({ + rootDir: root, + relativePath: relative, + }); + try { + return await opened.handle.readFile(); + } finally { + await opened.handle.close().catch(() => {}); + } + }, + writeFile: async (absolutePath: string, content: string) => { + const relative = toRelativePathInRoot(root, absolutePath); + await writeFileWithinRoot({ + rootDir: root, + relativePath: relative, + data: content, + mkdir: true, + }); + }, + access: async (absolutePath: string) => { + const relative = toRelativePathInRoot(root, absolutePath); + try 
{ + const opened = await openFileWithinRoot({ + rootDir: root, + relativePath: relative, + }); + await opened.handle.close().catch(() => {}); + } catch (error) { + if (error instanceof SafeOpenError && error.code === "not-found") { + throw createFsAccessError("ENOENT", absolutePath); + } + throw error; + } + }, + } as const; +} + +function toRelativePathInRoot( + root: string, + candidate: string, + options?: { allowRoot?: boolean }, +): string { + const rootResolved = path.resolve(root); + const resolved = path.resolve(candidate); + const relative = path.relative(rootResolved, resolved); + if (relative === "" || relative === ".") { + if (options?.allowRoot) { + return ""; + } + throw new Error(`Path escapes workspace root: ${candidate}`); + } + if (relative.startsWith("..") || path.isAbsolute(relative)) { + throw new Error(`Path escapes workspace root: ${candidate}`); + } + return relative; +} + function createFsAccessError(code: string, filePath: string): NodeJS.ErrnoException { const error = new Error(`Sandbox FS error (${code}): ${filePath}`) as NodeJS.ErrnoException; error.code = code; diff --git a/src/agents/pi-tools.ts b/src/agents/pi-tools.ts index e2d29d375da..c5120f7438e 100644 --- a/src/agents/pi-tools.ts +++ b/src/agents/pi-tools.ts @@ -1,10 +1,4 @@ -import { - codingTools, - createEditTool, - createReadTool, - createWriteTool, - readTool, -} from "@mariozechner/pi-coding-agent"; +import { codingTools, createReadTool, readTool } from "@mariozechner/pi-coding-agent"; import type { OpenClawConfig } from "../config/config.js"; import type { ToolLoopDetectionConfig } from "../config/types.tools.js"; import { resolveMergedSafeBinProfileFixtures } from "../infra/exec-safe-bin-runtime-policy.js"; @@ -34,7 +28,8 @@ import { } from "./pi-tools.policy.js"; import { assertRequiredParams, - CLAUDE_PARAM_GROUPS, + createHostWorkspaceEditTool, + createHostWorkspaceWriteTool, createOpenClawReadTool, createSandboxedEditTool, createSandboxedReadTool, @@ -67,6 +62,31 @@ 
function isOpenAIProvider(provider?: string) { return normalized === "openai" || normalized === "openai-codex"; } +const TOOL_DENY_BY_MESSAGE_PROVIDER: Readonly> = { + voice: ["tts"], +}; + +function normalizeMessageProvider(messageProvider?: string): string | undefined { + const normalized = messageProvider?.trim().toLowerCase(); + return normalized && normalized.length > 0 ? normalized : undefined; +} + +function applyMessageProviderToolPolicy( + tools: AnyAgentTool[], + messageProvider?: string, +): AnyAgentTool[] { + const normalizedProvider = normalizeMessageProvider(messageProvider); + if (!normalizedProvider) { + return tools; + } + const deniedTools = TOOL_DENY_BY_MESSAGE_PROVIDER[normalizedProvider]; + if (!deniedTools || deniedTools.length === 0) { + return tools; + } + const deniedSet = new Set(deniedTools); + return tools.filter((tool) => !deniedSet.has(tool.name)); +} + function isApplyPatchAllowedForModel(params: { modelProvider?: string; modelId?: string; @@ -339,22 +359,14 @@ export function createOpenClawCodingTools(options?: { if (sandboxRoot) { return []; } - // Wrap with param normalization for Claude Code compatibility - const wrapped = wrapToolParamNormalization( - createWriteTool(workspaceRoot), - CLAUDE_PARAM_GROUPS.write, - ); + const wrapped = createHostWorkspaceWriteTool(workspaceRoot); return [workspaceOnly ? wrapToolWorkspaceRootGuard(wrapped, workspaceRoot) : wrapped]; } if (tool.name === "edit") { if (sandboxRoot) { return []; } - // Wrap with param normalization for Claude Code compatibility - const wrapped = wrapToolParamNormalization( - createEditTool(workspaceRoot), - CLAUDE_PARAM_GROUPS.edit, - ); + const wrapped = createHostWorkspaceEditTool(workspaceRoot); return [workspaceOnly ? 
wrapToolWorkspaceRootGuard(wrapped, workspaceRoot) : wrapped]; } return [tool]; @@ -376,6 +388,9 @@ export function createOpenClawCodingTools(options?: { scopeKey, sessionKey: options?.sessionKey, messageProvider: options?.messageProvider, + currentChannelId: options?.currentChannelId, + currentThreadTs: options?.currentThreadTs, + accountId: options?.agentAccountId, backgroundMs: options?.exec?.backgroundMs ?? execConfig.backgroundMs, timeoutSec: options?.exec?.timeoutSec ?? execConfig.timeoutSec, approvalRunningNoticeMs: @@ -480,9 +495,10 @@ export function createOpenClawCodingTools(options?: { senderIsOwner: options?.senderIsOwner, }), ]; + const toolsForMessageProvider = applyMessageProviderToolPolicy(tools, options?.messageProvider); // Security: treat unknown/undefined as unauthorized (opt-in, not opt-out) const senderIsOwner = options?.senderIsOwner === true; - const toolsByAuthorization = applyOwnerOnlyToolPolicy(tools, senderIsOwner); + const toolsByAuthorization = applyOwnerOnlyToolPolicy(toolsForMessageProvider, senderIsOwner); const subagentFiltered = applyToolPolicyPipeline({ tools: toolsByAuthorization, toolMeta: (tool) => getPluginToolMeta(tool), diff --git a/src/agents/pi-tools.workspace-paths.test.ts b/src/agents/pi-tools.workspace-paths.test.ts index 6fe98ff03f8..4efa494555e 100644 --- a/src/agents/pi-tools.workspace-paths.test.ts +++ b/src/agents/pi-tools.workspace-paths.test.ts @@ -151,6 +151,46 @@ describe("workspace path resolution", () => { ).rejects.toThrow(/Path escapes sandbox root/i); }); }); + + it("rejects hardlinked file aliases when workspaceOnly is enabled", async () => { + if (process.platform === "win32") { + return; + } + await withTempDir("openclaw-ws-", async (workspaceDir) => { + const cfg: OpenClawConfig = { tools: { fs: { workspaceOnly: true } } }; + const tools = createOpenClawCodingTools({ workspaceDir, config: cfg }); + const { readTool, writeTool } = expectReadWriteEditTools(tools); + const outsidePath = path.join( + 
path.dirname(workspaceDir), + `outside-hardlink-${process.pid}-${Date.now()}.txt`, + ); + const hardlinkPath = path.join(workspaceDir, "linked.txt"); + await fs.writeFile(outsidePath, "top-secret", "utf8"); + try { + try { + await fs.link(outsidePath, hardlinkPath); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + await expect(readTool.execute("ws-read-hardlink", { path: "linked.txt" })).rejects.toThrow( + /hardlink|sandbox/i, + ); + await expect( + writeTool.execute("ws-write-hardlink", { + path: "linked.txt", + content: "pwned", + }), + ).rejects.toThrow(/hardlink|sandbox/i); + expect(await fs.readFile(outsidePath, "utf8")).toBe("top-secret"); + } finally { + await fs.rm(hardlinkPath, { force: true }); + await fs.rm(outsidePath, { force: true }); + } + }); + }); }); describe("sandboxed workspace paths", () => { diff --git a/src/agents/sandbox-paths.test.ts b/src/agents/sandbox-paths.test.ts index 305da9eb40a..3deb30a0179 100644 --- a/src/agents/sandbox-paths.test.ts +++ b/src/agents/sandbox-paths.test.ts @@ -195,6 +195,26 @@ describe("resolveSandboxedMediaSource", () => { }); }); + it("rejects sandbox symlink escapes when the outside leaf does not exist yet", async () => { + if (process.platform === "win32") { + return; + } + await withSandboxRoot(async (sandboxDir) => { + const outsideDir = await fs.mkdtemp( + path.join(process.cwd(), "sandbox-media-outside-missing-"), + ); + const linkDir = path.join(sandboxDir, "escape-link"); + await fs.symlink(outsideDir, linkDir); + try { + const missingOutsidePath = path.join(linkDir, "new-file.txt"); + await expectSandboxRejection(missingOutsidePath, sandboxDir, /symlink|sandbox/i); + } finally { + await fs.rm(linkDir, { force: true }); + await fs.rm(outsideDir, { recursive: true, force: true }); + } + }); + }); + it("rejects hardlinked OpenClaw tmp paths to outside files", async () => { if (process.platform === "win32") { return; diff --git 
a/src/agents/sandbox-paths.ts b/src/agents/sandbox-paths.ts index 761106e8574..1d46d02db63 100644 --- a/src/agents/sandbox-paths.ts +++ b/src/agents/sandbox-paths.ts @@ -1,8 +1,8 @@ -import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { fileURLToPath, URL } from "node:url"; -import { isNotFoundPathError, isPathInside } from "../infra/path-guards.js"; +import { assertNoPathAliasEscape, type PathAliasPolicy } from "../infra/path-alias-guards.js"; +import { isPathInside } from "../infra/path-guards.js"; import { resolvePreferredOpenClawTmpDir } from "../infra/tmp-openclaw-dir.js"; const UNICODE_SPACES = /[\u00A0\u2000-\u200A\u202F\u205F\u3000]/g; @@ -61,11 +61,19 @@ export async function assertSandboxPath(params: { filePath: string; cwd: string; root: string; - allowFinalSymlink?: boolean; + allowFinalSymlinkForUnlink?: boolean; + allowFinalHardlinkForUnlink?: boolean; }) { const resolved = resolveSandboxPath(params); - await assertNoSymlinkEscape(resolved.relative, path.resolve(params.root), { - allowFinalSymlink: params.allowFinalSymlink, + const policy: PathAliasPolicy = { + allowFinalSymlinkForUnlink: params.allowFinalSymlinkForUnlink, + allowFinalHardlinkForUnlink: params.allowFinalHardlinkForUnlink, + }; + await assertNoPathAliasEscape({ + absolutePath: resolved.resolved, + rootPath: params.root, + boundaryLabel: "sandbox root", + policy, }); return resolved; } @@ -194,76 +202,11 @@ async function assertNoTmpAliasEscape(params: { filePath: string; tmpRoot: string; }): Promise { - await assertNoSymlinkEscape(path.relative(params.tmpRoot, params.filePath), params.tmpRoot); - await assertNoHardlinkedFinalPath(params.filePath, params.tmpRoot); -} - -async function assertNoHardlinkedFinalPath(filePath: string, tmpRoot: string): Promise { - let stat: Awaited>; - try { - stat = await fs.stat(filePath); - } catch (err) { - if (isNotFoundPathError(err)) { - return; - } - throw err; - } - if (!stat.isFile()) { - return; - } - 
if (stat.nlink > 1) { - throw new Error( - `Hardlinked tmp media path is not allowed under tmp root (${shortPath(tmpRoot)}): ${shortPath(filePath)}`, - ); - } -} - -async function assertNoSymlinkEscape( - relative: string, - root: string, - options?: { allowFinalSymlink?: boolean }, -) { - if (!relative) { - return; - } - const rootReal = await tryRealpath(root); - const parts = relative.split(path.sep).filter(Boolean); - let current = root; - for (let idx = 0; idx < parts.length; idx += 1) { - const part = parts[idx]; - const isLast = idx === parts.length - 1; - current = path.join(current, part); - try { - const stat = await fs.lstat(current); - if (stat.isSymbolicLink()) { - // Unlinking a symlink itself is safe even if it points outside the root. What we - // must prevent is traversing through a symlink to reach targets outside root. - if (options?.allowFinalSymlink && isLast) { - return; - } - const target = await tryRealpath(current); - if (!isPathInside(rootReal, target)) { - throw new Error( - `Symlink escapes sandbox root (${shortPath(rootReal)}): ${shortPath(current)}`, - ); - } - current = target; - } - } catch (err) { - if (isNotFoundPathError(err)) { - return; - } - throw err; - } - } -} - -async function tryRealpath(value: string): Promise { - try { - return await fs.realpath(value); - } catch { - return path.resolve(value); - } + await assertNoPathAliasEscape({ + absolutePath: params.filePath, + rootPath: params.tmpRoot, + boundaryLabel: "tmp root", + }); } function shortPath(value: string) { diff --git a/src/agents/sandbox/fs-bridge.test.ts b/src/agents/sandbox/fs-bridge.test.ts index d3bcd735e9e..f5c9aaedd6d 100644 --- a/src/agents/sandbox/fs-bridge.test.ts +++ b/src/agents/sandbox/fs-bridge.test.ts @@ -195,6 +195,42 @@ describe("sandbox fs bridge shell compatibility", () => { await fs.rm(stateDir, { recursive: true, force: true }); }); + it("rejects pre-existing host hardlink escapes before docker exec", async () => { + if (process.platform === 
"win32") { + return; + } + const stateDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-fs-bridge-hardlink-")); + const workspaceDir = path.join(stateDir, "workspace"); + const outsideDir = path.join(stateDir, "outside"); + const outsideFile = path.join(outsideDir, "secret.txt"); + await fs.mkdir(workspaceDir, { recursive: true }); + await fs.mkdir(outsideDir, { recursive: true }); + await fs.writeFile(outsideFile, "classified"); + const hardlinkPath = path.join(workspaceDir, "link.txt"); + try { + try { + await fs.link(outsideFile, hardlinkPath); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + + await expect(bridge.readFile({ filePath: "link.txt" })).rejects.toThrow(/hardlink|sandbox/i); + expect(mockedExecDockerRaw).not.toHaveBeenCalled(); + } finally { + await fs.rm(stateDir, { recursive: true, force: true }); + } + }); + it("rejects container-canonicalized paths outside allowed mounts", async () => { mockedExecDockerRaw.mockImplementation(async (args) => { const script = getDockerScript(args); diff --git a/src/agents/sandbox/fs-bridge.ts b/src/agents/sandbox/fs-bridge.ts index 226fc39ca1d..3c92297663a 100644 --- a/src/agents/sandbox/fs-bridge.ts +++ b/src/agents/sandbox/fs-bridge.ts @@ -1,6 +1,6 @@ -import fs from "node:fs/promises"; -import path from "node:path"; -import { isNotFoundPathError, isPathInside } from "../../infra/path-guards.js"; +import fs from "node:fs"; +import { openBoundaryFile } from "../../infra/boundary-file-read.js"; +import { PATH_ALIAS_POLICIES, type PathAliasPolicy } from "../../infra/path-alias-guards.js"; import { execDockerRaw, type ExecDockerRawResult } from "./docker.js"; import { buildSandboxFsMounts, @@ -20,8 +20,9 @@ type RunCommandOptions = { type PathSafetyOptions = { action: string; - allowFinalSymlink?: boolean; + aliasPolicy?: 
PathAliasPolicy; requireWritable?: boolean; + allowMissingTarget?: boolean; }; export type SandboxResolvedPath = { @@ -150,7 +151,7 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { await this.assertPathSafety(target, { action: "remove files", requireWritable: true, - allowFinalSymlink: true, + aliasPolicy: PATH_ALIAS_POLICIES.unlinkTarget, }); const flags = [params.force === false ? "" : "-f", params.recursive ? "-r" : ""].filter( Boolean, @@ -175,7 +176,7 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { await this.assertPathSafety(from, { action: "rename files", requireWritable: true, - allowFinalSymlink: true, + aliasPolicy: PATH_ALIAS_POLICIES.unlinkTarget, }); await this.assertPathSafety(to, { action: "rename files", @@ -252,15 +253,27 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { ); } - await assertNoHostSymlinkEscape({ + const guarded = await openBoundaryFile({ absolutePath: target.hostPath, rootPath: lexicalMount.hostRoot, - allowFinalSymlink: options.allowFinalSymlink === true, + boundaryLabel: "sandbox mount root", + aliasPolicy: options.aliasPolicy, }); + if (!guarded.ok) { + if (guarded.reason !== "path" || options.allowMissingTarget === false) { + throw guarded.error instanceof Error + ? 
guarded.error + : new Error( + `Sandbox boundary checks failed; cannot ${options.action}: ${target.containerPath}`, + ); + } + } else { + fs.closeSync(guarded.fd); + } const canonicalContainerPath = await this.resolveCanonicalContainerPath({ containerPath: target.containerPath, - allowFinalSymlink: options.allowFinalSymlink === true, + allowFinalSymlinkForUnlink: options.aliasPolicy?.allowFinalSymlinkForUnlink === true, }); const canonicalMount = this.resolveMountByContainerPath(canonicalContainerPath); if (!canonicalMount) { @@ -287,7 +300,7 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { private async resolveCanonicalContainerPath(params: { containerPath: string; - allowFinalSymlink: boolean; + allowFinalSymlinkForUnlink: boolean; }): Promise { const script = [ "set -eu", @@ -308,7 +321,7 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { 'printf "%s%s\\n" "$canonical" "$suffix"', ].join("\n"); const result = await this.runCommand(script, { - args: [params.containerPath, params.allowFinalSymlink ? "1" : "0"], + args: [params.containerPath, params.allowFinalSymlinkForUnlink ? 
"1" : "0"], }); const canonical = result.stdout.toString("utf8").trim(); if (!canonical.startsWith("/")) { @@ -351,53 +364,3 @@ function coerceStatType(typeRaw?: string): "file" | "directory" | "other" { } return "other"; } - -async function assertNoHostSymlinkEscape(params: { - absolutePath: string; - rootPath: string; - allowFinalSymlink: boolean; -}): Promise { - const root = path.resolve(params.rootPath); - const target = path.resolve(params.absolutePath); - if (!isPathInside(root, target)) { - throw new Error(`Sandbox path escapes mount root (${root}): ${params.absolutePath}`); - } - const relative = path.relative(root, target); - if (!relative) { - return; - } - const rootReal = await tryRealpath(root); - const parts = relative.split(path.sep).filter(Boolean); - let current = root; - for (let idx = 0; idx < parts.length; idx += 1) { - current = path.join(current, parts[idx] ?? ""); - const isLast = idx === parts.length - 1; - try { - const stat = await fs.lstat(current); - if (!stat.isSymbolicLink()) { - continue; - } - if (params.allowFinalSymlink && isLast) { - return; - } - const symlinkTarget = await tryRealpath(current); - if (!isPathInside(rootReal, symlinkTarget)) { - throw new Error(`Symlink escapes sandbox mount root (${rootReal}): ${current}`); - } - current = symlinkTarget; - } catch (error) { - if (isNotFoundPathError(error)) { - return; - } - throw error; - } - } -} - -async function tryRealpath(value: string): Promise { - try { - return await fs.realpath(value); - } catch { - return path.resolve(value); - } -} diff --git a/src/agents/sandbox/host-paths.ts b/src/agents/sandbox/host-paths.ts index 7b99ed0a53c..f07f44d2ff4 100644 --- a/src/agents/sandbox/host-paths.ts +++ b/src/agents/sandbox/host-paths.ts @@ -1,12 +1,34 @@ -import { existsSync, realpathSync } from "node:fs"; import { posix } from "node:path"; +import { resolvePathViaExistingAncestorSync } from "../../infra/boundary-path.js"; + +function stripWindowsNamespacePrefix(input: string): 
string { + if (input.startsWith("\\\\?\\")) { + const withoutPrefix = input.slice(4); + if (withoutPrefix.toUpperCase().startsWith("UNC\\")) { + return `\\\\${withoutPrefix.slice(4)}`; + } + return withoutPrefix; + } + if (input.startsWith("//?/")) { + const withoutPrefix = input.slice(4); + if (withoutPrefix.toUpperCase().startsWith("UNC/")) { + return `//${withoutPrefix.slice(4)}`; + } + return withoutPrefix; + } + return input; +} /** * Normalize a POSIX host path: resolve `.`, `..`, collapse `//`, strip trailing `/`. */ export function normalizeSandboxHostPath(raw: string): string { - const trimmed = raw.trim(); - return posix.normalize(trimmed).replace(/\/+$/, "") || "/"; + const trimmed = stripWindowsNamespacePrefix(raw.trim()); + if (!trimmed) { + return "/"; + } + const normalized = posix.normalize(trimmed.replaceAll("\\", "/")); + return normalized.replace(/\/+$/, "") || "/"; } /** @@ -17,31 +39,5 @@ export function resolveSandboxHostPathViaExistingAncestor(sourcePath: string): s if (!sourcePath.startsWith("/")) { return sourcePath; } - - const normalized = normalizeSandboxHostPath(sourcePath); - let current = normalized; - const missingSegments: string[] = []; - - while (current !== "/" && !existsSync(current)) { - missingSegments.unshift(posix.basename(current)); - const parent = posix.dirname(current); - if (parent === current) { - break; - } - current = parent; - } - - if (!existsSync(current)) { - return normalized; - } - - try { - const resolvedAncestor = normalizeSandboxHostPath(realpathSync.native(current)); - if (missingSegments.length === 0) { - return resolvedAncestor; - } - return normalizeSandboxHostPath(posix.join(resolvedAncestor, ...missingSegments)); - } catch { - return normalized; - } + return normalizeSandboxHostPath(resolvePathViaExistingAncestorSync(sourcePath)); } diff --git a/src/agents/skills/env-overrides.ts b/src/agents/skills/env-overrides.ts index bb8bec22503..b16b0249e50 100644 --- a/src/agents/skills/env-overrides.ts +++ 
b/src/agents/skills/env-overrides.ts @@ -105,9 +105,10 @@ function applySkillConfigEnvOverrides(params: { } } - if (normalizedPrimaryEnv && skillConfig.apiKey && !process.env[normalizedPrimaryEnv]) { + const resolvedApiKey = typeof skillConfig.apiKey === "string" ? skillConfig.apiKey.trim() : ""; + if (normalizedPrimaryEnv && resolvedApiKey && !process.env[normalizedPrimaryEnv]) { if (!pendingOverrides[normalizedPrimaryEnv]) { - pendingOverrides[normalizedPrimaryEnv] = skillConfig.apiKey; + pendingOverrides[normalizedPrimaryEnv] = resolvedApiKey; } } diff --git a/src/agents/skills/plugin-skills.test.ts b/src/agents/skills/plugin-skills.test.ts new file mode 100644 index 00000000000..4747d59bf5c --- /dev/null +++ b/src/agents/skills/plugin-skills.test.ts @@ -0,0 +1,103 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; +import type { PluginManifestRegistry } from "../../plugins/manifest-registry.js"; +import { createTrackedTempDirs } from "../../test-utils/tracked-temp-dirs.js"; + +const hoisted = vi.hoisted(() => ({ + loadPluginManifestRegistry: vi.fn(), +})); + +vi.mock("../../plugins/manifest-registry.js", () => ({ + loadPluginManifestRegistry: (...args: unknown[]) => hoisted.loadPluginManifestRegistry(...args), +})); + +const { resolvePluginSkillDirs } = await import("./plugin-skills.js"); + +const tempDirs = createTrackedTempDirs(); + +function buildRegistry(params: { acpxRoot: string; helperRoot: string }): PluginManifestRegistry { + return { + diagnostics: [], + plugins: [ + { + id: "acpx", + name: "ACPX Runtime", + channels: [], + providers: [], + skills: ["./skills"], + origin: "workspace", + rootDir: params.acpxRoot, + source: params.acpxRoot, + manifestPath: path.join(params.acpxRoot, "openclaw.plugin.json"), + }, + { + id: "helper", + name: "Helper", + channels: [], + providers: [], + skills: ["./skills"], + 
origin: "workspace", + rootDir: params.helperRoot, + source: params.helperRoot, + manifestPath: path.join(params.helperRoot, "openclaw.plugin.json"), + }, + ], + }; +} + +afterEach(async () => { + hoisted.loadPluginManifestRegistry.mockReset(); + await tempDirs.cleanup(); +}); + +describe("resolvePluginSkillDirs", () => { + it("keeps acpx plugin skills when ACP is enabled", async () => { + const workspaceDir = await tempDirs.make("openclaw-"); + const acpxRoot = await tempDirs.make("openclaw-acpx-plugin-"); + const helperRoot = await tempDirs.make("openclaw-helper-plugin-"); + await fs.mkdir(path.join(acpxRoot, "skills"), { recursive: true }); + await fs.mkdir(path.join(helperRoot, "skills"), { recursive: true }); + + hoisted.loadPluginManifestRegistry.mockReturnValue( + buildRegistry({ + acpxRoot, + helperRoot, + }), + ); + + const dirs = resolvePluginSkillDirs({ + workspaceDir, + config: { + acp: { enabled: true }, + } as OpenClawConfig, + }); + + expect(dirs).toEqual([path.resolve(acpxRoot, "skills"), path.resolve(helperRoot, "skills")]); + }); + + it("skips acpx plugin skills when ACP is disabled", async () => { + const workspaceDir = await tempDirs.make("openclaw-"); + const acpxRoot = await tempDirs.make("openclaw-acpx-plugin-"); + const helperRoot = await tempDirs.make("openclaw-helper-plugin-"); + await fs.mkdir(path.join(acpxRoot, "skills"), { recursive: true }); + await fs.mkdir(path.join(helperRoot, "skills"), { recursive: true }); + + hoisted.loadPluginManifestRegistry.mockReturnValue( + buildRegistry({ + acpxRoot, + helperRoot, + }), + ); + + const dirs = resolvePluginSkillDirs({ + workspaceDir, + config: { + acp: { enabled: false }, + } as OpenClawConfig, + }); + + expect(dirs).toEqual([path.resolve(helperRoot, "skills")]); + }); +}); diff --git a/src/agents/skills/plugin-skills.ts b/src/agents/skills/plugin-skills.ts index 90c8711cd74..594bfcdabb3 100644 --- a/src/agents/skills/plugin-skills.ts +++ b/src/agents/skills/plugin-skills.ts @@ -27,6 +27,7 
@@ export function resolvePluginSkillDirs(params: { return []; } const normalizedPlugins = normalizePluginsConfig(params.config?.plugins); + const acpEnabled = params.config?.acp?.enabled !== false; const memorySlot = normalizedPlugins.slots.memory; let selectedMemoryPluginId: string | null = null; const seen = new Set(); @@ -45,6 +46,10 @@ export function resolvePluginSkillDirs(params: { if (!enableState.enabled) { continue; } + // ACP router skills should not be attached when ACP is explicitly disabled. + if (!acpEnabled && record.id === "acpx") { + continue; + } const memoryDecision = resolveMemorySlotDecision({ id: record.id, kind: record.kind, diff --git a/src/agents/subagent-announce-dispatch.test.ts b/src/agents/subagent-announce-dispatch.test.ts new file mode 100644 index 00000000000..fcc2f992e2b --- /dev/null +++ b/src/agents/subagent-announce-dispatch.test.ts @@ -0,0 +1,156 @@ +import { describe, expect, it, vi } from "vitest"; +import { + mapQueueOutcomeToDeliveryResult, + runSubagentAnnounceDispatch, +} from "./subagent-announce-dispatch.js"; + +describe("mapQueueOutcomeToDeliveryResult", () => { + it("maps steered to delivered", () => { + expect(mapQueueOutcomeToDeliveryResult("steered")).toEqual({ + delivered: true, + path: "steered", + }); + }); + + it("maps queued to delivered", () => { + expect(mapQueueOutcomeToDeliveryResult("queued")).toEqual({ + delivered: true, + path: "queued", + }); + }); + + it("maps none to not-delivered", () => { + expect(mapQueueOutcomeToDeliveryResult("none")).toEqual({ + delivered: false, + path: "none", + }); + }); +}); + +describe("runSubagentAnnounceDispatch", () => { + it("uses queue-first ordering for non-completion mode", async () => { + const queue = vi.fn(async () => "none" as const); + const direct = vi.fn(async () => ({ delivered: true, path: "direct" as const })); + + const result = await runSubagentAnnounceDispatch({ + expectsCompletionMessage: false, + queue, + direct, + }); + + 
expect(queue).toHaveBeenCalledTimes(1); + expect(direct).toHaveBeenCalledTimes(1); + expect(result.delivered).toBe(true); + expect(result.path).toBe("direct"); + expect(result.phases).toEqual([ + { phase: "queue-primary", delivered: false, path: "none", error: undefined }, + { phase: "direct-primary", delivered: true, path: "direct", error: undefined }, + ]); + }); + + it("short-circuits direct send when non-completion queue delivers", async () => { + const queue = vi.fn(async () => "queued" as const); + const direct = vi.fn(async () => ({ delivered: true, path: "direct" as const })); + + const result = await runSubagentAnnounceDispatch({ + expectsCompletionMessage: false, + queue, + direct, + }); + + expect(queue).toHaveBeenCalledTimes(1); + expect(direct).not.toHaveBeenCalled(); + expect(result.path).toBe("queued"); + expect(result.phases).toEqual([ + { phase: "queue-primary", delivered: true, path: "queued", error: undefined }, + ]); + }); + + it("uses direct-first ordering for completion mode", async () => { + const queue = vi.fn(async () => "queued" as const); + const direct = vi.fn(async () => ({ delivered: true, path: "direct" as const })); + + const result = await runSubagentAnnounceDispatch({ + expectsCompletionMessage: true, + queue, + direct, + }); + + expect(direct).toHaveBeenCalledTimes(1); + expect(queue).not.toHaveBeenCalled(); + expect(result.path).toBe("direct"); + expect(result.phases).toEqual([ + { phase: "direct-primary", delivered: true, path: "direct", error: undefined }, + ]); + }); + + it("falls back to queue when completion direct send fails", async () => { + const queue = vi.fn(async () => "steered" as const); + const direct = vi.fn(async () => ({ + delivered: false, + path: "direct" as const, + error: "network", + })); + + const result = await runSubagentAnnounceDispatch({ + expectsCompletionMessage: true, + queue, + direct, + }); + + expect(direct).toHaveBeenCalledTimes(1); + expect(queue).toHaveBeenCalledTimes(1); + 
expect(result.path).toBe("steered"); + expect(result.phases).toEqual([ + { phase: "direct-primary", delivered: false, path: "direct", error: "network" }, + { phase: "queue-fallback", delivered: true, path: "steered", error: undefined }, + ]); + }); + + it("returns direct failure when completion fallback queue cannot deliver", async () => { + const queue = vi.fn(async () => "none" as const); + const direct = vi.fn(async () => ({ + delivered: false, + path: "direct" as const, + error: "failed", + })); + + const result = await runSubagentAnnounceDispatch({ + expectsCompletionMessage: true, + queue, + direct, + }); + + expect(result).toMatchObject({ + delivered: false, + path: "direct", + error: "failed", + }); + expect(result.phases).toEqual([ + { phase: "direct-primary", delivered: false, path: "direct", error: "failed" }, + { phase: "queue-fallback", delivered: false, path: "none", error: undefined }, + ]); + }); + + it("returns none immediately when signal is already aborted", async () => { + const queue = vi.fn(async () => "none" as const); + const direct = vi.fn(async () => ({ delivered: true, path: "direct" as const })); + const controller = new AbortController(); + controller.abort(); + + const result = await runSubagentAnnounceDispatch({ + expectsCompletionMessage: true, + signal: controller.signal, + queue, + direct, + }); + + expect(queue).not.toHaveBeenCalled(); + expect(direct).not.toHaveBeenCalled(); + expect(result).toEqual({ + delivered: false, + path: "none", + phases: [], + }); + }); +}); diff --git a/src/agents/subagent-announce-dispatch.ts b/src/agents/subagent-announce-dispatch.ts new file mode 100644 index 00000000000..93aa0dd9092 --- /dev/null +++ b/src/agents/subagent-announce-dispatch.ts @@ -0,0 +1,104 @@ +export type SubagentDeliveryPath = "queued" | "steered" | "direct" | "none"; + +export type SubagentAnnounceQueueOutcome = "steered" | "queued" | "none"; + +export type SubagentAnnounceDeliveryResult = { + delivered: boolean; + path: 
SubagentDeliveryPath; + error?: string; + phases?: SubagentAnnounceDispatchPhaseResult[]; +}; + +export type SubagentAnnounceDispatchPhase = "queue-primary" | "direct-primary" | "queue-fallback"; + +export type SubagentAnnounceDispatchPhaseResult = { + phase: SubagentAnnounceDispatchPhase; + delivered: boolean; + path: SubagentDeliveryPath; + error?: string; +}; + +export function mapQueueOutcomeToDeliveryResult( + outcome: SubagentAnnounceQueueOutcome, +): SubagentAnnounceDeliveryResult { + if (outcome === "steered") { + return { + delivered: true, + path: "steered", + }; + } + if (outcome === "queued") { + return { + delivered: true, + path: "queued", + }; + } + return { + delivered: false, + path: "none", + }; +} + +export async function runSubagentAnnounceDispatch(params: { + expectsCompletionMessage: boolean; + signal?: AbortSignal; + queue: () => Promise; + direct: () => Promise; +}): Promise { + const phases: SubagentAnnounceDispatchPhaseResult[] = []; + const appendPhase = ( + phase: SubagentAnnounceDispatchPhase, + result: SubagentAnnounceDeliveryResult, + ) => { + phases.push({ + phase, + delivered: result.delivered, + path: result.path, + error: result.error, + }); + }; + const withPhases = (result: SubagentAnnounceDeliveryResult): SubagentAnnounceDeliveryResult => ({ + ...result, + phases, + }); + + if (params.signal?.aborted) { + return withPhases({ + delivered: false, + path: "none", + }); + } + + if (!params.expectsCompletionMessage) { + const primaryQueue = mapQueueOutcomeToDeliveryResult(await params.queue()); + appendPhase("queue-primary", primaryQueue); + if (primaryQueue.delivered) { + return withPhases(primaryQueue); + } + + const primaryDirect = await params.direct(); + appendPhase("direct-primary", primaryDirect); + return withPhases(primaryDirect); + } + + const primaryDirect = await params.direct(); + appendPhase("direct-primary", primaryDirect); + if (primaryDirect.delivered) { + return withPhases(primaryDirect); + } + + if 
(params.signal?.aborted) { + return withPhases({ + delivered: false, + path: "none", + }); + } + + const fallbackQueue = mapQueueOutcomeToDeliveryResult(await params.queue()); + appendPhase("queue-fallback", fallbackQueue); + if (fallbackQueue.delivered) { + return withPhases(fallbackQueue); + } + + return withPhases(primaryDirect); +} diff --git a/src/agents/subagent-announce.format.test.ts b/src/agents/subagent-announce.format.test.ts index 91f4b0d6752..712d1d204b9 100644 --- a/src/agents/subagent-announce.format.test.ts +++ b/src/agents/subagent-announce.format.test.ts @@ -435,6 +435,23 @@ describe("subagent announce formatting", () => { expect(sessionsDeleteSpy).toHaveBeenCalledTimes(1); }); + it("suppresses completion delivery when subagent reply is NO_REPLY", async () => { + const didAnnounce = await runSubagentAnnounceFlow({ + childSessionKey: "agent:main:subagent:test", + childRunId: "run-direct-completion-no-reply", + requesterSessionKey: "agent:main:main", + requesterDisplayKey: "main", + requesterOrigin: { channel: "slack", to: "channel:C123", accountId: "acct-1" }, + ...defaultOutcomeAnnounce, + expectsCompletionMessage: true, + roundOneReply: " NO_REPLY ", + }); + + expect(didAnnounce).toBe(true); + expect(sendSpy).not.toHaveBeenCalled(); + expect(agentSpy).not.toHaveBeenCalled(); + }); + it("retries completion direct send on transient channel-unavailable errors", async () => { sendSpy .mockRejectedValueOnce(new Error("Error: No active WhatsApp Web listener (account: default)")) @@ -825,6 +842,47 @@ describe("subagent announce formatting", () => { } }); + it("routes manual completion direct-send for telegram forum topics", async () => { + sendSpy.mockClear(); + agentSpy.mockClear(); + sessionStore = { + "agent:main:subagent:test": { + sessionId: "child-session-telegram-topic", + }, + "agent:main:main": { + sessionId: "requester-session-telegram-topic", + lastChannel: "telegram", + lastTo: "123:topic:999", + lastThreadId: 999, + }, + }; + 
chatHistoryMock.mockResolvedValueOnce({ + messages: [{ role: "assistant", content: [{ type: "text", text: "done" }] }], + }); + + const didAnnounce = await runSubagentAnnounceFlow({ + childSessionKey: "agent:main:subagent:test", + childRunId: "run-direct-telegram-topic", + requesterSessionKey: "agent:main:main", + requesterDisplayKey: "main", + requesterOrigin: { + channel: "telegram", + to: "123", + threadId: 42, + }, + ...defaultOutcomeAnnounce, + expectsCompletionMessage: true, + }); + + expect(didAnnounce).toBe(true); + expect(sendSpy).toHaveBeenCalledTimes(1); + expect(agentSpy).not.toHaveBeenCalled(); + const call = sendSpy.mock.calls[0]?.[0] as { params?: Record }; + expect(call?.params?.channel).toBe("telegram"); + expect(call?.params?.to).toBe("123"); + expect(call?.params?.threadId).toBe("42"); + }); + it("uses hook-provided thread target across requester thread variants", async () => { const cases = [ { diff --git a/src/agents/subagent-announce.ts b/src/agents/subagent-announce.ts index 7d7fd7ceb48..32cf49cc2db 100644 --- a/src/agents/subagent-announce.ts +++ b/src/agents/subagent-announce.ts @@ -1,5 +1,5 @@ import { resolveQueueSettings } from "../auto-reply/reply/queue.js"; -import { SILENT_REPLY_TOKEN } from "../auto-reply/tokens.js"; +import { isSilentReplyText, SILENT_REPLY_TOKEN } from "../auto-reply/tokens.js"; import { DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH } from "../config/agent-limits.js"; import { loadConfig } from "../config/config.js"; import { @@ -32,6 +32,10 @@ import { queueEmbeddedPiMessage, waitForEmbeddedPiRunEnd, } from "./pi-embedded.js"; +import { + runSubagentAnnounceDispatch, + type SubagentAnnounceDeliveryResult, +} from "./subagent-announce-dispatch.js"; import { type AnnounceQueueItem, enqueueAnnounce } from "./subagent-announce-queue.js"; import { getSubagentDepthFromSessionStore } from "./subagent-depth.js"; import type { SpawnSubagentMode } from "./subagent-spawn.js"; @@ -53,14 +57,6 @@ type ToolResultMessage = { content?: 
unknown; }; -type SubagentDeliveryPath = "queued" | "steered" | "direct" | "none"; - -type SubagentAnnounceDeliveryResult = { - delivered: boolean; - path: SubagentDeliveryPath; - error?: string; -}; - function resolveSubagentAnnounceTimeoutMs(cfg: ReturnType): number { const configured = cfg.agents?.defaults?.subagents?.announceTimeoutMs; if (typeof configured !== "number" || !Number.isFinite(configured)) { @@ -705,27 +701,6 @@ async function maybeQueueSubagentAnnounce(params: { return "none"; } -function queueOutcomeToDeliveryResult( - outcome: "steered" | "queued" | "none", -): SubagentAnnounceDeliveryResult { - if (outcome === "steered") { - return { - delivered: true, - path: "steered", - }; - } - if (outcome === "queued") { - return { - delivered: true, - path: "queued", - }; - } - return { - delivered: false, - path: "none", - }; -} - async function sendSubagentAnnounceDirectly(params: { targetRequesterSessionKey: string; triggerMessage: string; @@ -905,64 +880,34 @@ async function deliverSubagentAnnouncement(params: { directIdempotencyKey: string; signal?: AbortSignal; }): Promise { - if (params.signal?.aborted) { - return { - delivered: false, - path: "none", - }; - } - // Non-completion mode mirrors historical behavior: try queued/steered delivery first, - // then (only if not queued) attempt direct delivery. - if (!params.expectsCompletionMessage) { - const queueOutcome = await maybeQueueSubagentAnnounce({ - requesterSessionKey: params.requesterSessionKey, - announceId: params.announceId, - triggerMessage: params.triggerMessage, - summaryLine: params.summaryLine, - requesterOrigin: params.requesterOrigin, - signal: params.signal, - }); - const queued = queueOutcomeToDeliveryResult(queueOutcome); - if (queued.delivered) { - return queued; - } - } - - // Completion-mode uses direct send first so manual spawns can return immediately - // in the common ready-to-deliver case. 
- const direct = await sendSubagentAnnounceDirectly({ - targetRequesterSessionKey: params.targetRequesterSessionKey, - triggerMessage: params.triggerMessage, - completionMessage: params.completionMessage, - directIdempotencyKey: params.directIdempotencyKey, - completionDirectOrigin: params.completionDirectOrigin, - completionRouteMode: params.completionRouteMode, - spawnMode: params.spawnMode, - directOrigin: params.directOrigin, - requesterIsSubagent: params.requesterIsSubagent, + return await runSubagentAnnounceDispatch({ expectsCompletionMessage: params.expectsCompletionMessage, signal: params.signal, - bestEffortDeliver: params.bestEffortDeliver, + queue: async () => + await maybeQueueSubagentAnnounce({ + requesterSessionKey: params.requesterSessionKey, + announceId: params.announceId, + triggerMessage: params.triggerMessage, + summaryLine: params.summaryLine, + requesterOrigin: params.requesterOrigin, + signal: params.signal, + }), + direct: async () => + await sendSubagentAnnounceDirectly({ + targetRequesterSessionKey: params.targetRequesterSessionKey, + triggerMessage: params.triggerMessage, + completionMessage: params.completionMessage, + directIdempotencyKey: params.directIdempotencyKey, + completionDirectOrigin: params.completionDirectOrigin, + completionRouteMode: params.completionRouteMode, + spawnMode: params.spawnMode, + directOrigin: params.directOrigin, + requesterIsSubagent: params.requesterIsSubagent, + expectsCompletionMessage: params.expectsCompletionMessage, + signal: params.signal, + bestEffortDeliver: params.bestEffortDeliver, + }), }); - if (direct.delivered || !params.expectsCompletionMessage) { - return direct; - } - - // If completion path failed direct delivery, try queueing as a fallback so the - // report can still be delivered once the requester session is idle. 
- const queueOutcome = await maybeQueueSubagentAnnounce({ - requesterSessionKey: params.requesterSessionKey, - announceId: params.announceId, - triggerMessage: params.triggerMessage, - summaryLine: params.summaryLine, - requesterOrigin: params.requesterOrigin, - signal: params.signal, - }); - if (queueOutcome === "steered" || queueOutcome === "queued") { - return queueOutcomeToDeliveryResult(queueOutcome); - } - - return direct; } function loadSessionEntryByKey(sessionKey: string) { @@ -979,6 +924,8 @@ export function buildSubagentSystemPrompt(params: { childSessionKey: string; label?: string; task?: string; + /** Whether ACP-specific routing guidance should be included. Defaults to true. */ + acpEnabled?: boolean; /** Depth of the child being spawned (1 = sub-agent, 2 = sub-sub-agent). */ childDepth?: number; /** Config value: max allowed spawn depth. */ @@ -993,6 +940,7 @@ export function buildSubagentSystemPrompt(params: { typeof params.maxSpawnDepth === "number" ? params.maxSpawnDepth : DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH; + const acpEnabled = params.acpEnabled !== false; const canSpawn = childDepth < maxSpawnDepth; const parentLabel = childDepth >= 2 ? "parent orchestrator" : "main agent"; @@ -1038,6 +986,17 @@ export function buildSubagentSystemPrompt(params: { "Default workflow: spawn work, continue orchestrating, and wait for auto-announced completions.", "Do NOT repeatedly poll `subagents list` in a loop unless you are actively debugging or intervening.", "Coordinate their work and synthesize results before reporting back.", + ...(acpEnabled + ? 
[ + 'For ACP harness sessions (codex/claudecode/gemini), use `sessions_spawn` with `runtime: "acp"` (set `agentId` unless `acp.defaultAgent` is configured).', + '`agents_list` and `subagents` apply to OpenClaw sub-agents (`runtime: "subagent"`); ACP harness ids are controlled by `acp.allowedAgents`.', + "Do not ask users to run slash commands or CLI when `sessions_spawn` can do it directly.", + "Do not use `exec` (`openclaw ...`, `acpx ...`) to spawn ACP sessions.", + 'Use `subagents` only for OpenClaw subagents (`runtime: "subagent"`).', + "Subagent results auto-announce back to you; ACP sessions continue in their bound thread.", + "Avoid polling loops; spawn, orchestrate, and synthesize results.", + ] + : []), "", ); } else if (childDepth >= 2) { @@ -1202,6 +1161,9 @@ export async function runSubagentAnnounceFlow(params: { if (isAnnounceSkip(reply)) { return true; } + if (isSilentReplyText(reply, SILENT_REPLY_TOKEN)) { + return true; + } if (!outcome) { outcome = { status: "unknown" }; diff --git a/src/agents/subagent-registry.announce-loop-guard.test.ts b/src/agents/subagent-registry.announce-loop-guard.test.ts index 8389c53503c..498b38aaedc 100644 --- a/src/agents/subagent-registry.announce-loop-guard.test.ts +++ b/src/agents/subagent-registry.announce-loop-guard.test.ts @@ -155,4 +155,43 @@ describe("announce loop guard (#18264)", () => { const stored = runs.find((run) => run.runId === entry.runId); expect(stored?.cleanupCompletedAt).toBeDefined(); }); + + test("announce rejection resets cleanupHandled so retries can resume", async () => { + announceFn.mockReset(); + announceFn.mockRejectedValueOnce(new Error("announce failed")); + registry.resetSubagentRegistryForTests(); + + const now = Date.now(); + const runId = "test-announce-rejection"; + loadSubagentRegistryFromDisk.mockReturnValue( + new Map([ + [ + runId, + { + runId, + childSessionKey: "agent:main:subagent:child-1", + requesterSessionKey: "agent:main:main", + requesterDisplayKey: "agent:main:main", + 
task: "rejection test", + cleanup: "keep" as const, + createdAt: now - 30_000, + startedAt: now - 20_000, + endedAt: now - 10_000, + cleanupHandled: false, + }, + ], + ]), + ); + + registry.initSubagentRegistry(); + await Promise.resolve(); + await Promise.resolve(); + + const runs = registry.listSubagentRunsForRequester("agent:main:main"); + const stored = runs.find((run) => run.runId === runId); + expect(stored?.cleanupHandled).toBe(false); + expect(stored?.cleanupCompletedAt).toBeUndefined(); + expect(stored?.announceRetryCount).toBe(1); + expect(stored?.lastAnnounceRetryAt).toBeTypeOf("number"); + }); }); diff --git a/src/agents/subagent-registry.lifecycle-retry-grace.test.ts b/src/agents/subagent-registry.lifecycle-retry-grace.test.ts new file mode 100644 index 00000000000..7f919c4fd49 --- /dev/null +++ b/src/agents/subagent-registry.lifecycle-retry-grace.test.ts @@ -0,0 +1,157 @@ +import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; + +const noop = () => {}; + +let lifecycleHandler: + | ((evt: { + stream?: string; + runId: string; + data?: { + phase?: string; + startedAt?: number; + endedAt?: number; + aborted?: boolean; + error?: string; + }; + }) => void) + | undefined; + +vi.mock("../gateway/call.js", () => ({ + callGateway: vi.fn(async (request: unknown) => { + const method = (request as { method?: string }).method; + if (method === "agent.wait") { + // Keep wait unresolved from the RPC path so lifecycle fallback logic is exercised. 
+ return { status: "pending" }; + } + return {}; + }), +})); + +vi.mock("../infra/agent-events.js", () => ({ + onAgentEvent: vi.fn((handler: typeof lifecycleHandler) => { + lifecycleHandler = handler; + return noop; + }), +})); + +vi.mock("../config/config.js", () => ({ + loadConfig: vi.fn(() => ({ + agents: { defaults: { subagents: { archiveAfterMinutes: 0 } } }, + })), +})); + +const announceSpy = vi.fn(async () => true); +vi.mock("./subagent-announce.js", () => ({ + runSubagentAnnounceFlow: announceSpy, +})); + +vi.mock("../plugins/hook-runner-global.js", () => ({ + getGlobalHookRunner: vi.fn(() => null), +})); + +vi.mock("./subagent-registry.store.js", () => ({ + loadSubagentRegistryFromDisk: vi.fn(() => new Map()), + saveSubagentRegistryToDisk: vi.fn(() => {}), +})); + +describe("subagent registry lifecycle error grace", () => { + let mod: typeof import("./subagent-registry.js"); + + beforeAll(async () => { + mod = await import("./subagent-registry.js"); + }); + + beforeEach(() => { + vi.useFakeTimers(); + }); + + afterEach(() => { + announceSpy.mockClear(); + lifecycleHandler = undefined; + mod.resetSubagentRegistryForTests({ persist: false }); + vi.useRealTimers(); + }); + + const flushAsync = async () => { + await Promise.resolve(); + await Promise.resolve(); + }; + + it("ignores transient lifecycle errors when run retries and then ends successfully", async () => { + mod.registerSubagentRun({ + runId: "run-transient-error", + childSessionKey: "agent:main:subagent:transient-error", + requesterSessionKey: "agent:main:main", + requesterDisplayKey: "main", + task: "transient error test", + cleanup: "keep", + expectsCompletionMessage: true, + }); + + lifecycleHandler?.({ + stream: "lifecycle", + runId: "run-transient-error", + data: { phase: "error", error: "rate limit", endedAt: 1_000 }, + }); + await flushAsync(); + expect(announceSpy).not.toHaveBeenCalled(); + + await vi.advanceTimersByTimeAsync(14_999); + expect(announceSpy).not.toHaveBeenCalled(); + + 
lifecycleHandler?.({ + stream: "lifecycle", + runId: "run-transient-error", + data: { phase: "start", startedAt: 1_050 }, + }); + await flushAsync(); + + await vi.advanceTimersByTimeAsync(20_000); + expect(announceSpy).not.toHaveBeenCalled(); + + lifecycleHandler?.({ + stream: "lifecycle", + runId: "run-transient-error", + data: { phase: "end", endedAt: 1_250 }, + }); + await flushAsync(); + + expect(announceSpy).toHaveBeenCalledTimes(1); + const announceCalls = announceSpy.mock.calls as unknown as Array>; + const first = (announceCalls[0]?.[0] ?? {}) as { + outcome?: { status?: string; error?: string }; + }; + expect(first.outcome?.status).toBe("ok"); + }); + + it("announces error when lifecycle error remains terminal after grace window", async () => { + mod.registerSubagentRun({ + runId: "run-terminal-error", + childSessionKey: "agent:main:subagent:terminal-error", + requesterSessionKey: "agent:main:main", + requesterDisplayKey: "main", + task: "terminal error test", + cleanup: "keep", + expectsCompletionMessage: true, + }); + + lifecycleHandler?.({ + stream: "lifecycle", + runId: "run-terminal-error", + data: { phase: "error", error: "fatal failure", endedAt: 2_000 }, + }); + await flushAsync(); + expect(announceSpy).not.toHaveBeenCalled(); + + await vi.advanceTimersByTimeAsync(15_000); + await flushAsync(); + + expect(announceSpy).toHaveBeenCalledTimes(1); + const announceCalls = announceSpy.mock.calls as unknown as Array>; + const first = (announceCalls[0]?.[0] ?? 
{}) as { + outcome?: { status?: string; error?: string }; + }; + expect(first.outcome?.status).toBe("error"); + expect(first.outcome?.error).toBe("fatal failure"); + }); +}); diff --git a/src/agents/subagent-registry.ts b/src/agents/subagent-registry.ts index edb8f228b07..10a6416f4ce 100644 --- a/src/agents/subagent-registry.ts +++ b/src/agents/subagent-registry.ts @@ -66,6 +66,12 @@ const MAX_ANNOUNCE_RETRY_COUNT = 3; */ const ANNOUNCE_EXPIRY_MS = 5 * 60_000; // 5 minutes type SubagentRunOrphanReason = "missing-session-entry" | "missing-session-id"; +/** + * Embedded runs can emit transient lifecycle `error` events while provider/model + * retry is still in progress. Defer terminal error cleanup briefly so a + * subsequent lifecycle `start` / `end` can cancel premature failure announces. + */ +const LIFECYCLE_ERROR_RETRY_GRACE_MS = 15_000; function resolveAnnounceRetryDelayMs(retryCount: number) { const boundedRetryCount = Math.max(0, Math.min(retryCount, 10)); @@ -204,6 +210,66 @@ function reconcileOrphanedRestoredRuns() { const resumedRuns = new Set(); const endedHookInFlightRunIds = new Set(); +const pendingLifecycleErrorByRunId = new Map< + string, + { + timer: NodeJS.Timeout; + endedAt: number; + error?: string; + } +>(); + +function clearPendingLifecycleError(runId: string) { + const pending = pendingLifecycleErrorByRunId.get(runId); + if (!pending) { + return; + } + clearTimeout(pending.timer); + pendingLifecycleErrorByRunId.delete(runId); +} + +function clearAllPendingLifecycleErrors() { + for (const pending of pendingLifecycleErrorByRunId.values()) { + clearTimeout(pending.timer); + } + pendingLifecycleErrorByRunId.clear(); +} + +function schedulePendingLifecycleError(params: { runId: string; endedAt: number; error?: string }) { + clearPendingLifecycleError(params.runId); + const timer = setTimeout(() => { + const pending = pendingLifecycleErrorByRunId.get(params.runId); + if (!pending || pending.timer !== timer) { + return; + } + 
pendingLifecycleErrorByRunId.delete(params.runId); + const entry = subagentRuns.get(params.runId); + if (!entry) { + return; + } + if (entry.endedReason === SUBAGENT_ENDED_REASON_COMPLETE || entry.outcome?.status === "ok") { + return; + } + void completeSubagentRun({ + runId: params.runId, + endedAt: pending.endedAt, + outcome: { + status: "error", + error: pending.error, + }, + reason: SUBAGENT_ENDED_REASON_ERROR, + sendFarewell: true, + accountId: entry.requesterOrigin?.accountId, + triggerCleanup: true, + }); + }, LIFECYCLE_ERROR_RETRY_GRACE_MS); + timer.unref?.(); + pendingLifecycleErrorByRunId.set(params.runId, { + timer, + endedAt: params.endedAt, + error: params.error, + }); +} function suppressAnnounceForSteerRestart(entry?: SubagentRunRecord) { return entry?.suppressAnnounceReason === "steer-restart"; @@ -256,6 +322,7 @@ async function completeSubagentRun(params: { accountId?: string; triggerCleanup: boolean; }) { + clearPendingLifecycleError(params.runId); const entry = subagentRuns.get(params.runId); if (!entry) { return; @@ -331,9 +398,16 @@ function startSubagentAnnounceCleanupFlow(runId: string, entry: SubagentRunRecor outcome: entry.outcome, spawnMode: entry.spawnMode, expectsCompletionMessage: entry.expectsCompletionMessage, - }).then((didAnnounce) => { - void finalizeSubagentCleanup(runId, entry.cleanup, didAnnounce); - }); + }) + .then((didAnnounce) => { + void finalizeSubagentCleanup(runId, entry.cleanup, didAnnounce); + }) + .catch((error) => { + defaultRuntime.log( + `[warn] Subagent announce flow failed during cleanup for run ${runId}: ${String(error)}`, + ); + void finalizeSubagentCleanup(runId, entry.cleanup, false); + }); return true; } @@ -484,6 +558,7 @@ async function sweepSubagentRuns() { if (!entry.archiveAtMs || entry.archiveAtMs > now) { continue; } + clearPendingLifecycleError(runId); subagentRuns.delete(runId); mutated = true; try { @@ -524,6 +599,7 @@ function ensureListener() { } const phase = evt.data?.phase; if (phase === 
"start") { + clearPendingLifecycleError(evt.runId); const startedAt = typeof evt.data?.startedAt === "number" ? evt.data.startedAt : undefined; if (startedAt) { entry.startedAt = startedAt; @@ -536,17 +612,23 @@ function ensureListener() { } const endedAt = typeof evt.data?.endedAt === "number" ? evt.data.endedAt : Date.now(); const error = typeof evt.data?.error === "string" ? evt.data.error : undefined; - const outcome: SubagentRunOutcome = - phase === "error" - ? { status: "error", error } - : evt.data?.aborted - ? { status: "timeout" } - : { status: "ok" }; + if (phase === "error") { + schedulePendingLifecycleError({ + runId: evt.runId, + endedAt, + error, + }); + return; + } + clearPendingLifecycleError(evt.runId); + const outcome: SubagentRunOutcome = evt.data?.aborted + ? { status: "timeout" } + : { status: "ok" }; await completeSubagentRun({ runId: evt.runId, endedAt, outcome, - reason: phase === "error" ? SUBAGENT_ENDED_REASON_ERROR : SUBAGENT_ENDED_REASON_COMPLETE, + reason: SUBAGENT_ENDED_REASON_COMPLETE, sendFarewell: true, accountId: entry.requesterOrigin?.accountId, triggerCleanup: true, @@ -654,6 +736,7 @@ function completeCleanupBookkeeping(params: { completedAt: number; }) { if (params.cleanup === "delete") { + clearPendingLifecycleError(params.runId); subagentRuns.delete(params.runId); persistSubagentRuns(); retryDeferredCompletedAnnounces(params.runId); @@ -767,6 +850,7 @@ export function replaceSubagentRunAfterSteer(params: { } if (previousRunId !== nextRunId) { + clearPendingLifecycleError(previousRunId); subagentRuns.delete(previousRunId); resumedRuns.delete(previousRunId); } @@ -928,6 +1012,7 @@ export function resetSubagentRegistryForTests(opts?: { persist?: boolean }) { subagentRuns.clear(); resumedRuns.clear(); endedHookInFlightRunIds.clear(); + clearAllPendingLifecycleErrors(); resetAnnounceQueuesForTests(); stopSweeper(); restoreAttempted = false; @@ -946,6 +1031,7 @@ export function addSubagentRunForTests(entry: SubagentRunRecord) { } 
export function releaseSubagentRun(runId: string) { + clearPendingLifecycleError(runId); const didDelete = subagentRuns.delete(runId); if (didDelete) { persistSubagentRuns(); @@ -1013,6 +1099,7 @@ export function markSubagentRunTerminated(params: { let updated = 0; const entriesByChildSessionKey = new Map(); for (const runId of runIds) { + clearPendingLifecycleError(runId); const entry = subagentRuns.get(runId); if (!entry) { continue; diff --git a/src/agents/subagent-spawn.ts b/src/agents/subagent-spawn.ts index 7d4f672f2f1..9624d09aece 100644 --- a/src/agents/subagent-spawn.ts +++ b/src/agents/subagent-spawn.ts @@ -4,7 +4,11 @@ import { DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH } from "../config/agent-limits.js"; import { loadConfig } from "../config/config.js"; import { callGateway } from "../gateway/call.js"; import { getGlobalHookRunner } from "../plugins/hook-runner-global.js"; -import { normalizeAgentId, parseAgentSessionKey } from "../routing/session-key.js"; +import { + isCronSessionKey, + normalizeAgentId, + parseAgentSessionKey, +} from "../routing/session-key.js"; import { normalizeDeliveryContext } from "../utils/delivery-context.js"; import { resolveAgentConfig } from "./agent-scope.js"; import { AGENT_LANE_SUBAGENT } from "./lanes.js"; @@ -385,6 +389,7 @@ export async function spawnSubagentDirect( childSessionKey, label: label || undefined, task, + acpEnabled: cfg.acp?.enabled !== false, childDepth, maxSpawnDepth, }); @@ -523,13 +528,23 @@ export async function spawnSubagentDirect( } } + // Check if we're in a cron isolated session - don't add "do not poll" note + // because cron sessions end immediately after the agent produces a response, + // so the agent needs to wait for subagent results to keep the turn alive. + const isCronSession = isCronSessionKey(ctx.agentSessionKey); + const note = + spawnMode === "session" + ? SUBAGENT_SPAWN_SESSION_ACCEPTED_NOTE + : isCronSession + ? 
undefined + : SUBAGENT_SPAWN_ACCEPTED_NOTE; + return { status: "accepted", childSessionKey, runId: childRunId, mode: spawnMode, - note: - spawnMode === "session" ? SUBAGENT_SPAWN_SESSION_ACCEPTED_NOTE : SUBAGENT_SPAWN_ACCEPTED_NOTE, + note, modelApplied: resolvedModel ? modelApplied : undefined, }; } diff --git a/src/agents/system-prompt.test.ts b/src/agents/system-prompt.test.ts index b45c64e72ec..01cdfb2cc3a 100644 --- a/src/agents/system-prompt.test.ts +++ b/src/agents/system-prompt.test.ts @@ -221,6 +221,9 @@ describe("buildAgentSystemPrompt", () => { ); expect(prompt).toContain("Completion is push-based: it will auto-announce when done."); expect(prompt).toContain("Do not poll `subagents list` / `sessions_list` in a loop"); + expect(prompt).toContain( + "When a first-class tool exists for an action, use the tool directly instead of asking the user to run equivalent CLI or slash commands.", + ); }); it("lists available tools when provided", () => { @@ -235,6 +238,52 @@ describe("buildAgentSystemPrompt", () => { expect(prompt).toContain("sessions_send"); }); + it("documents ACP sessions_spawn agent targeting requirements", () => { + const prompt = buildAgentSystemPrompt({ + workspaceDir: "/tmp/openclaw", + toolNames: ["sessions_spawn"], + }); + + expect(prompt).toContain("sessions_spawn"); + expect(prompt).toContain( + 'runtime="acp" requires `agentId` unless `acp.defaultAgent` is configured', + ); + expect(prompt).toContain("not agents_list"); + }); + + it("guides harness requests to ACP thread-bound spawns", () => { + const prompt = buildAgentSystemPrompt({ + workspaceDir: "/tmp/openclaw", + toolNames: ["sessions_spawn", "subagents", "agents_list", "exec"], + }); + + expect(prompt).toContain( + 'For requests like "do this in codex/claude code/gemini", treat it as ACP harness intent', + ); + expect(prompt).toContain( + 'On Discord, default ACP harness requests to thread-bound persistent sessions (`thread: true`, `mode: "session"`)', + ); + 
expect(prompt).toContain( + "do not route ACP harness requests through `subagents`/`agents_list` or local PTY exec flows", + ); + }); + + it("omits ACP harness guidance when ACP is disabled", () => { + const prompt = buildAgentSystemPrompt({ + workspaceDir: "/tmp/openclaw", + toolNames: ["sessions_spawn", "subagents", "agents_list", "exec"], + acpEnabled: false, + }); + + expect(prompt).not.toContain( + 'For requests like "do this in codex/claude code/gemini", treat it as ACP harness intent', + ); + expect(prompt).not.toContain('runtime="acp" requires `agentId`'); + expect(prompt).not.toContain("not ACP harness ids"); + expect(prompt).toContain("- sessions_spawn: Spawn an isolated sub-agent session"); + expect(prompt).toContain("- agents_list: List OpenClaw agent ids allowed for sessions_spawn"); + }); + it("preserves tool casing in the prompt", () => { const prompt = buildAgentSystemPrompt({ workspaceDir: "/tmp/openclaw", @@ -599,11 +648,18 @@ describe("buildSubagentSystemPrompt", () => { }); expect(prompt).toContain("## Sub-Agent Spawning"); - expect(prompt).toContain("You CAN spawn your own sub-agents"); + expect(prompt).toContain( + "You CAN spawn your own sub-agents for parallel or complex work using `sessions_spawn`.", + ); expect(prompt).toContain("sessions_spawn"); - expect(prompt).toContain("`subagents` tool"); - expect(prompt).toContain("announce their results back to you automatically"); - expect(prompt).toContain("Do NOT repeatedly poll `subagents list`"); + expect(prompt).toContain('runtime: "acp"'); + expect(prompt).toContain("For ACP harness sessions (codex/claudecode/gemini)"); + expect(prompt).toContain("set `agentId` unless `acp.defaultAgent` is configured"); + expect(prompt).toContain("Do not ask users to run slash commands or CLI"); + expect(prompt).toContain("Do not use `exec` (`openclaw ...`, `acpx ...`)"); + expect(prompt).toContain("Use `subagents` only for OpenClaw subagents"); + expect(prompt).toContain("Subagent results auto-announce back 
to you"); + expect(prompt).toContain("Avoid polling loops"); expect(prompt).toContain("spawned by the main agent"); expect(prompt).toContain("reported to the main agent"); expect(prompt).toContain("[compacted: tool output removed to free context]"); @@ -612,6 +668,21 @@ describe("buildSubagentSystemPrompt", () => { expect(prompt).toContain("instead of full-file `cat`"); }); + it("omits ACP spawning guidance when ACP is disabled", () => { + const prompt = buildSubagentSystemPrompt({ + childSessionKey: "agent:main:subagent:abc", + task: "research task", + childDepth: 1, + maxSpawnDepth: 2, + acpEnabled: false, + }); + + expect(prompt).not.toContain('runtime: "acp"'); + expect(prompt).not.toContain("For ACP harness sessions (codex/claudecode/gemini)"); + expect(prompt).not.toContain("set `agentId` unless `acp.defaultAgent` is configured"); + expect(prompt).toContain("You CAN spawn your own sub-agents"); + }); + it("renders depth-2 leaf guidance with parent orchestrator labels", () => { const prompt = buildSubagentSystemPrompt({ childSessionKey: "agent:main:subagent:abc:subagent:def", diff --git a/src/agents/system-prompt.ts b/src/agents/system-prompt.ts index d052daf5f7d..3b3453be6f7 100644 --- a/src/agents/system-prompt.ts +++ b/src/agents/system-prompt.ts @@ -209,6 +209,8 @@ export function buildAgentSystemPrompt(params: { ttsHint?: string; /** Controls which hardcoded sections to include. Defaults to "full". */ promptMode?: PromptMode; + /** Whether ACP-specific routing guidance should be included. Defaults to true. 
*/ + acpEnabled?: boolean; runtimeInfo?: { agentId?: string; host?: string; @@ -231,6 +233,7 @@ export function buildAgentSystemPrompt(params: { }; memoryCitationsMode?: MemoryCitationsMode; }) { + const acpEnabled = params.acpEnabled !== false; const coreToolSummaries: Record = { read: "Read file contents", write: "Create or overwrite files", @@ -250,11 +253,15 @@ export function buildAgentSystemPrompt(params: { cron: "Manage cron jobs and wake events (use for reminders; when scheduling a reminder, write the systemEvent text as something that will read like a reminder when it fires, and mention that it is a reminder depending on the time gap between setting and firing; include recent context in reminder text if appropriate)", message: "Send messages and channel actions", gateway: "Restart, apply config, or run updates on the running OpenClaw process", - agents_list: "List agent ids allowed for sessions_spawn", + agents_list: acpEnabled + ? 'List OpenClaw agent ids allowed for sessions_spawn when runtime="subagent" (not ACP harness ids)' + : "List OpenClaw agent ids allowed for sessions_spawn", sessions_list: "List other sessions (incl. sub-agents) with filters/last", sessions_history: "Fetch history for another session/sub-agent", sessions_send: "Send a message to another session/sub-agent", - sessions_spawn: "Spawn a sub-agent session", + sessions_spawn: acpEnabled + ? 
'Spawn an isolated sub-agent or ACP coding session (runtime="acp" requires `agentId` unless `acp.defaultAgent` is configured; ACP harness ids follow acp.allowedAgents, not agents_list)' + : "Spawn an isolated sub-agent session", subagents: "List, steer, or kill sub-agent runs for this requester session", session_status: "Show a /status-equivalent status card (usage + time + Reasoning/Verbose/Elevated); use for model-use questions (📊 session_status); optional per-session model override", @@ -303,6 +310,7 @@ export function buildAgentSystemPrompt(params: { const normalizedTools = canonicalToolNames.map((tool) => tool.toLowerCase()); const availableTools = new Set(normalizedTools); + const hasSessionsSpawn = availableTools.has("sessions_spawn"); const externalToolSummaries = new Map(); for (const [key, value] of Object.entries(params.toolSummaries ?? {})) { const normalized = key.trim().toLowerCase(); @@ -436,6 +444,13 @@ export function buildAgentSystemPrompt(params: { "TOOLS.md does not control tool availability; it is user guidance for how to use external tools.", `For long waits, avoid rapid poll loops: use ${execToolName} with enough yieldMs or ${processToolName}(action=poll, timeout=).`, "If a task is more complex or takes longer, spawn a sub-agent. Completion is push-based: it will auto-announce when done.", + ...(hasSessionsSpawn && acpEnabled + ? 
[ + 'For requests like "do this in codex/claude code/gemini", treat it as ACP harness intent and call `sessions_spawn` with `runtime: "acp"`.', + 'On Discord, default ACP harness requests to thread-bound persistent sessions (`thread: true`, `mode: "session"`) unless the user asks otherwise.', + "Set `agentId` explicitly unless `acp.defaultAgent` is configured, and do not route ACP harness requests through `subagents`/`agents_list` or local PTY exec flows.", + ] + : []), "Do not poll `subagents list` / `sessions_list` in a loop; only check status on-demand (for intervention, debugging, or when explicitly asked).", "", "## Tool Call Style", @@ -443,6 +458,7 @@ export function buildAgentSystemPrompt(params: { "Narrate only when it helps: multi-step work, complex/challenging problems, sensitive actions (e.g., deletions), or when the user explicitly asks.", "Keep narration brief and value-dense; avoid repeating obvious steps.", "Use plain human language for narration unless in a technical context.", + "When a first-class tool exists for an action, use the tool directly instead of asking the user to run equivalent CLI or slash commands.", "", ...safetySection, "## OpenClaw CLI Quick Reference", @@ -462,6 +478,7 @@ export function buildAgentSystemPrompt(params: { ? 
[ "Get Updates (self-update) is ONLY allowed when the user explicitly asks for it.", "Do not run config.apply or update.run unless the user explicitly requests an update or config change; if it's not explicit, ask first.", + "Use config.schema to fetch the current JSON Schema (includes plugins/channels) before making config changes or answering config-field questions; avoid guessing field names/types.", "Actions: config.get, config.schema, config.apply (validate + write full config, then restart), update.run (update deps or git, then restart).", "After restart, OpenClaw pings the last active session automatically.", ].join("\n") diff --git a/src/agents/tools/agents-list-tool.ts b/src/agents/tools/agents-list-tool.ts index 277ac990647..879ad96de06 100644 --- a/src/agents/tools/agents-list-tool.ts +++ b/src/agents/tools/agents-list-tool.ts @@ -26,7 +26,8 @@ export function createAgentsListTool(opts?: { return { label: "Agents", name: "agents_list", - description: "List agent ids you can target with sessions_spawn (based on allowlists).", + description: + 'List OpenClaw agent ids you can target with `sessions_spawn` when `runtime="subagent"` (based on subagent allowlists).', parameters: AgentsListToolSchema, execute: async () => { const cfg = loadConfig(); diff --git a/src/agents/tools/browser-tool.ts b/src/agents/tools/browser-tool.ts index b99adb4bfff..03138c3d54e 100644 --- a/src/agents/tools/browser-tool.ts +++ b/src/agents/tools/browser-tool.ts @@ -29,7 +29,12 @@ import { wrapExternalContent } from "../../security/external-content.js"; import { BrowserToolSchema } from "./browser-tool.schema.js"; import { type AnyAgentTool, imageResultFromFile, jsonResult, readStringParam } from "./common.js"; import { callGatewayTool } from "./gateway.js"; -import { listNodes, resolveNodeIdFromList, type NodeListNode } from "./nodes-utils.js"; +import { + listNodes, + resolveNodeIdFromList, + selectDefaultNodeFromList, + type NodeListNode, +} from "./nodes-utils.js"; function 
wrapBrowserExternalJson(params: { kind: "snapshot" | "console" | "tabs"; @@ -143,10 +148,17 @@ async function resolveBrowserNodeTarget(params: { return { nodeId, label: node?.displayName ?? node?.remoteIp ?? nodeId }; } + const selected = selectDefaultNodeFromList(browserNodes, { + preferLocalMac: false, + fallback: "none", + }); + if (params.target === "node") { - if (browserNodes.length === 1) { - const node = browserNodes[0]; - return { nodeId: node.nodeId, label: node.displayName ?? node.remoteIp ?? node.nodeId }; + if (selected) { + return { + nodeId: selected.nodeId, + label: selected.displayName ?? selected.remoteIp ?? selected.nodeId, + }; } throw new Error( `Multiple browser-capable nodes connected (${browserNodes.length}). Set gateway.nodes.browser.node or pass node=.`, @@ -157,9 +169,11 @@ async function resolveBrowserNodeTarget(params: { return null; } - if (browserNodes.length === 1) { - const node = browserNodes[0]; - return { nodeId: node.nodeId, label: node.displayName ?? node.remoteIp ?? node.nodeId }; + if (selected) { + return { + nodeId: selected.nodeId, + label: selected.displayName ?? selected.remoteIp ?? 
selected.nodeId, + }; } return null; } diff --git a/src/agents/tools/nodes-tool.ts b/src/agents/tools/nodes-tool.ts index c17ff9f9c48..6b18e7d9782 100644 --- a/src/agents/tools/nodes-tool.ts +++ b/src/agents/tools/nodes-tool.ts @@ -18,7 +18,9 @@ import { } from "../../cli/nodes-screen.js"; import { parseDurationMs } from "../../cli/parse-duration.js"; import type { OpenClawConfig } from "../../config/config.js"; +import { formatExecCommand } from "../../infra/system-run-command.js"; import { imageMimeFromFormat } from "../../media/mime.js"; +import type { GatewayMessageChannel } from "../../utils/message-channel.js"; import { resolveSessionAgentId } from "../agent-scope.js"; import { resolveImageSanitizationLimits } from "../image-sanitization.js"; import { optionalStringEnum, stringEnum } from "../schema/typebox.js"; @@ -39,6 +41,7 @@ const NODES_TOOL_ACTIONS = [ "camera_clip", "screen_record", "location_get", + "notifications_list", "run", "invoke", ] as const; @@ -47,6 +50,23 @@ const NOTIFY_PRIORITIES = ["passive", "active", "timeSensitive"] as const; const NOTIFY_DELIVERIES = ["system", "overlay", "auto"] as const; const CAMERA_FACING = ["front", "back", "both"] as const; const LOCATION_ACCURACY = ["coarse", "balanced", "precise"] as const; +type GatewayCallOptions = ReturnType; + +async function invokeNodeCommandPayload(params: { + gatewayOpts: GatewayCallOptions; + node: string; + command: string; + commandParams?: Record; +}): Promise { + const nodeId = await resolveNodeId(params.gatewayOpts, params.node); + const raw = await callGatewayTool<{ payload: unknown }>("node.invoke", params.gatewayOpts, { + nodeId, + command: params.command, + params: params.commandParams ?? {}, + idempotencyKey: crypto.randomUUID(), + }); + return raw?.payload ?? 
{}; +} function isPairingRequiredMessage(message: string): boolean { const lower = message.toLowerCase(); @@ -109,9 +129,17 @@ const NodesToolSchema = Type.Object({ export function createNodesTool(options?: { agentSessionKey?: string; + agentChannel?: GatewayMessageChannel; + agentAccountId?: string; + currentChannelId?: string; + currentThreadTs?: string | number; config?: OpenClawConfig; }): AnyAgentTool { const sessionKey = options?.agentSessionKey?.trim() || undefined; + const turnSourceChannel = options?.agentChannel?.trim() || undefined; + const turnSourceTo = options?.currentChannelId?.trim() || undefined; + const turnSourceAccountId = options?.agentAccountId?.trim() || undefined; + const turnSourceThreadId = options?.currentThreadTs; const agentId = resolveSessionAgentId({ sessionKey: options?.agentSessionKey, config: options?.config, @@ -121,7 +149,7 @@ export function createNodesTool(options?: { label: "Nodes", name: "nodes", description: - "Discover and control paired nodes (status/describe/pairing/notify/camera/screen/location/run/invoke).", + "Discover and control paired nodes (status/describe/pairing/notify/camera/screen/location/notifications/run/invoke).", parameters: NodesToolSchema, execute: async (_toolCallId, args) => { const params = args as Record; @@ -185,7 +213,7 @@ export function createNodesTool(options?: { const node = readStringParam(params, "node", { required: true }); const nodeId = await resolveNodeId(gatewayOpts, node); const facingRaw = - typeof params.facing === "string" ? params.facing.toLowerCase() : "both"; + typeof params.facing === "string" ? params.facing.toLowerCase() : "front"; const facings: CameraFacing[] = facingRaw === "both" ? ["front", "back"] @@ -197,11 +225,11 @@ export function createNodesTool(options?: { const maxWidth = typeof params.maxWidth === "number" && Number.isFinite(params.maxWidth) ? 
params.maxWidth - : undefined; + : 1600; const quality = typeof params.quality === "number" && Number.isFinite(params.quality) ? params.quality - : undefined; + : 0.95; const delayMs = typeof params.delayMs === "number" && Number.isFinite(params.delayMs) ? params.delayMs @@ -271,15 +299,13 @@ export function createNodesTool(options?: { } case "camera_list": { const node = readStringParam(params, "node", { required: true }); - const nodeId = await resolveNodeId(gatewayOpts, node); - const raw = await callGatewayTool<{ payload: unknown }>("node.invoke", gatewayOpts, { - nodeId, + const payloadRaw = await invokeNodeCommandPayload({ + gatewayOpts, + node, command: "camera.list", - params: {}, - idempotencyKey: crypto.randomUUID(), }); const payload = - raw && typeof raw.payload === "object" && raw.payload !== null ? raw.payload : {}; + payloadRaw && typeof payloadRaw === "object" && payloadRaw !== null ? payloadRaw : {}; return jsonResult(payload); } case "camera_clip": { @@ -377,7 +403,6 @@ export function createNodesTool(options?: { } case "location_get": { const node = readStringParam(params, "node", { required: true }); - const nodeId = await resolveNodeId(gatewayOpts, node); const maxAgeMs = typeof params.maxAgeMs === "number" && Number.isFinite(params.maxAgeMs) ? params.maxAgeMs @@ -393,17 +418,26 @@ export function createNodesTool(options?: { Number.isFinite(params.locationTimeoutMs) ? params.locationTimeoutMs : undefined; - const raw = await callGatewayTool<{ payload: unknown }>("node.invoke", gatewayOpts, { - nodeId, + const payload = await invokeNodeCommandPayload({ + gatewayOpts, + node, command: "location.get", - params: { + commandParams: { maxAgeMs, desiredAccuracy, timeoutMs: locationTimeoutMs, }, - idempotencyKey: crypto.randomUUID(), }); - return jsonResult(raw?.payload ?? 
{}); + return jsonResult(payload); + } + case "notifications_list": { + const node = readStringParam(params, "node", { required: true }); + const payload = await invokeNodeCommandPayload({ + gatewayOpts, + node, + command: "notifications.list", + }); + return jsonResult(payload); } case "run": { const node = readStringParam(params, "node", { required: true }); @@ -473,7 +507,7 @@ export function createNodesTool(options?: { // Node requires approval – create a pending approval request on // the gateway and wait for the user to approve/deny via the UI. const APPROVAL_TIMEOUT_MS = 120_000; - const cmdText = command.join(" "); + const cmdText = formatExecCommand(command); const approvalId = crypto.randomUUID(); const approvalResult = await callGatewayTool( "exec.approval.request", @@ -481,11 +515,16 @@ export function createNodesTool(options?: { { id: approvalId, command: cmdText, + commandArgv: command, cwd, nodeId, host: "node", agentId, sessionKey, + turnSourceChannel, + turnSourceTo, + turnSourceAccountId, + turnSourceThreadId, timeoutMs: APPROVAL_TIMEOUT_MS, }, ); diff --git a/src/agents/tools/nodes-utils.test.ts b/src/agents/tools/nodes-utils.test.ts new file mode 100644 index 00000000000..f81e188c9e2 --- /dev/null +++ b/src/agents/tools/nodes-utils.test.ts @@ -0,0 +1,85 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const gatewayMocks = vi.hoisted(() => ({ + callGatewayTool: vi.fn(), +})); +vi.mock("./gateway.js", () => ({ + callGatewayTool: (...args: unknown[]) => gatewayMocks.callGatewayTool(...args), +})); + +import type { NodeListNode } from "./nodes-utils.js"; +import { listNodes, resolveNodeIdFromList } from "./nodes-utils.js"; + +function node({ nodeId, ...overrides }: Partial & { nodeId: string }): NodeListNode { + return { + nodeId, + caps: ["canvas"], + connected: true, + ...overrides, + }; +} + +beforeEach(() => { + gatewayMocks.callGatewayTool.mockReset(); +}); + +describe("resolveNodeIdFromList defaults", () => { + it("falls 
back to most recently connected node when multiple non-Mac candidates exist", () => { + const nodes: NodeListNode[] = [ + node({ nodeId: "ios-1", platform: "ios", connectedAtMs: 1 }), + node({ nodeId: "android-1", platform: "android", connectedAtMs: 2 }), + ]; + + expect(resolveNodeIdFromList(nodes, undefined, true)).toBe("android-1"); + }); + + it("preserves local Mac preference when exactly one local Mac candidate exists", () => { + const nodes: NodeListNode[] = [ + node({ nodeId: "ios-1", platform: "ios" }), + node({ nodeId: "mac-1", platform: "macos" }), + ]; + + expect(resolveNodeIdFromList(nodes, undefined, true)).toBe("mac-1"); + }); + + it("uses stable nodeId ordering when connectedAtMs is unavailable", () => { + const nodes: NodeListNode[] = [ + node({ nodeId: "z-node", platform: "ios", connectedAtMs: undefined }), + node({ nodeId: "a-node", platform: "android", connectedAtMs: undefined }), + ]; + + expect(resolveNodeIdFromList(nodes, undefined, true)).toBe("a-node"); + }); +}); + +describe("listNodes", () => { + it("falls back to node.pair.list only when node.list is unavailable", async () => { + gatewayMocks.callGatewayTool + .mockRejectedValueOnce(new Error("unknown method: node.list")) + .mockResolvedValueOnce({ + pending: [], + paired: [{ nodeId: "pair-1", displayName: "Pair 1", platform: "ios", remoteIp: "1.2.3.4" }], + }); + + await expect(listNodes({})).resolves.toEqual([ + { + nodeId: "pair-1", + displayName: "Pair 1", + platform: "ios", + remoteIp: "1.2.3.4", + }, + ]); + expect(gatewayMocks.callGatewayTool).toHaveBeenNthCalledWith(1, "node.list", {}, {}); + expect(gatewayMocks.callGatewayTool).toHaveBeenNthCalledWith(2, "node.pair.list", {}, {}); + }); + + it("rethrows unexpected node.list failures without fallback", async () => { + gatewayMocks.callGatewayTool.mockRejectedValueOnce( + new Error("gateway closed (1008): unauthorized"), + ); + + await expect(listNodes({})).rejects.toThrow("gateway closed (1008): unauthorized"); + 
expect(gatewayMocks.callGatewayTool).toHaveBeenCalledTimes(1); + expect(gatewayMocks.callGatewayTool).toHaveBeenCalledWith("node.list", {}, {}); + }); +}); diff --git a/src/agents/tools/nodes-utils.ts b/src/agents/tools/nodes-utils.ts index 6350294eb55..e4d6e4280ae 100644 --- a/src/agents/tools/nodes-utils.ts +++ b/src/agents/tools/nodes-utils.ts @@ -5,11 +5,60 @@ import { callGatewayTool, type GatewayCallOptions } from "./gateway.js"; export type { NodeListNode }; +type DefaultNodeFallback = "none" | "first"; + +type DefaultNodeSelectionOptions = { + capability?: string; + fallback?: DefaultNodeFallback; + preferLocalMac?: boolean; +}; + +function messageFromError(error: unknown): string { + if (error instanceof Error) { + return error.message; + } + if (typeof error === "string") { + return error; + } + if ( + typeof error === "object" && + error !== null && + "message" in error && + typeof (error as { message?: unknown }).message === "string" + ) { + return (error as { message: string }).message; + } + if (typeof error === "object" && error !== null) { + try { + return JSON.stringify(error); + } catch { + return ""; + } + } + return ""; +} + +function shouldFallbackToPairList(error: unknown): boolean { + const message = messageFromError(error).toLowerCase(); + if (!message.includes("node.list")) { + return false; + } + return ( + message.includes("unknown method") || + message.includes("method not found") || + message.includes("not implemented") || + message.includes("unsupported") + ); +} + async function loadNodes(opts: GatewayCallOptions): Promise { try { const res = await callGatewayTool("node.list", opts, {}); return parseNodeList(res); - } catch { + } catch (error) { + if (!shouldFallbackToPairList(error)) { + throw error; + } const res = await callGatewayTool("node.pair.list", opts, {}); const { paired } = parsePairingList(res); return paired.map((n) => ({ @@ -21,31 +70,67 @@ async function loadNodes(opts: GatewayCallOptions): Promise { } } -function 
pickDefaultNode(nodes: NodeListNode[]): NodeListNode | null { - const withCanvas = nodes.filter((n) => - Array.isArray(n.caps) ? n.caps.includes("canvas") : true, +function isLocalMacNode(node: NodeListNode): boolean { + return ( + node.platform?.toLowerCase().startsWith("mac") === true && + typeof node.nodeId === "string" && + node.nodeId.startsWith("mac-") ); - if (withCanvas.length === 0) { +} + +function compareDefaultNodeOrder(a: NodeListNode, b: NodeListNode): number { + const aConnectedAt = Number.isFinite(a.connectedAtMs) ? (a.connectedAtMs ?? 0) : -1; + const bConnectedAt = Number.isFinite(b.connectedAtMs) ? (b.connectedAtMs ?? 0) : -1; + if (aConnectedAt !== bConnectedAt) { + return bConnectedAt - aConnectedAt; + } + return a.nodeId.localeCompare(b.nodeId); +} + +export function selectDefaultNodeFromList( + nodes: NodeListNode[], + options: DefaultNodeSelectionOptions = {}, +): NodeListNode | null { + const capability = options.capability?.trim(); + const withCapability = capability + ? nodes.filter((n) => (Array.isArray(n.caps) ? n.caps.includes(capability) : true)) + : nodes; + if (withCapability.length === 0) { return null; } - const connected = withCanvas.filter((n) => n.connected); - const candidates = connected.length > 0 ? connected : withCanvas; + const connected = withCapability.filter((n) => n.connected); + const candidates = connected.length > 0 ? connected : withCapability; if (candidates.length === 1) { return candidates[0]; } - const local = candidates.filter( - (n) => - n.platform?.toLowerCase().startsWith("mac") && - typeof n.nodeId === "string" && - n.nodeId.startsWith("mac-"), - ); - if (local.length === 1) { - return local[0]; + const preferLocalMac = options.preferLocalMac ?? true; + if (preferLocalMac) { + const local = candidates.filter(isLocalMacNode); + if (local.length === 1) { + return local[0]; + } } - return null; + const fallback = options.fallback ?? 
"none"; + if (fallback === "none") { + return null; + } + + const ordered = [...candidates].toSorted(compareDefaultNodeOrder); + // Multiple candidates — pick the first connected canvas-capable node. + // For A2UI and other canvas operations, any node works since multi-node + // setups broadcast surfaces across devices. + return ordered[0] ?? null; +} + +function pickDefaultNode(nodes: NodeListNode[]): NodeListNode | null { + return selectDefaultNodeFromList(nodes, { + capability: "canvas", + fallback: "first", + preferLocalMac: true, + }); } export async function listNodes(opts: GatewayCallOptions): Promise { diff --git a/src/agents/tools/sessions-spawn-tool.test.ts b/src/agents/tools/sessions-spawn-tool.test.ts new file mode 100644 index 00000000000..c18f5bb8682 --- /dev/null +++ b/src/agents/tools/sessions-spawn-tool.test.ts @@ -0,0 +1,118 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const hoisted = vi.hoisted(() => { + const spawnSubagentDirectMock = vi.fn(); + const spawnAcpDirectMock = vi.fn(); + return { + spawnSubagentDirectMock, + spawnAcpDirectMock, + }; +}); + +vi.mock("../subagent-spawn.js", () => ({ + SUBAGENT_SPAWN_MODES: ["run", "session"], + spawnSubagentDirect: (...args: unknown[]) => hoisted.spawnSubagentDirectMock(...args), +})); + +vi.mock("../acp-spawn.js", () => ({ + ACP_SPAWN_MODES: ["run", "session"], + spawnAcpDirect: (...args: unknown[]) => hoisted.spawnAcpDirectMock(...args), +})); + +const { createSessionsSpawnTool } = await import("./sessions-spawn-tool.js"); + +describe("sessions_spawn tool", () => { + beforeEach(() => { + hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({ + status: "accepted", + childSessionKey: "agent:main:subagent:1", + runId: "run-subagent", + }); + hoisted.spawnAcpDirectMock.mockReset().mockResolvedValue({ + status: "accepted", + childSessionKey: "agent:codex:acp:1", + runId: "run-acp", + }); + }); + + it("uses subagent runtime by default", async () => { + const tool = 
createSessionsSpawnTool({ + agentSessionKey: "agent:main:main", + agentChannel: "discord", + agentAccountId: "default", + agentTo: "channel:123", + agentThreadId: "456", + }); + + const result = await tool.execute("call-1", { + task: "build feature", + agentId: "main", + model: "anthropic/claude-sonnet-4-6", + thinking: "medium", + runTimeoutSeconds: 5, + thread: true, + mode: "session", + cleanup: "keep", + }); + + expect(result.details).toMatchObject({ + status: "accepted", + childSessionKey: "agent:main:subagent:1", + runId: "run-subagent", + }); + expect(hoisted.spawnSubagentDirectMock).toHaveBeenCalledWith( + expect.objectContaining({ + task: "build feature", + agentId: "main", + model: "anthropic/claude-sonnet-4-6", + thinking: "medium", + runTimeoutSeconds: 5, + thread: true, + mode: "session", + cleanup: "keep", + }), + expect.objectContaining({ + agentSessionKey: "agent:main:main", + }), + ); + expect(hoisted.spawnAcpDirectMock).not.toHaveBeenCalled(); + }); + + it("routes to ACP runtime when runtime=acp", async () => { + const tool = createSessionsSpawnTool({ + agentSessionKey: "agent:main:main", + agentChannel: "discord", + agentAccountId: "default", + agentTo: "channel:123", + agentThreadId: "456", + }); + + const result = await tool.execute("call-2", { + runtime: "acp", + task: "investigate the failing CI run", + agentId: "codex", + cwd: "/workspace", + thread: true, + mode: "session", + }); + + expect(result.details).toMatchObject({ + status: "accepted", + childSessionKey: "agent:codex:acp:1", + runId: "run-acp", + }); + expect(hoisted.spawnAcpDirectMock).toHaveBeenCalledWith( + expect.objectContaining({ + task: "investigate the failing CI run", + agentId: "codex", + cwd: "/workspace", + thread: true, + mode: "session", + }), + expect.objectContaining({ + agentSessionKey: "agent:main:main", + }), + ); + expect(hoisted.spawnSubagentDirectMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/agents/tools/sessions-spawn-tool.ts 
b/src/agents/tools/sessions-spawn-tool.ts index 9102d24847d..e8f23f75660 100644 --- a/src/agents/tools/sessions-spawn-tool.ts +++ b/src/agents/tools/sessions-spawn-tool.ts @@ -1,16 +1,21 @@ import { Type } from "@sinclair/typebox"; import type { GatewayMessageChannel } from "../../utils/message-channel.js"; +import { ACP_SPAWN_MODES, spawnAcpDirect } from "../acp-spawn.js"; import { optionalStringEnum } from "../schema/typebox.js"; import { SUBAGENT_SPAWN_MODES, spawnSubagentDirect } from "../subagent-spawn.js"; import type { AnyAgentTool } from "./common.js"; import { jsonResult, readStringParam } from "./common.js"; +const SESSIONS_SPAWN_RUNTIMES = ["subagent", "acp"] as const; + const SessionsSpawnToolSchema = Type.Object({ task: Type.String(), label: Type.Optional(Type.String()), + runtime: optionalStringEnum(SESSIONS_SPAWN_RUNTIMES), agentId: Type.Optional(Type.String()), model: Type.Optional(Type.String()), thinking: Type.Optional(Type.String()), + cwd: Type.Optional(Type.String()), runTimeoutSeconds: Type.Optional(Type.Number({ minimum: 0 })), // Back-compat: older callers used timeoutSeconds for this tool. timeoutSeconds: Type.Optional(Type.Number({ minimum: 0 })), @@ -36,15 +41,17 @@ export function createSessionsSpawnTool(opts?: { label: "Sessions", name: "sessions_spawn", description: - 'Spawn a sub-agent in an isolated session (mode="run" one-shot or mode="session" persistent) and route results back to the requester chat/thread.', + 'Spawn an isolated session (runtime="subagent" or runtime="acp"). mode="run" is one-shot and mode="session" is persistent/thread-bound.', parameters: SessionsSpawnToolSchema, execute: async (_toolCallId, args) => { const params = args as Record; const task = readStringParam(params, "task", { required: true }); const label = typeof params.label === "string" ? params.label.trim() : ""; + const runtime = params.runtime === "acp" ? 
"acp" : "subagent"; const requestedAgentId = readStringParam(params, "agentId"); const modelOverride = readStringParam(params, "model"); const thinkingOverrideRaw = readStringParam(params, "thinking"); + const cwd = readStringParam(params, "cwd"); const mode = params.mode === "run" || params.mode === "session" ? params.mode : undefined; const cleanup = params.cleanup === "keep" || params.cleanup === "delete" ? params.cleanup : "keep"; @@ -61,31 +68,50 @@ export function createSessionsSpawnTool(opts?: { : undefined; const thread = params.thread === true; - const result = await spawnSubagentDirect( - { - task, - label: label || undefined, - agentId: requestedAgentId, - model: modelOverride, - thinking: thinkingOverrideRaw, - runTimeoutSeconds, - thread, - mode, - cleanup, - expectsCompletionMessage: true, - }, - { - agentSessionKey: opts?.agentSessionKey, - agentChannel: opts?.agentChannel, - agentAccountId: opts?.agentAccountId, - agentTo: opts?.agentTo, - agentThreadId: opts?.agentThreadId, - agentGroupId: opts?.agentGroupId, - agentGroupChannel: opts?.agentGroupChannel, - agentGroupSpace: opts?.agentGroupSpace, - requesterAgentIdOverride: opts?.requesterAgentIdOverride, - }, - ); + const result = + runtime === "acp" + ? await spawnAcpDirect( + { + task, + label: label || undefined, + agentId: requestedAgentId, + cwd, + mode: mode && ACP_SPAWN_MODES.includes(mode) ? 
mode : undefined, + thread, + }, + { + agentSessionKey: opts?.agentSessionKey, + agentChannel: opts?.agentChannel, + agentAccountId: opts?.agentAccountId, + agentTo: opts?.agentTo, + agentThreadId: opts?.agentThreadId, + }, + ) + : await spawnSubagentDirect( + { + task, + label: label || undefined, + agentId: requestedAgentId, + model: modelOverride, + thinking: thinkingOverrideRaw, + runTimeoutSeconds, + thread, + mode, + cleanup, + expectsCompletionMessage: true, + }, + { + agentSessionKey: opts?.agentSessionKey, + agentChannel: opts?.agentChannel, + agentAccountId: opts?.agentAccountId, + agentTo: opts?.agentTo, + agentThreadId: opts?.agentThreadId, + agentGroupId: opts?.agentGroupId, + agentGroupChannel: opts?.agentGroupChannel, + agentGroupSpace: opts?.agentGroupSpace, + requesterAgentIdOverride: opts?.requesterAgentIdOverride, + }, + ); return jsonResult(result); }, diff --git a/src/agents/tools/web-fetch.ts b/src/agents/tools/web-fetch.ts index 06f4ac1d973..4ac7a1d7bfd 100644 --- a/src/agents/tools/web-fetch.ts +++ b/src/agents/tools/web-fetch.ts @@ -1,6 +1,5 @@ import { Type } from "@sinclair/typebox"; import type { OpenClawConfig } from "../../config/config.js"; -import { fetchWithSsrFGuard } from "../../infra/net/fetch-guard.js"; import { SsrFBlockedError } from "../../infra/net/ssrf.js"; import { logDebug } from "../../logger.js"; import { wrapExternalContent, wrapWebContent } from "../../security/external-content.js"; @@ -15,6 +14,7 @@ import { truncateText, type ExtractMode, } from "./web-fetch-utils.js"; +import { fetchWithWebToolsNetworkGuard } from "./web-guarded-fetch.js"; import { CacheEntry, DEFAULT_CACHE_TTL_MINUTES, @@ -523,10 +523,10 @@ async function runWebFetch(params: WebFetchRuntimeParams): Promise Promise) | null = null; let finalUrl = params.url; try { - const result = await fetchWithSsrFGuard({ + const result = await fetchWithWebToolsNetworkGuard({ url: params.url, maxRedirects: params.maxRedirects, - timeoutMs: params.timeoutSeconds * 
1000, + timeoutSeconds: params.timeoutSeconds, init: { headers: { Accept: "text/markdown, text/html;q=0.9, */*;q=0.1", diff --git a/src/agents/tools/web-guarded-fetch.ts b/src/agents/tools/web-guarded-fetch.ts new file mode 100644 index 00000000000..02b69cd1f42 --- /dev/null +++ b/src/agents/tools/web-guarded-fetch.ts @@ -0,0 +1,50 @@ +import { + fetchWithSsrFGuard, + type GuardedFetchOptions, + type GuardedFetchResult, +} from "../../infra/net/fetch-guard.js"; +import type { SsrFPolicy } from "../../infra/net/ssrf.js"; + +export const WEB_TOOLS_TRUSTED_NETWORK_SSRF_POLICY: SsrFPolicy = { + dangerouslyAllowPrivateNetwork: true, +}; + +type WebToolGuardedFetchOptions = Omit & { + timeoutSeconds?: number; +}; + +function resolveTimeoutMs(params: { + timeoutMs?: number; + timeoutSeconds?: number; +}): number | undefined { + if (typeof params.timeoutMs === "number" && Number.isFinite(params.timeoutMs)) { + return params.timeoutMs; + } + if (typeof params.timeoutSeconds === "number" && Number.isFinite(params.timeoutSeconds)) { + return params.timeoutSeconds * 1000; + } + return undefined; +} + +export async function fetchWithWebToolsNetworkGuard( + params: WebToolGuardedFetchOptions, +): Promise { + const { timeoutSeconds, ...rest } = params; + return fetchWithSsrFGuard({ + ...rest, + timeoutMs: resolveTimeoutMs({ timeoutMs: rest.timeoutMs, timeoutSeconds }), + proxy: "env", + }); +} + +export async function withWebToolsNetworkGuard( + params: WebToolGuardedFetchOptions, + run: (result: { response: Response; finalUrl: string }) => Promise, +): Promise { + const { response, finalUrl, release } = await fetchWithWebToolsNetworkGuard(params); + try { + return await run({ response, finalUrl }); + } finally { + await release(); + } +} diff --git a/src/agents/tools/web-search.redirect.test.ts b/src/agents/tools/web-search.redirect.test.ts index b717c85e9a7..9b0758f26fa 100644 --- a/src/agents/tools/web-search.redirect.test.ts +++ b/src/agents/tools/web-search.redirect.test.ts 
@@ -33,6 +33,7 @@ describe("web_search redirect resolution hardening", () => { timeoutMs: 5000, init: { method: "HEAD" }, policy: { dangerouslyAllowPrivateNetwork: true }, + proxy: "env", }), ); expect(release).toHaveBeenCalledTimes(1); diff --git a/src/agents/tools/web-search.ts b/src/agents/tools/web-search.ts index 321f41e4d11..1608e9e8821 100644 --- a/src/agents/tools/web-search.ts +++ b/src/agents/tools/web-search.ts @@ -2,11 +2,14 @@ import { Type } from "@sinclair/typebox"; import { formatCliCommand } from "../../cli/command-format.js"; import type { OpenClawConfig } from "../../config/config.js"; import { logVerbose } from "../../globals.js"; -import { fetchWithSsrFGuard } from "../../infra/net/fetch-guard.js"; import { wrapWebContent } from "../../security/external-content.js"; import { normalizeSecretInput } from "../../utils/normalize-secret-input.js"; import type { AnyAgentTool } from "./common.js"; import { jsonResult, readNumberParam, readStringParam } from "./common.js"; +import { + WEB_TOOLS_TRUSTED_NETWORK_SSRF_POLICY, + withWebToolsNetworkGuard, +} from "./web-guarded-fetch.js"; import { CacheEntry, DEFAULT_CACHE_TTL_MINUTES, @@ -16,7 +19,6 @@ import { readResponseText, resolveCacheTtlMs, resolveTimeoutSeconds, - withTimeout, writeCache, } from "./web-shared.js"; @@ -45,7 +47,6 @@ const BRAVE_FRESHNESS_SHORTCUTS = new Set(["pd", "pw", "pm", "py"]); const BRAVE_FRESHNESS_RANGE = /^(\d{4}-\d{2}-\d{2})to(\d{4}-\d{2}-\d{2})$/; const BRAVE_SEARCH_LANG_CODE = /^[a-z]{2}$/i; const BRAVE_UI_LANG_LOCALE = /^([a-z]{2})-([a-z]{2})$/i; -const TRUSTED_NETWORK_SSRF_POLICY = { dangerouslyAllowPrivateNetwork: true } as const; const WebSearchSchema = Type.Object({ query: Type.String({ description: "Search query string." 
}), @@ -600,6 +601,25 @@ function resolveGeminiModel(gemini?: GeminiConfig): string { return fromConfig || DEFAULT_GEMINI_MODEL; } +async function withTrustedWebSearchEndpoint( + params: { + url: string; + timeoutSeconds: number; + init: RequestInit; + }, + run: (response: Response) => Promise, +): Promise { + return withWebToolsNetworkGuard( + { + url: params.url, + init: params.init, + timeoutSeconds: params.timeoutSeconds, + policy: WEB_TOOLS_TRUSTED_NETWORK_SSRF_POLICY, + }, + async ({ response }) => run(response), + ); +} + async function runGeminiSearch(params: { query: string; apiKey: string; @@ -608,75 +628,84 @@ async function runGeminiSearch(params: { }): Promise<{ content: string; citations: Array<{ url: string; title?: string }> }> { const endpoint = `${GEMINI_API_BASE}/models/${params.model}:generateContent`; - const res = await fetch(endpoint, { - method: "POST", - headers: { - "Content-Type": "application/json", - "x-goog-api-key": params.apiKey, - }, - body: JSON.stringify({ - contents: [ - { - parts: [{ text: params.query }], + return withTrustedWebSearchEndpoint( + { + url: endpoint, + timeoutSeconds: params.timeoutSeconds, + init: { + method: "POST", + headers: { + "Content-Type": "application/json", + "x-goog-api-key": params.apiKey, }, - ], - tools: [{ google_search: {} }], - }), - signal: withTimeout(undefined, params.timeoutSeconds * 1000), - }); + body: JSON.stringify({ + contents: [ + { + parts: [{ text: params.query }], + }, + ], + tools: [{ google_search: {} }], + }), + }, + }, + async (res) => { + if (!res.ok) { + const detailResult = await readResponseText(res, { maxBytes: 64_000 }); + // Strip API key from any error detail to prevent accidental key leakage in logs + const safeDetail = (detailResult.text || res.statusText).replace( + /key=[^&\s]+/gi, + "key=***", + ); + throw new Error(`Gemini API error (${res.status}): ${safeDetail}`); + } - if (!res.ok) { - const detailResult = await readResponseText(res, { maxBytes: 64_000 }); - // 
Strip API key from any error detail to prevent accidental key leakage in logs - const safeDetail = (detailResult.text || res.statusText).replace(/key=[^&\s]+/gi, "key=***"); - throw new Error(`Gemini API error (${res.status}): ${safeDetail}`); - } + let data: GeminiGroundingResponse; + try { + data = (await res.json()) as GeminiGroundingResponse; + } catch (err) { + const safeError = String(err).replace(/key=[^&\s]+/gi, "key=***"); + throw new Error(`Gemini API returned invalid JSON: ${safeError}`, { cause: err }); + } - let data: GeminiGroundingResponse; - try { - data = (await res.json()) as GeminiGroundingResponse; - } catch (err) { - const safeError = String(err).replace(/key=[^&\s]+/gi, "key=***"); - throw new Error(`Gemini API returned invalid JSON: ${safeError}`, { cause: err }); - } + if (data.error) { + const rawMsg = data.error.message || data.error.status || "unknown"; + const safeMsg = rawMsg.replace(/key=[^&\s]+/gi, "key=***"); + throw new Error(`Gemini API error (${data.error.code}): ${safeMsg}`); + } - if (data.error) { - const rawMsg = data.error.message || data.error.status || "unknown"; - const safeMsg = rawMsg.replace(/key=[^&\s]+/gi, "key=***"); - throw new Error(`Gemini API error (${data.error.code}): ${safeMsg}`); - } + const candidate = data.candidates?.[0]; + const content = + candidate?.content?.parts + ?.map((p) => p.text) + .filter(Boolean) + .join("\n") ?? "No response"; - const candidate = data.candidates?.[0]; - const content = - candidate?.content?.parts - ?.map((p) => p.text) - .filter(Boolean) - .join("\n") ?? "No response"; + const groundingChunks = candidate?.groundingMetadata?.groundingChunks ?? []; + const rawCitations = groundingChunks + .filter((chunk) => chunk.web?.uri) + .map((chunk) => ({ + url: chunk.web!.uri!, + title: chunk.web?.title || undefined, + })); - const groundingChunks = candidate?.groundingMetadata?.groundingChunks ?? 
[]; - const rawCitations = groundingChunks - .filter((chunk) => chunk.web?.uri) - .map((chunk) => ({ - url: chunk.web!.uri!, - title: chunk.web?.title || undefined, - })); + // Resolve Google grounding redirect URLs to direct URLs with concurrency cap. + // Gemini typically returns 3-8 citations; cap at 10 concurrent to be safe. + const MAX_CONCURRENT_REDIRECTS = 10; + const citations: Array<{ url: string; title?: string }> = []; + for (let i = 0; i < rawCitations.length; i += MAX_CONCURRENT_REDIRECTS) { + const batch = rawCitations.slice(i, i + MAX_CONCURRENT_REDIRECTS); + const resolved = await Promise.all( + batch.map(async (citation) => { + const resolvedUrl = await resolveRedirectUrl(citation.url); + return { ...citation, url: resolvedUrl }; + }), + ); + citations.push(...resolved); + } - // Resolve Google grounding redirect URLs to direct URLs with concurrency cap. - // Gemini typically returns 3-8 citations; cap at 10 concurrent to be safe. - const MAX_CONCURRENT_REDIRECTS = 10; - const citations: Array<{ url: string; title?: string }> = []; - for (let i = 0; i < rawCitations.length; i += MAX_CONCURRENT_REDIRECTS) { - const batch = rawCitations.slice(i, i + MAX_CONCURRENT_REDIRECTS); - const resolved = await Promise.all( - batch.map(async (citation) => { - const resolvedUrl = await resolveRedirectUrl(citation.url); - return { ...citation, url: resolvedUrl }; - }), - ); - citations.push(...resolved); - } - - return { content, citations }; + return { content, citations }; + }, + ); } const REDIRECT_TIMEOUT_MS = 5000; @@ -687,17 +716,15 @@ const REDIRECT_TIMEOUT_MS = 5000; */ async function resolveRedirectUrl(url: string): Promise { try { - const { finalUrl, release } = await fetchWithSsrFGuard({ - url, - init: { method: "HEAD" }, - timeoutMs: REDIRECT_TIMEOUT_MS, - policy: TRUSTED_NETWORK_SSRF_POLICY, - }); - try { - return finalUrl || url; - } finally { - await release(); - } + return await withWebToolsNetworkGuard( + { + url, + init: { method: "HEAD" }, + 
timeoutMs: REDIRECT_TIMEOUT_MS, + policy: WEB_TOOLS_TRUSTED_NETWORK_SSRF_POLICY, + }, + async ({ finalUrl }) => finalUrl || url, + ); } catch { return url; } @@ -871,27 +898,33 @@ async function runPerplexitySearch(params: { body.search_recency_filter = recencyFilter; } - const res = await fetch(endpoint, { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${params.apiKey}`, - "HTTP-Referer": "https://openclaw.ai", - "X-Title": "OpenClaw Web Search", + return withTrustedWebSearchEndpoint( + { + url: endpoint, + timeoutSeconds: params.timeoutSeconds, + init: { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${params.apiKey}`, + "HTTP-Referer": "https://openclaw.ai", + "X-Title": "OpenClaw Web Search", + }, + body: JSON.stringify(body), + }, }, - body: JSON.stringify(body), - signal: withTimeout(undefined, params.timeoutSeconds * 1000), - }); + async (res) => { + if (!res.ok) { + return await throwWebSearchApiError(res, "Perplexity"); + } - if (!res.ok) { - return throwWebSearchApiError(res, "Perplexity"); - } + const data = (await res.json()) as PerplexitySearchResponse; + const content = data.choices?.[0]?.message?.content ?? "No response"; + const citations = data.citations ?? []; - const data = (await res.json()) as PerplexitySearchResponse; - const content = data.choices?.[0]?.message?.content ?? "No response"; - const citations = data.citations ?? []; - - return { content, citations }; + return { content, citations }; + }, + ); } async function runGrokSearch(params: { @@ -921,28 +954,34 @@ async function runGrokSearch(params: { // citations are returned automatically when available — we just parse // them from the response without requesting them explicitly (#12910). 
- const res = await fetch(XAI_API_ENDPOINT, { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${params.apiKey}`, + return withTrustedWebSearchEndpoint( + { + url: XAI_API_ENDPOINT, + timeoutSeconds: params.timeoutSeconds, + init: { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${params.apiKey}`, + }, + body: JSON.stringify(body), + }, }, - body: JSON.stringify(body), - signal: withTimeout(undefined, params.timeoutSeconds * 1000), - }); + async (res) => { + if (!res.ok) { + return await throwWebSearchApiError(res, "xAI"); + } - if (!res.ok) { - return throwWebSearchApiError(res, "xAI"); - } + const data = (await res.json()) as GrokSearchResponse; + const { text: extractedText, annotationCitations } = extractGrokContent(data); + const content = extractedText ?? "No response"; + // Prefer top-level citations; fall back to annotation-derived ones + const citations = (data.citations ?? []).length > 0 ? data.citations! : annotationCitations; + const inlineCitations = data.inline_citations; - const data = (await res.json()) as GrokSearchResponse; - const { text: extractedText, annotationCitations } = extractGrokContent(data); - const content = extractedText ?? "No response"; - // Prefer top-level citations; fall back to annotation-derived ones - const citations = (data.citations ?? []).length > 0 ? data.citations! 
: annotationCitations; - const inlineCitations = data.inline_citations; - - return { content, citations, inlineCitations }; + return { content, citations, inlineCitations }; + }, + ); } function extractKimiMessageText(message: KimiMessage | undefined): string | undefined { @@ -1014,65 +1053,79 @@ async function runKimiSearch(params: { const MAX_ROUNDS = 3; for (let round = 0; round < MAX_ROUNDS; round += 1) { - const res = await fetch(endpoint, { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${params.apiKey}`, + const nextResult = await withTrustedWebSearchEndpoint( + { + url: endpoint, + timeoutSeconds: params.timeoutSeconds, + init: { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${params.apiKey}`, + }, + body: JSON.stringify({ + model: params.model, + messages, + tools: [KIMI_WEB_SEARCH_TOOL], + }), + }, }, - body: JSON.stringify({ - model: params.model, - messages, - tools: [KIMI_WEB_SEARCH_TOOL], - }), - signal: withTimeout(undefined, params.timeoutSeconds * 1000), - }); + async ( + res, + ): Promise<{ done: true; content: string; citations: string[] } | { done: false }> => { + if (!res.ok) { + return await throwWebSearchApiError(res, "Kimi"); + } - if (!res.ok) { - return throwWebSearchApiError(res, "Kimi"); - } + const data = (await res.json()) as KimiSearchResponse; + for (const citation of extractKimiCitations(data)) { + collectedCitations.add(citation); + } + const choice = data.choices?.[0]; + const message = choice?.message; + const text = extractKimiMessageText(message); + const toolCalls = message?.tool_calls ?? []; - const data = (await res.json()) as KimiSearchResponse; - for (const citation of extractKimiCitations(data)) { - collectedCitations.add(citation); - } - const choice = data.choices?.[0]; - const message = choice?.message; - const text = extractKimiMessageText(message); - const toolCalls = message?.tool_calls ?? 
[]; + if (choice?.finish_reason !== "tool_calls" || toolCalls.length === 0) { + return { done: true, content: text ?? "No response", citations: [...collectedCitations] }; + } - if (choice?.finish_reason !== "tool_calls" || toolCalls.length === 0) { - return { content: text ?? "No response", citations: [...collectedCitations] }; - } + messages.push({ + role: "assistant", + content: message?.content ?? "", + ...(message?.reasoning_content + ? { + reasoning_content: message.reasoning_content, + } + : {}), + tool_calls: toolCalls, + }); - messages.push({ - role: "assistant", - content: message?.content ?? "", - ...(message?.reasoning_content - ? { - reasoning_content: message.reasoning_content, + const toolContent = buildKimiToolResultContent(data); + let pushedToolResult = false; + for (const toolCall of toolCalls) { + const toolCallId = toolCall.id?.trim(); + if (!toolCallId) { + continue; } - : {}), - tool_calls: toolCalls, - }); + pushedToolResult = true; + messages.push({ + role: "tool", + tool_call_id: toolCallId, + content: toolContent, + }); + } - const toolContent = buildKimiToolResultContent(data); - let pushedToolResult = false; - for (const toolCall of toolCalls) { - const toolCallId = toolCall.id?.trim(); - if (!toolCallId) { - continue; - } - pushedToolResult = true; - messages.push({ - role: "tool", - tool_call_id: toolCallId, - content: toolContent, - }); - } + if (!pushedToolResult) { + return { done: true, content: text ?? "No response", citations: [...collectedCitations] }; + } - if (!pushedToolResult) { - return { content: text ?? 
"No response", citations: [...collectedCitations] }; + return { done: false }; + }, + ); + + if (nextResult.done) { + return { content: nextResult.content, citations: nextResult.citations }; } } @@ -1248,36 +1301,42 @@ async function runWebSearch(params: { url.searchParams.set("freshness", params.freshness); } - const res = await fetch(url.toString(), { - method: "GET", - headers: { - Accept: "application/json", - "X-Subscription-Token": params.apiKey, + const mapped = await withTrustedWebSearchEndpoint( + { + url: url.toString(), + timeoutSeconds: params.timeoutSeconds, + init: { + method: "GET", + headers: { + Accept: "application/json", + "X-Subscription-Token": params.apiKey, + }, + }, }, - signal: withTimeout(undefined, params.timeoutSeconds * 1000), - }); + async (res) => { + if (!res.ok) { + const detailResult = await readResponseText(res, { maxBytes: 64_000 }); + const detail = detailResult.text; + throw new Error(`Brave Search API error (${res.status}): ${detail || res.statusText}`); + } - if (!res.ok) { - const detailResult = await readResponseText(res, { maxBytes: 64_000 }); - const detail = detailResult.text; - throw new Error(`Brave Search API error (${res.status}): ${detail || res.statusText}`); - } - - const data = (await res.json()) as BraveSearchResponse; - const results = Array.isArray(data.web?.results) ? (data.web?.results ?? []) : []; - const mapped = results.map((entry) => { - const description = entry.description ?? ""; - const title = entry.title ?? ""; - const url = entry.url ?? ""; - const rawSiteName = resolveSiteName(url); - return { - title: title ? wrapWebContent(title, "web_search") : "", - url, // Keep raw for tool chaining - description: description ? wrapWebContent(description, "web_search") : "", - published: entry.age || undefined, - siteName: rawSiteName || undefined, - }; - }); + const data = (await res.json()) as BraveSearchResponse; + const results = Array.isArray(data.web?.results) ? (data.web?.results ?? 
[]) : []; + return results.map((entry) => { + const description = entry.description ?? ""; + const title = entry.title ?? ""; + const url = entry.url ?? ""; + const rawSiteName = resolveSiteName(url); + return { + title: title ? wrapWebContent(title, "web_search") : "", + url, // Keep raw for tool chaining + description: description ? wrapWebContent(description, "web_search") : "", + published: entry.age || undefined, + siteName: rawSiteName || undefined, + }; + }); + }, + ); const payload = { query: params.query, diff --git a/src/agents/tools/web-tools.enabled-defaults.test.ts b/src/agents/tools/web-tools.enabled-defaults.test.ts index b129581f5a0..e255570bec0 100644 --- a/src/agents/tools/web-tools.enabled-defaults.test.ts +++ b/src/agents/tools/web-tools.enabled-defaults.test.ts @@ -1,3 +1,4 @@ +import { EnvHttpProxyAgent } from "undici"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { withFetchPreconnect } from "../../test-utils/fetch-mock.js"; import { createWebFetchTool, createWebSearchTool } from "./web-tools.js"; @@ -45,6 +46,29 @@ function createKimiSearchTool(kimiConfig?: { apiKey?: string; baseUrl?: string; }); } +function createProviderSearchTool(provider: "brave" | "perplexity" | "grok" | "gemini" | "kimi") { + const searchConfig = + provider === "perplexity" + ? { provider, perplexity: { apiKey: "pplx-config-test" } } + : provider === "grok" + ? { provider, grok: { apiKey: "xai-config-test" } } + : provider === "gemini" + ? { provider, gemini: { apiKey: "gemini-config-test" } } + : provider === "kimi" + ? 
{ provider, kimi: { apiKey: "moonshot-config-test" } } + : { provider, apiKey: "brave-config-test" }; + return createWebSearchTool({ + config: { + tools: { + web: { + search: searchConfig, + }, + }, + }, + sandboxed: true, + }); +} + function parseFirstRequestBody(mockFetch: ReturnType) { const request = mockFetch.mock.calls[0]?.[1] as RequestInit | undefined; const requestBody = request?.body; @@ -61,6 +85,34 @@ function installPerplexitySuccessFetch() { }); } +function createProviderSuccessPayload( + provider: "brave" | "perplexity" | "grok" | "gemini" | "kimi", +) { + if (provider === "brave") { + return { web: { results: [] } }; + } + if (provider === "perplexity") { + return { choices: [{ message: { content: "ok" } }], citations: [] }; + } + if (provider === "grok") { + return { output_text: "ok", citations: [] }; + } + if (provider === "gemini") { + return { + candidates: [ + { + content: { parts: [{ text: "ok" }] }, + groundingMetadata: { groundingChunks: [] }, + }, + ], + }; + } + return { + choices: [{ finish_reason: "stop", message: { role: "assistant", content: "ok" } }], + search_results: [], + }; +} + async function executePerplexitySearch( query: string, options?: { @@ -143,6 +195,45 @@ describe("web_search country and language parameters", () => { expect(mockFetch).not.toHaveBeenCalled(); expect(result?.details).toMatchObject({ error: "invalid_freshness" }); }); + + it("uses proxy-aware dispatcher when HTTP_PROXY is configured", async () => { + vi.stubEnv("HTTP_PROXY", "http://127.0.0.1:7890"); + const mockFetch = installMockFetch({ web: { results: [] } }); + const tool = createWebSearchTool({ config: undefined, sandboxed: true }); + + await tool?.execute?.("call-1", { query: "proxy-test" }); + + const requestInit = mockFetch.mock.calls[0]?.[1] as + | (RequestInit & { dispatcher?: unknown }) + | undefined; + expect(requestInit?.dispatcher).toBeInstanceOf(EnvHttpProxyAgent); + }); +}); + +describe("web_search provider proxy dispatch", () => { + const 
priorFetch = global.fetch; + + afterEach(() => { + vi.unstubAllEnvs(); + global.fetch = priorFetch; + }); + + it.each(["brave", "perplexity", "grok", "gemini", "kimi"] as const)( + "uses proxy-aware dispatcher for %s provider when HTTP_PROXY is configured", + async (provider) => { + vi.stubEnv("HTTP_PROXY", "http://127.0.0.1:7890"); + const mockFetch = installMockFetch(createProviderSuccessPayload(provider)); + const tool = createProviderSearchTool(provider); + expect(tool).not.toBeNull(); + + await tool?.execute?.("call-1", { query: `proxy-${provider}-test` }); + + const requestInit = mockFetch.mock.calls[0]?.[1] as + | (RequestInit & { dispatcher?: unknown }) + | undefined; + expect(requestInit?.dispatcher).toBeInstanceOf(EnvHttpProxyAgent); + }, + ); }); describe("web_search perplexity baseUrl defaults", () => { diff --git a/src/agents/tools/web-tools.fetch.test.ts b/src/agents/tools/web-tools.fetch.test.ts index 0c69e1e1767..53836b92067 100644 --- a/src/agents/tools/web-tools.fetch.test.ts +++ b/src/agents/tools/web-tools.fetch.test.ts @@ -1,3 +1,4 @@ +import { EnvHttpProxyAgent } from "undici"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import * as ssrf from "../../infra/net/ssrf.js"; import { withFetchPreconnect } from "../../test-utils/fetch-mock.js"; @@ -146,6 +147,7 @@ describe("web_fetch extraction fallbacks", () => { afterEach(() => { global.fetch = priorFetch; + vi.unstubAllEnvs(); vi.restoreAllMocks(); }); @@ -256,6 +258,27 @@ describe("web_fetch extraction fallbacks", () => { expect(details?.warning).toContain("Response body truncated"); }); + it("uses proxy-aware dispatcher when HTTP_PROXY is configured", async () => { + vi.stubEnv("HTTP_PROXY", "http://127.0.0.1:7890"); + const mockFetch = installMockFetch((input: RequestInfo | URL) => + Promise.resolve({ + ok: true, + status: 200, + headers: makeHeaders({ "content-type": "text/plain" }), + text: async () => "proxy body", + url: requestUrl(input), + } as Response), + 
); + const tool = createFetchTool({ firecrawl: { enabled: false } }); + + await tool?.execute?.("call", { url: "https://example.com/proxy" }); + + const requestInit = mockFetch.mock.calls[0]?.[1] as + | (RequestInit & { dispatcher?: unknown }) + | undefined; + expect(requestInit?.dispatcher).toBeInstanceOf(EnvHttpProxyAgent); + }); + // NOTE: Test for wrapping url/finalUrl/warning fields requires DNS mocking. // The sanitization of these fields is verified by external-content.test.ts tests. diff --git a/src/agents/usage.ts b/src/agents/usage.ts index be23df97116..703df4ad7e7 100644 --- a/src/agents/usage.ts +++ b/src/agents/usage.ts @@ -34,6 +34,38 @@ export type NormalizedUsage = { total?: number; }; +export type AssistantUsageSnapshot = { + input: number; + output: number; + cacheRead: number; + cacheWrite: number; + totalTokens: number; + cost: { + input: number; + output: number; + cacheRead: number; + cacheWrite: number; + total: number; + }; +}; + +export function makeZeroUsageSnapshot(): AssistantUsageSnapshot { + return { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + total: 0, + }, + }; +} + const asFiniteNumber = (value: unknown): number | undefined => { if (typeof value !== "number") { return undefined; diff --git a/src/agents/workspace.bootstrap-cache.test.ts b/src/agents/workspace.bootstrap-cache.test.ts index a41bafe4a96..6d5300feba1 100644 --- a/src/agents/workspace.bootstrap-cache.test.ts +++ b/src/agents/workspace.bootstrap-cache.test.ts @@ -74,6 +74,34 @@ describe("workspace bootstrap file caching", () => { expectAgentsContent(agentsFile2, content2); }); + it("invalidates cache when inode changes with same mtime", async () => { + if (process.platform === "win32") { + return; + } + const content1 = "# old-content"; + const content2 = "# new-content"; + const filePath = path.join(workspaceDir, DEFAULT_AGENTS_FILENAME); + const tempPath = 
path.join(workspaceDir, ".AGENTS.tmp"); + + await writeWorkspaceFile({ + dir: workspaceDir, + name: DEFAULT_AGENTS_FILENAME, + content: content1, + }); + const originalStat = await fs.stat(filePath); + + const agentsFile1 = await loadAgentsFile(workspaceDir); + expectAgentsContent(agentsFile1, content1); + + await fs.writeFile(tempPath, content2, "utf-8"); + await fs.utimes(tempPath, originalStat.atime, originalStat.mtime); + await fs.rename(tempPath, filePath); + await fs.utimes(filePath, originalStat.atime, originalStat.mtime); + + const agentsFile2 = await loadAgentsFile(workspaceDir); + expectAgentsContent(agentsFile2, content2); + }); + it("handles file deletion gracefully", async () => { const content = "# Some content"; const filePath = path.join(workspaceDir, DEFAULT_AGENTS_FILENAME); diff --git a/src/agents/workspace.load-extra-bootstrap-files.test.ts b/src/agents/workspace.load-extra-bootstrap-files.test.ts index 0a478524aef..a10d0c727b4 100644 --- a/src/agents/workspace.load-extra-bootstrap-files.test.ts +++ b/src/agents/workspace.load-extra-bootstrap-files.test.ts @@ -2,7 +2,7 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { afterAll, beforeAll, describe, expect, it } from "vitest"; -import { loadExtraBootstrapFiles } from "./workspace.js"; +import { loadExtraBootstrapFiles, loadExtraBootstrapFilesWithDiagnostics } from "./workspace.js"; describe("loadExtraBootstrapFiles", () => { let fixtureRoot = ""; @@ -69,4 +69,43 @@ describe("loadExtraBootstrapFiles", () => { expect(files[0]?.name).toBe("AGENTS.md"); expect(files[0]?.content).toBe("linked agents"); }); + + it("rejects hardlinked aliases to files outside workspace", async () => { + if (process.platform === "win32") { + return; + } + + const rootDir = await createWorkspaceDir("hardlink"); + const workspaceDir = path.join(rootDir, "workspace"); + const outsideDir = path.join(rootDir, "outside"); + await fs.mkdir(workspaceDir, { recursive: true }); 
+ await fs.mkdir(outsideDir, { recursive: true }); + const outsideFile = path.join(outsideDir, "AGENTS.md"); + const linkedFile = path.join(workspaceDir, "AGENTS.md"); + await fs.writeFile(outsideFile, "outside", "utf-8"); + try { + await fs.link(outsideFile, linkedFile); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const files = await loadExtraBootstrapFiles(workspaceDir, ["AGENTS.md"]); + expect(files).toHaveLength(0); + }); + + it("skips oversized bootstrap files and reports diagnostics", async () => { + const workspaceDir = await createWorkspaceDir("oversized"); + const payload = "x".repeat(2 * 1024 * 1024 + 1); + await fs.writeFile(path.join(workspaceDir, "AGENTS.md"), payload, "utf-8"); + + const { files, diagnostics } = await loadExtraBootstrapFilesWithDiagnostics(workspaceDir, [ + "AGENTS.md", + ]); + + expect(files).toHaveLength(0); + expect(diagnostics.some((d) => d.reason === "security")).toBe(true); + }); }); diff --git a/src/agents/workspace.test.ts b/src/agents/workspace.test.ts index 0c854178917..ac236e3c02b 100644 --- a/src/agents/workspace.test.ts +++ b/src/agents/workspace.test.ts @@ -1,4 +1,5 @@ import fs from "node:fs/promises"; +import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; import { makeTempWorkspace, writeWorkspaceFile } from "../test-helpers/workspace.js"; @@ -102,6 +103,39 @@ describe("ensureAgentWorkspace", () => { expect(state.bootstrapSeededAt).toBeUndefined(); expect(state.onboardingCompletedAt).toMatch(/\d{4}-\d{2}-\d{2}T/); }); + + it("treats memory-backed workspaces as existing even when template files are missing", async () => { + const tempDir = await makeTempWorkspace("openclaw-workspace-"); + await fs.mkdir(path.join(tempDir, "memory"), { recursive: true }); + await fs.writeFile(path.join(tempDir, "memory", "2026-02-25.md"), "# Daily log\nSome notes"); + await fs.writeFile(path.join(tempDir, "MEMORY.md"), "# 
Long-term memory\nImportant stuff"); + + await ensureAgentWorkspace({ dir: tempDir, ensureBootstrapFiles: true }); + + await expect(fs.access(path.join(tempDir, DEFAULT_IDENTITY_FILENAME))).resolves.toBeUndefined(); + await expect(fs.access(path.join(tempDir, DEFAULT_BOOTSTRAP_FILENAME))).rejects.toMatchObject({ + code: "ENOENT", + }); + const state = await readOnboardingState(tempDir); + expect(state.onboardingCompletedAt).toMatch(/\d{4}-\d{2}-\d{2}T/); + const memoryContent = await fs.readFile(path.join(tempDir, "MEMORY.md"), "utf-8"); + expect(memoryContent).toBe("# Long-term memory\nImportant stuff"); + }); + + it("treats git-backed workspaces as existing even when template files are missing", async () => { + const tempDir = await makeTempWorkspace("openclaw-workspace-"); + await fs.mkdir(path.join(tempDir, ".git"), { recursive: true }); + await fs.writeFile(path.join(tempDir, ".git", "HEAD"), "ref: refs/heads/main\n"); + + await ensureAgentWorkspace({ dir: tempDir, ensureBootstrapFiles: true }); + + await expect(fs.access(path.join(tempDir, DEFAULT_IDENTITY_FILENAME))).resolves.toBeUndefined(); + await expect(fs.access(path.join(tempDir, DEFAULT_BOOTSTRAP_FILENAME))).rejects.toMatchObject({ + code: "ENOENT", + }); + const state = await readOnboardingState(tempDir); + expect(state.onboardingCompletedAt).toMatch(/\d{4}-\d{2}-\d{2}T/); + }); }); describe("loadWorkspaceBootstrapFiles", () => { @@ -142,6 +176,37 @@ describe("loadWorkspaceBootstrapFiles", () => { const files = await loadWorkspaceBootstrapFiles(tempDir); expect(getMemoryEntries(files)).toHaveLength(0); }); + + it("treats hardlinked bootstrap aliases as missing", async () => { + if (process.platform === "win32") { + return; + } + const rootDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-workspace-hardlink-")); + try { + const workspaceDir = path.join(rootDir, "workspace"); + const outsideDir = path.join(rootDir, "outside"); + await fs.mkdir(workspaceDir, { recursive: true }); + await 
fs.mkdir(outsideDir, { recursive: true }); + const outsideFile = path.join(outsideDir, DEFAULT_AGENTS_FILENAME); + const linkPath = path.join(workspaceDir, DEFAULT_AGENTS_FILENAME); + await fs.writeFile(outsideFile, "outside", "utf-8"); + try { + await fs.link(outsideFile, linkPath); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const files = await loadWorkspaceBootstrapFiles(workspaceDir); + const agents = files.find((file) => file.name === DEFAULT_AGENTS_FILENAME); + expect(agents?.missing).toBe(true); + expect(agents?.content).toBeUndefined(); + } finally { + await fs.rm(rootDir, { recursive: true, force: true }); + } + }); }); describe("filterBootstrapFilesForSession", () => { diff --git a/src/agents/workspace.ts b/src/agents/workspace.ts index dbef9c6517d..830b44504ad 100644 --- a/src/agents/workspace.ts +++ b/src/agents/workspace.ts @@ -1,6 +1,8 @@ +import syncFs from "node:fs"; import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; +import { openBoundaryFile } from "../infra/boundary-file-read.js"; import { resolveRequiredHomeDir } from "../infra/home-dir.js"; import { runCommandWithTimeout } from "../process/exec.js"; import { isCronSessionKey, isSubagentSessionKey } from "../routing/session-key.js"; @@ -35,33 +37,53 @@ const WORKSPACE_STATE_VERSION = 1; const workspaceTemplateCache = new Map>(); let gitAvailabilityPromise: Promise | null = null; +const MAX_WORKSPACE_BOOTSTRAP_FILE_BYTES = 2 * 1024 * 1024; -// File content cache with mtime invalidation to avoid redundant reads -const workspaceFileCache = new Map(); +// File content cache keyed by stable file identity to avoid stale reads. +const workspaceFileCache = new Map(); /** - * Read file with caching based on mtime. Returns cached content if file - * hasn't changed, otherwise reads from disk and updates cache. + * Read workspace files via boundary-safe open and cache by inode/dev/size/mtime identity. 
*/ -async function readFileWithCache(filePath: string): Promise { +type WorkspaceGuardedReadResult = + | { ok: true; content: string } + | { ok: false; reason: "path" | "validation" | "io"; error?: unknown }; + +function workspaceFileIdentity(stat: syncFs.Stats, canonicalPath: string): string { + return `${canonicalPath}|${stat.dev}:${stat.ino}:${stat.size}:${stat.mtimeMs}`; +} + +async function readWorkspaceFileWithGuards(params: { + filePath: string; + workspaceDir: string; +}): Promise { + const opened = await openBoundaryFile({ + absolutePath: params.filePath, + rootPath: params.workspaceDir, + boundaryLabel: "workspace root", + maxBytes: MAX_WORKSPACE_BOOTSTRAP_FILE_BYTES, + }); + if (!opened.ok) { + workspaceFileCache.delete(params.filePath); + return opened; + } + + const identity = workspaceFileIdentity(opened.stat, opened.path); + const cached = workspaceFileCache.get(params.filePath); + if (cached && cached.identity === identity) { + syncFs.closeSync(opened.fd); + return { ok: true, content: cached.content }; + } + try { - const stats = await fs.stat(filePath); - const mtimeMs = stats.mtimeMs; - const cached = workspaceFileCache.get(filePath); - - // Return cached content if mtime matches - if (cached && cached.mtimeMs === mtimeMs) { - return cached.content; - } - - // Read from disk and update cache - const content = await fs.readFile(filePath, "utf-8"); - workspaceFileCache.set(filePath, { content, mtimeMs }); - return content; + const content = syncFs.readFileSync(opened.fd, "utf-8"); + workspaceFileCache.set(params.filePath, { content, identity }); + return { ok: true, content }; } catch (error) { - // Remove from cache if file doesn't exist or is unreadable - workspaceFileCache.delete(filePath); - throw error; + workspaceFileCache.delete(params.filePath); + return { ok: false, reason: "io", error }; + } finally { + syncFs.closeSync(opened.fd); } } @@ -125,6 +147,18 @@ export type WorkspaceBootstrapFile = { missing: boolean; }; +export type 
ExtraBootstrapLoadDiagnosticCode = + | "invalid-bootstrap-filename" + | "missing" + | "security" + | "io"; + +export type ExtraBootstrapLoadDiagnostic = { + path: string; + reason: ExtraBootstrapLoadDiagnosticCode; + detail: string; +}; + type WorkspaceOnboardingState = { version: typeof WORKSPACE_STATE_VERSION; bootstrapSeededAt?: string; @@ -315,7 +349,13 @@ export async function ensureAgentWorkspace(params?: { const statePath = resolveWorkspaceStatePath(dir); const isBrandNewWorkspace = await (async () => { - const paths = [agentsPath, soulPath, toolsPath, identityPath, userPath, heartbeatPath]; + const templatePaths = [agentsPath, soulPath, toolsPath, identityPath, userPath, heartbeatPath]; + const userContentPaths = [ + path.join(dir, "memory"), + path.join(dir, DEFAULT_MEMORY_FILENAME), + path.join(dir, ".git"), + ]; + const paths = [...templatePaths, ...userContentPaths]; const existing = await Promise.all( paths.map(async (p) => { try { @@ -360,14 +400,31 @@ export async function ensureAgentWorkspace(params?: { } if (!state.bootstrapSeededAt && !state.onboardingCompletedAt && !bootstrapExists) { - // Legacy migration path: if USER/IDENTITY diverged from templates, treat onboarding as complete - // and avoid recreating BOOTSTRAP for already-onboarded workspaces. + // Legacy migration path: if USER/IDENTITY diverged from templates, or if user-content + // indicators exist, treat onboarding as complete and avoid recreating BOOTSTRAP for + // already-onboarded workspaces. 
const [identityContent, userContent] = await Promise.all([ fs.readFile(identityPath, "utf-8"), fs.readFile(userPath, "utf-8"), ]); + const hasUserContent = await (async () => { + const indicators = [ + path.join(dir, "memory"), + path.join(dir, DEFAULT_MEMORY_FILENAME), + path.join(dir, ".git"), + ]; + for (const indicator of indicators) { + try { + await fs.access(indicator); + return true; + } catch { + // continue + } + } + return false; + })(); const legacyOnboardingCompleted = - identityContent !== identityTemplate || userContent !== userTemplate; + identityContent !== identityTemplate || userContent !== userTemplate || hasUserContent; if (legacyOnboardingCompleted) { markState({ onboardingCompletedAt: nowIso() }); } else { @@ -479,15 +536,18 @@ export async function loadWorkspaceBootstrapFiles(dir: string): Promise { + const loaded = await loadExtraBootstrapFilesWithDiagnostics(dir, extraPatterns); + return loaded.files; +} + +export async function loadExtraBootstrapFilesWithDiagnostics( + dir: string, + extraPatterns: string[], +): Promise<{ + files: WorkspaceBootstrapFile[]; + diagnostics: ExtraBootstrapLoadDiagnostic[]; +}> { if (!extraPatterns.length) { - return []; + return { files: [], diagnostics: [] }; } const resolvedDir = resolveUserPath(dir); - let realResolvedDir = resolvedDir; - try { - realResolvedDir = await fs.realpath(resolvedDir); - } catch { - // Keep lexical root if realpath fails. 
- } // Resolve glob patterns into concrete file paths const resolvedPaths = new Set(); @@ -545,37 +610,46 @@ export async function loadExtraBootstrapFiles( } } - const result: WorkspaceBootstrapFile[] = []; + const files: WorkspaceBootstrapFile[] = []; + const diagnostics: ExtraBootstrapLoadDiagnostic[] = []; for (const relPath of resolvedPaths) { const filePath = path.resolve(resolvedDir, relPath); - // Guard against path traversal — resolved path must stay within workspace - if (!filePath.startsWith(resolvedDir + path.sep) && filePath !== resolvedDir) { + // Only load files whose basename is a recognized bootstrap filename + const baseName = path.basename(relPath); + if (!VALID_BOOTSTRAP_NAMES.has(baseName)) { + diagnostics.push({ + path: filePath, + reason: "invalid-bootstrap-filename", + detail: `unsupported bootstrap basename: ${baseName}`, + }); continue; } - try { - // Resolve symlinks and verify the real path is still within workspace - const realFilePath = await fs.realpath(filePath); - if ( - !realFilePath.startsWith(realResolvedDir + path.sep) && - realFilePath !== realResolvedDir - ) { - continue; - } - // Only load files whose basename is a recognized bootstrap filename - const baseName = path.basename(relPath); - if (!VALID_BOOTSTRAP_NAMES.has(baseName)) { - continue; - } - const content = await readFileWithCache(realFilePath); - result.push({ + const loaded = await readWorkspaceFileWithGuards({ + filePath, + workspaceDir: resolvedDir, + }); + if (loaded.ok) { + files.push({ name: baseName as WorkspaceBootstrapFileName, path: filePath, - content, + content: loaded.content, missing: false, }); - } catch { - // Silently skip missing extra files + continue; } + + const reason: ExtraBootstrapLoadDiagnosticCode = + loaded.reason === "path" ? "missing" : loaded.reason === "validation" ? "security" : "io"; + diagnostics.push({ + path: filePath, + reason, + detail: + loaded.error instanceof Error + ? 
loaded.error.message + : typeof loaded.error === "string" + ? loaded.error + : reason, + }); } - return result; + return { files, diagnostics }; } diff --git a/src/auto-reply/commands-registry.data.ts b/src/auto-reply/commands-registry.data.ts index eb3e6f6d5a2..3cb2e4ff9f9 100644 --- a/src/auto-reply/commands-registry.data.ts +++ b/src/auto-reply/commands-registry.data.ts @@ -311,6 +311,45 @@ function buildChatCommands(): ChatCommandDefinition[] { ], argsMenu: "auto", }), + defineChatCommand({ + key: "acp", + nativeName: "acp", + description: "Manage ACP sessions and runtime options.", + textAlias: "/acp", + category: "management", + args: [ + { + name: "action", + description: "Action to run", + type: "string", + choices: [ + "spawn", + "cancel", + "steer", + "close", + "sessions", + "status", + "set-mode", + "set", + "cwd", + "permissions", + "timeout", + "model", + "reset-options", + "doctor", + "install", + "help", + ], + }, + { + name: "value", + description: "Action arguments", + type: "string", + captureRemaining: true, + }, + ], + argsMenu: "auto", + }), defineChatCommand({ key: "focus", nativeName: "focus", diff --git a/src/auto-reply/commands-registry.test.ts b/src/auto-reply/commands-registry.test.ts index b05e5ea839c..918310278c9 100644 --- a/src/auto-reply/commands-registry.test.ts +++ b/src/auto-reply/commands-registry.test.ts @@ -109,6 +109,47 @@ describe("commands registry", () => { expect(findCommandByNativeName("tts", "discord")).toBeUndefined(); }); + it("keeps discord native command specs within slash-command limits", () => { + const native = listNativeCommandSpecsForConfig( + { commands: { native: true } }, + { provider: "discord" }, + ); + for (const spec of native) { + expect(spec.name).toMatch(/^[a-z0-9_-]{1,32}$/); + expect(spec.description.length).toBeGreaterThan(0); + expect(spec.description.length).toBeLessThanOrEqual(100); + for (const arg of spec.args ?? 
[]) { + expect(arg.name).toMatch(/^[a-z0-9_-]{1,32}$/); + expect(arg.description.length).toBeGreaterThan(0); + expect(arg.description.length).toBeLessThanOrEqual(100); + } + } + }); + + it("keeps ACP native action choices aligned with implemented handlers", () => { + const acp = listChatCommands().find((command) => command.key === "acp"); + expect(acp).toBeTruthy(); + const actionArg = acp?.args?.find((arg) => arg.name === "action"); + expect(actionArg?.choices).toEqual([ + "spawn", + "cancel", + "steer", + "close", + "sessions", + "status", + "set-mode", + "set", + "cwd", + "permissions", + "timeout", + "model", + "reset-options", + "doctor", + "install", + "help", + ]); + }); + it("detects known text commands", () => { const detection = getCommandDetection(); expect(detection.exact.has("/commands")).toBe(true); diff --git a/src/auto-reply/model.test.ts b/src/auto-reply/model.test.ts index d96bc863b04..2b4ae646971 100644 --- a/src/auto-reply/model.test.ts +++ b/src/auto-reply/model.test.ts @@ -50,6 +50,20 @@ describe("extractModelDirective", () => { expect(result.rawProfile).toBe("work"); }); + it("keeps Cloudflare @cf path segments inside model ids", () => { + const result = extractModelDirective("/model openai/@cf/openai/gpt-oss-20b"); + expect(result.hasDirective).toBe(true); + expect(result.rawModel).toBe("openai/@cf/openai/gpt-oss-20b"); + expect(result.rawProfile).toBeUndefined(); + }); + + it("allows profile overrides after Cloudflare @cf path segments", () => { + const result = extractModelDirective("/model openai/@cf/openai/gpt-oss-20b@cf:default"); + expect(result.hasDirective).toBe(true); + expect(result.rawModel).toBe("openai/@cf/openai/gpt-oss-20b"); + expect(result.rawProfile).toBe("cf:default"); + }); + it("returns no directive for plain text", () => { const result = extractModelDirective("hello world"); expect(result.hasDirective).toBe(false); diff --git a/src/auto-reply/model.ts b/src/auto-reply/model.ts index 2341f805949..237af130b63 100644 --- 
a/src/auto-reply/model.ts +++ b/src/auto-reply/model.ts @@ -1,3 +1,4 @@ +import { splitTrailingAuthProfile } from "../agents/model-ref-profile.js"; import { escapeRegExp } from "../utils.js"; export function extractModelDirective( @@ -34,15 +35,9 @@ export function extractModelDirective( let rawModel = raw; let rawProfile: string | undefined; if (raw) { - const atIndex = raw.lastIndexOf("@"); - if (atIndex > 0) { - const candidateModel = raw.slice(0, atIndex).trim(); - const candidateProfile = raw.slice(atIndex + 1).trim(); - if (candidateModel && candidateProfile && !candidateProfile.includes("/")) { - rawModel = candidateModel; - rawProfile = candidateProfile; - } - } + const split = splitTrailingAuthProfile(raw); + rawModel = split.model; + rawProfile = split.profile; } const cleaned = match ? body.replace(match[0], " ").replace(/\s+/g, " ").trim() : body.trim(); diff --git a/src/auto-reply/reply/abort-cutoff.ts b/src/auto-reply/reply/abort-cutoff.ts new file mode 100644 index 00000000000..44fb8b04ca3 --- /dev/null +++ b/src/auto-reply/reply/abort-cutoff.ts @@ -0,0 +1,138 @@ +import type { SessionEntry } from "../../config/sessions.js"; +import { updateSessionStore } from "../../config/sessions.js"; +import type { MsgContext } from "../templating.js"; + +export type AbortCutoff = { + messageSid?: string; + timestamp?: number; +}; + +type SessionAbortCutoffEntry = Pick; + +export function resolveAbortCutoffFromContext(ctx: MsgContext): AbortCutoff | undefined { + const messageSid = + (typeof ctx.MessageSidFull === "string" && ctx.MessageSidFull.trim()) || + (typeof ctx.MessageSid === "string" && ctx.MessageSid.trim()) || + undefined; + const timestamp = + typeof ctx.Timestamp === "number" && Number.isFinite(ctx.Timestamp) ? 
ctx.Timestamp : undefined; + if (!messageSid && timestamp === undefined) { + return undefined; + } + return { messageSid, timestamp }; +} + +export function readAbortCutoffFromSessionEntry( + entry: SessionAbortCutoffEntry | undefined, +): AbortCutoff | undefined { + if (!entry) { + return undefined; + } + const messageSid = entry.abortCutoffMessageSid?.trim() || undefined; + const timestamp = + typeof entry.abortCutoffTimestamp === "number" && Number.isFinite(entry.abortCutoffTimestamp) + ? entry.abortCutoffTimestamp + : undefined; + if (!messageSid && timestamp === undefined) { + return undefined; + } + return { messageSid, timestamp }; +} + +export function hasAbortCutoff(entry: SessionAbortCutoffEntry | undefined): boolean { + return readAbortCutoffFromSessionEntry(entry) !== undefined; +} + +export function applyAbortCutoffToSessionEntry( + entry: SessionAbortCutoffEntry, + cutoff: AbortCutoff | undefined, +): void { + entry.abortCutoffMessageSid = cutoff?.messageSid; + entry.abortCutoffTimestamp = cutoff?.timestamp; +} + +export async function clearAbortCutoffInSession(params: { + sessionEntry?: SessionEntry; + sessionStore?: Record; + sessionKey?: string; + storePath?: string; +}): Promise { + const { sessionEntry, sessionStore, sessionKey, storePath } = params; + if (!sessionEntry || !sessionStore || !sessionKey || !hasAbortCutoff(sessionEntry)) { + return false; + } + + applyAbortCutoffToSessionEntry(sessionEntry, undefined); + sessionEntry.updatedAt = Date.now(); + sessionStore[sessionKey] = sessionEntry; + + if (storePath) { + await updateSessionStore(storePath, (store) => { + const existing = store[sessionKey] ?? 
sessionEntry; + if (!existing) { + return; + } + applyAbortCutoffToSessionEntry(existing, undefined); + existing.updatedAt = Date.now(); + store[sessionKey] = existing; + }); + } + + return true; +} + +function toNumericMessageSid(value: string | undefined): bigint | undefined { + const trimmed = value?.trim(); + if (!trimmed || !/^\d+$/.test(trimmed)) { + return undefined; + } + try { + return BigInt(trimmed); + } catch { + return undefined; + } +} + +export function shouldSkipMessageByAbortCutoff(params: { + cutoffMessageSid?: string; + cutoffTimestamp?: number; + messageSid?: string; + timestamp?: number; +}): boolean { + const cutoffSid = params.cutoffMessageSid?.trim(); + const currentSid = params.messageSid?.trim(); + if (cutoffSid && currentSid) { + const cutoffNumeric = toNumericMessageSid(cutoffSid); + const currentNumeric = toNumericMessageSid(currentSid); + if (cutoffNumeric !== undefined && currentNumeric !== undefined) { + return currentNumeric <= cutoffNumeric; + } + if (currentSid === cutoffSid) { + return true; + } + } + if ( + typeof params.cutoffTimestamp === "number" && + Number.isFinite(params.cutoffTimestamp) && + typeof params.timestamp === "number" && + Number.isFinite(params.timestamp) + ) { + return params.timestamp <= params.cutoffTimestamp; + } + return false; +} + +export function shouldPersistAbortCutoff(params: { + commandSessionKey?: string; + targetSessionKey?: string; +}): boolean { + const commandSessionKey = params.commandSessionKey?.trim(); + const targetSessionKey = params.targetSessionKey?.trim(); + if (!commandSessionKey || !targetSessionKey) { + return true; + } + // Native targeted /stop can run from a slash/session-control key while the + // actual target session uses different message id/timestamp spaces. + // Persist cutoff only when command source and target are the same session. 
+ return commandSessionKey === targetSessionKey; +} diff --git a/src/auto-reply/reply/abort.test.ts b/src/auto-reply/reply/abort.test.ts index 68bb923fd16..a76eb9b1b2d 100644 --- a/src/auto-reply/reply/abort.test.ts +++ b/src/auto-reply/reply/abort.test.ts @@ -10,8 +10,10 @@ import { isAbortRequestText, isAbortTrigger, resetAbortMemoryForTest, + resolveAbortCutoffFromContext, resolveSessionEntryForKey, setAbortMemory, + shouldSkipMessageByAbortCutoff, tryFastAbortFromMessage, } from "./abort.js"; import { enqueueFollowupRun, getFollowupQueueDepth, type FollowupRun } from "./queue.js"; @@ -80,6 +82,9 @@ describe("abort detection", () => { sessionKey: string; from: string; to: string; + targetSessionKey?: string; + messageSid?: string; + timestamp?: number; }) { return tryFastAbortFromMessage({ ctx: buildTestCtx({ @@ -91,6 +96,9 @@ describe("abort detection", () => { Surface: "telegram", From: params.from, To: params.to, + ...(params.targetSessionKey ? { CommandTargetSessionKey: params.targetSessionKey } : {}), + ...(params.messageSid ? { MessageSid: params.messageSid } : {}), + ...(typeof params.timestamp === "number" ? 
{ Timestamp: params.timestamp } : {}), }), cfg: params.cfg, }); @@ -221,6 +229,62 @@ describe("abort detection", () => { expect(getAbortMemory("session-2104")).toBe(true); }); + it("extracts abort cutoff metadata from context", () => { + expect( + resolveAbortCutoffFromContext( + buildTestCtx({ + MessageSid: "42", + Timestamp: 123, + }), + ), + ).toEqual({ + messageSid: "42", + timestamp: 123, + }); + }); + + it("treats numeric message IDs at or before cutoff as stale", () => { + expect( + shouldSkipMessageByAbortCutoff({ + cutoffMessageSid: "200", + messageSid: "199", + }), + ).toBe(true); + expect( + shouldSkipMessageByAbortCutoff({ + cutoffMessageSid: "200", + messageSid: "200", + }), + ).toBe(true); + expect( + shouldSkipMessageByAbortCutoff({ + cutoffMessageSid: "200", + messageSid: "201", + }), + ).toBe(false); + }); + + it("falls back to timestamp cutoff when message IDs are unavailable", () => { + expect( + shouldSkipMessageByAbortCutoff({ + cutoffTimestamp: 2000, + timestamp: 1999, + }), + ).toBe(true); + expect( + shouldSkipMessageByAbortCutoff({ + cutoffTimestamp: 2000, + timestamp: 2000, + }), + ).toBe(true); + expect( + shouldSkipMessageByAbortCutoff({ + cutoffTimestamp: 2000, + timestamp: 2001, + }), + ).toBe(false); + }); + it("resolves session entry when key exists in store", () => { const store = { "session-1": { sessionId: "abc", updatedAt: 0 }, @@ -291,6 +355,64 @@ describe("abort detection", () => { expect(commandQueueMocks.clearCommandLane).toHaveBeenCalledWith(`session:${sessionKey}`); }); + it("persists abort cutoff metadata on /stop when command and target session match", async () => { + const sessionKey = "telegram:123"; + const sessionId = "session-123"; + const { storePath, cfg } = await createAbortConfig({ + sessionIdsByKey: { [sessionKey]: sessionId }, + }); + + const result = await runStopCommand({ + cfg, + sessionKey, + from: "telegram:123", + to: "telegram:123", + messageSid: "55", + timestamp: 1234567890000, + }); + + 
expect(result.handled).toBe(true); + const store = JSON.parse(await fs.readFile(storePath, "utf8")) as Record; + const entry = store[sessionKey] as { + abortedLastRun?: boolean; + abortCutoffMessageSid?: string; + abortCutoffTimestamp?: number; + }; + expect(entry.abortedLastRun).toBe(true); + expect(entry.abortCutoffMessageSid).toBe("55"); + expect(entry.abortCutoffTimestamp).toBe(1234567890000); + }); + + it("does not persist cutoff metadata when native /stop targets a different session", async () => { + const slashSessionKey = "telegram:slash:123"; + const targetSessionKey = "agent:main:telegram:group:123"; + const targetSessionId = "session-target"; + const { storePath, cfg } = await createAbortConfig({ + sessionIdsByKey: { [targetSessionKey]: targetSessionId }, + }); + + const result = await runStopCommand({ + cfg, + sessionKey: slashSessionKey, + from: "telegram:123", + to: "telegram:123", + targetSessionKey, + messageSid: "999", + timestamp: 1234567890000, + }); + + expect(result.handled).toBe(true); + const store = JSON.parse(await fs.readFile(storePath, "utf8")) as Record; + const entry = store[targetSessionKey] as { + abortedLastRun?: boolean; + abortCutoffMessageSid?: string; + abortCutoffTimestamp?: number; + }; + expect(entry.abortedLastRun).toBe(true); + expect(entry.abortCutoffMessageSid).toBeUndefined(); + expect(entry.abortCutoffTimestamp).toBeUndefined(); + }); + it("fast-abort stops active subagent runs for requester session", async () => { const sessionKey = "telegram:parent"; const childKey = "agent:main:subagent:child-1"; diff --git a/src/auto-reply/reply/abort.ts b/src/auto-reply/reply/abort.ts index 3c05fa097b1..0b318272d20 100644 --- a/src/auto-reply/reply/abort.ts +++ b/src/auto-reply/reply/abort.ts @@ -20,9 +20,16 @@ import { parseAgentSessionKey } from "../../routing/session-key.js"; import { resolveCommandAuthorization } from "../command-auth.js"; import { normalizeCommandBody, type CommandNormalizeOptions } from 
"../commands-registry.js"; import type { FinalizedMsgContext, MsgContext } from "../templating.js"; +import { + applyAbortCutoffToSessionEntry, + resolveAbortCutoffFromContext, + shouldPersistAbortCutoff, +} from "./abort-cutoff.js"; import { stripMentions, stripStructuralPrefixes } from "./mentions.js"; import { clearSessionQueues } from "./queue.js"; +export { resolveAbortCutoffFromContext, shouldSkipMessageByAbortCutoff } from "./abort-cutoff.js"; + const ABORT_TRIGGERS = new Set([ "stop", "esc", @@ -302,8 +309,15 @@ export async function tryFastAbortFromMessage(params: { `abort: cleared followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, ); } + const abortCutoff = shouldPersistAbortCutoff({ + commandSessionKey: ctx.SessionKey, + targetSessionKey: key ?? targetKey, + }) + ? resolveAbortCutoffFromContext(ctx) + : undefined; if (entry && key) { entry.abortedLastRun = true; + applyAbortCutoffToSessionEntry(entry, abortCutoff); entry.updatedAt = Date.now(); store[key] = entry; await updateSessionStore(storePath, (nextStore) => { @@ -312,6 +326,7 @@ export async function tryFastAbortFromMessage(params: { return; } nextEntry.abortedLastRun = true; + applyAbortCutoffToSessionEntry(nextEntry, abortCutoff); nextEntry.updatedAt = Date.now(); nextStore[key] = nextEntry; }); diff --git a/src/auto-reply/reply/acp-projector.test.ts b/src/auto-reply/reply/acp-projector.test.ts new file mode 100644 index 00000000000..829ef7cc452 --- /dev/null +++ b/src/auto-reply/reply/acp-projector.test.ts @@ -0,0 +1,145 @@ +import { describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; +import { createAcpReplyProjector } from "./acp-projector.js"; + +function createCfg(overrides?: Partial): OpenClawConfig { + return { + acp: { + enabled: true, + stream: { + coalesceIdleMs: 0, + maxChunkChars: 50, + }, + }, + ...overrides, + } as OpenClawConfig; +} + +describe("createAcpReplyProjector", () => { + 
it("coalesces text deltas into bounded block chunks", async () => { + const deliveries: Array<{ kind: string; text?: string }> = []; + const projector = createAcpReplyProjector({ + cfg: createCfg(), + shouldSendToolSummaries: true, + deliver: async (kind, payload) => { + deliveries.push({ kind, text: payload.text }); + return true; + }, + }); + + await projector.onEvent({ + type: "text_delta", + text: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + }); + await projector.onEvent({ + type: "text_delta", + text: "bbbbbbbbbb", + }); + await projector.flush(true); + + expect(deliveries).toEqual([ + { + kind: "block", + text: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + }, + { kind: "block", text: "aabbbbbbbbbb" }, + ]); + }); + + it("buffers tiny token deltas and flushes once at turn end", async () => { + const deliveries: Array<{ kind: string; text?: string }> = []; + const projector = createAcpReplyProjector({ + cfg: createCfg({ + acp: { + enabled: true, + stream: { + coalesceIdleMs: 0, + maxChunkChars: 256, + }, + }, + }), + shouldSendToolSummaries: true, + provider: "discord", + deliver: async (kind, payload) => { + deliveries.push({ kind, text: payload.text }); + return true; + }, + }); + + await projector.onEvent({ type: "text_delta", text: "What" }); + await projector.onEvent({ type: "text_delta", text: " do" }); + await projector.onEvent({ type: "text_delta", text: " you want to work on?" }); + + expect(deliveries).toEqual([]); + + await projector.flush(true); + + expect(deliveries).toEqual([{ kind: "block", text: "What do you want to work on?" 
}]); + }); + + it("filters thought stream text and suppresses tool summaries when disabled", async () => { + const deliver = vi.fn(async () => true); + const projector = createAcpReplyProjector({ + cfg: createCfg(), + shouldSendToolSummaries: false, + deliver, + }); + + await projector.onEvent({ type: "text_delta", text: "internal", stream: "thought" }); + await projector.onEvent({ type: "status", text: "running tool" }); + await projector.onEvent({ type: "tool_call", text: "ls" }); + await projector.flush(true); + + expect(deliver).not.toHaveBeenCalled(); + }); + + it("emits status and tool_call summaries when enabled", async () => { + const deliveries: Array<{ kind: string; text?: string }> = []; + const projector = createAcpReplyProjector({ + cfg: createCfg(), + shouldSendToolSummaries: true, + deliver: async (kind, payload) => { + deliveries.push({ kind, text: payload.text }); + return true; + }, + }); + + await projector.onEvent({ type: "status", text: "planning" }); + await projector.onEvent({ type: "tool_call", text: "exec ls" }); + + expect(deliveries).toEqual([ + { kind: "tool", text: "⚙️ planning" }, + { kind: "tool", text: "🧰 exec ls" }, + ]); + }); + + it("flushes pending streamed text before tool/status updates", async () => { + const deliveries: Array<{ kind: string; text?: string }> = []; + const projector = createAcpReplyProjector({ + cfg: createCfg({ + acp: { + enabled: true, + stream: { + coalesceIdleMs: 0, + maxChunkChars: 256, + }, + }, + }), + shouldSendToolSummaries: true, + provider: "discord", + deliver: async (kind, payload) => { + deliveries.push({ kind, text: payload.text }); + return true; + }, + }); + + await projector.onEvent({ type: "text_delta", text: "Hello" }); + await projector.onEvent({ type: "text_delta", text: " world" }); + await projector.onEvent({ type: "status", text: "running tool" }); + + expect(deliveries).toEqual([ + { kind: "block", text: "Hello world" }, + { kind: "tool", text: "⚙️ running tool" }, + ]); + }); +}); 
diff --git a/src/auto-reply/reply/acp-projector.ts b/src/auto-reply/reply/acp-projector.ts new file mode 100644 index 00000000000..8bbe643dc30 --- /dev/null +++ b/src/auto-reply/reply/acp-projector.ts @@ -0,0 +1,140 @@ +import type { AcpRuntimeEvent } from "../../acp/runtime/types.js"; +import { EmbeddedBlockChunker } from "../../agents/pi-embedded-block-chunker.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import type { ReplyPayload } from "../types.js"; +import { createBlockReplyPipeline } from "./block-reply-pipeline.js"; +import { resolveEffectiveBlockStreamingConfig } from "./block-streaming.js"; +import type { ReplyDispatchKind } from "./reply-dispatcher.js"; + +const DEFAULT_ACP_STREAM_COALESCE_IDLE_MS = 350; +const DEFAULT_ACP_STREAM_MAX_CHUNK_CHARS = 1800; +const ACP_BLOCK_REPLY_TIMEOUT_MS = 15_000; + +function clampPositiveInteger( + value: unknown, + fallback: number, + bounds: { min: number; max: number }, +): number { + if (typeof value !== "number" || !Number.isFinite(value)) { + return fallback; + } + const rounded = Math.round(value); + if (rounded < bounds.min) { + return bounds.min; + } + if (rounded > bounds.max) { + return bounds.max; + } + return rounded; +} + +function resolveAcpStreamCoalesceIdleMs(cfg: OpenClawConfig): number { + return clampPositiveInteger( + cfg.acp?.stream?.coalesceIdleMs, + DEFAULT_ACP_STREAM_COALESCE_IDLE_MS, + { + min: 0, + max: 5_000, + }, + ); +} + +function resolveAcpStreamMaxChunkChars(cfg: OpenClawConfig): number { + return clampPositiveInteger(cfg.acp?.stream?.maxChunkChars, DEFAULT_ACP_STREAM_MAX_CHUNK_CHARS, { + min: 50, + max: 4_000, + }); +} + +function resolveAcpStreamingConfig(params: { + cfg: OpenClawConfig; + provider?: string; + accountId?: string; +}) { + return resolveEffectiveBlockStreamingConfig({ + cfg: params.cfg, + provider: params.provider, + accountId: params.accountId, + maxChunkChars: resolveAcpStreamMaxChunkChars(params.cfg), + coalesceIdleMs: 
resolveAcpStreamCoalesceIdleMs(params.cfg), + }); +} + +export type AcpReplyProjector = { + onEvent: (event: AcpRuntimeEvent) => Promise<void>; + flush: (force?: boolean) => Promise<void>; +}; + +export function createAcpReplyProjector(params: { + cfg: OpenClawConfig; + shouldSendToolSummaries: boolean; + deliver: (kind: ReplyDispatchKind, payload: ReplyPayload) => Promise<boolean>; + provider?: string; + accountId?: string; +}): AcpReplyProjector { + const streaming = resolveAcpStreamingConfig({ + cfg: params.cfg, + provider: params.provider, + accountId: params.accountId, + }); + const blockReplyPipeline = createBlockReplyPipeline({ + onBlockReply: async (payload) => { + await params.deliver("block", payload); + }, + timeoutMs: ACP_BLOCK_REPLY_TIMEOUT_MS, + coalescing: streaming.coalescing, + }); + const chunker = new EmbeddedBlockChunker(streaming.chunking); + + const drainChunker = (force: boolean) => { + chunker.drain({ + force, + emit: (chunk) => { + blockReplyPipeline.enqueue({ text: chunk }); + }, + }); + }; + + const flush = async (force = false): Promise<void> => { + drainChunker(force); + await blockReplyPipeline.flush({ force }); + }; + + const emitToolSummary = async (prefix: string, text: string): Promise<void> => { + if (!params.shouldSendToolSummaries || !text) { + return; + } + // Keep tool summaries ordered after any pending streamed text. 
+ await flush(true); + await params.deliver("tool", { text: `${prefix} ${text}` }); + }; + + const onEvent = async (event: AcpRuntimeEvent): Promise<void> => { + if (event.type === "text_delta") { + if (event.stream && event.stream !== "output") { + return; + } + if (event.text) { + chunker.append(event.text); + drainChunker(false); + } + return; + } + if (event.type === "status") { + await emitToolSummary("⚙️", event.text); + return; + } + if (event.type === "tool_call") { + await emitToolSummary("🧰", event.text); + return; + } + if (event.type === "done" || event.type === "error") { + await flush(true); + } + }; + + return { + onEvent, + flush, + }; +} diff --git a/src/auto-reply/reply/agent-runner-execution.ts index eb8605ccfe1..a9bd537b527 100644 --- a/src/auto-reply/reply/agent-runner-execution.ts +++ b/src/auto-reply/reply/agent-runner-execution.ts @@ -28,7 +28,12 @@ import { import { stripHeartbeatToken } from "../heartbeat.js"; import type { TemplateContext } from "../templating.js"; import type { VerboseLevel } from "../thinking.js"; -import { isSilentReplyPrefixText, isSilentReplyText, SILENT_REPLY_TOKEN } from "../tokens.js"; +import { + HEARTBEAT_TOKEN, + isSilentReplyPrefixText, + isSilentReplyText, + SILENT_REPLY_TOKEN, +} from "../tokens.js"; import type { GetReplyOptions, ReplyPayload } from "../types.js"; import { buildEmbeddedRunBaseParams, @@ -141,6 +146,12 @@ export async function runAgentTurnWithFallback(params: { if (isSilentReplyText(text, SILENT_REPLY_TOKEN)) { return { skip: true }; } + if ( + isSilentReplyPrefixText(text, SILENT_REPLY_TOKEN) || + isSilentReplyPrefixText(text, HEARTBEAT_TOKEN) + ) { + return { skip: true }; + } if (!text) { // Allow media-only payloads (e.g. tool result screenshots) through. if ((payload.mediaUrls?.length ?? 
0) > 0) { @@ -572,6 +583,22 @@ export async function runAgentTurnWithFallback(params: { } } + // If the run completed but with an embedded context overflow error that + // wasn't recovered from (e.g. compaction reset already attempted), surface + // the error to the user instead of silently returning an empty response. + // See #26905: Slack DM sessions silently swallowed messages when context + // overflow errors were returned as embedded error payloads. + const finalEmbeddedError = runResult?.meta?.error; + const hasPayloadText = runResult?.payloads?.some((p) => p.text?.trim()); + if (finalEmbeddedError && isContextOverflowError(finalEmbeddedError.message) && !hasPayloadText) { + return { + kind: "final", + payload: { + text: "⚠️ Context overflow — this conversation is too large for the model. Use /new to start a fresh session.", + }, + }; + } + return { kind: "success", runId, diff --git a/src/auto-reply/reply/agent-runner.runreplyagent.test.ts b/src/auto-reply/reply/agent-runner.runreplyagent.test.ts index 52d1e4550c2..ee8ddc25179 100644 --- a/src/auto-reply/reply/agent-runner.runreplyagent.test.ts +++ b/src/auto-reply/reply/agent-runner.runreplyagent.test.ts @@ -1188,6 +1188,54 @@ describe("runReplyAgent typing (heartbeat)", () => { }); }); + it("surfaces overflow fallback when embedded run returns empty payloads", async () => { + state.runEmbeddedPiAgentMock.mockImplementationOnce(async () => ({ + payloads: [], + meta: { + durationMs: 1, + error: { + kind: "context_overflow", + message: 'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}', + }, + }, + })); + + const { run } = createMinimalRun(); + const res = await run(); + const payload = Array.isArray(res) ? 
res[0] : res; + expect(payload).toMatchObject({ + text: expect.stringContaining("conversation is too large"), + }); + if (!payload) { + throw new Error("expected payload"); + } + expect(payload.text).toContain("/new"); + }); + + it("surfaces overflow fallback when embedded payload text is whitespace-only", async () => { + state.runEmbeddedPiAgentMock.mockImplementationOnce(async () => ({ + payloads: [{ text: " \n\t ", isError: true }], + meta: { + durationMs: 1, + error: { + kind: "context_overflow", + message: 'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}', + }, + }, + })); + + const { run } = createMinimalRun(); + const res = await run(); + const payload = Array.isArray(res) ? res[0] : res; + expect(payload).toMatchObject({ + text: expect.stringContaining("conversation is too large"), + }); + if (!payload) { + throw new Error("expected payload"); + } + expect(payload.text).toContain("/new"); + }); + it("resets the session after role ordering payloads", async () => { await withTempStateDir(async (stateDir) => { const sessionId = "session"; diff --git a/src/auto-reply/reply/agent-runner.ts b/src/auto-reply/reply/agent-runner.ts index 8628fe33a51..22efc6b96dc 100644 --- a/src/auto-reply/reply/agent-runner.ts +++ b/src/auto-reply/reply/agent-runner.ts @@ -41,7 +41,7 @@ import { runMemoryFlushIfNeeded } from "./agent-runner-memory.js"; import { buildReplyPayloads } from "./agent-runner-payloads.js"; import { appendUsageLine, formatResponseUsageLine } from "./agent-runner-utils.js"; import { createAudioAsVoiceBuffer, createBlockReplyPipeline } from "./block-reply-pipeline.js"; -import { resolveBlockStreamingCoalescing } from "./block-streaming.js"; +import { resolveEffectiveBlockStreamingConfig } from "./block-streaming.js"; import { createFollowupRunner } from "./followup-runner.js"; import { resolveOriginMessageProvider, resolveOriginMessageTo } from "./origin-routing.js"; import { @@ -195,12 +195,12 @@ export async function 
runReplyAgent(params: { const cfg = followupRun.run.config; const blockReplyCoalescing = blockStreamingEnabled && opts?.onBlockReply - ? resolveBlockStreamingCoalescing( + ? resolveEffectiveBlockStreamingConfig({ cfg, - sessionCtx.Provider, - sessionCtx.AccountId, - blockReplyChunking, - ) + provider: sessionCtx.Provider, + accountId: sessionCtx.AccountId, + chunking: blockReplyChunking, + }).coalescing : undefined; const blockReplyPipeline = blockStreamingEnabled && opts?.onBlockReply @@ -748,5 +748,12 @@ export async function runReplyAgent(params: { } finally { blockReplyPipeline?.stop(); typing.markRunComplete(); + // Safety net: the dispatcher's onIdle callback normally fires + // markDispatchIdle(), but if the dispatcher exits early, errors, + // or the reply path doesn't go through it cleanly, the second + // signal never fires and the typing keepalive loop runs forever. + // Calling this twice is harmless — cleanup() is guarded by the + // `active` flag. Same pattern as the followup runner fix (#26881). 
+ typing.markDispatchIdle(); } } diff --git a/src/auto-reply/reply/block-streaming.test.ts b/src/auto-reply/reply/block-streaming.test.ts new file mode 100644 index 00000000000..29264ca99b3 --- /dev/null +++ b/src/auto-reply/reply/block-streaming.test.ts @@ -0,0 +1,68 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; +import { + resolveBlockStreamingChunking, + resolveEffectiveBlockStreamingConfig, +} from "./block-streaming.js"; + +describe("resolveEffectiveBlockStreamingConfig", () => { + it("applies ACP-style overrides while preserving chunk/coalescer bounds", () => { + const cfg = {} as OpenClawConfig; + const baseChunking = resolveBlockStreamingChunking(cfg, "discord"); + const resolved = resolveEffectiveBlockStreamingConfig({ + cfg, + provider: "discord", + maxChunkChars: 64, + coalesceIdleMs: 25, + }); + + expect(baseChunking.maxChars).toBeGreaterThanOrEqual(64); + expect(resolved.chunking.maxChars).toBe(64); + expect(resolved.chunking.minChars).toBeLessThanOrEqual(resolved.chunking.maxChars); + expect(resolved.coalescing.maxChars).toBeLessThanOrEqual(resolved.chunking.maxChars); + expect(resolved.coalescing.minChars).toBeLessThanOrEqual(resolved.coalescing.maxChars); + expect(resolved.coalescing.idleMs).toBe(25); + }); + + it("reuses caller-provided chunking for shared main/subagent/ACP config resolution", () => { + const resolved = resolveEffectiveBlockStreamingConfig({ + cfg: undefined, + chunking: { + minChars: 10, + maxChars: 20, + breakPreference: "paragraph", + }, + coalesceIdleMs: 0, + }); + + expect(resolved.chunking).toEqual({ + minChars: 10, + maxChars: 20, + breakPreference: "paragraph", + }); + expect(resolved.coalescing.maxChars).toBe(20); + expect(resolved.coalescing.idleMs).toBe(0); + }); + + it("allows ACP maxChunkChars overrides above base defaults up to provider text limits", () => { + const cfg = { + channels: { + discord: { + textChunkLimit: 4096, + }, + }, + } as 
OpenClawConfig; + + const baseChunking = resolveBlockStreamingChunking(cfg, "discord"); + expect(baseChunking.maxChars).toBeLessThan(1800); + + const resolved = resolveEffectiveBlockStreamingConfig({ + cfg, + provider: "discord", + maxChunkChars: 1800, + }); + + expect(resolved.chunking.maxChars).toBe(1800); + expect(resolved.chunking.minChars).toBeLessThanOrEqual(resolved.chunking.maxChars); + }); +}); diff --git a/src/auto-reply/reply/block-streaming.ts b/src/auto-reply/reply/block-streaming.ts index 318da982238..67b7a4528a7 100644 --- a/src/auto-reply/reply/block-streaming.ts +++ b/src/auto-reply/reply/block-streaming.ts @@ -59,16 +59,101 @@ export type BlockStreamingCoalescing = { flushOnEnqueue?: boolean; }; -export function resolveBlockStreamingChunking( - cfg: OpenClawConfig | undefined, - provider?: string, - accountId?: string | null, -): { +export type BlockStreamingChunking = { minChars: number; maxChars: number; breakPreference: "paragraph" | "newline" | "sentence"; flushOnParagraph?: boolean; +}; + +function clampPositiveInteger( + value: number | undefined, + fallback: number, + bounds: { min: number; max: number }, +): number { + if (typeof value !== "number" || !Number.isFinite(value)) { + return fallback; + } + const rounded = Math.round(value); + if (rounded < bounds.min) { + return bounds.min; + } + if (rounded > bounds.max) { + return bounds.max; + } + return rounded; +} + +export function resolveEffectiveBlockStreamingConfig(params: { + cfg: OpenClawConfig | undefined; + provider?: string; + accountId?: string | null; + chunking?: BlockStreamingChunking; + /** Optional upper bound for chunking/coalescing max chars. */ + maxChunkChars?: number; + /** Optional coalescer idle flush override in milliseconds. */ + coalesceIdleMs?: number; +}): { + chunking: BlockStreamingChunking; + coalescing: BlockStreamingCoalescing; } { + const providerKey = normalizeChunkProvider(params.provider); + const providerId = providerKey ? 
normalizeChannelId(providerKey) : null; + const providerChunkLimit = providerId + ? getChannelDock(providerId)?.outbound?.textChunkLimit + : undefined; + const textLimit = resolveTextChunkLimit(params.cfg, providerKey, params.accountId, { + fallbackLimit: providerChunkLimit, + }); + const chunkingDefaults = + params.chunking ?? resolveBlockStreamingChunking(params.cfg, params.provider, params.accountId); + const chunkingMax = clampPositiveInteger(params.maxChunkChars, chunkingDefaults.maxChars, { + min: 1, + max: Math.max(1, textLimit), + }); + const chunking: BlockStreamingChunking = { + ...chunkingDefaults, + minChars: Math.min(chunkingDefaults.minChars, chunkingMax), + maxChars: chunkingMax, + }; + const coalescingDefaults = resolveBlockStreamingCoalescing( + params.cfg, + params.provider, + params.accountId, + chunking, + ); + const coalescingMax = Math.max( + 1, + Math.min(coalescingDefaults?.maxChars ?? chunking.maxChars, chunking.maxChars), + ); + const coalescingMin = Math.min(coalescingDefaults?.minChars ?? chunking.minChars, coalescingMax); + const coalescingIdleMs = clampPositiveInteger( + params.coalesceIdleMs, + coalescingDefaults?.idleMs ?? DEFAULT_BLOCK_STREAM_COALESCE_IDLE_MS, + { min: 0, max: 5_000 }, + ); + const coalescing: BlockStreamingCoalescing = { + minChars: coalescingMin, + maxChars: coalescingMax, + idleMs: coalescingIdleMs, + joiner: + coalescingDefaults?.joiner ?? + (chunking.breakPreference === "sentence" + ? " " + : chunking.breakPreference === "newline" + ? "\n" + : "\n\n"), + flushOnEnqueue: coalescingDefaults?.flushOnEnqueue ?? chunking.flushOnParagraph === true, + }; + + return { chunking, coalescing }; +} + +export function resolveBlockStreamingChunking( + cfg: OpenClawConfig | undefined, + provider?: string, + accountId?: string | null, +): BlockStreamingChunking { const providerKey = normalizeChunkProvider(provider); const providerConfigKey = providerKey; const providerId = providerKey ? 
normalizeChannelId(providerKey) : null; diff --git a/src/auto-reply/reply/commands-acp.test.ts b/src/auto-reply/reply/commands-acp.test.ts new file mode 100644 index 00000000000..df3135f1b5b --- /dev/null +++ b/src/auto-reply/reply/commands-acp.test.ts @@ -0,0 +1,796 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { AcpRuntimeError } from "../../acp/runtime/errors.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import type { SessionBindingRecord } from "../../infra/outbound/session-binding-service.js"; + +const hoisted = vi.hoisted(() => { + const callGatewayMock = vi.fn(); + const requireAcpRuntimeBackendMock = vi.fn(); + const getAcpRuntimeBackendMock = vi.fn(); + const listAcpSessionEntriesMock = vi.fn(); + const readAcpSessionEntryMock = vi.fn(); + const upsertAcpSessionMetaMock = vi.fn(); + const resolveSessionStorePathForAcpMock = vi.fn(); + const loadSessionStoreMock = vi.fn(); + const sessionBindingCapabilitiesMock = vi.fn(); + const sessionBindingBindMock = vi.fn(); + const sessionBindingListBySessionMock = vi.fn(); + const sessionBindingResolveByConversationMock = vi.fn(); + const sessionBindingUnbindMock = vi.fn(); + const ensureSessionMock = vi.fn(); + const runTurnMock = vi.fn(); + const cancelMock = vi.fn(); + const closeMock = vi.fn(); + const getCapabilitiesMock = vi.fn(); + const getStatusMock = vi.fn(); + const setModeMock = vi.fn(); + const setConfigOptionMock = vi.fn(); + const doctorMock = vi.fn(); + return { + callGatewayMock, + requireAcpRuntimeBackendMock, + getAcpRuntimeBackendMock, + listAcpSessionEntriesMock, + readAcpSessionEntryMock, + upsertAcpSessionMetaMock, + resolveSessionStorePathForAcpMock, + loadSessionStoreMock, + sessionBindingCapabilitiesMock, + sessionBindingBindMock, + sessionBindingListBySessionMock, + sessionBindingResolveByConversationMock, + sessionBindingUnbindMock, + ensureSessionMock, + runTurnMock, + cancelMock, + closeMock, + getCapabilitiesMock, + getStatusMock, + 
setModeMock, + setConfigOptionMock, + doctorMock, + }; +}); + +vi.mock("../../gateway/call.js", () => ({ + callGateway: (args: unknown) => hoisted.callGatewayMock(args), +})); + +vi.mock("../../acp/runtime/registry.js", () => ({ + requireAcpRuntimeBackend: (id?: string) => hoisted.requireAcpRuntimeBackendMock(id), + getAcpRuntimeBackend: (id?: string) => hoisted.getAcpRuntimeBackendMock(id), +})); + +vi.mock("../../acp/runtime/session-meta.js", () => ({ + listAcpSessionEntries: (args: unknown) => hoisted.listAcpSessionEntriesMock(args), + readAcpSessionEntry: (args: unknown) => hoisted.readAcpSessionEntryMock(args), + upsertAcpSessionMeta: (args: unknown) => hoisted.upsertAcpSessionMetaMock(args), + resolveSessionStorePathForAcp: (args: unknown) => hoisted.resolveSessionStorePathForAcpMock(args), +})); + +vi.mock("../../config/sessions.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + loadSessionStore: (...args: unknown[]) => hoisted.loadSessionStoreMock(...args), + }; +}); + +vi.mock("../../infra/outbound/session-binding-service.js", async (importOriginal) => { + const actual = + await importOriginal(); + return { + ...actual, + getSessionBindingService: () => ({ + bind: (input: unknown) => hoisted.sessionBindingBindMock(input), + getCapabilities: (params: unknown) => hoisted.sessionBindingCapabilitiesMock(params), + listBySession: (targetSessionKey: string) => + hoisted.sessionBindingListBySessionMock(targetSessionKey), + resolveByConversation: (ref: unknown) => hoisted.sessionBindingResolveByConversationMock(ref), + touch: vi.fn(), + unbind: (input: unknown) => hoisted.sessionBindingUnbindMock(input), + }), + }; +}); + +// Prevent transitive import chain from reaching discord/monitor which needs https-proxy-agent. 
+vi.mock("../../discord/monitor/gateway-plugin.js", () => ({ + createDiscordGatewayPlugin: () => ({}), +})); + +const { handleAcpCommand } = await import("./commands-acp.js"); +const { buildCommandTestParams } = await import("./commands-spawn.test-harness.js"); +const { __testing: acpManagerTesting } = await import("../../acp/control-plane/manager.js"); + +type FakeBinding = { + bindingId: string; + targetSessionKey: string; + targetKind: "subagent" | "session"; + conversation: { + channel: "discord"; + accountId: string; + conversationId: string; + parentConversationId?: string; + }; + status: "active"; + boundAt: number; + metadata?: { + agentId?: string; + label?: string; + boundBy?: string; + webhookId?: string; + }; +}; + +function createSessionBinding(overrides?: Partial): FakeBinding { + return { + bindingId: "default:thread-created", + targetSessionKey: "agent:codex:acp:s1", + targetKind: "session", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-created", + parentConversationId: "parent-1", + }, + status: "active", + boundAt: Date.now(), + metadata: { + agentId: "codex", + boundBy: "user-1", + }, + ...overrides, + }; +} + +const baseCfg = { + session: { mainKey: "main", scope: "per-sender" }, + acp: { + enabled: true, + dispatch: { enabled: true }, + backend: "acpx", + }, + channels: { + discord: { + threadBindings: { + enabled: true, + spawnAcpSessions: true, + }, + }, + }, +} satisfies OpenClawConfig; + +function createDiscordParams(commandBody: string, cfg: OpenClawConfig = baseCfg) { + const params = buildCommandTestParams(commandBody, cfg, { + Provider: "discord", + Surface: "discord", + OriginatingChannel: "discord", + OriginatingTo: "channel:parent-1", + AccountId: "default", + }); + params.command.senderId = "user-1"; + return params; +} + +describe("/acp command", () => { + beforeEach(() => { + acpManagerTesting.resetAcpSessionManagerForTests(); + 
hoisted.listAcpSessionEntriesMock.mockReset().mockResolvedValue([]); + hoisted.callGatewayMock.mockReset().mockResolvedValue({ ok: true }); + hoisted.readAcpSessionEntryMock.mockReset().mockReturnValue(null); + hoisted.upsertAcpSessionMetaMock.mockReset().mockResolvedValue({ + sessionId: "session-1", + updatedAt: Date.now(), + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "run-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + hoisted.resolveSessionStorePathForAcpMock.mockReset().mockReturnValue({ + cfg: baseCfg, + storePath: "/tmp/sessions-acp.json", + }); + hoisted.loadSessionStoreMock.mockReset().mockReturnValue({}); + hoisted.sessionBindingCapabilitiesMock.mockReset().mockReturnValue({ + adapterAvailable: true, + bindSupported: true, + unbindSupported: true, + placements: ["current", "child"], + }); + hoisted.sessionBindingBindMock + .mockReset() + .mockImplementation( + async (input: { + targetSessionKey: string; + conversation: { accountId: string; conversationId: string }; + placement: "current" | "child"; + metadata?: Record; + }) => + createSessionBinding({ + targetSessionKey: input.targetSessionKey, + conversation: { + channel: "discord", + accountId: input.conversation.accountId, + conversationId: + input.placement === "child" ? "thread-created" : input.conversation.conversationId, + parentConversationId: "parent-1", + }, + metadata: { + boundBy: + typeof input.metadata?.boundBy === "string" ? 
input.metadata.boundBy : "user-1", + webhookId: "wh-1", + }, + }), + ); + hoisted.sessionBindingListBySessionMock.mockReset().mockReturnValue([]); + hoisted.sessionBindingResolveByConversationMock.mockReset().mockReturnValue(null); + hoisted.sessionBindingUnbindMock.mockReset().mockResolvedValue([]); + + hoisted.ensureSessionMock + .mockReset() + .mockImplementation(async (input: { sessionKey: string }) => ({ + sessionKey: input.sessionKey, + backend: "acpx", + runtimeSessionName: `${input.sessionKey}:runtime`, + })); + hoisted.runTurnMock.mockReset().mockImplementation(async function* () { + yield { type: "done" }; + }); + hoisted.cancelMock.mockReset().mockResolvedValue(undefined); + hoisted.closeMock.mockReset().mockResolvedValue(undefined); + hoisted.getCapabilitiesMock.mockReset().mockResolvedValue({ + controls: ["session/set_mode", "session/set_config_option", "session/status"], + }); + hoisted.getStatusMock.mockReset().mockResolvedValue({ + summary: "status=alive sessionId=sid-1 pid=1234", + details: { status: "alive", sessionId: "sid-1", pid: 1234 }, + }); + hoisted.setModeMock.mockReset().mockResolvedValue(undefined); + hoisted.setConfigOptionMock.mockReset().mockResolvedValue(undefined); + hoisted.doctorMock.mockReset().mockResolvedValue({ + ok: true, + message: "acpx command available", + }); + + const runtimeBackend = { + id: "acpx", + runtime: { + ensureSession: hoisted.ensureSessionMock, + runTurn: hoisted.runTurnMock, + getCapabilities: hoisted.getCapabilitiesMock, + getStatus: hoisted.getStatusMock, + setMode: hoisted.setModeMock, + setConfigOption: hoisted.setConfigOptionMock, + doctor: hoisted.doctorMock, + cancel: hoisted.cancelMock, + close: hoisted.closeMock, + }, + }; + hoisted.requireAcpRuntimeBackendMock.mockReset().mockReturnValue(runtimeBackend); + hoisted.getAcpRuntimeBackendMock.mockReset().mockReturnValue(runtimeBackend); + }); + + it("returns null when the message is not /acp", async () => { + const params = 
createDiscordParams("/status"); + const result = await handleAcpCommand(params, true); + expect(result).toBeNull(); + }); + + it("shows help by default", async () => { + const params = createDiscordParams("/acp"); + const result = await handleAcpCommand(params, true); + expect(result?.reply?.text).toContain("ACP commands:"); + expect(result?.reply?.text).toContain("/acp spawn"); + }); + + it("spawns an ACP session and binds a Discord thread", async () => { + hoisted.ensureSessionMock.mockResolvedValueOnce({ + sessionKey: "agent:codex:acp:s1", + backend: "acpx", + runtimeSessionName: "agent:codex:acp:s1:runtime", + agentSessionId: "codex-inner-1", + backendSessionId: "acpx-1", + }); + + const params = createDiscordParams("/acp spawn codex --cwd /home/bob/clawd"); + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("Spawned ACP session agent:codex:acp:"); + expect(result?.reply?.text).toContain("Created thread thread-created and bound it"); + expect(hoisted.requireAcpRuntimeBackendMock).toHaveBeenCalledWith("acpx"); + expect(hoisted.ensureSessionMock).toHaveBeenCalledWith( + expect.objectContaining({ + agent: "codex", + mode: "persistent", + cwd: "/home/bob/clawd", + }), + ); + expect(hoisted.sessionBindingBindMock).toHaveBeenCalledWith( + expect.objectContaining({ + targetKind: "session", + placement: "child", + metadata: expect.objectContaining({ + introText: expect.stringContaining("cwd: /home/bob/clawd"), + }), + }), + ); + expect(hoisted.sessionBindingBindMock).toHaveBeenCalledWith( + expect.objectContaining({ + metadata: expect.objectContaining({ + introText: expect.not.stringContaining( + "session ids: pending (available after the first reply)", + ), + }), + }), + ); + expect(hoisted.callGatewayMock).toHaveBeenCalledWith( + expect.objectContaining({ + method: "sessions.patch", + }), + ); + expect(hoisted.upsertAcpSessionMetaMock).toHaveBeenCalled(); + const upsertArgs = 
hoisted.upsertAcpSessionMetaMock.mock.calls[0]?.[0] as + | { + sessionKey: string; + mutate: ( + current: unknown, + entry: { sessionId: string; updatedAt: number } | undefined, + ) => { + backend?: string; + runtimeSessionName?: string; + }; + } + | undefined; + expect(upsertArgs?.sessionKey).toMatch(/^agent:codex:acp:/); + const seededWithoutEntry = upsertArgs?.mutate(undefined, undefined); + expect(seededWithoutEntry?.backend).toBe("acpx"); + expect(seededWithoutEntry?.runtimeSessionName).toContain(":runtime"); + }); + + it("requires explicit ACP target when acp.defaultAgent is not configured", async () => { + const params = createDiscordParams("/acp spawn"); + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("ACP target agent is required"); + expect(hoisted.ensureSessionMock).not.toHaveBeenCalled(); + }); + + it("rejects thread-bound ACP spawn when spawnAcpSessions is disabled", async () => { + const cfg = { + ...baseCfg, + channels: { + discord: { + threadBindings: { + enabled: true, + spawnAcpSessions: false, + }, + }, + }, + } satisfies OpenClawConfig; + + const params = createDiscordParams("/acp spawn codex", cfg); + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("spawnAcpSessions=true"); + expect(hoisted.closeMock).toHaveBeenCalledTimes(1); + expect(hoisted.callGatewayMock).toHaveBeenCalledWith( + expect.objectContaining({ + method: "sessions.delete", + params: expect.objectContaining({ + key: expect.stringMatching(/^agent:codex:acp:/), + deleteTranscript: false, + emitLifecycleHooks: false, + }), + }), + ); + expect(hoisted.callGatewayMock).not.toHaveBeenCalledWith( + expect.objectContaining({ method: "sessions.patch" }), + ); + }); + + it("cancels the ACP session bound to the current thread", async () => { + hoisted.sessionBindingResolveByConversationMock.mockReturnValue( + createSessionBinding({ + targetSessionKey: "agent:codex:acp:s1", + conversation: { + 
channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }), + ); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:s1", + storeSessionKey: "agent:codex:acp:s1", + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "running", + lastActivityAt: Date.now(), + }, + }); + + const params = createDiscordParams("/acp cancel", baseCfg); + params.ctx.MessageThreadId = "thread-1"; + + const result = await handleAcpCommand(params, true); + expect(result?.reply?.text).toContain("Cancel requested for ACP session agent:codex:acp:s1"); + expect(hoisted.cancelMock).toHaveBeenCalledWith({ + handle: expect.objectContaining({ + sessionKey: "agent:codex:acp:s1", + backend: "acpx", + }), + reason: "manual-cancel", + }); + }); + + it("sends steer instructions via ACP runtime", async () => { + hoisted.callGatewayMock.mockImplementation(async (request: { method?: string }) => { + if (request.method === "sessions.resolve") { + return { key: "agent:codex:acp:s1" }; + } + return { ok: true }; + }); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:s1", + storeSessionKey: "agent:codex:acp:s1", + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + hoisted.runTurnMock.mockImplementation(async function* () { + yield { type: "text_delta", text: "Applied steering." 
}; + yield { type: "done" }; + }); + + const params = createDiscordParams("/acp steer --session agent:codex:acp:s1 tighten logging"); + const result = await handleAcpCommand(params, true); + + expect(hoisted.runTurnMock).toHaveBeenCalledWith( + expect.objectContaining({ + mode: "steer", + text: "tighten logging", + }), + ); + expect(result?.reply?.text).toContain("Applied steering."); + }); + + it("blocks /acp steer when ACP dispatch is disabled by policy", async () => { + const cfg = { + ...baseCfg, + acp: { + ...baseCfg.acp, + dispatch: { enabled: false }, + }, + } satisfies OpenClawConfig; + const params = createDiscordParams("/acp steer tighten logging", cfg); + const result = await handleAcpCommand(params, true); + expect(result?.reply?.text).toContain("ACP dispatch is disabled by policy"); + expect(hoisted.runTurnMock).not.toHaveBeenCalled(); + }); + + it("closes an ACP session, unbinds thread targets, and clears metadata", async () => { + hoisted.sessionBindingResolveByConversationMock.mockReturnValue( + createSessionBinding({ + targetSessionKey: "agent:codex:acp:s1", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }), + ); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:s1", + storeSessionKey: "agent:codex:acp:s1", + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + hoisted.sessionBindingUnbindMock.mockResolvedValue([ + createSessionBinding({ + targetSessionKey: "agent:codex:acp:s1", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }) as SessionBindingRecord, + ]); + + const params = createDiscordParams("/acp close", baseCfg); + params.ctx.MessageThreadId = "thread-1"; + + const result = await handleAcpCommand(params, true); + + 
expect(hoisted.closeMock).toHaveBeenCalledTimes(1); + expect(hoisted.sessionBindingUnbindMock).toHaveBeenCalledWith( + expect.objectContaining({ + targetSessionKey: "agent:codex:acp:s1", + reason: "manual", + }), + ); + expect(hoisted.upsertAcpSessionMetaMock).toHaveBeenCalled(); + expect(result?.reply?.text).toContain("Removed 1 binding"); + }); + + it("lists ACP sessions from the session store", async () => { + hoisted.sessionBindingListBySessionMock.mockImplementation((key: string) => + key === "agent:codex:acp:s1" + ? [ + createSessionBinding({ + targetSessionKey: key, + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }) as SessionBindingRecord, + ] + : [], + ); + hoisted.loadSessionStoreMock.mockReturnValue({ + "agent:codex:acp:s1": { + sessionId: "sess-1", + updatedAt: Date.now(), + label: "codex-main", + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }, + "agent:main:main": { + sessionId: "sess-main", + updatedAt: Date.now(), + }, + }); + + const params = createDiscordParams("/acp sessions", baseCfg); + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("ACP sessions:"); + expect(result?.reply?.text).toContain("codex-main"); + expect(result?.reply?.text).toContain("thread:thread-1"); + }); + + it("shows ACP status for the thread-bound ACP session", async () => { + hoisted.sessionBindingResolveByConversationMock.mockReturnValue( + createSessionBinding({ + targetSessionKey: "agent:codex:acp:s1", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }), + ); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:s1", + storeSessionKey: "agent:codex:acp:s1", + acp: { + backend: "acpx", + agent: "codex", + 
runtimeSessionName: "runtime-1", + identity: { + state: "resolved", + source: "status", + acpxSessionId: "acpx-sid-1", + agentSessionId: "codex-sid-1", + lastUpdatedAt: Date.now(), + }, + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + const params = createDiscordParams("/acp status", baseCfg); + params.ctx.MessageThreadId = "thread-1"; + + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("ACP status:"); + expect(result?.reply?.text).toContain("session: agent:codex:acp:s1"); + expect(result?.reply?.text).toContain("agent session id: codex-sid-1"); + expect(result?.reply?.text).toContain("acpx session id: acpx-sid-1"); + expect(result?.reply?.text).toContain("capabilities:"); + expect(hoisted.getStatusMock).toHaveBeenCalledTimes(1); + }); + + it("updates ACP runtime mode via /acp set-mode", async () => { + hoisted.sessionBindingResolveByConversationMock.mockReturnValue( + createSessionBinding({ + targetSessionKey: "agent:codex:acp:s1", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }), + ); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:s1", + storeSessionKey: "agent:codex:acp:s1", + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + const params = createDiscordParams("/acp set-mode plan", baseCfg); + params.ctx.MessageThreadId = "thread-1"; + + const result = await handleAcpCommand(params, true); + + expect(hoisted.setModeMock).toHaveBeenCalledWith( + expect.objectContaining({ + mode: "plan", + }), + ); + expect(result?.reply?.text).toContain("Updated ACP runtime mode"); + }); + + it("updates ACP config options and keeps cwd local when using /acp set", async () => { + hoisted.sessionBindingResolveByConversationMock.mockReturnValue( + createSessionBinding({ 
+ targetSessionKey: "agent:codex:acp:s1", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }), + ); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:s1", + storeSessionKey: "agent:codex:acp:s1", + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + + const setModelParams = createDiscordParams("/acp set model gpt-5.3-codex", baseCfg); + setModelParams.ctx.MessageThreadId = "thread-1"; + const setModel = await handleAcpCommand(setModelParams, true); + expect(hoisted.setConfigOptionMock).toHaveBeenCalledWith( + expect.objectContaining({ + key: "model", + value: "gpt-5.3-codex", + }), + ); + expect(setModel?.reply?.text).toContain("Updated ACP config option"); + + hoisted.setConfigOptionMock.mockClear(); + const setCwdParams = createDiscordParams("/acp set cwd /tmp/worktree", baseCfg); + setCwdParams.ctx.MessageThreadId = "thread-1"; + const setCwd = await handleAcpCommand(setCwdParams, true); + expect(hoisted.setConfigOptionMock).not.toHaveBeenCalled(); + expect(setCwd?.reply?.text).toContain("Updated ACP cwd"); + }); + + it("rejects non-absolute cwd values via ACP runtime option validation", async () => { + hoisted.sessionBindingResolveByConversationMock.mockReturnValue( + createSessionBinding({ + targetSessionKey: "agent:codex:acp:s1", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }), + ); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:s1", + storeSessionKey: "agent:codex:acp:s1", + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + + const params = createDiscordParams("/acp cwd relative/path", baseCfg); + 
params.ctx.MessageThreadId = "thread-1"; + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("ACP error (ACP_INVALID_RUNTIME_OPTION)"); + expect(result?.reply?.text).toContain("absolute path"); + }); + + it("rejects invalid timeout values before backend config writes", async () => { + hoisted.sessionBindingResolveByConversationMock.mockReturnValue( + createSessionBinding({ + targetSessionKey: "agent:codex:acp:s1", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + }), + ); + hoisted.readAcpSessionEntryMock.mockReturnValue({ + sessionKey: "agent:codex:acp:s1", + storeSessionKey: "agent:codex:acp:s1", + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }); + + const params = createDiscordParams("/acp timeout 10s", baseCfg); + params.ctx.MessageThreadId = "thread-1"; + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("ACP error (ACP_INVALID_RUNTIME_OPTION)"); + expect(hoisted.setConfigOptionMock).not.toHaveBeenCalled(); + }); + + it("returns actionable doctor output when backend is missing", async () => { + hoisted.getAcpRuntimeBackendMock.mockReturnValue(null); + hoisted.requireAcpRuntimeBackendMock.mockImplementation(() => { + throw new AcpRuntimeError( + "ACP_BACKEND_MISSING", + "ACP runtime backend is not configured. 
Install and enable the acpx runtime plugin.", + ); + }); + + const params = createDiscordParams("/acp doctor", baseCfg); + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("ACP doctor:"); + expect(result?.reply?.text).toContain("healthy: no"); + expect(result?.reply?.text).toContain("next:"); + }); + + it("shows deterministic install instructions via /acp install", async () => { + const params = createDiscordParams("/acp install", baseCfg); + const result = await handleAcpCommand(params, true); + + expect(result?.reply?.text).toContain("ACP install:"); + expect(result?.reply?.text).toContain("run:"); + expect(result?.reply?.text).toContain("then: /acp doctor"); + }); +}); diff --git a/src/auto-reply/reply/commands-acp.ts b/src/auto-reply/reply/commands-acp.ts new file mode 100644 index 00000000000..2eef395c9a2 --- /dev/null +++ b/src/auto-reply/reply/commands-acp.ts @@ -0,0 +1,83 @@ +import { logVerbose } from "../../globals.js"; +import { + handleAcpDoctorAction, + handleAcpInstallAction, + handleAcpSessionsAction, +} from "./commands-acp/diagnostics.js"; +import { + handleAcpCancelAction, + handleAcpCloseAction, + handleAcpSpawnAction, + handleAcpSteerAction, +} from "./commands-acp/lifecycle.js"; +import { + handleAcpCwdAction, + handleAcpModelAction, + handleAcpPermissionsAction, + handleAcpResetOptionsAction, + handleAcpSetAction, + handleAcpSetModeAction, + handleAcpStatusAction, + handleAcpTimeoutAction, +} from "./commands-acp/runtime-options.js"; +import { + COMMAND, + type AcpAction, + resolveAcpAction, + resolveAcpHelpText, + stopWithText, +} from "./commands-acp/shared.js"; +import type { + CommandHandler, + CommandHandlerResult, + HandleCommandsParams, +} from "./commands-types.js"; + +type AcpActionHandler = ( + params: HandleCommandsParams, + tokens: string[], +) => Promise; + +const ACP_ACTION_HANDLERS: Record, AcpActionHandler> = { + spawn: handleAcpSpawnAction, + cancel: handleAcpCancelAction, + steer: 
handleAcpSteerAction, + close: handleAcpCloseAction, + status: handleAcpStatusAction, + "set-mode": handleAcpSetModeAction, + set: handleAcpSetAction, + cwd: handleAcpCwdAction, + permissions: handleAcpPermissionsAction, + timeout: handleAcpTimeoutAction, + model: handleAcpModelAction, + "reset-options": handleAcpResetOptionsAction, + doctor: handleAcpDoctorAction, + install: async (params, tokens) => handleAcpInstallAction(params, tokens), + sessions: async (params, tokens) => handleAcpSessionsAction(params, tokens), +}; + +export const handleAcpCommand: CommandHandler = async (params, allowTextCommands) => { + if (!allowTextCommands) { + return null; + } + + const normalized = params.command.commandBodyNormalized; + if (!normalized.startsWith(COMMAND)) { + return null; + } + + if (!params.command.isAuthorizedSender) { + logVerbose(`Ignoring /acp from unauthorized sender: ${params.command.senderId || ""}`); + return { shouldContinue: false }; + } + + const rest = normalized.slice(COMMAND.length).trim(); + const tokens = rest.split(/\s+/).filter(Boolean); + const action = resolveAcpAction(tokens); + if (action === "help") { + return stopWithText(resolveAcpHelpText()); + } + + const handler = ACP_ACTION_HANDLERS[action]; + return handler ? 
await handler(params, tokens) : stopWithText(resolveAcpHelpText()); +}; diff --git a/src/auto-reply/reply/commands-acp/context.test.ts b/src/auto-reply/reply/commands-acp/context.test.ts new file mode 100644 index 00000000000..92952ad749f --- /dev/null +++ b/src/auto-reply/reply/commands-acp/context.test.ts @@ -0,0 +1,51 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../../../config/config.js"; +import { buildCommandTestParams } from "../commands-spawn.test-harness.js"; +import { + isAcpCommandDiscordChannel, + resolveAcpCommandBindingContext, + resolveAcpCommandConversationId, +} from "./context.js"; + +const baseCfg = { + session: { mainKey: "main", scope: "per-sender" }, +} satisfies OpenClawConfig; + +describe("commands-acp context", () => { + it("resolves channel/account/thread context from originating fields", () => { + const params = buildCommandTestParams("/acp sessions", baseCfg, { + Provider: "discord", + Surface: "discord", + OriginatingChannel: "discord", + OriginatingTo: "channel:parent-1", + AccountId: "work", + MessageThreadId: "thread-42", + }); + + expect(resolveAcpCommandBindingContext(params)).toEqual({ + channel: "discord", + accountId: "work", + threadId: "thread-42", + conversationId: "thread-42", + }); + expect(isAcpCommandDiscordChannel(params)).toBe(true); + }); + + it("falls back to default account and target-derived conversation id", () => { + const params = buildCommandTestParams("/acp status", baseCfg, { + Provider: "slack", + Surface: "slack", + OriginatingChannel: "slack", + To: "<#123456789>", + }); + + expect(resolveAcpCommandBindingContext(params)).toEqual({ + channel: "slack", + accountId: "default", + threadId: undefined, + conversationId: "123456789", + }); + expect(resolveAcpCommandConversationId(params)).toBe("123456789"); + expect(isAcpCommandDiscordChannel(params)).toBe(false); + }); +}); diff --git a/src/auto-reply/reply/commands-acp/context.ts 
b/src/auto-reply/reply/commands-acp/context.ts new file mode 100644 index 00000000000..f9ac901ec92 --- /dev/null +++ b/src/auto-reply/reply/commands-acp/context.ts @@ -0,0 +1,58 @@ +import { DISCORD_THREAD_BINDING_CHANNEL } from "../../../channels/thread-bindings-policy.js"; +import { resolveConversationIdFromTargets } from "../../../infra/outbound/conversation-id.js"; +import type { HandleCommandsParams } from "../commands-types.js"; + +function normalizeString(value: unknown): string { + if (typeof value === "string") { + return value.trim(); + } + if (typeof value === "number" || typeof value === "bigint" || typeof value === "boolean") { + return `${value}`.trim(); + } + return ""; +} + +export function resolveAcpCommandChannel(params: HandleCommandsParams): string { + const raw = + params.ctx.OriginatingChannel ?? + params.command.channel ?? + params.ctx.Surface ?? + params.ctx.Provider; + return normalizeString(raw).toLowerCase(); +} + +export function resolveAcpCommandAccountId(params: HandleCommandsParams): string { + const accountId = normalizeString(params.ctx.AccountId); + return accountId || "default"; +} + +export function resolveAcpCommandThreadId(params: HandleCommandsParams): string | undefined { + const threadId = + params.ctx.MessageThreadId != null ? 
normalizeString(String(params.ctx.MessageThreadId)) : ""; + return threadId || undefined; +} + +export function resolveAcpCommandConversationId(params: HandleCommandsParams): string | undefined { + return resolveConversationIdFromTargets({ + threadId: params.ctx.MessageThreadId, + targets: [params.ctx.OriginatingTo, params.command.to, params.ctx.To], + }); +} + +export function isAcpCommandDiscordChannel(params: HandleCommandsParams): boolean { + return resolveAcpCommandChannel(params) === DISCORD_THREAD_BINDING_CHANNEL; +} + +export function resolveAcpCommandBindingContext(params: HandleCommandsParams): { + channel: string; + accountId: string; + threadId?: string; + conversationId?: string; +} { + return { + channel: resolveAcpCommandChannel(params), + accountId: resolveAcpCommandAccountId(params), + threadId: resolveAcpCommandThreadId(params), + conversationId: resolveAcpCommandConversationId(params), + }; +} diff --git a/src/auto-reply/reply/commands-acp/diagnostics.ts b/src/auto-reply/reply/commands-acp/diagnostics.ts new file mode 100644 index 00000000000..d521ac7ae5f --- /dev/null +++ b/src/auto-reply/reply/commands-acp/diagnostics.ts @@ -0,0 +1,203 @@ +import { getAcpSessionManager } from "../../../acp/control-plane/manager.js"; +import { formatAcpRuntimeErrorText } from "../../../acp/runtime/error-text.js"; +import { toAcpRuntimeError } from "../../../acp/runtime/errors.js"; +import { getAcpRuntimeBackend, requireAcpRuntimeBackend } from "../../../acp/runtime/registry.js"; +import { resolveSessionStorePathForAcp } from "../../../acp/runtime/session-meta.js"; +import { loadSessionStore } from "../../../config/sessions.js"; +import type { SessionEntry } from "../../../config/sessions/types.js"; +import { getSessionBindingService } from "../../../infra/outbound/session-binding-service.js"; +import type { CommandHandlerResult, HandleCommandsParams } from "../commands-types.js"; +import { resolveAcpCommandBindingContext } from "./context.js"; +import { + 
ACP_DOCTOR_USAGE, + ACP_INSTALL_USAGE, + ACP_SESSIONS_USAGE, + formatAcpCapabilitiesText, + resolveAcpInstallCommandHint, + resolveConfiguredAcpBackendId, + stopWithText, +} from "./shared.js"; +import { resolveBoundAcpThreadSessionKey } from "./targets.js"; + +export async function handleAcpDoctorAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + if (restTokens.length > 0) { + return stopWithText(`⚠️ ${ACP_DOCTOR_USAGE}`); + } + + const backendId = resolveConfiguredAcpBackendId(params.cfg); + const installHint = resolveAcpInstallCommandHint(params.cfg); + const registeredBackend = getAcpRuntimeBackend(backendId); + const managerSnapshot = getAcpSessionManager().getObservabilitySnapshot(params.cfg); + const lines = ["ACP doctor:", "-----", `configuredBackend: ${backendId}`]; + lines.push(`activeRuntimeSessions: ${managerSnapshot.runtimeCache.activeSessions}`); + lines.push(`runtimeIdleTtlMs: ${managerSnapshot.runtimeCache.idleTtlMs}`); + lines.push(`evictedIdleRuntimes: ${managerSnapshot.runtimeCache.evictedTotal}`); + lines.push(`activeTurns: ${managerSnapshot.turns.active}`); + lines.push(`queueDepth: ${managerSnapshot.turns.queueDepth}`); + lines.push( + `turnLatencyMs: avg=${managerSnapshot.turns.averageLatencyMs}, max=${managerSnapshot.turns.maxLatencyMs}`, + ); + lines.push( + `turnCounts: completed=${managerSnapshot.turns.completed}, failed=${managerSnapshot.turns.failed}`, + ); + const errorStatsText = + Object.entries(managerSnapshot.errorsByCode) + .map(([code, count]) => `${code}=${count}`) + .join(", ") || "(none)"; + lines.push(`errorCodes: ${errorStatsText}`); + if (registeredBackend) { + lines.push(`registeredBackend: ${registeredBackend.id}`); + } else { + lines.push("registeredBackend: (none)"); + } + + if (registeredBackend?.runtime.doctor) { + try { + const report = await registeredBackend.runtime.doctor(); + lines.push(`runtimeDoctor: ${report.ok ? 
"ok" : "error"} (${report.message})`); + if (report.code) { + lines.push(`runtimeDoctorCode: ${report.code}`); + } + if (report.installCommand) { + lines.push(`runtimeDoctorInstall: ${report.installCommand}`); + } + for (const detail of report.details ?? []) { + lines.push(`runtimeDoctorDetail: ${detail}`); + } + } catch (error) { + lines.push( + `runtimeDoctor: error (${ + toAcpRuntimeError({ + error, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Runtime doctor failed.", + }).message + })`, + ); + } + } + + try { + const backend = requireAcpRuntimeBackend(backendId); + const capabilities = backend.runtime.getCapabilities + ? await backend.runtime.getCapabilities({}) + : { controls: [] as string[], configOptionKeys: [] as string[] }; + lines.push("healthy: yes"); + lines.push(`capabilities: ${formatAcpCapabilitiesText(capabilities.controls ?? [])}`); + if ((capabilities.configOptionKeys?.length ?? 0) > 0) { + lines.push(`configKeys: ${capabilities.configOptionKeys?.join(", ")}`); + } + return stopWithText(lines.join("\n")); + } catch (error) { + const acpError = toAcpRuntimeError({ + error, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP backend doctor failed.", + }); + lines.push("healthy: no"); + lines.push(formatAcpRuntimeErrorText(acpError)); + lines.push(`next: ${installHint}`); + lines.push(`next: openclaw config set plugins.entries.${backendId}.enabled true`); + if (backendId.toLowerCase() === "acpx") { + lines.push("next: verify acpx is installed (`acpx --help`)."); + } + return stopWithText(lines.join("\n")); + } +} + +export function handleAcpInstallAction( + params: HandleCommandsParams, + restTokens: string[], +): CommandHandlerResult { + if (restTokens.length > 0) { + return stopWithText(`⚠️ ${ACP_INSTALL_USAGE}`); + } + const backendId = resolveConfiguredAcpBackendId(params.cfg); + const installHint = resolveAcpInstallCommandHint(params.cfg); + const lines = [ + "ACP install:", + "-----", + `configuredBackend: ${backendId}`, + 
`run: ${installHint}`, + `then: openclaw config set plugins.entries.${backendId}.enabled true`, + "then: /acp doctor", + ]; + return stopWithText(lines.join("\n")); +} + +function formatAcpSessionLine(params: { + key: string; + entry: SessionEntry; + currentSessionKey?: string; + threadId?: string; +}): string { + const acp = params.entry.acp; + if (!acp) { + return ""; + } + const marker = params.currentSessionKey === params.key ? "*" : " "; + const label = params.entry.label?.trim() || acp.agent; + const threadText = params.threadId ? `, thread:${params.threadId}` : ""; + return `${marker} ${label} (${acp.mode}, ${acp.state}, backend:${acp.backend}${threadText}) -> ${params.key}`; +} + +export function handleAcpSessionsAction( + params: HandleCommandsParams, + restTokens: string[], +): CommandHandlerResult { + if (restTokens.length > 0) { + return stopWithText(ACP_SESSIONS_USAGE); + } + + const currentSessionKey = resolveBoundAcpThreadSessionKey(params) || params.sessionKey; + if (!currentSessionKey) { + return stopWithText("⚠️ Missing session key."); + } + + const { storePath } = resolveSessionStorePathForAcp({ + cfg: params.cfg, + sessionKey: currentSessionKey, + }); + + let store: Record; + try { + store = loadSessionStore(storePath); + } catch { + store = {}; + } + + const bindingContext = resolveAcpCommandBindingContext(params); + const normalizedChannel = bindingContext.channel; + const normalizedAccountId = bindingContext.accountId || undefined; + const bindingService = getSessionBindingService(); + + const rows = Object.entries(store) + .filter(([, entry]) => Boolean(entry?.acp)) + .toSorted(([, a], [, b]) => (b?.updatedAt ?? 0) - (a?.updatedAt ?? 
0)) + .slice(0, 20) + .map(([key, entry]) => { + const bindingThreadId = bindingService + .listBySession(key) + .find( + (binding) => + (!normalizedChannel || binding.conversation.channel === normalizedChannel) && + (!normalizedAccountId || binding.conversation.accountId === normalizedAccountId), + )?.conversation.conversationId; + return formatAcpSessionLine({ + key, + entry, + currentSessionKey, + threadId: bindingThreadId, + }); + }) + .filter(Boolean); + + if (rows.length === 0) { + return stopWithText("ACP sessions:\n-----\n(none)"); + } + + return stopWithText(["ACP sessions:", "-----", ...rows].join("\n")); +} diff --git a/src/auto-reply/reply/commands-acp/lifecycle.ts b/src/auto-reply/reply/commands-acp/lifecycle.ts new file mode 100644 index 00000000000..9039cfe64e0 --- /dev/null +++ b/src/auto-reply/reply/commands-acp/lifecycle.ts @@ -0,0 +1,588 @@ +import { randomUUID } from "node:crypto"; +import { getAcpSessionManager } from "../../../acp/control-plane/manager.js"; +import { + cleanupFailedAcpSpawn, + type AcpSpawnRuntimeCloseHandle, +} from "../../../acp/control-plane/spawn.js"; +import { + isAcpEnabledByPolicy, + resolveAcpAgentPolicyError, + resolveAcpDispatchPolicyError, + resolveAcpDispatchPolicyMessage, +} from "../../../acp/policy.js"; +import { AcpRuntimeError } from "../../../acp/runtime/errors.js"; +import { + resolveAcpSessionCwd, + resolveAcpThreadSessionDetailLines, +} from "../../../acp/runtime/session-identifiers.js"; +import { + resolveThreadBindingIntroText, + resolveThreadBindingThreadName, +} from "../../../channels/thread-bindings-messages.js"; +import { + formatThreadBindingDisabledError, + formatThreadBindingSpawnDisabledError, + resolveThreadBindingSessionTtlMsForChannel, + resolveThreadBindingSpawnPolicy, +} from "../../../channels/thread-bindings-policy.js"; +import type { OpenClawConfig } from "../../../config/config.js"; +import type { SessionAcpMeta } from "../../../config/sessions/types.js"; +import { callGateway } from 
"../../../gateway/call.js"; +import { + getSessionBindingService, + type SessionBindingRecord, +} from "../../../infra/outbound/session-binding-service.js"; +import type { CommandHandlerResult, HandleCommandsParams } from "../commands-types.js"; +import { + resolveAcpCommandAccountId, + resolveAcpCommandBindingContext, + resolveAcpCommandThreadId, +} from "./context.js"; +import { + ACP_STEER_OUTPUT_LIMIT, + collectAcpErrorText, + parseSpawnInput, + parseSteerInput, + resolveCommandRequestId, + stopWithText, + type AcpSpawnThreadMode, + withAcpCommandErrorBoundary, +} from "./shared.js"; +import { resolveAcpTargetSessionKey } from "./targets.js"; + +async function bindSpawnedAcpSessionToThread(params: { + commandParams: HandleCommandsParams; + sessionKey: string; + agentId: string; + label?: string; + threadMode: AcpSpawnThreadMode; + sessionMeta?: SessionAcpMeta; +}): Promise<{ ok: true; binding: SessionBindingRecord } | { ok: false; error: string }> { + const { commandParams, threadMode } = params; + if (threadMode === "off") { + return { + ok: false, + error: "internal: thread binding is disabled for this spawn", + }; + } + + const bindingContext = resolveAcpCommandBindingContext(commandParams); + const channel = bindingContext.channel; + if (!channel) { + return { + ok: false, + error: "ACP thread binding requires a channel context.", + }; + } + + const accountId = resolveAcpCommandAccountId(commandParams); + const spawnPolicy = resolveThreadBindingSpawnPolicy({ + cfg: commandParams.cfg, + channel, + accountId, + kind: "acp", + }); + if (!spawnPolicy.enabled) { + return { + ok: false, + error: formatThreadBindingDisabledError({ + channel: spawnPolicy.channel, + accountId: spawnPolicy.accountId, + kind: "acp", + }), + }; + } + if (!spawnPolicy.spawnEnabled) { + return { + ok: false, + error: formatThreadBindingSpawnDisabledError({ + channel: spawnPolicy.channel, + accountId: spawnPolicy.accountId, + kind: "acp", + }), + }; + } + + const bindingService = 
getSessionBindingService(); + const capabilities = bindingService.getCapabilities({ + channel: spawnPolicy.channel, + accountId: spawnPolicy.accountId, + }); + if (!capabilities.adapterAvailable) { + return { + ok: false, + error: `Thread bindings are unavailable for ${channel}.`, + }; + } + if (!capabilities.bindSupported) { + return { + ok: false, + error: `Thread bindings are unavailable for ${channel}.`, + }; + } + + const currentThreadId = bindingContext.threadId ?? ""; + + if (threadMode === "here" && !currentThreadId) { + return { + ok: false, + error: `--thread here requires running /acp spawn inside an active ${channel} thread/conversation.`, + }; + } + + const threadId = currentThreadId || undefined; + const placement = threadId ? "current" : "child"; + if (!capabilities.placements.includes(placement)) { + return { + ok: false, + error: `Thread bindings do not support ${placement} placement for ${channel}.`, + }; + } + const channelId = placement === "child" ? bindingContext.conversationId : undefined; + + if (placement === "child" && !channelId) { + return { + ok: false, + error: `Could not resolve a ${channel} conversation for ACP thread spawn.`, + }; + } + + const senderId = commandParams.command.senderId?.trim() || ""; + if (threadId) { + const existingBinding = bindingService.resolveByConversation({ + channel: spawnPolicy.channel, + accountId: spawnPolicy.accountId, + conversationId: threadId, + }); + const boundBy = + typeof existingBinding?.metadata?.boundBy === "string" + ? 
existingBinding.metadata.boundBy.trim() + : ""; + if (existingBinding && boundBy && boundBy !== "system" && senderId && senderId !== boundBy) { + return { + ok: false, + error: `Only ${boundBy} can rebind this thread.`, + }; + } + } + + const label = params.label || params.agentId; + const conversationId = threadId || channelId; + if (!conversationId) { + return { + ok: false, + error: `Could not resolve a ${channel} conversation for ACP thread spawn.`, + }; + } + + try { + const binding = await bindingService.bind({ + targetSessionKey: params.sessionKey, + targetKind: "session", + conversation: { + channel: spawnPolicy.channel, + accountId: spawnPolicy.accountId, + conversationId, + }, + placement, + metadata: { + threadName: resolveThreadBindingThreadName({ + agentId: params.agentId, + label, + }), + agentId: params.agentId, + label, + boundBy: senderId || "unknown", + introText: resolveThreadBindingIntroText({ + agentId: params.agentId, + label, + sessionTtlMs: resolveThreadBindingSessionTtlMsForChannel({ + cfg: commandParams.cfg, + channel: spawnPolicy.channel, + accountId: spawnPolicy.accountId, + }), + sessionCwd: resolveAcpSessionCwd(params.sessionMeta), + sessionDetails: resolveAcpThreadSessionDetailLines({ + sessionKey: params.sessionKey, + meta: params.sessionMeta, + }), + }), + }, + }); + return { + ok: true, + binding, + }; + } catch (error) { + const message = error instanceof Error ? 
error.message : String(error); + return { + ok: false, + error: message || `Failed to bind a ${channel} thread/conversation to the new ACP session.`, + }; + } +} + +async function cleanupFailedSpawn(params: { + cfg: OpenClawConfig; + sessionKey: string; + shouldDeleteSession: boolean; + initializedRuntime?: AcpSpawnRuntimeCloseHandle; +}) { + await cleanupFailedAcpSpawn({ + cfg: params.cfg, + sessionKey: params.sessionKey, + shouldDeleteSession: params.shouldDeleteSession, + deleteTranscript: false, + runtimeCloseHandle: params.initializedRuntime, + }); +} + +export async function handleAcpSpawnAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + if (!isAcpEnabledByPolicy(params.cfg)) { + return stopWithText("ACP is disabled by policy (`acp.enabled=false`)."); + } + + const parsed = parseSpawnInput(params, restTokens); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + + const spawn = parsed.value; + const agentPolicyError = resolveAcpAgentPolicyError(params.cfg, spawn.agentId); + if (agentPolicyError) { + return stopWithText( + collectAcpErrorText({ + error: agentPolicyError, + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: "ACP target agent is not allowed by policy.", + }), + ); + } + + const acpManager = getAcpSessionManager(); + const sessionKey = `agent:${spawn.agentId}:acp:${randomUUID()}`; + + let initializedBackend = ""; + let initializedMeta: SessionAcpMeta | undefined; + let initializedRuntime: AcpSpawnRuntimeCloseHandle | undefined; + try { + const initialized = await acpManager.initializeSession({ + cfg: params.cfg, + sessionKey, + agent: spawn.agentId, + mode: spawn.mode, + cwd: spawn.cwd, + }); + initializedRuntime = { + runtime: initialized.runtime, + handle: initialized.handle, + }; + initializedBackend = initialized.handle.backend || initialized.meta.backend; + initializedMeta = initialized.meta; + } catch (err) { + return stopWithText( + collectAcpErrorText({ + error: err, + fallbackCode: 
"ACP_SESSION_INIT_FAILED", + fallbackMessage: "Could not initialize ACP session runtime.", + }), + ); + } + + let binding: SessionBindingRecord | null = null; + if (spawn.thread !== "off") { + const bound = await bindSpawnedAcpSessionToThread({ + commandParams: params, + sessionKey, + agentId: spawn.agentId, + label: spawn.label, + threadMode: spawn.thread, + sessionMeta: initializedMeta, + }); + if (!bound.ok) { + await cleanupFailedSpawn({ + cfg: params.cfg, + sessionKey, + shouldDeleteSession: true, + initializedRuntime, + }); + return stopWithText(`⚠️ ${bound.error}`); + } + binding = bound.binding; + } + + try { + await callGateway({ + method: "sessions.patch", + params: { + key: sessionKey, + ...(spawn.label ? { label: spawn.label } : {}), + }, + timeoutMs: 10_000, + }); + } catch (err) { + await cleanupFailedSpawn({ + cfg: params.cfg, + sessionKey, + shouldDeleteSession: true, + initializedRuntime, + }); + const message = err instanceof Error ? err.message : String(err); + return stopWithText(`⚠️ ACP spawn failed: ${message}`); + } + + const parts = [ + `✅ Spawned ACP session ${sessionKey} (${spawn.mode}, backend ${initializedBackend}).`, + ]; + if (binding) { + const currentThreadId = resolveAcpCommandThreadId(params) ?? 
""; + const boundConversationId = binding.conversation.conversationId.trim(); + if (currentThreadId && boundConversationId === currentThreadId) { + parts.push(`Bound this thread to ${sessionKey}.`); + } else { + parts.push(`Created thread ${boundConversationId} and bound it to ${sessionKey}.`); + } + } else { + parts.push("Session is unbound (use /focus to bind this thread/conversation)."); + } + + const dispatchNote = resolveAcpDispatchPolicyMessage(params.cfg); + if (dispatchNote) { + parts.push(`ℹ️ ${dispatchNote}`); + } + + return stopWithText(parts.join(" ")); +} + +export async function handleAcpCancelAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const acpManager = getAcpSessionManager(); + const token = restTokens.join(" ").trim() || undefined; + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + + const resolved = acpManager.resolveSession({ + cfg: params.cfg, + sessionKey: target.sessionKey, + }); + if (resolved.kind === "none") { + return stopWithText( + collectAcpErrorText({ + error: new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${target.sessionKey}`, + ), + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: "Session is not ACP-enabled.", + }), + ); + } + if (resolved.kind === "stale") { + return stopWithText( + collectAcpErrorText({ + error: resolved.error, + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: resolved.error.message, + }), + ); + } + + return await withAcpCommandErrorBoundary({ + run: async () => + await acpManager.cancelSession({ + cfg: params.cfg, + sessionKey: target.sessionKey, + reason: "manual-cancel", + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP cancel failed before completion.", + onSuccess: () => stopWithText(`✅ Cancel requested for ACP session ${target.sessionKey}.`), + }); +} + +async function runAcpSteer(params: 
{ + cfg: OpenClawConfig; + sessionKey: string; + instruction: string; + requestId: string; +}): Promise { + const acpManager = getAcpSessionManager(); + let output = ""; + + await acpManager.runTurn({ + cfg: params.cfg, + sessionKey: params.sessionKey, + text: params.instruction, + mode: "steer", + requestId: params.requestId, + onEvent: (event) => { + if (event.type !== "text_delta") { + return; + } + if (event.stream && event.stream !== "output") { + return; + } + if (event.text) { + output += event.text; + if (output.length > ACP_STEER_OUTPUT_LIMIT) { + output = `${output.slice(0, ACP_STEER_OUTPUT_LIMIT)}…`; + } + } + }, + }); + return output.trim(); +} + +export async function handleAcpSteerAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const dispatchPolicyError = resolveAcpDispatchPolicyError(params.cfg); + if (dispatchPolicyError) { + return stopWithText( + collectAcpErrorText({ + error: dispatchPolicyError, + fallbackCode: "ACP_DISPATCH_DISABLED", + fallbackMessage: dispatchPolicyError.message, + }), + ); + } + + const parsed = parseSteerInput(restTokens); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + const acpManager = getAcpSessionManager(); + + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.value.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + + const resolved = acpManager.resolveSession({ + cfg: params.cfg, + sessionKey: target.sessionKey, + }); + if (resolved.kind === "none") { + return stopWithText( + collectAcpErrorText({ + error: new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${target.sessionKey}`, + ), + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: "Session is not ACP-enabled.", + }), + ); + } + if (resolved.kind === "stale") { + return stopWithText( + collectAcpErrorText({ + error: resolved.error, + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: 
resolved.error.message, + }), + ); + } + + return await withAcpCommandErrorBoundary({ + run: async () => + await runAcpSteer({ + cfg: params.cfg, + sessionKey: target.sessionKey, + instruction: parsed.value.instruction, + requestId: `${resolveCommandRequestId(params)}:steer`, + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP steer failed before completion.", + onSuccess: (steerOutput) => { + if (!steerOutput) { + return stopWithText(`✅ ACP steer sent to ${target.sessionKey}.`); + } + return stopWithText(`✅ ACP steer sent to ${target.sessionKey}.\n${steerOutput}`); + }, + }); +} + +export async function handleAcpCloseAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const acpManager = getAcpSessionManager(); + const token = restTokens.join(" ").trim() || undefined; + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + + const resolved = acpManager.resolveSession({ + cfg: params.cfg, + sessionKey: target.sessionKey, + }); + if (resolved.kind === "none") { + return stopWithText( + collectAcpErrorText({ + error: new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `Session is not ACP-enabled: ${target.sessionKey}`, + ), + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: "Session is not ACP-enabled.", + }), + ); + } + if (resolved.kind === "stale") { + return stopWithText( + collectAcpErrorText({ + error: resolved.error, + fallbackCode: "ACP_SESSION_INIT_FAILED", + fallbackMessage: resolved.error.message, + }), + ); + } + + let runtimeNotice = ""; + try { + const closed = await acpManager.closeSession({ + cfg: params.cfg, + sessionKey: target.sessionKey, + reason: "manual-close", + allowBackendUnavailable: true, + clearMeta: true, + }); + runtimeNotice = closed.runtimeNotice ? 
` (${closed.runtimeNotice})` : ""; + } catch (error) { + return stopWithText( + collectAcpErrorText({ + error, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP close failed before completion.", + }), + ); + } + + const removedBindings = await getSessionBindingService().unbind({ + targetSessionKey: target.sessionKey, + reason: "manual", + }); + + return stopWithText( + `✅ Closed ACP session ${target.sessionKey}${runtimeNotice}. Removed ${removedBindings.length} binding${removedBindings.length === 1 ? "" : "s"}.`, + ); +} diff --git a/src/auto-reply/reply/commands-acp/runtime-options.ts b/src/auto-reply/reply/commands-acp/runtime-options.ts new file mode 100644 index 00000000000..359b712e0e3 --- /dev/null +++ b/src/auto-reply/reply/commands-acp/runtime-options.ts @@ -0,0 +1,348 @@ +import { getAcpSessionManager } from "../../../acp/control-plane/manager.js"; +import { + parseRuntimeTimeoutSecondsInput, + validateRuntimeConfigOptionInput, + validateRuntimeCwdInput, + validateRuntimeModeInput, + validateRuntimeModelInput, + validateRuntimePermissionProfileInput, +} from "../../../acp/control-plane/runtime-options.js"; +import { resolveAcpSessionIdentifierLinesFromIdentity } from "../../../acp/runtime/session-identifiers.js"; +import type { CommandHandlerResult, HandleCommandsParams } from "../commands-types.js"; +import { + ACP_CWD_USAGE, + ACP_MODEL_USAGE, + ACP_PERMISSIONS_USAGE, + ACP_RESET_OPTIONS_USAGE, + ACP_SET_MODE_USAGE, + ACP_STATUS_USAGE, + ACP_TIMEOUT_USAGE, + formatAcpCapabilitiesText, + formatRuntimeOptionsText, + parseOptionalSingleTarget, + parseSetCommandInput, + parseSingleValueCommandInput, + stopWithText, + withAcpCommandErrorBoundary, +} from "./shared.js"; +import { resolveAcpTargetSessionKey } from "./targets.js"; + +export async function handleAcpStatusAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const parsed = parseOptionalSingleTarget(restTokens, ACP_STATUS_USAGE); + if (!parsed.ok) { + return 
stopWithText(`⚠️ ${parsed.error}`); + } + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + + return await withAcpCommandErrorBoundary({ + run: async () => + await getAcpSessionManager().getSessionStatus({ + cfg: params.cfg, + sessionKey: target.sessionKey, + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not read ACP session status.", + onSuccess: (status) => { + const sessionIdentifierLines = resolveAcpSessionIdentifierLinesFromIdentity({ + backend: status.backend, + identity: status.identity, + }); + const lines = [ + "ACP status:", + "-----", + `session: ${status.sessionKey}`, + `backend: ${status.backend}`, + `agent: ${status.agent}`, + ...sessionIdentifierLines, + `sessionMode: ${status.mode}`, + `state: ${status.state}`, + `runtimeOptions: ${formatRuntimeOptionsText(status.runtimeOptions)}`, + `capabilities: ${formatAcpCapabilitiesText(status.capabilities.controls)}`, + `lastActivityAt: ${new Date(status.lastActivityAt).toISOString()}`, + ...(status.lastError ? [`lastError: ${status.lastError}`] : []), + ...(status.runtimeStatus?.summary ? [`runtime: ${status.runtimeStatus.summary}`] : []), + ...(status.runtimeStatus?.details + ? 
[`runtimeDetails: ${JSON.stringify(status.runtimeStatus.details)}`] + : []), + ]; + return stopWithText(lines.join("\n")); + }, + }); +} + +export async function handleAcpSetModeAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const parsed = parseSingleValueCommandInput(restTokens, ACP_SET_MODE_USAGE); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.value.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + + return await withAcpCommandErrorBoundary({ + run: async () => { + const runtimeMode = validateRuntimeModeInput(parsed.value.value); + const options = await getAcpSessionManager().setSessionRuntimeMode({ + cfg: params.cfg, + sessionKey: target.sessionKey, + runtimeMode, + }); + return { + runtimeMode, + options, + }; + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not update ACP runtime mode.", + onSuccess: ({ runtimeMode, options }) => + stopWithText( + `✅ Updated ACP runtime mode for ${target.sessionKey}: ${runtimeMode}. 
Effective options: ${formatRuntimeOptionsText(options)}`, + ), + }); +} + +export async function handleAcpSetAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const parsed = parseSetCommandInput(restTokens); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.value.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + const key = parsed.value.key.trim(); + const value = parsed.value.value.trim(); + + return await withAcpCommandErrorBoundary({ + run: async () => { + const lowerKey = key.toLowerCase(); + if (lowerKey === "cwd") { + const cwd = validateRuntimeCwdInput(value); + const options = await getAcpSessionManager().updateSessionRuntimeOptions({ + cfg: params.cfg, + sessionKey: target.sessionKey, + patch: { cwd }, + }); + return { + text: `✅ Updated ACP cwd for ${target.sessionKey}: ${cwd}. Effective options: ${formatRuntimeOptionsText(options)}`, + }; + } + const validated = validateRuntimeConfigOptionInput(key, value); + const options = await getAcpSessionManager().setSessionConfigOption({ + cfg: params.cfg, + sessionKey: target.sessionKey, + key: validated.key, + value: validated.value, + }); + return { + text: `✅ Updated ACP config option for ${target.sessionKey}: ${validated.key}=${validated.value}. 
Effective options: ${formatRuntimeOptionsText(options)}`, + }; + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not update ACP config option.", + onSuccess: ({ text }) => stopWithText(text), + }); +} + +export async function handleAcpCwdAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const parsed = parseSingleValueCommandInput(restTokens, ACP_CWD_USAGE); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.value.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + + return await withAcpCommandErrorBoundary({ + run: async () => { + const cwd = validateRuntimeCwdInput(parsed.value.value); + const options = await getAcpSessionManager().updateSessionRuntimeOptions({ + cfg: params.cfg, + sessionKey: target.sessionKey, + patch: { cwd }, + }); + return { + cwd, + options, + }; + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not update ACP cwd.", + onSuccess: ({ cwd, options }) => + stopWithText( + `✅ Updated ACP cwd for ${target.sessionKey}: ${cwd}. 
Effective options: ${formatRuntimeOptionsText(options)}`, + ), + }); +} + +export async function handleAcpPermissionsAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const parsed = parseSingleValueCommandInput(restTokens, ACP_PERMISSIONS_USAGE); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.value.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + return await withAcpCommandErrorBoundary({ + run: async () => { + const permissionProfile = validateRuntimePermissionProfileInput(parsed.value.value); + const options = await getAcpSessionManager().setSessionConfigOption({ + cfg: params.cfg, + sessionKey: target.sessionKey, + key: "approval_policy", + value: permissionProfile, + }); + return { + permissionProfile, + options, + }; + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not update ACP permissions profile.", + onSuccess: ({ permissionProfile, options }) => + stopWithText( + `✅ Updated ACP permissions profile for ${target.sessionKey}: ${permissionProfile}. 
Effective options: ${formatRuntimeOptionsText(options)}`, + ), + }); +} + +export async function handleAcpTimeoutAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const parsed = parseSingleValueCommandInput(restTokens, ACP_TIMEOUT_USAGE); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.value.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + + return await withAcpCommandErrorBoundary({ + run: async () => { + const timeoutSeconds = parseRuntimeTimeoutSecondsInput(parsed.value.value); + const options = await getAcpSessionManager().setSessionConfigOption({ + cfg: params.cfg, + sessionKey: target.sessionKey, + key: "timeout", + value: String(timeoutSeconds), + }); + return { + timeoutSeconds, + options, + }; + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not update ACP timeout.", + onSuccess: ({ timeoutSeconds, options }) => + stopWithText( + `✅ Updated ACP timeout for ${target.sessionKey}: ${timeoutSeconds}s. 
Effective options: ${formatRuntimeOptionsText(options)}`, + ), + }); +} + +export async function handleAcpModelAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const parsed = parseSingleValueCommandInput(restTokens, ACP_MODEL_USAGE); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.value.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + return await withAcpCommandErrorBoundary({ + run: async () => { + const model = validateRuntimeModelInput(parsed.value.value); + const options = await getAcpSessionManager().setSessionConfigOption({ + cfg: params.cfg, + sessionKey: target.sessionKey, + key: "model", + value: model, + }); + return { + model, + options, + }; + }, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not update ACP model.", + onSuccess: ({ model, options }) => + stopWithText( + `✅ Updated ACP model for ${target.sessionKey}: ${model}. 
Effective options: ${formatRuntimeOptionsText(options)}`, + ), + }); +} + +export async function handleAcpResetOptionsAction( + params: HandleCommandsParams, + restTokens: string[], +): Promise { + const parsed = parseOptionalSingleTarget(restTokens, ACP_RESET_OPTIONS_USAGE); + if (!parsed.ok) { + return stopWithText(`⚠️ ${parsed.error}`); + } + const target = await resolveAcpTargetSessionKey({ + commandParams: params, + token: parsed.sessionToken, + }); + if (!target.ok) { + return stopWithText(`⚠️ ${target.error}`); + } + + return await withAcpCommandErrorBoundary({ + run: async () => + await getAcpSessionManager().resetSessionRuntimeOptions({ + cfg: params.cfg, + sessionKey: target.sessionKey, + }), + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "Could not reset ACP runtime options.", + onSuccess: () => stopWithText(`✅ Reset ACP runtime options for ${target.sessionKey}.`), + }); +} diff --git a/src/auto-reply/reply/commands-acp/shared.ts b/src/auto-reply/reply/commands-acp/shared.ts new file mode 100644 index 00000000000..adf31247b6d --- /dev/null +++ b/src/auto-reply/reply/commands-acp/shared.ts @@ -0,0 +1,500 @@ +import { randomUUID } from "node:crypto"; +import { existsSync } from "node:fs"; +import path from "node:path"; +import { toAcpRuntimeErrorText } from "../../../acp/runtime/error-text.js"; +import type { AcpRuntimeError } from "../../../acp/runtime/errors.js"; +import type { AcpRuntimeSessionMode } from "../../../acp/runtime/types.js"; +import { DISCORD_THREAD_BINDING_CHANNEL } from "../../../channels/thread-bindings-policy.js"; +import type { OpenClawConfig } from "../../../config/config.js"; +import type { AcpSessionRuntimeOptions } from "../../../config/sessions/types.js"; +import { normalizeAgentId } from "../../../routing/session-key.js"; +import type { CommandHandlerResult, HandleCommandsParams } from "../commands-types.js"; +import { resolveAcpCommandChannel, resolveAcpCommandThreadId } from "./context.js"; + +export const COMMAND = 
"/acp"; +export const ACP_SPAWN_USAGE = + "Usage: /acp spawn [agentId] [--mode persistent|oneshot] [--thread auto|here|off] [--cwd ] [--label

", "Agent workspace directory (default: ~/.openclaw/workspace)") - .option("--reset", "Reset config + credentials + sessions + workspace before running wizard") + .option( + "--reset", + "Reset config + credentials + sessions before running wizard (workspace only with --reset-scope full)", + ) + .option("--reset-scope ", "Reset scope: config|config+creds+sessions|full") .option("--non-interactive", "Run without prompts", false) .option( "--accept-risk", @@ -74,6 +80,10 @@ export function registerOnboardCommand(program: Command) { "Auth profile id (non-interactive; default: :manual)", ) .option("--token-expires-in ", "Optional token expiry duration (e.g. 365d, 12h)") + .option( + "--secret-input-mode ", + "API key persistence mode: plaintext|ref (default: plaintext)", + ) .option("--cloudflare-ai-gateway-account-id ", "Cloudflare Account ID") .option("--cloudflare-ai-gateway-gateway-id ", "Cloudflare AI Gateway ID"); @@ -129,6 +139,7 @@ export function registerOnboardCommand(program: Command) { token: opts.token as string | undefined, tokenProfileId: opts.tokenProfileId as string | undefined, tokenExpiresIn: opts.tokenExpiresIn as string | undefined, + secretInputMode: opts.secretInputMode as SecretInputMode | undefined, anthropicApiKey: opts.anthropicApiKey as string | undefined, openaiApiKey: opts.openaiApiKey as string | undefined, mistralApiKey: opts.mistralApiKey as string | undefined, @@ -172,6 +183,7 @@ export function registerOnboardCommand(program: Command) { tailscale: opts.tailscale as TailscaleMode | undefined, tailscaleResetOnExit: Boolean(opts.tailscaleResetOnExit), reset: Boolean(opts.reset), + resetScope: opts.resetScope as ResetScope | undefined, installDaemon, daemonRuntime: opts.daemonRuntime as GatewayDaemonRuntime | undefined, skipChannels: Boolean(opts.skipChannels), diff --git a/src/cli/program/register.status-health-sessions.test.ts b/src/cli/program/register.status-health-sessions.test.ts index ac84bb5c1ca..5a45b4d293a 100644 --- 
a/src/cli/program/register.status-health-sessions.test.ts +++ b/src/cli/program/register.status-health-sessions.test.ts @@ -171,6 +171,7 @@ describe("registerStatusHealthSessionsCommands", () => { "/tmp/sessions.json", "--dry-run", "--enforce", + "--fix-missing", "--active-key", "agent:main:main", "--json", @@ -183,6 +184,7 @@ describe("registerStatusHealthSessionsCommands", () => { allAgents: false, dryRun: true, enforce: true, + fixMissing: true, activeKey: "agent:main:main", json: true, }), diff --git a/src/cli/program/register.status-health-sessions.ts b/src/cli/program/register.status-health-sessions.ts index b708d42e665..3a3d81abcf3 100644 --- a/src/cli/program/register.status-health-sessions.ts +++ b/src/cli/program/register.status-health-sessions.ts @@ -163,6 +163,11 @@ export function registerStatusHealthSessionsCommands(program: Command) { .option("--all-agents", "Run maintenance across all configured agents", false) .option("--dry-run", "Preview maintenance actions without writing", false) .option("--enforce", "Apply maintenance even when configured mode is warn", false) + .option( + "--fix-missing", + "Remove store entries whose transcript files are missing (bypasses age/count retention)", + false, + ) .option("--active-key ", "Protect this session key from budget-eviction") .option("--json", "Output JSON", false) .addHelpText( @@ -170,6 +175,10 @@ export function registerStatusHealthSessionsCommands(program: Command) { () => `\n${theme.heading("Examples:")}\n${formatHelpExamples([ ["openclaw sessions cleanup --dry-run", "Preview stale/cap cleanup."], + [ + "openclaw sessions cleanup --dry-run --fix-missing", + "Also preview pruning entries with missing transcript files.", + ], ["openclaw sessions cleanup --enforce", "Apply maintenance now."], ["openclaw sessions cleanup --agent work --dry-run", "Preview one agent store."], ["openclaw sessions cleanup --all-agents --dry-run", "Preview all agent stores."], @@ -196,6 +205,7 @@ export function 
registerStatusHealthSessionsCommands(program: Command) { allAgents: Boolean(opts.allAgents || parentOpts?.allAgents), dryRun: Boolean(opts.dryRun), enforce: Boolean(opts.enforce), + fixMissing: Boolean(opts.fixMissing), activeKey: opts.activeKey as string | undefined, json: Boolean(opts.json || parentOpts?.json), }, diff --git a/src/cli/program/register.subclis.ts b/src/cli/program/register.subclis.ts index 77c5cd28596..fc044dbcd92 100644 --- a/src/cli/program/register.subclis.ts +++ b/src/cli/program/register.subclis.ts @@ -260,6 +260,15 @@ const entries: SubCliEntry[] = [ mod.registerSecurityCli(program); }, }, + { + name: "secrets", + description: "Secrets runtime reload controls", + hasSubcommands: true, + register: async (program) => { + const mod = await import("../secrets-cli.js"); + mod.registerSecretsCli(program); + }, + }, { name: "skills", description: "List and inspect available skills", diff --git a/src/cli/secrets-cli.test.ts b/src/cli/secrets-cli.test.ts new file mode 100644 index 00000000000..8f781e0d150 --- /dev/null +++ b/src/cli/secrets-cli.test.ts @@ -0,0 +1,158 @@ +import { Command } from "commander"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { createCliRuntimeCapture } from "./test-runtime-capture.js"; + +const callGatewayFromCli = vi.fn(); +const runSecretsAudit = vi.fn(); +const resolveSecretsAuditExitCode = vi.fn(); +const runSecretsConfigureInteractive = vi.fn(); +const runSecretsApply = vi.fn(); +const confirm = vi.fn(); + +const { defaultRuntime, runtimeLogs, runtimeErrors, resetRuntimeCapture } = + createCliRuntimeCapture(); + +vi.mock("./gateway-rpc.js", () => ({ + addGatewayClientOptions: (cmd: Command) => cmd, + callGatewayFromCli: (method: string, opts: unknown, params?: unknown, extra?: unknown) => + callGatewayFromCli(method, opts, params, extra), +})); + +vi.mock("../runtime.js", () => ({ + defaultRuntime, +})); + +vi.mock("../secrets/audit.js", () => ({ + runSecretsAudit: () => runSecretsAudit(), + 
resolveSecretsAuditExitCode: (report: unknown, check: boolean) => + resolveSecretsAuditExitCode(report, check), +})); + +vi.mock("../secrets/configure.js", () => ({ + runSecretsConfigureInteractive: () => runSecretsConfigureInteractive(), +})); + +vi.mock("../secrets/apply.js", () => ({ + runSecretsApply: (options: unknown) => runSecretsApply(options), +})); + +vi.mock("@clack/prompts", () => ({ + confirm: (options: unknown) => confirm(options), +})); + +const { registerSecretsCli } = await import("./secrets-cli.js"); + +describe("secrets CLI", () => { + const createProgram = () => { + const program = new Command(); + program.exitOverride(); + registerSecretsCli(program); + return program; + }; + + beforeEach(() => { + resetRuntimeCapture(); + callGatewayFromCli.mockReset(); + runSecretsAudit.mockReset(); + resolveSecretsAuditExitCode.mockReset(); + runSecretsConfigureInteractive.mockReset(); + runSecretsApply.mockReset(); + confirm.mockReset(); + }); + + it("calls secrets.reload and prints human output", async () => { + callGatewayFromCli.mockResolvedValue({ ok: true, warningCount: 1 }); + await createProgram().parseAsync(["secrets", "reload"], { from: "user" }); + expect(callGatewayFromCli).toHaveBeenCalledWith( + "secrets.reload", + expect.anything(), + undefined, + expect.objectContaining({ expectFinal: false }), + ); + expect(runtimeLogs.at(-1)).toBe("Secrets reloaded with 1 warning(s)."); + expect(runtimeErrors).toHaveLength(0); + }); + + it("prints JSON when requested", async () => { + callGatewayFromCli.mockResolvedValue({ ok: true, warningCount: 0 }); + await createProgram().parseAsync(["secrets", "reload", "--json"], { from: "user" }); + expect(runtimeLogs.at(-1)).toContain('"ok": true'); + }); + + it("runs secrets audit and exits via check code", async () => { + runSecretsAudit.mockResolvedValue({ + version: 1, + status: "findings", + filesScanned: [], + summary: { + plaintextCount: 1, + unresolvedRefCount: 0, + shadowedRefCount: 0, + legacyResidueCount: 
0, + }, + findings: [], + }); + resolveSecretsAuditExitCode.mockReturnValue(1); + + await expect( + createProgram().parseAsync(["secrets", "audit", "--check"], { from: "user" }), + ).rejects.toBeTruthy(); + expect(runSecretsAudit).toHaveBeenCalled(); + expect(resolveSecretsAuditExitCode).toHaveBeenCalledWith(expect.anything(), true); + }); + + it("runs secrets configure then apply when confirmed", async () => { + runSecretsConfigureInteractive.mockResolvedValue({ + plan: { + version: 1, + protocolVersion: 1, + generatedAt: "2026-02-26T00:00:00.000Z", + generatedBy: "openclaw secrets configure", + targets: [ + { + type: "skills.entries.apiKey", + path: "skills.entries.qa-secret-test.apiKey", + pathSegments: ["skills", "entries", "qa-secret-test", "apiKey"], + ref: { + source: "env", + provider: "default", + id: "QA_SECRET_TEST_API_KEY", + }, + }, + ], + }, + preflight: { + mode: "dry-run", + changed: true, + changedFiles: ["/tmp/openclaw.json"], + warningCount: 0, + warnings: [], + }, + }); + confirm.mockResolvedValue(true); + runSecretsApply.mockResolvedValue({ + mode: "write", + changed: true, + changedFiles: ["/tmp/openclaw.json"], + warningCount: 0, + warnings: [], + }); + + await createProgram().parseAsync(["secrets", "configure"], { from: "user" }); + expect(runSecretsConfigureInteractive).toHaveBeenCalled(); + expect(runSecretsApply).toHaveBeenCalledWith( + expect.objectContaining({ + write: true, + plan: expect.objectContaining({ + targets: expect.arrayContaining([ + expect.objectContaining({ + type: "skills.entries.apiKey", + path: "skills.entries.qa-secret-test.apiKey", + }), + ]), + }), + }), + ); + expect(runtimeLogs.at(-1)).toContain("Secrets applied"); + }); +}); diff --git a/src/cli/secrets-cli.ts b/src/cli/secrets-cli.ts new file mode 100644 index 00000000000..05cc38afe03 --- /dev/null +++ b/src/cli/secrets-cli.ts @@ -0,0 +1,245 @@ +import fs from "node:fs"; +import { confirm } from "@clack/prompts"; +import type { Command } from "commander"; +import 
{ danger } from "../globals.js"; +import { defaultRuntime } from "../runtime.js"; +import { runSecretsApply } from "../secrets/apply.js"; +import { resolveSecretsAuditExitCode, runSecretsAudit } from "../secrets/audit.js"; +import { runSecretsConfigureInteractive } from "../secrets/configure.js"; +import { isSecretsApplyPlan, type SecretsApplyPlan } from "../secrets/plan.js"; +import { formatDocsLink } from "../terminal/links.js"; +import { theme } from "../terminal/theme.js"; +import { addGatewayClientOptions, callGatewayFromCli, type GatewayRpcOpts } from "./gateway-rpc.js"; + +type SecretsReloadOptions = GatewayRpcOpts & { json?: boolean }; +type SecretsAuditOptions = { + check?: boolean; + json?: boolean; +}; +type SecretsConfigureOptions = { + apply?: boolean; + yes?: boolean; + planOut?: string; + providersOnly?: boolean; + skipProviderSetup?: boolean; + json?: boolean; +}; +type SecretsApplyOptions = { + from: string; + dryRun?: boolean; + json?: boolean; +}; + +function readPlanFile(pathname: string): SecretsApplyPlan { + const raw = fs.readFileSync(pathname, "utf8"); + const parsed = JSON.parse(raw) as unknown; + if (!isSecretsApplyPlan(parsed)) { + throw new Error(`Invalid secrets plan file: ${pathname}`); + } + return parsed; +} + +export function registerSecretsCli(program: Command) { + const secrets = program + .command("secrets") + .description("Secrets runtime controls") + .addHelpText( + "after", + () => + `\n${theme.muted("Docs:")} ${formatDocsLink("/gateway/security", "docs.openclaw.ai/gateway/security")}\n`, + ); + + addGatewayClientOptions( + secrets + .command("reload") + .description("Re-resolve secret references and atomically swap runtime snapshot") + .option("--json", "Output JSON", false), + ).action(async (opts: SecretsReloadOptions) => { + try { + const result = await callGatewayFromCli("secrets.reload", opts, undefined, { + expectFinal: false, + }); + if (opts.json) { + defaultRuntime.log(JSON.stringify(result, null, 2)); + return; + } 
+ const warningCount = Number( + (result as { warningCount?: unknown } | undefined)?.warningCount ?? 0, + ); + if (Number.isFinite(warningCount) && warningCount > 0) { + defaultRuntime.log(`Secrets reloaded with ${warningCount} warning(s).`); + return; + } + defaultRuntime.log("Secrets reloaded."); + } catch (err) { + defaultRuntime.error(danger(String(err))); + defaultRuntime.exit(1); + } + }); + + secrets + .command("audit") + .description("Audit plaintext secrets, unresolved refs, and precedence drift") + .option("--check", "Exit non-zero when findings are present", false) + .option("--json", "Output JSON", false) + .action(async (opts: SecretsAuditOptions) => { + try { + const report = await runSecretsAudit(); + if (opts.json) { + defaultRuntime.log(JSON.stringify(report, null, 2)); + } else { + defaultRuntime.log( + `Secrets audit: ${report.status}. plaintext=${report.summary.plaintextCount}, unresolved=${report.summary.unresolvedRefCount}, shadowed=${report.summary.shadowedRefCount}, legacy=${report.summary.legacyResidueCount}.`, + ); + if (report.findings.length > 0) { + for (const finding of report.findings.slice(0, 20)) { + defaultRuntime.log( + `- [${finding.code}] ${finding.file}:${finding.jsonPath} ${finding.message}`, + ); + } + if (report.findings.length > 20) { + defaultRuntime.log(`... 
${report.findings.length - 20} more finding(s).`); + } + } + } + const exitCode = resolveSecretsAuditExitCode(report, Boolean(opts.check)); + if (exitCode !== 0) { + defaultRuntime.exit(exitCode); + } + } catch (err) { + defaultRuntime.error(danger(String(err))); + defaultRuntime.exit(2); + } + }); + + secrets + .command("configure") + .description("Interactive secrets helper (provider setup + SecretRef mapping + preflight)") + .option("--apply", "Apply changes immediately after preflight", false) + .option("--yes", "Skip apply confirmation prompt", false) + .option("--providers-only", "Configure secrets.providers only, skip credential mapping", false) + .option( + "--skip-provider-setup", + "Skip provider setup and only map credential fields to existing providers", + false, + ) + .option("--plan-out ", "Write generated plan JSON to a file") + .option("--json", "Output JSON", false) + .action(async (opts: SecretsConfigureOptions) => { + try { + const configured = await runSecretsConfigureInteractive({ + providersOnly: Boolean(opts.providersOnly), + skipProviderSetup: Boolean(opts.skipProviderSetup), + }); + if (opts.planOut) { + fs.writeFileSync(opts.planOut, `${JSON.stringify(configured.plan, null, 2)}\n`, "utf8"); + } + if (opts.json) { + defaultRuntime.log( + JSON.stringify( + { + plan: configured.plan, + preflight: configured.preflight, + }, + null, + 2, + ), + ); + } else { + defaultRuntime.log( + `Preflight: changed=${configured.preflight.changed}, files=${configured.preflight.changedFiles.length}, warnings=${configured.preflight.warningCount}.`, + ); + if (configured.preflight.warningCount > 0) { + for (const warning of configured.preflight.warnings) { + defaultRuntime.log(`- warning: ${warning}`); + } + } + const providerUpserts = Object.keys(configured.plan.providerUpserts ?? {}).length; + const providerDeletes = configured.plan.providerDeletes?.length ?? 
0; + defaultRuntime.log( + `Plan: targets=${configured.plan.targets.length}, providerUpserts=${providerUpserts}, providerDeletes=${providerDeletes}.`, + ); + if (opts.planOut) { + defaultRuntime.log(`Plan written to ${opts.planOut}`); + } + } + + let shouldApply = Boolean(opts.apply); + if (!shouldApply && !opts.json) { + const approved = await confirm({ + message: "Apply this plan now?", + initialValue: true, + }); + if (typeof approved === "boolean") { + shouldApply = approved; + } + } + if (shouldApply) { + const needsIrreversiblePrompt = Boolean(opts.apply); + if (needsIrreversiblePrompt && !opts.yes && !opts.json) { + const confirmed = await confirm({ + message: + "This migration is one-way for migrated plaintext values. Continue with apply?", + initialValue: true, + }); + if (confirmed !== true) { + defaultRuntime.log("Apply cancelled."); + return; + } + } + const result = await runSecretsApply({ + plan: configured.plan, + write: true, + }); + if (opts.json) { + defaultRuntime.log(JSON.stringify(result, null, 2)); + return; + } + defaultRuntime.log( + result.changed + ? `Secrets applied. Updated ${result.changedFiles.length} file(s).` + : "Secrets apply: no changes.", + ); + } + } catch (err) { + defaultRuntime.error(danger(String(err))); + defaultRuntime.exit(1); + } + }); + + secrets + .command("apply") + .description("Apply a previously generated secrets plan") + .requiredOption("--from ", "Path to plan JSON") + .option("--dry-run", "Validate/preflight only", false) + .option("--json", "Output JSON", false) + .action(async (opts: SecretsApplyOptions) => { + try { + const plan = readPlanFile(opts.from); + const result = await runSecretsApply({ + plan, + write: !opts.dryRun, + }); + if (opts.json) { + defaultRuntime.log(JSON.stringify(result, null, 2)); + return; + } + if (opts.dryRun) { + defaultRuntime.log( + result.changed + ? 
`Secrets apply dry run: ${result.changedFiles.length} file(s) would change.` + : "Secrets apply dry run: no changes.", + ); + return; + } + defaultRuntime.log( + result.changed + ? `Secrets applied. Updated ${result.changedFiles.length} file(s).` + : "Secrets apply: no changes.", + ); + } catch (err) { + defaultRuntime.error(danger(String(err))); + defaultRuntime.exit(1); + } + }); +} diff --git a/src/cli/update-cli/progress.test.ts b/src/cli/update-cli/progress.test.ts new file mode 100644 index 00000000000..d8ddf52128e --- /dev/null +++ b/src/cli/update-cli/progress.test.ts @@ -0,0 +1,56 @@ +import { describe, expect, it } from "vitest"; +import type { UpdateRunResult } from "../../infra/update-runner.js"; +import { inferUpdateFailureHints } from "./progress.js"; + +function makeResult( + stepName: string, + stderrTail: string, + mode: UpdateRunResult["mode"] = "npm", +): UpdateRunResult { + return { + status: "error", + mode, + reason: stepName, + steps: [ + { + name: stepName, + command: "npm i -g openclaw@latest", + cwd: "/tmp", + durationMs: 1, + exitCode: 1, + stderrTail, + }, + ], + durationMs: 1, + }; +} + +describe("inferUpdateFailureHints", () => { + it("returns EACCES hint for global update permission failures", () => { + const result = makeResult( + "global update", + "npm ERR! code EACCES\nnpm ERR! Error: EACCES: permission denied", + ); + const hints = inferUpdateFailureHints(result); + expect(hints.join("\n")).toContain("EACCES"); + expect(hints.join("\n")).toContain("npm config set prefix ~/.local"); + }); + + it("returns native optional dependency hint for node-gyp/opus failures", () => { + const result = makeResult( + "global update", + "node-pre-gyp ERR!\n@discordjs/opus\nnode-gyp rebuild failed", + ); + const hints = inferUpdateFailureHints(result); + expect(hints.join("\n")).toContain("--omit=optional"); + }); + + it("does not return npm hints for non-npm install modes", () => { + const result = makeResult( + "global update", + "npm ERR! 
code EACCES\nnpm ERR! Error: EACCES: permission denied", + "pnpm", + ); + expect(inferUpdateFailureHints(result)).toEqual([]); + }); +}); diff --git a/src/cli/update-cli/progress.ts b/src/cli/update-cli/progress.ts index 1fd2f3d2047..edaf4d3d665 100644 --- a/src/cli/update-cli/progress.ts +++ b/src/cli/update-cli/progress.ts @@ -28,6 +28,7 @@ const STEP_LABELS: Record = { "openclaw doctor": "Running doctor checks", "git rev-parse HEAD (after)": "Verifying update", "global update": "Updating via package manager", + "global update (omit optional)": "Retrying update without optional deps", "global install": "Installing global package", }; @@ -35,6 +36,40 @@ function getStepLabel(step: UpdateStepInfo): string { return STEP_LABELS[step.name] ?? step.name; } +export function inferUpdateFailureHints(result: UpdateRunResult): string[] { + if (result.status !== "error" || result.mode !== "npm") { + return []; + } + const failedStep = [...result.steps].toReversed().find((step) => step.exitCode !== 0); + if (!failedStep) { + return []; + } + + const stderr = (failedStep.stderrTail ?? "").toLowerCase(); + const hints: string[] = []; + + if (failedStep.name.startsWith("global update") && stderr.includes("eacces")) { + hints.push( + "Detected permission failure (EACCES). Re-run with a writable global prefix or sudo (for system-managed Node installs).", + ); + hints.push("Example: npm config set prefix ~/.local && npm i -g openclaw@latest"); + } + + if ( + failedStep.name.startsWith("global update") && + (stderr.includes("node-gyp") || + stderr.includes("@discordjs/opus") || + stderr.includes("prebuild")) + ) { + hints.push( + "Detected native optional dependency build failure (e.g. opus). 
The updater retries with --omit=optional automatically.", + ); + hints.push("If it still fails: npm i -g openclaw@latest --omit=optional"); + } + + return hints; +} + export type ProgressController = { progress: UpdateStepProgress; stop: () => void; @@ -151,6 +186,15 @@ export function printResult(result: UpdateRunResult, opts: PrintResultOptions): } } + const hints = inferUpdateFailureHints(result); + if (hints.length > 0) { + defaultRuntime.log(""); + defaultRuntime.log(theme.heading("Recovery hints:")); + for (const hint of hints) { + defaultRuntime.log(` - ${theme.warn(hint)}`); + } + } + defaultRuntime.log(""); defaultRuntime.log(`Total time: ${theme.muted(formatDurationPrecise(result.durationMs))}`); } diff --git a/src/commands/agent.acp.test.ts b/src/commands/agent.acp.test.ts new file mode 100644 index 00000000000..cd8934799f0 --- /dev/null +++ b/src/commands/agent.acp.test.ts @@ -0,0 +1,294 @@ +import fs from "node:fs"; +import path from "node:path"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js"; +import * as acpManagerModule from "../acp/control-plane/manager.js"; +import { AcpRuntimeError } from "../acp/runtime/errors.js"; +import * as embeddedModule from "../agents/pi-embedded.js"; +import type { OpenClawConfig } from "../config/config.js"; +import * as configModule from "../config/config.js"; +import type { RuntimeEnv } from "../runtime.js"; +import { agentCommand } from "./agent.js"; + +const loadConfigSpy = vi.spyOn(configModule, "loadConfig"); +const runEmbeddedPiAgentSpy = vi.spyOn(embeddedModule, "runEmbeddedPiAgent"); +const getAcpSessionManagerSpy = vi.spyOn(acpManagerModule, "getAcpSessionManager"); + +const runtime: RuntimeEnv = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(() => { + throw new Error("exit"); + }), +}; + +async function withTempHome(fn: (home: string) => Promise): Promise { + return withTempHomeBase(fn, { prefix: 
"openclaw-agent-acp-" }); +} + +function mockConfig(home: string, storePath: string) { + loadConfigSpy.mockReturnValue({ + acp: { + enabled: true, + backend: "acpx", + allowedAgents: ["codex"], + dispatch: { enabled: true }, + }, + agents: { + defaults: { + model: { primary: "openai/gpt-5.3-codex" }, + models: { "openai/gpt-5.3-codex": {} }, + workspace: path.join(home, "openclaw"), + }, + }, + session: { store: storePath, mainKey: "main" }, + } satisfies OpenClawConfig); +} + +function mockConfigWithAcpOverrides( + home: string, + storePath: string, + acpOverrides: Partial>, +) { + loadConfigSpy.mockReturnValue({ + acp: { + enabled: true, + backend: "acpx", + allowedAgents: ["codex"], + dispatch: { enabled: true }, + ...acpOverrides, + }, + agents: { + defaults: { + model: { primary: "openai/gpt-5.3-codex" }, + models: { "openai/gpt-5.3-codex": {} }, + workspace: path.join(home, "openclaw"), + }, + }, + session: { store: storePath, mainKey: "main" }, + } satisfies OpenClawConfig); +} + +function writeAcpSessionStore(storePath: string) { + fs.mkdirSync(path.dirname(storePath), { recursive: true }); + fs.writeFileSync( + storePath, + JSON.stringify( + { + "agent:codex:acp:test": { + sessionId: "acp-session-1", + updatedAt: Date.now(), + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "agent:codex:acp:test", + mode: "oneshot", + state: "idle", + lastActivityAt: Date.now(), + }, + }, + }, + null, + 2, + ), + ); +} + +function resolveReadySession( + sessionKey: string, + agent = "codex", +): ReturnType["resolveSession"]> { + return { + kind: "ready", + sessionKey, + meta: { + backend: "acpx", + agent, + runtimeSessionName: sessionKey, + mode: "oneshot", + state: "idle", + lastActivityAt: Date.now(), + }, + }; +} + +function mockAcpManager(params: { + runTurn: (params: unknown) => Promise; + resolveSession?: (params: { + cfg: OpenClawConfig; + sessionKey: string; + }) => ReturnType["resolveSession"]>; +}) { + getAcpSessionManagerSpy.mockReturnValue({ + 
runTurn: params.runTurn, + resolveSession: + params.resolveSession ?? + ((input) => { + return resolveReadySession(input.sessionKey); + }), + } as unknown as ReturnType); +} + +describe("agentCommand ACP runtime routing", () => { + beforeEach(() => { + vi.clearAllMocks(); + runEmbeddedPiAgentSpy.mockResolvedValue({ + payloads: [{ text: "embedded" }], + meta: { + durationMs: 5, + }, + } as never); + }); + + it("routes ACP sessions through AcpSessionManager instead of embedded agent", async () => { + await withTempHome(async (home) => { + const storePath = path.join(home, "sessions.json"); + writeAcpSessionStore(storePath); + mockConfig(home, storePath); + + const runTurn = vi.fn(async (paramsUnknown: unknown) => { + const params = paramsUnknown as { + onEvent?: (event: { type: string; text?: string; stopReason?: string }) => Promise; + }; + await params.onEvent?.({ type: "text_delta", text: "ACP_" }); + await params.onEvent?.({ type: "text_delta", text: "OK" }); + await params.onEvent?.({ type: "done", stopReason: "stop" }); + }); + + mockAcpManager({ + runTurn: (params: unknown) => runTurn(params), + }); + + await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); + + expect(runTurn).toHaveBeenCalledWith( + expect.objectContaining({ + sessionKey: "agent:codex:acp:test", + text: "ping", + mode: "prompt", + }), + ); + expect(runEmbeddedPiAgentSpy).not.toHaveBeenCalled(); + const hasAckLog = vi + .mocked(runtime.log) + .mock.calls.some(([first]) => typeof first === "string" && first.includes("ACP_OK")); + expect(hasAckLog).toBe(true); + }); + }); + + it("fails closed for ACP-shaped session keys missing ACP metadata", async () => { + await withTempHome(async (home) => { + const storePath = path.join(home, "sessions.json"); + fs.mkdirSync(path.dirname(storePath), { recursive: true }); + fs.writeFileSync( + storePath, + JSON.stringify( + { + "agent:codex:acp:stale": { + sessionId: "stale-1", + updatedAt: Date.now(), + }, + }, + null, + 2, + 
), + ); + mockConfig(home, storePath); + + const runTurn = vi.fn(async (_params: unknown) => {}); + mockAcpManager({ + runTurn: (params: unknown) => runTurn(params), + resolveSession: ({ sessionKey }) => { + return { + kind: "stale", + sessionKey, + error: new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + `ACP metadata is missing for session ${sessionKey}.`, + ), + }; + }, + }); + + await expect( + agentCommand({ message: "ping", sessionKey: "agent:codex:acp:stale" }, runtime), + ).rejects.toMatchObject({ + code: "ACP_SESSION_INIT_FAILED", + message: expect.stringContaining("ACP metadata is missing"), + }); + expect(runTurn).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentSpy).not.toHaveBeenCalled(); + }); + }); + + it("blocks ACP turns when ACP is disabled by policy", async () => { + await withTempHome(async (home) => { + const storePath = path.join(home, "sessions.json"); + writeAcpSessionStore(storePath); + mockConfigWithAcpOverrides(home, storePath, { + enabled: false, + }); + + const runTurn = vi.fn(async (_params: unknown) => {}); + mockAcpManager({ + runTurn: (params: unknown) => runTurn(params), + }); + + await expect( + agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime), + ).rejects.toMatchObject({ + code: "ACP_DISPATCH_DISABLED", + }); + expect(runTurn).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentSpy).not.toHaveBeenCalled(); + }); + }); + + it("blocks ACP turns when ACP dispatch is disabled by policy", async () => { + await withTempHome(async (home) => { + const storePath = path.join(home, "sessions.json"); + writeAcpSessionStore(storePath); + mockConfigWithAcpOverrides(home, storePath, { + dispatch: { enabled: false }, + }); + + const runTurn = vi.fn(async (_params: unknown) => {}); + mockAcpManager({ + runTurn: (params: unknown) => runTurn(params), + }); + + await expect( + agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime), + ).rejects.toMatchObject({ + code: "ACP_DISPATCH_DISABLED", + 
}); + expect(runTurn).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentSpy).not.toHaveBeenCalled(); + }); + }); + + it("blocks ACP turns when ACP agent is disallowed by policy", async () => { + await withTempHome(async (home) => { + const storePath = path.join(home, "sessions.json"); + writeAcpSessionStore(storePath); + mockConfigWithAcpOverrides(home, storePath, { + allowedAgents: ["claude"], + }); + + const runTurn = vi.fn(async (_params: unknown) => {}); + mockAcpManager({ + runTurn: (params: unknown) => runTurn(params), + resolveSession: ({ sessionKey }) => resolveReadySession(sessionKey, "codex"), + }); + + await expect( + agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime), + ).rejects.toMatchObject({ + code: "ACP_SESSION_INIT_FAILED", + message: expect.stringContaining("not allowed by policy"), + }); + expect(runTurn).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentSpy).not.toHaveBeenCalled(); + }); + }); +}); diff --git a/src/commands/agent.delivery.test.ts b/src/commands/agent.delivery.test.ts index baa44213ab4..e13cf219966 100644 --- a/src/commands/agent.delivery.test.ts +++ b/src/commands/agent.delivery.test.ts @@ -48,6 +48,7 @@ describe("deliverAgentCommandResult", () => { async function runDelivery(params: { opts: Record; + outboundSession?: { key?: string; agentId?: string }; sessionEntry?: SessionEntry; runtime?: RuntimeEnv; resultText?: string; @@ -62,6 +63,7 @@ describe("deliverAgentCommandResult", () => { deps, runtime, opts: params.opts as never, + outboundSession: params.outboundSession, sessionEntry: params.sessionEntry, result, payloads: result.payloads, @@ -234,6 +236,30 @@ describe("deliverAgentCommandResult", () => { ); }); + it("uses caller-provided outbound session context when opts.sessionKey is absent", async () => { + await runDelivery({ + opts: { + message: "hello", + deliver: true, + channel: "whatsapp", + to: "+15551234567", + }, + outboundSession: { + key: "agent:exec:hook:gmail:thread-1", + agentId: 
"exec", + }, + }); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + session: expect.objectContaining({ + key: "agent:exec:hook:gmail:thread-1", + agentId: "exec", + }), + }), + ); + }); + it("prefixes nested agent outputs with context", async () => { const runtime = createRuntime(); await runDelivery({ diff --git a/src/commands/agent.test.ts b/src/commands/agent.test.ts index 0118e076365..038e9651777 100644 --- a/src/commands/agent.test.ts +++ b/src/commands/agent.test.ts @@ -14,6 +14,7 @@ import { setActivePluginRegistry } from "../plugins/runtime.js"; import type { RuntimeEnv } from "../runtime.js"; import { createOutboundTestPlugin, createTestRegistry } from "../test-utils/channel-plugins.js"; import { agentCommand } from "./agent.js"; +import * as agentDeliveryModule from "./agent/delivery.js"; vi.mock("../agents/auth-profiles.js", async (importOriginal) => { const actual = await importOriginal(); @@ -49,6 +50,7 @@ const runtime: RuntimeEnv = { const configSpy = vi.spyOn(configModule, "loadConfig"); const runCliAgentSpy = vi.spyOn(cliRunnerModule, "runCliAgent"); +const deliverAgentCommandResultSpy = vi.spyOn(agentDeliveryModule, "deliverAgentCommandResult"); async function withTempHome(fn: (home: string) => Promise): Promise { return withTempHomeBase(fn, { prefix: "openclaw-agent-" }); @@ -230,6 +232,35 @@ describe("agentCommand", () => { }); }); + it("forwards resolved outbound session context when resuming by sessionId", async () => { + await withTempHome(async (home) => { + const storePattern = path.join(home, "sessions", "{agentId}", "sessions.json"); + const execStore = path.join(home, "sessions", "exec", "sessions.json"); + writeSessionStoreSeed(execStore, { + "agent:exec:hook:gmail:thread-1": { + sessionId: "session-exec-hook", + updatedAt: Date.now(), + systemSent: true, + }, + }); + mockConfig(home, storePattern, undefined, undefined, [ + { id: "dev" }, + { id: "exec", default: true }, + ]); + + await 
agentCommand({ message: "resume me", sessionId: "session-exec-hook" }, runtime); + + const deliverCall = deliverAgentCommandResultSpy.mock.calls.at(-1)?.[0]; + expect(deliverCall?.opts.sessionKey).toBeUndefined(); + expect(deliverCall?.outboundSession).toEqual( + expect.objectContaining({ + key: "agent:exec:hook:gmail:thread-1", + agentId: "exec", + }), + ); + }); + }); + it("resolves resumed session transcript path from custom session store directory", async () => { await withTempHome(async (home) => { const customStoreDir = path.join(home, "custom-state"); @@ -409,6 +440,73 @@ describe("agentCommand", () => { }); }); + it("persists cleared model and auth override fields when stored override falls back to default", async () => { + await withTempHome(async (home) => { + const store = path.join(home, "sessions.json"); + writeSessionStoreSeed(store, { + "agent:main:subagent:clear-overrides": { + sessionId: "session-clear-overrides", + updatedAt: Date.now(), + providerOverride: "anthropic", + modelOverride: "claude-opus-4-5", + authProfileOverride: "profile-legacy", + authProfileOverrideSource: "user", + authProfileOverrideCompactionCount: 2, + fallbackNoticeSelectedModel: "anthropic/claude-opus-4-5", + fallbackNoticeActiveModel: "openai/gpt-4.1-mini", + fallbackNoticeReason: "fallback", + }, + }); + + mockConfig(home, store, { + model: { primary: "openai/gpt-4.1-mini" }, + models: { + "openai/gpt-4.1-mini": {}, + }, + }); + + vi.mocked(loadModelCatalog).mockResolvedValueOnce([ + { id: "claude-opus-4-5", name: "Opus", provider: "anthropic" }, + { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" }, + ]); + + await agentCommand( + { + message: "hi", + sessionKey: "agent:main:subagent:clear-overrides", + }, + runtime, + ); + + const callArgs = vi.mocked(runEmbeddedPiAgent).mock.calls.at(-1)?.[0]; + expect(callArgs?.provider).toBe("openai"); + expect(callArgs?.model).toBe("gpt-4.1-mini"); + + const saved = JSON.parse(fs.readFileSync(store, "utf-8")) as Record< 
+ string, + { + providerOverride?: string; + modelOverride?: string; + authProfileOverride?: string; + authProfileOverrideSource?: string; + authProfileOverrideCompactionCount?: number; + fallbackNoticeSelectedModel?: string; + fallbackNoticeActiveModel?: string; + fallbackNoticeReason?: string; + } + >; + const entry = saved["agent:main:subagent:clear-overrides"]; + expect(entry?.providerOverride).toBeUndefined(); + expect(entry?.modelOverride).toBeUndefined(); + expect(entry?.authProfileOverride).toBeUndefined(); + expect(entry?.authProfileOverrideSource).toBeUndefined(); + expect(entry?.authProfileOverrideCompactionCount).toBeUndefined(); + expect(entry?.fallbackNoticeSelectedModel).toBeUndefined(); + expect(entry?.fallbackNoticeActiveModel).toBeUndefined(); + expect(entry?.fallbackNoticeReason).toBeUndefined(); + }); + }); + it("keeps explicit sessionKey even when sessionId exists elsewhere", async () => { await withTempHome(async (home) => { const store = path.join(home, "sessions.json"); diff --git a/src/commands/agent.ts b/src/commands/agent.ts index ca4e42d314b..9d869a0f5d1 100644 --- a/src/commands/agent.ts +++ b/src/commands/agent.ts @@ -1,3 +1,6 @@ +import { getAcpSessionManager } from "../acp/control-plane/manager.js"; +import { resolveAcpAgentPolicyError, resolveAcpDispatchPolicyError } from "../acp/policy.js"; +import { toAcpRuntimeError } from "../acp/runtime/errors.js"; import { listAgentIds, resolveAgentDir, @@ -41,6 +44,7 @@ import { formatCliCommand } from "../cli/command-format.js"; import { type CliDeps, createDefaultDeps } from "../cli/deps.js"; import { loadConfig } from "../config/config.js"; import { + mergeSessionEntry, parseSessionThreadInfo, resolveAndPersistSessionFile, resolveAgentIdFromSessionKey, @@ -55,6 +59,7 @@ import { emitAgentEvent, registerAgentRunContext, } from "../infra/agent-events.js"; +import { buildOutboundSessionContext } from "../infra/outbound/session-context.js"; import { getRemoteSkillEligibility } from 
"../infra/skills-remote.js"; import { normalizeAgentId } from "../routing/session-key.js"; import { defaultRuntime, type RuntimeEnv } from "../runtime.js"; @@ -75,11 +80,40 @@ type PersistSessionEntryParams = { entry: SessionEntry; }; +type OverrideFieldClearedByDelete = + | "providerOverride" + | "modelOverride" + | "authProfileOverride" + | "authProfileOverrideSource" + | "authProfileOverrideCompactionCount" + | "fallbackNoticeSelectedModel" + | "fallbackNoticeActiveModel" + | "fallbackNoticeReason"; + +const OVERRIDE_FIELDS_CLEARED_BY_DELETE: OverrideFieldClearedByDelete[] = [ + "providerOverride", + "modelOverride", + "authProfileOverride", + "authProfileOverrideSource", + "authProfileOverrideCompactionCount", + "fallbackNoticeSelectedModel", + "fallbackNoticeActiveModel", + "fallbackNoticeReason", +]; + async function persistSessionEntry(params: PersistSessionEntryParams): Promise { - params.sessionStore[params.sessionKey] = params.entry; - await updateSessionStore(params.storePath, (store) => { - store[params.sessionKey] = params.entry; + const persisted = await updateSessionStore(params.storePath, (store) => { + const merged = mergeSessionEntry(store[params.sessionKey], params.entry); + // Preserve explicit `delete` clears done by session override helpers. + for (const field of OVERRIDE_FIELDS_CLEARED_BY_DELETE) { + if (!Object.hasOwn(params.entry, field)) { + Reflect.deleteProperty(merged, field); + } + } + store[params.sessionKey] = merged; + return merged; }); + params.sessionStore[params.sessionKey] = persisted; } function resolveFallbackRetryPrompt(params: { body: string; isFallbackRetry: boolean }): string { @@ -283,6 +317,11 @@ export async function agentCommand( sessionKey: sessionKey ?? 
opts.sessionKey?.trim(), config: cfg, }); + const outboundSession = buildOutboundSessionContext({ + cfg, + agentId: sessionAgentId, + sessionKey, + }); const workspaceDirRaw = resolveAgentWorkspaceDir(cfg, sessionAgentId); const agentDir = resolveAgentDir(cfg, sessionAgentId); const workspace = await ensureAgentWorkspace({ @@ -292,6 +331,13 @@ export async function agentCommand( const workspaceDir = workspace.dir; let sessionEntry = resolvedSessionEntry; const runId = opts.runId?.trim() || sessionId; + const acpManager = getAcpSessionManager(); + const acpResolution = sessionKey + ? acpManager.resolveSession({ + cfg, + sessionKey, + }) + : null; try { if (opts.deliver === true) { @@ -307,6 +353,127 @@ export async function agentCommand( } } + if (acpResolution?.kind === "stale") { + throw acpResolution.error; + } + + if (acpResolution?.kind === "ready" && sessionKey) { + const startedAt = Date.now(); + registerAgentRunContext(runId, { + sessionKey, + }); + emitAgentEvent({ + runId, + stream: "lifecycle", + data: { + phase: "start", + startedAt, + }, + }); + + let streamedText = ""; + let stopReason: string | undefined; + try { + const dispatchPolicyError = resolveAcpDispatchPolicyError(cfg); + if (dispatchPolicyError) { + throw dispatchPolicyError; + } + const acpAgent = normalizeAgentId( + acpResolution.meta.agent || resolveAgentIdFromSessionKey(sessionKey), + ); + const agentPolicyError = resolveAcpAgentPolicyError(cfg, acpAgent); + if (agentPolicyError) { + throw agentPolicyError; + } + + await acpManager.runTurn({ + cfg, + sessionKey, + text: body, + mode: "prompt", + requestId: runId, + signal: opts.abortSignal, + onEvent: (event) => { + if (event.type === "done") { + stopReason = event.stopReason; + return; + } + if (event.type !== "text_delta") { + return; + } + if (event.stream && event.stream !== "output") { + return; + } + if (!event.text) { + return; + } + streamedText += event.text; + emitAgentEvent({ + runId, + stream: "assistant", + data: { + text: 
streamedText, + delta: event.text, + }, + }); + }, + }); + } catch (error) { + const acpError = toAcpRuntimeError({ + error, + fallbackCode: "ACP_TURN_FAILED", + fallbackMessage: "ACP turn failed before completion.", + }); + emitAgentEvent({ + runId, + stream: "lifecycle", + data: { + phase: "error", + error: acpError.message, + endedAt: Date.now(), + }, + }); + throw acpError; + } + + emitAgentEvent({ + runId, + stream: "lifecycle", + data: { + phase: "end", + endedAt: Date.now(), + }, + }); + + const finalText = streamedText.trim(); + const payloads = finalText + ? [ + { + text: finalText, + }, + ] + : []; + const result = { + payloads, + meta: { + durationMs: Date.now() - startedAt, + aborted: opts.abortSignal?.aborted === true, + stopReason, + }, + }; + + return await deliverAgentCommandResult({ + cfg, + deps, + runtime, + opts, + outboundSession, + sessionEntry, + result, + payloads, + }); + } + let resolvedThinkLevel = thinkOnce ?? thinkOverride ?? @@ -649,6 +816,7 @@ export async function agentCommand( deps, runtime, opts, + outboundSession, sessionEntry, result, payloads, diff --git a/src/commands/agent/delivery.ts b/src/commands/agent/delivery.ts index caecb2a6283..282ed52e45e 100644 --- a/src/commands/agent/delivery.ts +++ b/src/commands/agent/delivery.ts @@ -1,4 +1,3 @@ -import { resolveSessionAgentId } from "../../agents/agent-scope.js"; import { AGENT_LANE_NESTED } from "../../agents/lanes.js"; import { getChannelPlugin, normalizeChannelId } from "../../channels/plugins/index.js"; import { createOutboundSendDeps, type CliDeps } from "../../cli/outbound-send-deps.js"; @@ -17,6 +16,7 @@ import { normalizeOutboundPayloads, normalizeOutboundPayloadsForJson, } from "../../infra/outbound/payloads.js"; +import type { OutboundSessionContext } from "../../infra/outbound/session-context.js"; import type { RuntimeEnv } from "../../runtime.js"; import { isInternalMessageChannel } from "../../utils/message-channel.js"; import type { AgentCommandOpts } from 
"./types.js"; @@ -27,9 +27,9 @@ type RunResult = Awaited< const NESTED_LOG_PREFIX = "[agent:nested]"; -function formatNestedLogPrefix(opts: AgentCommandOpts): string { +function formatNestedLogPrefix(opts: AgentCommandOpts, sessionKey?: string): string { const parts = [NESTED_LOG_PREFIX]; - const session = opts.sessionKey ?? opts.sessionId; + const session = sessionKey ?? opts.sessionKey ?? opts.sessionId; if (session) { parts.push(`session=${session}`); } @@ -49,8 +49,13 @@ function formatNestedLogPrefix(opts: AgentCommandOpts): string { return parts.join(" "); } -function logNestedOutput(runtime: RuntimeEnv, opts: AgentCommandOpts, output: string) { - const prefix = formatNestedLogPrefix(opts); +function logNestedOutput( + runtime: RuntimeEnv, + opts: AgentCommandOpts, + output: string, + sessionKey?: string, +) { + const prefix = formatNestedLogPrefix(opts, sessionKey); for (const line of output.split(/\r?\n/)) { if (!line) { continue; @@ -64,11 +69,13 @@ export async function deliverAgentCommandResult(params: { deps: CliDeps; runtime: RuntimeEnv; opts: AgentCommandOpts; + outboundSession: OutboundSessionContext | undefined; sessionEntry: SessionEntry | undefined; result: RunResult; payloads: RunResult["payloads"]; }) { - const { cfg, deps, runtime, opts, sessionEntry, payloads, result } = params; + const { cfg, deps, runtime, opts, outboundSession, sessionEntry, payloads, result } = params; + const effectiveSessionKey = outboundSession?.key ?? opts.sessionKey; const deliver = opts.deliver === true; const bestEffortDeliver = opts.bestEffortDeliver === true; const turnSourceChannel = opts.runContext?.messageChannel ?? 
opts.messageChannel; @@ -200,7 +207,7 @@ export async function deliverAgentCommandResult(params: { return; } if (opts.lane === AGENT_LANE_NESTED) { - logNestedOutput(runtime, opts, output); + logNestedOutput(runtime, opts, output, effectiveSessionKey); return; } runtime.log(output); @@ -212,18 +219,13 @@ export async function deliverAgentCommandResult(params: { } if (deliver && deliveryChannel && !isInternalMessageChannel(deliveryChannel)) { if (deliveryTarget) { - const deliveryAgentId = - opts.agentId ?? - (opts.sessionKey - ? resolveSessionAgentId({ sessionKey: opts.sessionKey, config: cfg }) - : undefined); await deliverOutboundPayloads({ cfg, channel: deliveryChannel, to: deliveryTarget, accountId: resolvedAccountId, payloads: deliveryPayloads, - agentId: deliveryAgentId, + session: outboundSession, replyToId: resolvedReplyToId ?? null, threadId: resolvedThreadTarget ?? null, bestEffort: bestEffortDeliver, diff --git a/src/commands/agent/session-store.test.ts b/src/commands/agent/session-store.test.ts new file mode 100644 index 00000000000..19de2486cbb --- /dev/null +++ b/src/commands/agent/session-store.test.ts @@ -0,0 +1,66 @@ +import { randomUUID } from "node:crypto"; +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import type { SessionEntry } from "../../config/sessions.js"; +import { loadSessionStore } from "../../config/sessions.js"; +import { updateSessionStoreAfterAgentRun } from "./session-store.js"; + +function acpMeta() { + return { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent" as const, + state: "idle" as const, + lastActivityAt: Date.now(), + }; +} + +describe("updateSessionStoreAfterAgentRun", () => { + it("preserves ACP metadata when caller has a stale session snapshot", async () => { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-session-store-")); + const storePath = path.join(dir, 
"sessions.json"); + const sessionKey = `agent:codex:acp:${randomUUID()}`; + const sessionId = randomUUID(); + + const existing: SessionEntry = { + sessionId, + updatedAt: Date.now(), + acp: acpMeta(), + }; + await fs.writeFile(storePath, JSON.stringify({ [sessionKey]: existing }, null, 2), "utf8"); + + const staleInMemory: Record = { + [sessionKey]: { + sessionId, + updatedAt: Date.now(), + }, + }; + + await updateSessionStoreAfterAgentRun({ + cfg: {} as never, + sessionId, + sessionKey, + storePath, + sessionStore: staleInMemory, + defaultProvider: "openai", + defaultModel: "gpt-5.3-codex", + result: { + payloads: [], + meta: { + aborted: false, + agentMeta: { + provider: "openai", + model: "gpt-5.3-codex", + }, + }, + } as never, + }); + + const persisted = loadSessionStore(storePath, { skipCache: true })[sessionKey]; + expect(persisted?.acp).toBeDefined(); + expect(staleInMemory[sessionKey]?.acp).toBeDefined(); + }); +}); diff --git a/src/commands/agent/session-store.ts b/src/commands/agent/session-store.ts index 21574090c12..cbc69b3b438 100644 --- a/src/commands/agent/session-store.ts +++ b/src/commands/agent/session-store.ts @@ -5,6 +5,7 @@ import { isCliProvider } from "../../agents/model-selection.js"; import { deriveSessionTotalTokens, hasNonzeroUsage } from "../../agents/usage.js"; import type { OpenClawConfig } from "../../config/config.js"; import { + mergeSessionEntry, setSessionRuntimeModel, type SessionEntry, updateSessionStore, @@ -94,8 +95,10 @@ export async function updateSessionStoreAfterAgentRun(params: { if (compactionsThisRun > 0) { next.compactionCount = (entry.compactionCount ?? 
0) + compactionsThisRun; } - sessionStore[sessionKey] = next; - await updateSessionStore(storePath, (store) => { - store[sessionKey] = next; + const persisted = await updateSessionStore(storePath, (store) => { + const merged = mergeSessionEntry(store[sessionKey], next); + store[sessionKey] = merged; + return merged; }); + sessionStore[sessionKey] = persisted; } diff --git a/src/commands/agents.bind.commands.test.ts b/src/commands/agents.bind.commands.test.ts new file mode 100644 index 00000000000..0fe03173be6 --- /dev/null +++ b/src/commands/agents.bind.commands.test.ts @@ -0,0 +1,200 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { baseConfigSnapshot, createTestRuntime } from "./test-runtime-config-helpers.js"; + +const readConfigFileSnapshotMock = vi.hoisted(() => vi.fn()); +const writeConfigFileMock = vi.hoisted(() => vi.fn().mockResolvedValue(undefined)); + +vi.mock("../config/config.js", async (importOriginal) => ({ + ...(await importOriginal()), + readConfigFileSnapshot: readConfigFileSnapshotMock, + writeConfigFile: writeConfigFileMock, +})); + +vi.mock("../channels/plugins/index.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + getChannelPlugin: (channel: string) => { + if (channel === "matrix-js") { + return { + id: "matrix-js", + setup: { + resolveBindingAccountId: ({ agentId }: { agentId: string }) => agentId.toLowerCase(), + }, + }; + } + return actual.getChannelPlugin(channel); + }, + normalizeChannelId: (channel: string) => { + if (channel.trim().toLowerCase() === "matrix-js") { + return "matrix-js"; + } + return actual.normalizeChannelId(channel); + }, + }; +}); + +import { agentsBindCommand, agentsBindingsCommand, agentsUnbindCommand } from "./agents.js"; + +const runtime = createTestRuntime(); + +describe("agents bind/unbind commands", () => { + beforeEach(() => { + readConfigFileSnapshotMock.mockClear(); + writeConfigFileMock.mockClear(); + runtime.log.mockClear(); + 
runtime.error.mockClear(); + runtime.exit.mockClear(); + }); + + it("lists all bindings by default", async () => { + readConfigFileSnapshotMock.mockResolvedValue({ + ...baseConfigSnapshot, + config: { + bindings: [ + { agentId: "main", match: { channel: "matrix-js" } }, + { agentId: "ops", match: { channel: "telegram", accountId: "work" } }, + ], + }, + }); + + await agentsBindingsCommand({}, runtime); + + expect(runtime.log).toHaveBeenCalledWith(expect.stringContaining("main <- matrix-js")); + expect(runtime.log).toHaveBeenCalledWith( + expect.stringContaining("ops <- telegram accountId=work"), + ); + }); + + it("binds routes to default agent when --agent is omitted", async () => { + readConfigFileSnapshotMock.mockResolvedValue({ + ...baseConfigSnapshot, + config: {}, + }); + + await agentsBindCommand({ bind: ["telegram"] }, runtime); + + expect(writeConfigFileMock).toHaveBeenCalledWith( + expect.objectContaining({ + bindings: [{ agentId: "main", match: { channel: "telegram" } }], + }), + ); + expect(runtime.exit).not.toHaveBeenCalled(); + }); + + it("defaults matrix-js accountId to the target agent id when omitted", async () => { + readConfigFileSnapshotMock.mockResolvedValue({ + ...baseConfigSnapshot, + config: {}, + }); + + await agentsBindCommand({ agent: "main", bind: ["matrix-js"] }, runtime); + + expect(writeConfigFileMock).toHaveBeenCalledWith( + expect.objectContaining({ + bindings: [{ agentId: "main", match: { channel: "matrix-js", accountId: "main" } }], + }), + ); + expect(runtime.exit).not.toHaveBeenCalled(); + }); + + it("upgrades existing channel-only binding when accountId is later provided", async () => { + readConfigFileSnapshotMock.mockResolvedValue({ + ...baseConfigSnapshot, + config: { + bindings: [{ agentId: "main", match: { channel: "telegram" } }], + }, + }); + + await agentsBindCommand({ bind: ["telegram:work"] }, runtime); + + expect(writeConfigFileMock).toHaveBeenCalledWith( + expect.objectContaining({ + bindings: [{ agentId: "main", 
match: { channel: "telegram", accountId: "work" } }], + }), + ); + expect(runtime.log).toHaveBeenCalledWith("Updated bindings:"); + expect(runtime.exit).not.toHaveBeenCalled(); + }); + + it("unbinds all routes for an agent", async () => { + readConfigFileSnapshotMock.mockResolvedValue({ + ...baseConfigSnapshot, + config: { + agents: { list: [{ id: "ops", workspace: "/tmp/ops" }] }, + bindings: [ + { agentId: "main", match: { channel: "matrix-js" } }, + { agentId: "ops", match: { channel: "telegram", accountId: "work" } }, + ], + }, + }); + + await agentsUnbindCommand({ agent: "ops", all: true }, runtime); + + expect(writeConfigFileMock).toHaveBeenCalledWith( + expect.objectContaining({ + bindings: [{ agentId: "main", match: { channel: "matrix-js" } }], + }), + ); + expect(runtime.exit).not.toHaveBeenCalled(); + }); + + it("reports ownership conflicts during unbind and exits 1", async () => { + readConfigFileSnapshotMock.mockResolvedValue({ + ...baseConfigSnapshot, + config: { + agents: { list: [{ id: "ops", workspace: "/tmp/ops" }] }, + bindings: [{ agentId: "main", match: { channel: "telegram", accountId: "ops" } }], + }, + }); + + await agentsUnbindCommand({ agent: "ops", bind: ["telegram:ops"] }, runtime); + + expect(writeConfigFileMock).not.toHaveBeenCalled(); + expect(runtime.error).toHaveBeenCalledWith("Bindings are owned by another agent:"); + expect(runtime.exit).toHaveBeenCalledWith(1); + }); + + it("keeps role-based bindings when removing channel-level discord binding", async () => { + readConfigFileSnapshotMock.mockResolvedValue({ + ...baseConfigSnapshot, + config: { + bindings: [ + { + agentId: "main", + match: { + channel: "discord", + accountId: "guild-a", + roles: ["111", "222"], + }, + }, + { + agentId: "main", + match: { + channel: "discord", + accountId: "guild-a", + }, + }, + ], + }, + }); + + await agentsUnbindCommand({ bind: ["discord:guild-a"] }, runtime); + + expect(writeConfigFileMock).toHaveBeenCalledWith( + expect.objectContaining({ + 
bindings: [ + { + agentId: "main", + match: { + channel: "discord", + accountId: "guild-a", + roles: ["111", "222"], + }, + }, + ], + }), + ); + expect(runtime.exit).not.toHaveBeenCalled(); + }); +}); diff --git a/src/commands/agents.bindings.ts b/src/commands/agents.bindings.ts index f0eaf959e1e..ca0c0ee649c 100644 --- a/src/commands/agents.bindings.ts +++ b/src/commands/agents.bindings.ts @@ -8,16 +8,51 @@ import type { ChannelChoice } from "./onboard-types.js"; function bindingMatchKey(match: AgentBinding["match"]) { const accountId = match.accountId?.trim() || DEFAULT_ACCOUNT_ID; + const identityKey = bindingMatchIdentityKey(match); + return [identityKey, accountId].join("|"); +} + +function bindingMatchIdentityKey(match: AgentBinding["match"]) { + const roles = Array.isArray(match.roles) + ? Array.from( + new Set( + match.roles + .map((role) => role.trim()) + .filter(Boolean) + .toSorted(), + ), + ) + : []; return [ match.channel, - accountId, match.peer?.kind ?? "", match.peer?.id ?? "", match.guildId ?? "", match.teamId ?? "", + roles.join(","), ].join("|"); } +function canUpgradeBindingAccountScope(params: { + existing: AgentBinding; + incoming: AgentBinding; + normalizedIncomingAgentId: string; +}): boolean { + if (!params.incoming.match.accountId?.trim()) { + return false; + } + if (params.existing.match.accountId?.trim()) { + return false; + } + if (normalizeAgentId(params.existing.agentId) !== params.normalizedIncomingAgentId) { + return false; + } + return ( + bindingMatchIdentityKey(params.existing.match) === + bindingMatchIdentityKey(params.incoming.match) + ); +} + export function describeBinding(binding: AgentBinding) { const match = binding.match; const parts = [match.channel]; @@ -42,10 +77,11 @@ export function applyAgentBindings( ): { config: OpenClawConfig; added: AgentBinding[]; + updated: AgentBinding[]; skipped: AgentBinding[]; conflicts: Array<{ binding: AgentBinding; existingAgentId: string }>; } { - const existing = cfg.bindings ?? 
[]; + const existing = [...(cfg.bindings ?? [])]; const existingMatchMap = new Map(); for (const binding of existing) { const key = bindingMatchKey(binding.match); @@ -55,6 +91,7 @@ export function applyAgentBindings( } const added: AgentBinding[] = []; + const updated: AgentBinding[] = []; const skipped: AgentBinding[] = []; const conflicts: Array<{ binding: AgentBinding; existingAgentId: string }> = []; @@ -70,12 +107,41 @@ export function applyAgentBindings( } continue; } + + const upgradeIndex = existing.findIndex((candidate) => + canUpgradeBindingAccountScope({ + existing: candidate, + incoming: binding, + normalizedIncomingAgentId: agentId, + }), + ); + if (upgradeIndex >= 0) { + const current = existing[upgradeIndex]; + if (!current) { + continue; + } + const previousKey = bindingMatchKey(current.match); + const upgradedBinding: AgentBinding = { + ...current, + agentId, + match: { + ...current.match, + accountId: binding.match.accountId?.trim(), + }, + }; + existing[upgradeIndex] = upgradedBinding; + existingMatchMap.delete(previousKey); + existingMatchMap.set(bindingMatchKey(upgradedBinding.match), agentId); + updated.push(upgradedBinding); + continue; + } + existingMatchMap.set(key, agentId); added.push({ ...binding, agentId }); } - if (added.length === 0) { - return { config: cfg, added, skipped, conflicts }; + if (added.length === 0 && updated.length === 0) { + return { config: cfg, added, updated, skipped, conflicts }; } return { @@ -84,11 +150,78 @@ export function applyAgentBindings( bindings: [...existing, ...added], }, added, + updated, skipped, conflicts, }; } +export function removeAgentBindings( + cfg: OpenClawConfig, + bindings: AgentBinding[], +): { + config: OpenClawConfig; + removed: AgentBinding[]; + missing: AgentBinding[]; + conflicts: Array<{ binding: AgentBinding; existingAgentId: string }>; +} { + const existing = cfg.bindings ?? 
[]; + const removeIndexes = new Set(); + const removed: AgentBinding[] = []; + const missing: AgentBinding[] = []; + const conflicts: Array<{ binding: AgentBinding; existingAgentId: string }> = []; + + for (const binding of bindings) { + const desiredAgentId = normalizeAgentId(binding.agentId); + const key = bindingMatchKey(binding.match); + let matchedIndex = -1; + let conflictingAgentId: string | null = null; + for (let i = 0; i < existing.length; i += 1) { + if (removeIndexes.has(i)) { + continue; + } + const current = existing[i]; + if (!current || bindingMatchKey(current.match) !== key) { + continue; + } + const currentAgentId = normalizeAgentId(current.agentId); + if (currentAgentId === desiredAgentId) { + matchedIndex = i; + break; + } + conflictingAgentId = currentAgentId; + } + if (matchedIndex >= 0) { + const matched = existing[matchedIndex]; + if (matched) { + removeIndexes.add(matchedIndex); + removed.push(matched); + } + continue; + } + if (conflictingAgentId) { + conflicts.push({ binding, existingAgentId: conflictingAgentId }); + continue; + } + missing.push(binding); + } + + if (removeIndexes.size === 0) { + return { config: cfg, removed, missing, conflicts }; + } + + const nextBindings = existing.filter((_, index) => !removeIndexes.has(index)); + return { + config: { + ...cfg, + bindings: nextBindings.length > 0 ? 
nextBindings : undefined, + }, + removed, + missing, + conflicts, + }; +} + function resolveDefaultAccountId(cfg: OpenClawConfig, provider: ChannelId): string { const plugin = getChannelPlugin(provider); if (!plugin) { @@ -97,6 +230,33 @@ function resolveDefaultAccountId(cfg: OpenClawConfig, provider: ChannelId): stri return resolveChannelDefaultAccountId({ plugin, cfg }); } +function resolveBindingAccountId(params: { + channel: ChannelId; + config: OpenClawConfig; + agentId: string; + explicitAccountId?: string; +}): string | undefined { + const explicitAccountId = params.explicitAccountId?.trim(); + if (explicitAccountId) { + return explicitAccountId; + } + + const plugin = getChannelPlugin(params.channel); + const pluginAccountId = plugin?.setup?.resolveBindingAccountId?.({ + cfg: params.config, + agentId: params.agentId, + }); + if (pluginAccountId?.trim()) { + return pluginAccountId.trim(); + } + + if (plugin?.meta.forceAccountBinding) { + return resolveDefaultAccountId(params.config, params.channel); + } + + return undefined; +} + export function buildChannelBindings(params: { agentId: string; selection: ChannelChoice[]; @@ -107,14 +267,14 @@ export function buildChannelBindings(params: { const agentId = normalizeAgentId(params.agentId); for (const channel of params.selection) { const match: AgentBinding["match"] = { channel }; - const accountId = params.accountIds?.[channel]?.trim(); + const accountId = resolveBindingAccountId({ + channel, + config: params.config, + agentId, + explicitAccountId: params.accountIds?.[channel], + }); if (accountId) { match.accountId = accountId; - } else { - const plugin = getChannelPlugin(channel); - if (plugin?.meta.forceAccountBinding) { - match.accountId = resolveDefaultAccountId(params.config, channel); - } } bindings.push({ agentId, match }); } @@ -141,17 +301,17 @@ export function parseBindingSpecs(params: { errors.push(`Unknown channel "${channelRaw}".`); continue; } - let accountId = accountRaw?.trim(); + let 
accountId: string | undefined = accountRaw?.trim(); if (accountRaw !== undefined && !accountId) { errors.push(`Invalid binding "${trimmed}" (empty account id).`); continue; } - if (!accountId) { - const plugin = getChannelPlugin(channel); - if (plugin?.meta.forceAccountBinding) { - accountId = resolveDefaultAccountId(params.config, channel); - } - } + accountId = resolveBindingAccountId({ + channel, + config: params.config, + agentId, + explicitAccountId: accountId, + }); const match: AgentBinding["match"] = { channel }; if (accountId) { match.accountId = accountId; diff --git a/src/commands/agents.commands.add.ts b/src/commands/agents.commands.add.ts index 807ecca0b20..61c45392f59 100644 --- a/src/commands/agents.commands.add.ts +++ b/src/commands/agents.commands.add.ts @@ -125,7 +125,7 @@ export async function agentsAddCommand( const bindingResult = bindingParse.bindings.length > 0 ? applyAgentBindings(nextConfig, bindingParse.bindings) - : { config: nextConfig, added: [], skipped: [], conflicts: [] }; + : { config: nextConfig, added: [], updated: [], skipped: [], conflicts: [] }; await writeConfigFile(bindingResult.config); if (!opts.json) { @@ -145,6 +145,7 @@ export async function agentsAddCommand( model, bindings: { added: bindingResult.added.map(describeBinding), + updated: bindingResult.updated.map(describeBinding), skipped: bindingResult.skipped.map(describeBinding), conflicts: bindingResult.conflicts.map( (conflict) => `${describeBinding(conflict.binding)} (agent=${conflict.existingAgentId})`, diff --git a/src/commands/agents.commands.bind.ts b/src/commands/agents.commands.bind.ts new file mode 100644 index 00000000000..b7a021053c6 --- /dev/null +++ b/src/commands/agents.commands.bind.ts @@ -0,0 +1,324 @@ +import { resolveDefaultAgentId } from "../agents/agent-scope.js"; +import { writeConfigFile } from "../config/config.js"; +import { logConfigUpdated } from "../config/logging.js"; +import type { AgentBinding } from "../config/types.js"; +import { 
normalizeAgentId } from "../routing/session-key.js"; +import type { RuntimeEnv } from "../runtime.js"; +import { defaultRuntime } from "../runtime.js"; +import { + applyAgentBindings, + describeBinding, + parseBindingSpecs, + removeAgentBindings, +} from "./agents.bindings.js"; +import { requireValidConfig } from "./agents.command-shared.js"; +import { buildAgentSummaries } from "./agents.config.js"; + +type AgentsBindingsListOptions = { + agent?: string; + json?: boolean; +}; + +type AgentsBindOptions = { + agent?: string; + bind?: string[]; + json?: boolean; +}; + +type AgentsUnbindOptions = { + agent?: string; + bind?: string[]; + all?: boolean; + json?: boolean; +}; + +function resolveAgentId( + cfg: Awaited>, + agentInput: string | undefined, + params?: { fallbackToDefault?: boolean }, +): string | null { + if (!cfg) { + return null; + } + if (agentInput?.trim()) { + return normalizeAgentId(agentInput); + } + if (params?.fallbackToDefault) { + return resolveDefaultAgentId(cfg); + } + return null; +} + +function hasAgent(cfg: Awaited>, agentId: string): boolean { + if (!cfg) { + return false; + } + return buildAgentSummaries(cfg).some((summary) => summary.id === agentId); +} + +function formatBindingOwnerLine(binding: AgentBinding): string { + return `${normalizeAgentId(binding.agentId)} <- ${describeBinding(binding)}`; +} + +export async function agentsBindingsCommand( + opts: AgentsBindingsListOptions, + runtime: RuntimeEnv = defaultRuntime, +) { + const cfg = await requireValidConfig(runtime); + if (!cfg) { + return; + } + + const filterAgentId = resolveAgentId(cfg, opts.agent?.trim()); + if (opts.agent && !filterAgentId) { + runtime.error("Agent id is required."); + runtime.exit(1); + return; + } + if (filterAgentId && !hasAgent(cfg, filterAgentId)) { + runtime.error(`Agent "${filterAgentId}" not found.`); + runtime.exit(1); + return; + } + + const filtered = (cfg.bindings ?? 
[]).filter( + (binding) => !filterAgentId || normalizeAgentId(binding.agentId) === filterAgentId, + ); + if (opts.json) { + runtime.log( + JSON.stringify( + filtered.map((binding) => ({ + agentId: normalizeAgentId(binding.agentId), + match: binding.match, + description: describeBinding(binding), + })), + null, + 2, + ), + ); + return; + } + + if (filtered.length === 0) { + runtime.log( + filterAgentId ? `No routing bindings for agent "${filterAgentId}".` : "No routing bindings.", + ); + return; + } + + runtime.log( + [ + "Routing bindings:", + ...filtered.map((binding) => `- ${formatBindingOwnerLine(binding)}`), + ].join("\n"), + ); +} + +export async function agentsBindCommand( + opts: AgentsBindOptions, + runtime: RuntimeEnv = defaultRuntime, +) { + const cfg = await requireValidConfig(runtime); + if (!cfg) { + return; + } + + const agentId = resolveAgentId(cfg, opts.agent?.trim(), { fallbackToDefault: true }); + if (!agentId) { + runtime.error("Unable to resolve agent id."); + runtime.exit(1); + return; + } + if (!hasAgent(cfg, agentId)) { + runtime.error(`Agent "${agentId}" not found.`); + runtime.exit(1); + return; + } + + const specs = (opts.bind ?? 
[]).map((value) => value.trim()).filter(Boolean); + if (specs.length === 0) { + runtime.error("Provide at least one --bind ."); + runtime.exit(1); + return; + } + + const parsed = parseBindingSpecs({ agentId, specs, config: cfg }); + if (parsed.errors.length > 0) { + runtime.error(parsed.errors.join("\n")); + runtime.exit(1); + return; + } + + const result = applyAgentBindings(cfg, parsed.bindings); + if (result.added.length > 0 || result.updated.length > 0) { + await writeConfigFile(result.config); + if (!opts.json) { + logConfigUpdated(runtime); + } + } + + const payload = { + agentId, + added: result.added.map(describeBinding), + updated: result.updated.map(describeBinding), + skipped: result.skipped.map(describeBinding), + conflicts: result.conflicts.map( + (conflict) => `${describeBinding(conflict.binding)} (agent=${conflict.existingAgentId})`, + ), + }; + if (opts.json) { + runtime.log(JSON.stringify(payload, null, 2)); + if (result.conflicts.length > 0) { + runtime.exit(1); + } + return; + } + + if (result.added.length > 0) { + runtime.log("Added bindings:"); + for (const binding of result.added) { + runtime.log(`- ${describeBinding(binding)}`); + } + } else if (result.updated.length === 0) { + runtime.log("No new bindings added."); + } + + if (result.updated.length > 0) { + runtime.log("Updated bindings:"); + for (const binding of result.updated) { + runtime.log(`- ${describeBinding(binding)}`); + } + } + + if (result.skipped.length > 0) { + runtime.log("Already present:"); + for (const binding of result.skipped) { + runtime.log(`- ${describeBinding(binding)}`); + } + } + + if (result.conflicts.length > 0) { + runtime.error("Skipped bindings already claimed by another agent:"); + for (const conflict of result.conflicts) { + runtime.error(`- ${describeBinding(conflict.binding)} (agent=${conflict.existingAgentId})`); + } + runtime.exit(1); + } +} + +export async function agentsUnbindCommand( + opts: AgentsUnbindOptions, + runtime: RuntimeEnv = defaultRuntime, 
+) { + const cfg = await requireValidConfig(runtime); + if (!cfg) { + return; + } + + const agentId = resolveAgentId(cfg, opts.agent?.trim(), { fallbackToDefault: true }); + if (!agentId) { + runtime.error("Unable to resolve agent id."); + runtime.exit(1); + return; + } + if (!hasAgent(cfg, agentId)) { + runtime.error(`Agent "${agentId}" not found.`); + runtime.exit(1); + return; + } + if (opts.all && (opts.bind?.length ?? 0) > 0) { + runtime.error("Use either --all or --bind, not both."); + runtime.exit(1); + return; + } + + if (opts.all) { + const existing = cfg.bindings ?? []; + const removed = existing.filter((binding) => normalizeAgentId(binding.agentId) === agentId); + const kept = existing.filter((binding) => normalizeAgentId(binding.agentId) !== agentId); + if (removed.length === 0) { + runtime.log(`No bindings to remove for agent "${agentId}".`); + return; + } + const next = { + ...cfg, + bindings: kept.length > 0 ? kept : undefined, + }; + await writeConfigFile(next); + if (!opts.json) { + logConfigUpdated(runtime); + } + const payload = { + agentId, + removed: removed.map(describeBinding), + missing: [] as string[], + conflicts: [] as string[], + }; + if (opts.json) { + runtime.log(JSON.stringify(payload, null, 2)); + return; + } + runtime.log(`Removed ${removed.length} binding(s) for "${agentId}".`); + return; + } + + const specs = (opts.bind ?? 
[]).map((value) => value.trim()).filter(Boolean); + if (specs.length === 0) { + runtime.error("Provide at least one --bind or use --all."); + runtime.exit(1); + return; + } + + const parsed = parseBindingSpecs({ agentId, specs, config: cfg }); + if (parsed.errors.length > 0) { + runtime.error(parsed.errors.join("\n")); + runtime.exit(1); + return; + } + + const result = removeAgentBindings(cfg, parsed.bindings); + if (result.removed.length > 0) { + await writeConfigFile(result.config); + if (!opts.json) { + logConfigUpdated(runtime); + } + } + + const payload = { + agentId, + removed: result.removed.map(describeBinding), + missing: result.missing.map(describeBinding), + conflicts: result.conflicts.map( + (conflict) => `${describeBinding(conflict.binding)} (agent=${conflict.existingAgentId})`, + ), + }; + if (opts.json) { + runtime.log(JSON.stringify(payload, null, 2)); + if (result.conflicts.length > 0) { + runtime.exit(1); + } + return; + } + + if (result.removed.length > 0) { + runtime.log("Removed bindings:"); + for (const binding of result.removed) { + runtime.log(`- ${describeBinding(binding)}`); + } + } else { + runtime.log("No bindings removed."); + } + if (result.missing.length > 0) { + runtime.log("Not found:"); + for (const binding of result.missing) { + runtime.log(`- ${describeBinding(binding)}`); + } + } + if (result.conflicts.length > 0) { + runtime.error("Bindings are owned by another agent:"); + for (const conflict of result.conflicts) { + runtime.error(`- ${describeBinding(conflict.binding)} (agent=${conflict.existingAgentId})`); + } + runtime.exit(1); + } +} diff --git a/src/commands/agents.test.ts b/src/commands/agents.test.ts index 1becb77548f..dfb339e4384 100644 --- a/src/commands/agents.test.ts +++ b/src/commands/agents.test.ts @@ -8,6 +8,7 @@ import { applyAgentConfig, buildAgentSummaries, pruneAgentConfig, + removeAgentBindings, } from "./agents.js"; describe("agents helpers", () => { @@ -111,6 +112,114 @@ describe("agents helpers", () => { 
expect(result.config.bindings).toHaveLength(2); }); + it("applyAgentBindings upgrades channel-only binding to account-specific binding for same agent", () => { + const cfg: OpenClawConfig = { + bindings: [ + { + agentId: "main", + match: { channel: "telegram" }, + }, + ], + }; + + const result = applyAgentBindings(cfg, [ + { + agentId: "main", + match: { channel: "telegram", accountId: "work" }, + }, + ]); + + expect(result.added).toHaveLength(0); + expect(result.updated).toHaveLength(1); + expect(result.conflicts).toHaveLength(0); + expect(result.config.bindings).toEqual([ + { + agentId: "main", + match: { channel: "telegram", accountId: "work" }, + }, + ]); + }); + + it("applyAgentBindings treats role-based bindings as distinct routes", () => { + const cfg: OpenClawConfig = { + bindings: [ + { + agentId: "main", + match: { + channel: "discord", + accountId: "guild-a", + guildId: "123", + roles: ["111", "222"], + }, + }, + ], + }; + + const result = applyAgentBindings(cfg, [ + { + agentId: "work", + match: { + channel: "discord", + accountId: "guild-a", + guildId: "123", + }, + }, + ]); + + expect(result.added).toHaveLength(1); + expect(result.conflicts).toHaveLength(0); + expect(result.config.bindings).toHaveLength(2); + }); + + it("removeAgentBindings does not remove role-based bindings when removing channel-level routes", () => { + const cfg: OpenClawConfig = { + bindings: [ + { + agentId: "main", + match: { + channel: "discord", + accountId: "guild-a", + guildId: "123", + roles: ["111", "222"], + }, + }, + { + agentId: "main", + match: { + channel: "discord", + accountId: "guild-a", + guildId: "123", + }, + }, + ], + }; + + const result = removeAgentBindings(cfg, [ + { + agentId: "main", + match: { + channel: "discord", + accountId: "guild-a", + guildId: "123", + }, + }, + ]); + + expect(result.removed).toHaveLength(1); + expect(result.conflicts).toHaveLength(0); + expect(result.config.bindings).toEqual([ + { + agentId: "main", + match: { + channel: "discord", 
+ accountId: "guild-a", + guildId: "123", + roles: ["111", "222"], + }, + }, + ]); + }); + it("pruneAgentConfig removes agent, bindings, and allowlist entries", () => { const cfg: OpenClawConfig = { agents: { diff --git a/src/commands/agents.ts b/src/commands/agents.ts index 6679bb853da..5f5bdcd3c7b 100644 --- a/src/commands/agents.ts +++ b/src/commands/agents.ts @@ -1,4 +1,5 @@ export * from "./agents.bindings.js"; +export * from "./agents.commands.bind.js"; export * from "./agents.commands.add.js"; export * from "./agents.commands.delete.js"; export * from "./agents.commands.identity.js"; diff --git a/src/commands/auth-choice-options.ts b/src/commands/auth-choice-options.ts index 43ef7c4eda0..0296b306de1 100644 --- a/src/commands/auth-choice-options.ts +++ b/src/commands/auth-choice-options.ts @@ -242,7 +242,7 @@ const BASE_AUTH_CHOICE_OPTIONS: ReadonlyArray = [ { value: "google-gemini-cli", label: "Google Gemini CLI OAuth", - hint: "Uses the bundled Gemini CLI auth plugin", + hint: "Unofficial flow; review account-risk warning before use", }, { value: "zai-api-key", label: "Z.AI API key" }, { diff --git a/src/commands/auth-choice.apply-helpers.test.ts b/src/commands/auth-choice.apply-helpers.test.ts index c122fe197ca..471123621e1 100644 --- a/src/commands/auth-choice.apply-helpers.test.ts +++ b/src/commands/auth-choice.apply-helpers.test.ts @@ -26,11 +26,13 @@ function restoreMinimaxEnv(): void { function createPrompter(params?: { confirm?: WizardPrompter["confirm"]; note?: WizardPrompter["note"]; + select?: WizardPrompter["select"]; text?: WizardPrompter["text"]; }): WizardPrompter { return { confirm: params?.confirm ?? (vi.fn(async () => true) as WizardPrompter["confirm"]), note: params?.note ?? (vi.fn(async () => undefined) as WizardPrompter["note"]), + ...(params?.select ? { select: params.select } : {}), text: params?.text ?? 
(vi.fn(async () => "prompt-key") as WizardPrompter["text"]), } as unknown as WizardPrompter; } @@ -53,6 +55,7 @@ async function runEnsureMinimaxApiKeyFlow(params: { confirmResult: boolean; text const setCredential = vi.fn(async () => undefined); const result = await ensureApiKeyFromEnvOrPrompt({ + config: {}, provider: "minimax", envLabel: "MINIMAX_API_KEY", promptMessage: "Enter key", @@ -90,7 +93,7 @@ describe("maybeApplyApiKeyFromOption", () => { }); expect(result).toBe("opt-key"); - expect(setCredential).toHaveBeenCalledWith("opt-key"); + expect(setCredential).toHaveBeenCalledWith("opt-key", undefined); }); it("matches provider with whitespace/case normalization", async () => { @@ -105,7 +108,7 @@ describe("maybeApplyApiKeyFromOption", () => { }); expect(result).toBe("opt-key"); - expect(setCredential).toHaveBeenCalledWith("opt-key"); + expect(setCredential).toHaveBeenCalledWith("opt-key", undefined); }); it("skips when provider does not match", async () => { @@ -132,7 +135,7 @@ describe("ensureApiKeyFromEnvOrPrompt", () => { }); expect(result).toBe("env-key"); - expect(setCredential).toHaveBeenCalledWith("env-key"); + expect(setCredential).toHaveBeenCalledWith("env-key", "plaintext"); expect(text).not.toHaveBeenCalled(); }); @@ -143,13 +146,144 @@ describe("ensureApiKeyFromEnvOrPrompt", () => { }); expect(result).toBe("prompted-key"); - expect(setCredential).toHaveBeenCalledWith("prompted-key"); + expect(setCredential).toHaveBeenCalledWith("prompted-key", "plaintext"); expect(text).toHaveBeenCalledWith( expect.objectContaining({ message: "Enter key", }), ); }); + + it("uses explicit inline env ref when secret-input-mode=ref selects existing env key", async () => { + process.env.MINIMAX_API_KEY = "env-key"; + delete process.env.MINIMAX_OAUTH_TOKEN; + + const { confirm, text } = createPromptSpies({ + confirmResult: true, + textResult: "prompt-key", + }); + const setCredential = vi.fn(async () => undefined); + + const result = await ensureApiKeyFromEnvOrPrompt({ 
+ config: {}, + provider: "minimax", + envLabel: "MINIMAX_API_KEY", + promptMessage: "Enter key", + normalize: (value) => value.trim(), + validate: () => undefined, + prompter: createPrompter({ confirm, text }), + secretInputMode: "ref", + setCredential, + }); + + expect(result).toBe("env-key"); + expect(setCredential).toHaveBeenCalledWith( + { source: "env", provider: "default", id: "MINIMAX_API_KEY" }, + "ref", + ); + expect(text).not.toHaveBeenCalled(); + }); + + it("fails ref mode without select when fallback env var is missing", async () => { + delete process.env.MINIMAX_API_KEY; + delete process.env.MINIMAX_OAUTH_TOKEN; + + const { confirm, text } = createPromptSpies({ + confirmResult: true, + textResult: "prompt-key", + }); + const setCredential = vi.fn(async () => undefined); + + await expect( + ensureApiKeyFromEnvOrPrompt({ + config: {}, + provider: "minimax", + envLabel: "MINIMAX_API_KEY", + promptMessage: "Enter key", + normalize: (value) => value.trim(), + validate: () => undefined, + prompter: createPrompter({ confirm, text }), + secretInputMode: "ref", + setCredential, + }), + ).rejects.toThrow( + 'Environment variable "MINIMAX_API_KEY" is required for --secret-input-mode ref in non-interactive onboarding.', + ); + expect(setCredential).not.toHaveBeenCalled(); + }); + + it("re-prompts after provider ref validation failure and succeeds with env ref", async () => { + process.env.MINIMAX_API_KEY = "env-key"; + delete process.env.MINIMAX_OAUTH_TOKEN; + + const selectValues: Array<"provider" | "env" | "filemain"> = ["provider", "filemain", "env"]; + const select = vi.fn(async () => selectValues.shift() ?? 
"env") as WizardPrompter["select"]; + const text = vi + .fn() + .mockResolvedValueOnce("/providers/minimax/apiKey") + .mockResolvedValueOnce("MINIMAX_API_KEY"); + const note = vi.fn(async () => undefined); + const setCredential = vi.fn(async () => undefined); + + const result = await ensureApiKeyFromEnvOrPrompt({ + config: { + secrets: { + providers: { + filemain: { + source: "file", + path: "/tmp/does-not-exist-secrets.json", + mode: "json", + }, + }, + }, + }, + provider: "minimax", + envLabel: "MINIMAX_API_KEY", + promptMessage: "Enter key", + normalize: (value) => value.trim(), + validate: () => undefined, + prompter: createPrompter({ select, text, note }), + secretInputMode: "ref", + setCredential, + }); + + expect(result).toBe("env-key"); + expect(setCredential).toHaveBeenCalledWith( + { source: "env", provider: "default", id: "MINIMAX_API_KEY" }, + "ref", + ); + expect(note).toHaveBeenCalledWith( + expect.stringContaining("Could not validate provider reference"), + "Reference check failed", + ); + }); + + it("never includes resolved env secret values in reference validation notes", async () => { + process.env.MINIMAX_API_KEY = "sk-minimax-redacted-value"; + delete process.env.MINIMAX_OAUTH_TOKEN; + + const select = vi.fn(async () => "env") as WizardPrompter["select"]; + const text = vi.fn().mockResolvedValue("MINIMAX_API_KEY"); + const note = vi.fn(async () => undefined); + const setCredential = vi.fn(async () => undefined); + + const result = await ensureApiKeyFromEnvOrPrompt({ + config: {}, + provider: "minimax", + envLabel: "MINIMAX_API_KEY", + promptMessage: "Enter key", + normalize: (value) => value.trim(), + validate: () => undefined, + prompter: createPrompter({ select, text, note }), + secretInputMode: "ref", + setCredential, + }); + + expect(result).toBe("sk-minimax-redacted-value"); + const noteMessages = note.mock.calls.map((call) => String(call.at(0) ?? 
"")).join("\n"); + expect(noteMessages).toContain("Validated environment variable MINIMAX_API_KEY."); + expect(noteMessages).not.toContain("sk-minimax-redacted-value"); + }); }); describe("ensureApiKeyFromOptionEnvOrPrompt", () => { @@ -163,6 +297,7 @@ describe("ensureApiKeyFromOptionEnvOrPrompt", () => { const result = await ensureApiKeyFromOptionEnvOrPrompt({ token: " opts-key ", tokenProvider: " HUGGINGFACE ", + config: {}, expectedProviders: ["huggingface"], provider: "huggingface", envLabel: "HF_TOKEN", @@ -176,7 +311,7 @@ describe("ensureApiKeyFromOptionEnvOrPrompt", () => { }); expect(result).toBe("opts-key"); - expect(setCredential).toHaveBeenCalledWith("opts-key"); + expect(setCredential).toHaveBeenCalledWith("opts-key", undefined); expect(note).not.toHaveBeenCalled(); expect(confirm).not.toHaveBeenCalled(); expect(text).not.toHaveBeenCalled(); @@ -195,6 +330,7 @@ describe("ensureApiKeyFromOptionEnvOrPrompt", () => { const result = await ensureApiKeyFromOptionEnvOrPrompt({ token: "opts-key", tokenProvider: "openai", + config: {}, expectedProviders: ["minimax"], provider: "minimax", envLabel: "MINIMAX_API_KEY", @@ -211,6 +347,6 @@ describe("ensureApiKeyFromOptionEnvOrPrompt", () => { expect(note).toHaveBeenCalledWith("MiniMax note", "MiniMax"); expect(confirm).toHaveBeenCalled(); expect(text).not.toHaveBeenCalled(); - expect(setCredential).toHaveBeenCalledWith("env-key"); + expect(setCredential).toHaveBeenCalledWith("env-key", "plaintext"); }); }); diff --git a/src/commands/auth-choice.apply-helpers.ts b/src/commands/auth-choice.apply-helpers.ts index 8e7e0853567..52e019aae19 100644 --- a/src/commands/auth-choice.apply-helpers.ts +++ b/src/commands/auth-choice.apply-helpers.ts @@ -1,8 +1,243 @@ import { resolveEnvApiKey } from "../agents/model-auth.js"; +import type { OpenClawConfig } from "../config/types.js"; +import { type SecretInput, type SecretRef } from "../config/types.secrets.js"; +import { encodeJsonPointerToken } from 
"../secrets/json-pointer.js"; +import { PROVIDER_ENV_VARS } from "../secrets/provider-env-vars.js"; +import { + isValidFileSecretRefId, + resolveDefaultSecretProviderAlias, +} from "../secrets/ref-contract.js"; +import { resolveSecretRefString } from "../secrets/resolve.js"; import type { WizardPrompter } from "../wizard/prompts.js"; import { formatApiKeyPreview } from "./auth-choice.api-key.js"; import type { ApplyAuthChoiceParams } from "./auth-choice.apply.js"; import { applyDefaultModelChoice } from "./auth-choice.default-model.js"; +import type { SecretInputMode } from "./onboard-types.js"; + +const ENV_SOURCE_LABEL_RE = /(?:^|:\s)([A-Z][A-Z0-9_]*)$/; +const ENV_SECRET_REF_ID_RE = /^[A-Z][A-Z0-9_]{0,127}$/; + +type SecretRefChoice = "env" | "provider"; + +function formatErrorMessage(error: unknown): string { + if (error instanceof Error && typeof error.message === "string" && error.message.trim()) { + return error.message; + } + return String(error); +} + +function extractEnvVarFromSourceLabel(source: string): string | undefined { + const match = ENV_SOURCE_LABEL_RE.exec(source.trim()); + return match?.[1]; +} + +function resolveDefaultProviderEnvVar(provider: string): string | undefined { + const envVars = PROVIDER_ENV_VARS[provider]; + return envVars?.find((candidate) => candidate.trim().length > 0); +} + +function resolveDefaultFilePointerId(provider: string): string { + return `/providers/${encodeJsonPointerToken(provider)}/apiKey`; +} + +function resolveRefFallbackInput(params: { + config: OpenClawConfig; + provider: string; + preferredEnvVar?: string; +}): { ref: SecretRef; resolvedValue: string } { + const fallbackEnvVar = params.preferredEnvVar ?? resolveDefaultProviderEnvVar(params.provider); + if (!fallbackEnvVar) { + throw new Error( + `No default environment variable mapping found for provider "${params.provider}". 
Set a provider-specific env var, or re-run onboarding in an interactive terminal to configure a ref.`, + ); + } + const value = process.env[fallbackEnvVar]?.trim(); + if (!value) { + throw new Error( + `Environment variable "${fallbackEnvVar}" is required for --secret-input-mode ref in non-interactive onboarding.`, + ); + } + return { + ref: { + source: "env", + provider: resolveDefaultSecretProviderAlias(params.config, "env", { + preferFirstProviderForSource: true, + }), + id: fallbackEnvVar, + }, + resolvedValue: value, + }; +} + +async function resolveApiKeyRefForOnboarding(params: { + provider: string; + config: OpenClawConfig; + prompter: WizardPrompter; + preferredEnvVar?: string; +}): Promise<{ ref: SecretRef; resolvedValue: string }> { + const defaultEnvVar = + params.preferredEnvVar ?? resolveDefaultProviderEnvVar(params.provider) ?? ""; + const defaultFilePointer = resolveDefaultFilePointerId(params.provider); + let sourceChoice: SecretRefChoice = "env"; + + while (true) { + const sourceRaw: SecretRefChoice = await params.prompter.select({ + message: "Where is this API key stored?", + initialValue: sourceChoice, + options: [ + { + value: "env", + label: "Environment variable", + hint: "Reference a variable from your runtime environment", + }, + { + value: "provider", + label: "Configured secret provider", + hint: "Use a configured file or exec secret provider", + }, + ], + }); + const source: SecretRefChoice = sourceRaw === "provider" ? 
"provider" : "env"; + sourceChoice = source; + + if (source === "env") { + const envVarRaw = await params.prompter.text({ + message: "Environment variable name", + initialValue: defaultEnvVar || undefined, + placeholder: "OPENAI_API_KEY", + validate: (value) => { + const candidate = value.trim(); + if (!ENV_SECRET_REF_ID_RE.test(candidate)) { + return 'Use an env var name like "OPENAI_API_KEY" (uppercase letters, numbers, underscores).'; + } + if (!process.env[candidate]?.trim()) { + return `Environment variable "${candidate}" is missing or empty in this session.`; + } + return undefined; + }, + }); + const envCandidate = String(envVarRaw ?? "").trim(); + const envVar = + envCandidate && ENV_SECRET_REF_ID_RE.test(envCandidate) ? envCandidate : defaultEnvVar; + if (!envVar) { + throw new Error( + `No valid environment variable name provided for provider "${params.provider}".`, + ); + } + const ref: SecretRef = { + source: "env", + provider: resolveDefaultSecretProviderAlias(params.config, "env", { + preferFirstProviderForSource: true, + }), + id: envVar, + }; + const resolvedValue = await resolveSecretRefString(ref, { + config: params.config, + env: process.env, + }); + await params.prompter.note( + `Validated environment variable ${envVar}. OpenClaw will store a reference, not the key value.`, + "Reference validated", + ); + return { ref, resolvedValue }; + } + + const externalProviders = Object.entries(params.config.secrets?.providers ?? {}).filter( + ([, provider]) => provider?.source === "file" || provider?.source === "exec", + ); + if (externalProviders.length === 0) { + await params.prompter.note( + "No file/exec secret providers are configured yet. 
Add one under secrets.providers, or select Environment variable.", + "No providers configured", + ); + continue; + } + const defaultProvider = resolveDefaultSecretProviderAlias(params.config, "file", { + preferFirstProviderForSource: true, + }); + const selectedProvider = await params.prompter.select({ + message: "Select secret provider", + initialValue: + externalProviders.find(([providerName]) => providerName === defaultProvider)?.[0] ?? + externalProviders[0]?.[0], + options: externalProviders.map(([providerName, provider]) => ({ + value: providerName, + label: providerName, + hint: provider?.source === "exec" ? "Exec provider" : "File provider", + })), + }); + const providerEntry = params.config.secrets?.providers?.[selectedProvider]; + if (!providerEntry || (providerEntry.source !== "file" && providerEntry.source !== "exec")) { + await params.prompter.note( + `Provider "${selectedProvider}" is not a file/exec provider.`, + "Invalid provider", + ); + continue; + } + const idPrompt = + providerEntry.source === "file" + ? "Secret id (JSON pointer for json mode, or 'value' for singleValue mode)" + : "Secret id for the exec provider"; + const idDefault = + providerEntry.source === "file" + ? providerEntry.mode === "singleValue" + ? "value" + : defaultFilePointer + : `${params.provider}/apiKey`; + const idRaw = await params.prompter.text({ + message: idPrompt, + initialValue: idDefault, + placeholder: providerEntry.source === "file" ? 
"/providers/openai/apiKey" : "openai/api-key", + validate: (value) => { + const candidate = value.trim(); + if (!candidate) { + return "Secret id cannot be empty."; + } + if ( + providerEntry.source === "file" && + providerEntry.mode !== "singleValue" && + !isValidFileSecretRefId(candidate) + ) { + return 'Use an absolute JSON pointer like "/providers/openai/apiKey".'; + } + if ( + providerEntry.source === "file" && + providerEntry.mode === "singleValue" && + candidate !== "value" + ) { + return 'singleValue mode expects id "value".'; + } + return undefined; + }, + }); + const id = String(idRaw ?? "").trim() || idDefault; + const ref: SecretRef = { + source: providerEntry.source, + provider: selectedProvider, + id, + }; + try { + const resolvedValue = await resolveSecretRefString(ref, { + config: params.config, + env: process.env, + }); + await params.prompter.note( + `Validated ${providerEntry.source} reference ${selectedProvider}:${id}. OpenClaw will store a reference, not the key value.`, + "Reference validated", + ); + return { ref, resolvedValue }; + } catch (error) { + await params.prompter.note( + [ + `Could not validate provider reference ${selectedProvider}:${id}.`, + formatErrorMessage(error), + "Check your provider configuration and try again.", + ].join("\n"), + "Reference check failed", + ); + } + } +} export function createAuthChoiceAgentModelNoter( params: ApplyAuthChoiceParams, @@ -78,12 +313,56 @@ export function normalizeTokenProviderInput( return normalized || undefined; } +export function normalizeSecretInputModeInput( + secretInputMode: string | null | undefined, +): SecretInputMode | undefined { + const normalized = String(secretInputMode ?? 
"") + .trim() + .toLowerCase(); + if (normalized === "plaintext" || normalized === "ref") { + return normalized; + } + return undefined; +} + +export async function resolveSecretInputModeForEnvSelection(params: { + prompter: WizardPrompter; + explicitMode?: SecretInputMode; +}): Promise { + if (params.explicitMode) { + return params.explicitMode; + } + // Some tests pass partial prompt harnesses without a select implementation. + // Preserve backward-compatible behavior by defaulting to plaintext in that case. + if (typeof params.prompter.select !== "function") { + return "plaintext"; + } + const selected = await params.prompter.select({ + message: "How do you want to provide this API key?", + initialValue: "plaintext", + options: [ + { + value: "plaintext", + label: "Paste API key now", + hint: "Stores the key directly in OpenClaw config", + }, + { + value: "ref", + label: "Use secret reference", + hint: "Stores a reference to env or configured external secret providers", + }, + ], + }); + return selected === "ref" ? 
"ref" : "plaintext"; +} + export async function maybeApplyApiKeyFromOption(params: { token: string | undefined; tokenProvider: string | undefined; + secretInputMode?: SecretInputMode; expectedProviders: string[]; normalize: (value: string) => string; - setCredential: (apiKey: string) => Promise; + setCredential: (apiKey: SecretInput, mode?: SecretInputMode) => Promise; }): Promise { const tokenProvider = normalizeTokenProviderInput(params.tokenProvider); const expectedProviders = params.expectedProviders @@ -93,13 +372,15 @@ export async function maybeApplyApiKeyFromOption(params: { return undefined; } const apiKey = params.normalize(params.token); - await params.setCredential(apiKey); + await params.setCredential(apiKey, params.secretInputMode); return apiKey; } export async function ensureApiKeyFromOptionEnvOrPrompt(params: { token: string | undefined; tokenProvider: string | undefined; + secretInputMode?: SecretInputMode; + config: OpenClawConfig; expectedProviders: string[]; provider: string; envLabel: string; @@ -107,13 +388,14 @@ export async function ensureApiKeyFromOptionEnvOrPrompt(params: { normalize: (value: string) => string; validate: (value: string) => string | undefined; prompter: WizardPrompter; - setCredential: (apiKey: string) => Promise; + setCredential: (apiKey: SecretInput, mode?: SecretInputMode) => Promise; noteMessage?: string; noteTitle?: string; }): Promise { const optionApiKey = await maybeApplyApiKeyFromOption({ token: params.token, tokenProvider: params.tokenProvider, + secretInputMode: params.secretInputMode, expectedProviders: params.expectedProviders, normalize: params.normalize, setCredential: params.setCredential, @@ -127,33 +409,62 @@ export async function ensureApiKeyFromOptionEnvOrPrompt(params: { } return await ensureApiKeyFromEnvOrPrompt({ + config: params.config, provider: params.provider, envLabel: params.envLabel, promptMessage: params.promptMessage, normalize: params.normalize, validate: params.validate, prompter: 
params.prompter, + secretInputMode: params.secretInputMode, setCredential: params.setCredential, }); } export async function ensureApiKeyFromEnvOrPrompt(params: { + config: OpenClawConfig; provider: string; envLabel: string; promptMessage: string; normalize: (value: string) => string; validate: (value: string) => string | undefined; prompter: WizardPrompter; - setCredential: (apiKey: string) => Promise; + secretInputMode?: SecretInputMode; + setCredential: (apiKey: SecretInput, mode?: SecretInputMode) => Promise; }): Promise { + const selectedMode = await resolveSecretInputModeForEnvSelection({ + prompter: params.prompter, + explicitMode: params.secretInputMode, + }); const envKey = resolveEnvApiKey(params.provider); - if (envKey) { + + if (selectedMode === "ref") { + if (typeof params.prompter.select !== "function") { + const fallback = resolveRefFallbackInput({ + config: params.config, + provider: params.provider, + preferredEnvVar: envKey?.source ? extractEnvVarFromSourceLabel(envKey.source) : undefined, + }); + await params.setCredential(fallback.ref, selectedMode); + return fallback.resolvedValue; + } + const resolved = await resolveApiKeyRefForOnboarding({ + provider: params.provider, + config: params.config, + prompter: params.prompter, + preferredEnvVar: envKey?.source ? extractEnvVarFromSourceLabel(envKey.source) : undefined, + }); + await params.setCredential(resolved.ref, selectedMode); + return resolved.resolvedValue; + } + + if (envKey && selectedMode === "plaintext") { const useExisting = await params.prompter.confirm({ message: `Use existing ${params.envLabel} (${envKey.source}, ${formatApiKeyPreview(envKey.apiKey)})?`, initialValue: true, }); if (useExisting) { - await params.setCredential(envKey.apiKey); + await params.setCredential(envKey.apiKey, selectedMode); return envKey.apiKey; } } @@ -163,6 +474,6 @@ export async function ensureApiKeyFromEnvOrPrompt(params: { validate: params.validate, }); const apiKey = params.normalize(String(key ?? 
"")); - await params.setCredential(apiKey); + await params.setCredential(apiKey, selectedMode); return apiKey; } diff --git a/src/commands/auth-choice.apply.anthropic.ts b/src/commands/auth-choice.apply.anthropic.ts index b910768ea0f..5f82426ef10 100644 --- a/src/commands/auth-choice.apply.anthropic.ts +++ b/src/commands/auth-choice.apply.anthropic.ts @@ -1,9 +1,9 @@ import { upsertAuthProfile } from "../agents/auth-profiles.js"; +import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; import { - formatApiKeyPreview, - normalizeApiKeyInput, - validateApiKeyInput, -} from "./auth-choice.api-key.js"; + normalizeSecretInputModeInput, + ensureApiKeyFromOptionEnvOrPrompt, +} from "./auth-choice.apply-helpers.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { buildTokenProfileId, validateAnthropicSetupToken } from "./auth-token.js"; import { applyAgentDefaultModelPrimary } from "./onboard-auth.config-shared.js"; @@ -14,6 +14,7 @@ const DEFAULT_ANTHROPIC_MODEL = "anthropic/claude-sonnet-4-6"; export async function applyAuthChoiceAnthropic( params: ApplyAuthChoiceParams, ): Promise { + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); if ( params.authChoice === "setup-token" || params.authChoice === "oauth" || @@ -70,31 +71,21 @@ export async function applyAuthChoiceAnthropic( } let nextConfig = params.config; - let hasCredential = false; - const envKey = process.env.ANTHROPIC_API_KEY?.trim(); - - if (params.opts?.token) { - await setAnthropicApiKey(normalizeApiKeyInput(params.opts.token), params.agentDir); - hasCredential = true; - } - - if (!hasCredential && envKey) { - const useExisting = await params.prompter.confirm({ - message: `Use existing ANTHROPIC_API_KEY (env, ${formatApiKeyPreview(envKey)})?`, - initialValue: true, - }); - if (useExisting) { - await setAnthropicApiKey(envKey, params.agentDir); - hasCredential = true; - } - } - if 
(!hasCredential) { - const key = await params.prompter.text({ - message: "Enter Anthropic API key", - validate: validateApiKeyInput, - }); - await setAnthropicApiKey(normalizeApiKeyInput(String(key ?? "")), params.agentDir); - } + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.token, + tokenProvider: params.opts?.tokenProvider ?? "anthropic", + secretInputMode: requestedSecretInputMode, + config: nextConfig, + expectedProviders: ["anthropic"], + provider: "anthropic", + envLabel: "ANTHROPIC_API_KEY", + promptMessage: "Enter Anthropic API key", + normalize: normalizeApiKeyInput, + validate: validateApiKeyInput, + prompter: params.prompter, + setCredential: async (apiKey, mode) => + setAnthropicApiKey(apiKey, params.agentDir, { secretInputMode: mode }), + }); nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "anthropic:default", provider: "anthropic", diff --git a/src/commands/auth-choice.apply.api-providers.ts b/src/commands/auth-choice.apply.api-providers.ts index 2b1e80387da..2be73ee14f2 100644 --- a/src/commands/auth-choice.apply.api-providers.ts +++ b/src/commands/auth-choice.apply.api-providers.ts @@ -1,11 +1,8 @@ import { ensureAuthProfileStore, resolveAuthProfileOrder } from "../agents/auth-profiles.js"; -import { resolveEnvApiKey } from "../agents/model-auth.js"; -import { - formatApiKeyPreview, - normalizeApiKeyInput, - validateApiKeyInput, -} from "./auth-choice.api-key.js"; +import type { SecretInput } from "../config/types.secrets.js"; +import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; import { + normalizeSecretInputModeInput, createAuthChoiceAgentModelNoter, createAuthChoiceDefaultModelApplier, createAuthChoiceModelStateBridge, @@ -19,6 +16,7 @@ import { applyGoogleGeminiModelDefault, GOOGLE_GEMINI_DEFAULT_MODEL, } from "./google-gemini-model-default.js"; +import type { ApiKeyStorageOptions } from "./onboard-auth.credentials.js"; import { applyAuthProfileConfig, 
applyCloudflareAiGatewayConfig, @@ -80,7 +78,7 @@ import { setZaiApiKey, ZAI_DEFAULT_MODEL_REF, } from "./onboard-auth.js"; -import type { AuthChoice } from "./onboard-types.js"; +import type { AuthChoice, SecretInputMode } from "./onboard-types.js"; import { OPENCODE_ZEN_DEFAULT_MODEL } from "./opencode-zen-model-default.js"; import { detectZaiEndpoint } from "./zai-endpoint-detect.js"; @@ -124,7 +122,11 @@ type SimpleApiKeyProviderFlow = { expectedProviders: string[]; envLabel: string; promptMessage: string; - setCredential: (apiKey: string, agentDir?: string) => void | Promise; + setCredential: ( + apiKey: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, + ) => void | Promise; defaultModel: string; applyDefaultConfig: ApiKeyProviderConfigApplier; applyProviderConfig: ApiKeyProviderConfigApplier; @@ -327,6 +329,7 @@ export async function applyAuthChoiceApiProviders( let authChoice = params.authChoice; const normalizedTokenProvider = normalizeTokenProviderInput(params.opts?.tokenProvider); + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); if (authChoice === "apiKey" && params.opts?.tokenProvider) { if (normalizedTokenProvider !== "anthropic" && normalizedTokenProvider !== "openai") { authChoice = API_KEY_TOKEN_PROVIDER_AUTH_CHOICE[normalizedTokenProvider ?? ""] ?? 
authChoice; @@ -355,7 +358,7 @@ export async function applyAuthChoiceApiProviders( expectedProviders: string[]; envLabel: string; promptMessage: string; - setCredential: (apiKey: string) => void | Promise; + setCredential: (apiKey: SecretInput, mode?: SecretInputMode) => void | Promise; defaultModel: string; applyDefaultConfig: ( config: ApplyAuthChoiceParams["config"], @@ -374,11 +377,13 @@ export async function applyAuthChoiceApiProviders( token: params.opts?.token, provider, tokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, expectedProviders, envLabel, promptMessage, - setCredential: async (apiKey) => { - await setCredential(apiKey); + setCredential: async (apiKey, mode) => { + await setCredential(apiKey, mode); }, noteMessage, noteTitle, @@ -421,6 +426,8 @@ export async function applyAuthChoiceApiProviders( await ensureApiKeyFromOptionEnvOrPrompt({ token: params.opts?.token, tokenProvider: normalizedTokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, expectedProviders: ["litellm"], provider: "litellm", envLabel: "LITELLM_API_KEY", @@ -428,7 +435,8 @@ export async function applyAuthChoiceApiProviders( normalize: normalizeApiKeyInput, validate: validateApiKeyInput, prompter: params.prompter, - setCredential: async (apiKey) => setLitellmApiKey(apiKey, params.agentDir), + setCredential: async (apiKey, mode) => + setLitellmApiKey(apiKey, params.agentDir, { secretInputMode: mode }), noteMessage: "LiteLLM provides a unified API to 100+ LLM providers.\nGet your API key from your LiteLLM proxy or https://litellm.ai\nDefault proxy runs on http://localhost:4000", noteTitle: "LiteLLM", @@ -460,8 +468,10 @@ export async function applyAuthChoiceApiProviders( expectedProviders: simpleApiKeyProviderFlow.expectedProviders, envLabel: simpleApiKeyProviderFlow.envLabel, promptMessage: simpleApiKeyProviderFlow.promptMessage, - setCredential: async (apiKey) => - simpleApiKeyProviderFlow.setCredential(apiKey, 
params.agentDir), + setCredential: async (apiKey, mode) => + simpleApiKeyProviderFlow.setCredential(apiKey, params.agentDir, { + secretInputMode: mode ?? requestedSecretInputMode, + }), defaultModel: simpleApiKeyProviderFlow.defaultModel, applyDefaultConfig: simpleApiKeyProviderFlow.applyDefaultConfig, applyProviderConfig: simpleApiKeyProviderFlow.applyProviderConfig, @@ -495,39 +505,26 @@ export async function applyAuthChoiceApiProviders( } }; - const optsApiKey = normalizeApiKeyInput(params.opts?.cloudflareAiGatewayApiKey ?? ""); - let resolvedApiKey = ""; - if (accountId && gatewayId && optsApiKey) { - resolvedApiKey = optsApiKey; - } + await ensureAccountGateway(); - const envKey = resolveEnvApiKey("cloudflare-ai-gateway"); - if (!resolvedApiKey && envKey) { - const useExisting = await params.prompter.confirm({ - message: `Use existing CLOUDFLARE_AI_GATEWAY_API_KEY (${envKey.source}, ${formatApiKeyPreview(envKey.apiKey)})?`, - initialValue: true, - }); - if (useExisting) { - await ensureAccountGateway(); - resolvedApiKey = normalizeApiKeyInput(envKey.apiKey); - } - } + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.cloudflareAiGatewayApiKey, + tokenProvider: "cloudflare-ai-gateway", + secretInputMode: requestedSecretInputMode, + config: nextConfig, + expectedProviders: ["cloudflare-ai-gateway"], + provider: "cloudflare-ai-gateway", + envLabel: "CLOUDFLARE_AI_GATEWAY_API_KEY", + promptMessage: "Enter Cloudflare AI Gateway API key", + normalize: normalizeApiKeyInput, + validate: validateApiKeyInput, + prompter: params.prompter, + setCredential: async (apiKey, mode) => + setCloudflareAiGatewayConfig(accountId, gatewayId, apiKey, params.agentDir, { + secretInputMode: mode, + }), + }); - if (!resolvedApiKey && optsApiKey) { - await ensureAccountGateway(); - resolvedApiKey = optsApiKey; - } - - if (!resolvedApiKey) { - await ensureAccountGateway(); - const key = await params.prompter.text({ - message: "Enter Cloudflare AI Gateway API key", - 
validate: validateApiKeyInput, - }); - resolvedApiKey = normalizeApiKeyInput(String(key ?? "")); - } - - await setCloudflareAiGatewayConfig(accountId, gatewayId, resolvedApiKey, params.agentDir); nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "cloudflare-ai-gateway:default", provider: "cloudflare-ai-gateway", @@ -555,13 +552,16 @@ export async function applyAuthChoiceApiProviders( token: params.opts?.token, provider: "google", tokenProvider: normalizedTokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, expectedProviders: ["google"], envLabel: "GEMINI_API_KEY", promptMessage: "Enter Gemini API key", normalize: normalizeApiKeyInput, validate: validateApiKeyInput, prompter: params.prompter, - setCredential: async (apiKey) => setGeminiApiKey(apiKey, params.agentDir), + setCredential: async (apiKey, mode) => + setGeminiApiKey(apiKey, params.agentDir, { secretInputMode: mode }), }); nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "google:default", @@ -597,13 +597,16 @@ export async function applyAuthChoiceApiProviders( token: params.opts?.token, provider: "zai", tokenProvider: normalizedTokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, expectedProviders: ["zai"], envLabel: "ZAI_API_KEY", promptMessage: "Enter Z.AI API key", normalize: normalizeApiKeyInput, validate: validateApiKeyInput, prompter: params.prompter, - setCredential: async (apiKey) => setZaiApiKey(apiKey, params.agentDir), + setCredential: async (apiKey, mode) => + setZaiApiKey(apiKey, params.agentDir, { secretInputMode: mode }), }); // zai-api-key: auto-detect endpoint + choose a working default model. 
diff --git a/src/commands/auth-choice.apply.byteplus.ts b/src/commands/auth-choice.apply.byteplus.ts index de62f6bd082..80cfa377bde 100644 --- a/src/commands/auth-choice.apply.byteplus.ts +++ b/src/commands/auth-choice.apply.byteplus.ts @@ -1,12 +1,11 @@ -import { resolveEnvApiKey } from "../agents/model-auth.js"; -import { upsertSharedEnvVar } from "../infra/env-file.js"; +import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; import { - formatApiKeyPreview, - normalizeApiKeyInput, - validateApiKeyInput, -} from "./auth-choice.api-key.js"; + ensureApiKeyFromOptionEnvOrPrompt, + normalizeSecretInputModeInput, +} from "./auth-choice.apply-helpers.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { applyPrimaryModel } from "./model-picker.js"; +import { applyAuthProfileConfig, setByteplusApiKey } from "./onboard-auth.js"; /** Default model for BytePlus auth onboarding. */ export const BYTEPLUS_DEFAULT_MODEL = "byteplus-plan/ark-code-latest"; @@ -18,54 +17,28 @@ export async function applyAuthChoiceBytePlus( return null; } - const envKey = resolveEnvApiKey("byteplus"); - if (envKey) { - const useExisting = await params.prompter.confirm({ - message: `Use existing BYTEPLUS_API_KEY (${envKey.source}, ${formatApiKeyPreview(envKey.apiKey)})?`, - initialValue: true, - }); - if (useExisting) { - const result = upsertSharedEnvVar({ - key: "BYTEPLUS_API_KEY", - value: envKey.apiKey, - }); - if (!process.env.BYTEPLUS_API_KEY) { - process.env.BYTEPLUS_API_KEY = envKey.apiKey; - } - await params.prompter.note( - `Copied BYTEPLUS_API_KEY to ${result.path} for launchd compatibility.`, - "BytePlus API key", - ); - const configWithModel = applyPrimaryModel(params.config, BYTEPLUS_DEFAULT_MODEL); - return { - config: configWithModel, - agentModelOverride: BYTEPLUS_DEFAULT_MODEL, - }; - } - } - - let key: string | undefined; - if (params.opts?.byteplusApiKey) { - key = params.opts.byteplusApiKey; - } 
else { - key = await params.prompter.text({ - message: "Enter BytePlus API key", - validate: validateApiKeyInput, - }); - } - - const trimmed = normalizeApiKeyInput(String(key)); - const result = upsertSharedEnvVar({ - key: "BYTEPLUS_API_KEY", - value: trimmed, + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.byteplusApiKey, + tokenProvider: "byteplus", + secretInputMode: requestedSecretInputMode, + config: params.config, + expectedProviders: ["byteplus"], + provider: "byteplus", + envLabel: "BYTEPLUS_API_KEY", + promptMessage: "Enter BytePlus API key", + normalize: normalizeApiKeyInput, + validate: validateApiKeyInput, + prompter: params.prompter, + setCredential: async (apiKey, mode) => + setByteplusApiKey(apiKey, params.agentDir, { secretInputMode: mode }), }); - process.env.BYTEPLUS_API_KEY = trimmed; - await params.prompter.note( - `Saved BYTEPLUS_API_KEY to ${result.path} for launchd compatibility.`, - "BytePlus API key", - ); - - const configWithModel = applyPrimaryModel(params.config, BYTEPLUS_DEFAULT_MODEL); + const configWithAuth = applyAuthProfileConfig(params.config, { + profileId: "byteplus:default", + provider: "byteplus", + mode: "api_key", + }); + const configWithModel = applyPrimaryModel(configWithAuth, BYTEPLUS_DEFAULT_MODEL); return { config: configWithModel, agentModelOverride: BYTEPLUS_DEFAULT_MODEL, diff --git a/src/commands/auth-choice.apply.google-gemini-cli.test.ts b/src/commands/auth-choice.apply.google-gemini-cli.test.ts new file mode 100644 index 00000000000..f07f970a18d --- /dev/null +++ b/src/commands/auth-choice.apply.google-gemini-cli.test.ts @@ -0,0 +1,86 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { applyAuthChoiceGoogleGeminiCli } from "./auth-choice.apply.google-gemini-cli.js"; +import type { ApplyAuthChoiceParams } from "./auth-choice.apply.js"; +import { applyAuthChoicePluginProvider } 
from "./auth-choice.apply.plugin-provider.js"; +import { createExitThrowingRuntime, createWizardPrompter } from "./test-wizard-helpers.js"; + +vi.mock("./auth-choice.apply.plugin-provider.js", () => ({ + applyAuthChoicePluginProvider: vi.fn(), +})); + +function createParams( + authChoice: ApplyAuthChoiceParams["authChoice"], + overrides: Partial = {}, +): ApplyAuthChoiceParams { + return { + authChoice, + config: {}, + prompter: createWizardPrompter({}, { defaultSelect: "" }), + runtime: createExitThrowingRuntime(), + setDefaultModel: true, + ...overrides, + }; +} + +describe("applyAuthChoiceGoogleGeminiCli", () => { + const mockedApplyAuthChoicePluginProvider = vi.mocked(applyAuthChoicePluginProvider); + + beforeEach(() => { + mockedApplyAuthChoicePluginProvider.mockReset(); + }); + + it("returns null for unrelated authChoice", async () => { + const result = await applyAuthChoiceGoogleGeminiCli(createParams("openrouter-api-key")); + + expect(result).toBeNull(); + expect(mockedApplyAuthChoicePluginProvider).not.toHaveBeenCalled(); + }); + + it("shows caution and skips setup when user declines", async () => { + const confirm = vi.fn(async () => false); + const note = vi.fn(async () => {}); + const params = createParams("google-gemini-cli", { + prompter: createWizardPrompter({ confirm, note }, { defaultSelect: "" }), + }); + + const result = await applyAuthChoiceGoogleGeminiCli(params); + + expect(result).toEqual({ config: params.config }); + expect(note).toHaveBeenNthCalledWith( + 1, + expect.stringContaining("This is an unofficial integration and is not endorsed by Google."), + "Google Gemini CLI caution", + ); + expect(confirm).toHaveBeenCalledWith({ + message: "Continue with Google Gemini CLI OAuth?", + initialValue: false, + }); + expect(note).toHaveBeenNthCalledWith( + 2, + "Skipped Google Gemini CLI OAuth setup.", + "Setup skipped", + ); + expect(mockedApplyAuthChoicePluginProvider).not.toHaveBeenCalled(); + }); + + it("continues to plugin provider flow when 
user confirms", async () => { + const confirm = vi.fn(async () => true); + const note = vi.fn(async () => {}); + const params = createParams("google-gemini-cli", { + prompter: createWizardPrompter({ confirm, note }, { defaultSelect: "" }), + }); + const expected = { config: {} }; + mockedApplyAuthChoicePluginProvider.mockResolvedValue(expected); + + const result = await applyAuthChoiceGoogleGeminiCli(params); + + expect(result).toBe(expected); + expect(mockedApplyAuthChoicePluginProvider).toHaveBeenCalledWith(params, { + authChoice: "google-gemini-cli", + pluginId: "google-gemini-cli-auth", + providerId: "google-gemini-cli", + methodId: "oauth", + label: "Google Gemini CLI", + }); + }); +}); diff --git a/src/commands/auth-choice.apply.google-gemini-cli.ts b/src/commands/auth-choice.apply.google-gemini-cli.ts index d2a3281f628..5fcbc832338 100644 --- a/src/commands/auth-choice.apply.google-gemini-cli.ts +++ b/src/commands/auth-choice.apply.google-gemini-cli.ts @@ -4,6 +4,29 @@ import { applyAuthChoicePluginProvider } from "./auth-choice.apply.plugin-provid export async function applyAuthChoiceGoogleGeminiCli( params: ApplyAuthChoiceParams, ): Promise { + if (params.authChoice !== "google-gemini-cli") { + return null; + } + + await params.prompter.note( + [ + "This is an unofficial integration and is not endorsed by Google.", + "Some users have reported account restrictions or suspensions after using third-party Gemini CLI and Antigravity OAuth clients.", + "Proceed only if you understand and accept this risk.", + ].join("\n"), + "Google Gemini CLI caution", + ); + + const proceed = await params.prompter.confirm({ + message: "Continue with Google Gemini CLI OAuth?", + initialValue: false, + }); + + if (!proceed) { + await params.prompter.note("Skipped Google Gemini CLI OAuth setup.", "Setup skipped"); + return { config: params.config }; + } + return await applyAuthChoicePluginProvider(params, { authChoice: "google-gemini-cli", pluginId: "google-gemini-cli-auth", diff 
--git a/src/commands/auth-choice.apply.huggingface.ts b/src/commands/auth-choice.apply.huggingface.ts index 3f4c980879f..91bfd533cb0 100644 --- a/src/commands/auth-choice.apply.huggingface.ts +++ b/src/commands/auth-choice.apply.huggingface.ts @@ -6,6 +6,7 @@ import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key import { createAuthChoiceAgentModelNoter, ensureApiKeyFromOptionEnvOrPrompt, + normalizeSecretInputModeInput, } from "./auth-choice.apply-helpers.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { applyDefaultModelChoice } from "./auth-choice.default-model.js"; @@ -27,10 +28,13 @@ export async function applyAuthChoiceHuggingface( let nextConfig = params.config; let agentModelOverride: string | undefined; const noteAgentModel = createAuthChoiceAgentModelNoter(params); + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); const hfKey = await ensureApiKeyFromOptionEnvOrPrompt({ token: params.opts?.token, tokenProvider: params.opts?.tokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, expectedProviders: ["huggingface"], provider: "huggingface", envLabel: "Hugging Face token", @@ -38,7 +42,8 @@ export async function applyAuthChoiceHuggingface( normalize: normalizeApiKeyInput, validate: validateApiKeyInput, prompter: params.prompter, - setCredential: async (apiKey) => setHuggingfaceApiKey(apiKey, params.agentDir), + setCredential: async (apiKey, mode) => + setHuggingfaceApiKey(apiKey, params.agentDir, { secretInputMode: mode }), noteMessage: [ "Hugging Face Inference Providers offer OpenAI-compatible chat completions.", "Create a token at: https://huggingface.co/settings/tokens (fine-grained, 'Make calls to Inference Providers').", diff --git a/src/commands/auth-choice.apply.minimax.test.ts b/src/commands/auth-choice.apply.minimax.test.ts index 78ae5d5fa12..c3de54b1e74 100644 --- 
a/src/commands/auth-choice.apply.minimax.test.ts +++ b/src/commands/auth-choice.apply.minimax.test.ts @@ -44,7 +44,7 @@ describe("applyAuthChoiceMiniMax", () => { async function readAuthProfiles(agentDir: string) { return await readAuthProfilesForAgent<{ - profiles?: Record; + profiles?: Record; }>(agentDir); } @@ -126,7 +126,7 @@ describe("applyAuthChoiceMiniMax", () => { }, ); - it("uses env token for minimax-api-key-cn when confirmed", async () => { + it("uses env token for minimax-api-key-cn as plaintext by default", async () => { const agentDir = await setupTempState(); process.env.MINIMAX_API_KEY = "mm-env-token"; delete process.env.MINIMAX_OAUTH_TOKEN; @@ -155,6 +155,36 @@ describe("applyAuthChoiceMiniMax", () => { const parsed = await readAuthProfiles(agentDir); expect(parsed.profiles?.["minimax-cn:default"]?.key).toBe("mm-env-token"); + expect(parsed.profiles?.["minimax-cn:default"]?.keyRef).toBeUndefined(); + }); + + it("uses env token for minimax-api-key-cn as keyRef in ref mode", async () => { + const agentDir = await setupTempState(); + process.env.MINIMAX_API_KEY = "mm-env-token"; + delete process.env.MINIMAX_OAUTH_TOKEN; + + const text = vi.fn(async () => "should-not-be-used"); + const confirm = vi.fn(async () => true); + + const result = await applyAuthChoiceMiniMax({ + authChoice: "minimax-api-key-cn", + config: {}, + prompter: createMinimaxPrompter({ text, confirm }), + runtime: createExitThrowingRuntime(), + setDefaultModel: true, + opts: { + secretInputMode: "ref", + }, + }); + + expect(result).not.toBeNull(); + const parsed = await readAuthProfiles(agentDir); + expect(parsed.profiles?.["minimax-cn:default"]?.keyRef).toEqual({ + source: "env", + provider: "default", + id: "MINIMAX_API_KEY", + }); + expect(parsed.profiles?.["minimax-cn:default"]?.key).toBeUndefined(); }); it("uses minimax-api-lightning default model", async () => { diff --git a/src/commands/auth-choice.apply.minimax.ts b/src/commands/auth-choice.apply.minimax.ts index 
d7c99ff8f0d..9b6c83fc204 100644 --- a/src/commands/auth-choice.apply.minimax.ts +++ b/src/commands/auth-choice.apply.minimax.ts @@ -3,6 +3,7 @@ import { createAuthChoiceDefaultModelApplier, createAuthChoiceModelStateBridge, ensureApiKeyFromOptionEnvOrPrompt, + normalizeSecretInputModeInput, } from "./auth-choice.apply-helpers.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { applyAuthChoicePluginProvider } from "./auth-choice.apply.plugin-provider.js"; @@ -31,6 +32,7 @@ export async function applyAuthChoiceMiniMax( setAgentModelOverride: (model) => (agentModelOverride = model), }), ); + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); const ensureMinimaxApiKey = async (opts: { profileId: string; promptMessage: string; @@ -38,6 +40,8 @@ export async function applyAuthChoiceMiniMax( await ensureApiKeyFromOptionEnvOrPrompt({ token: params.opts?.token, tokenProvider: params.opts?.tokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, expectedProviders: ["minimax", "minimax-cn"], provider: "minimax", envLabel: "MINIMAX_API_KEY", @@ -45,7 +49,8 @@ export async function applyAuthChoiceMiniMax( normalize: normalizeApiKeyInput, validate: validateApiKeyInput, prompter: params.prompter, - setCredential: async (apiKey) => setMinimaxApiKey(apiKey, params.agentDir, opts.profileId), + setCredential: async (apiKey, mode) => + setMinimaxApiKey(apiKey, params.agentDir, opts.profileId, { secretInputMode: mode }), }); }; const applyMinimaxApiVariant = async (opts: { diff --git a/src/commands/auth-choice.apply.openai.test.ts b/src/commands/auth-choice.apply.openai.test.ts new file mode 100644 index 00000000000..8ec1c667f0f --- /dev/null +++ b/src/commands/auth-choice.apply.openai.test.ts @@ -0,0 +1,116 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { applyAuthChoiceOpenAI } from "./auth-choice.apply.openai.js"; +import { + 
createAuthTestLifecycle, + createExitThrowingRuntime, + createWizardPrompter, + readAuthProfilesForAgent, + setupAuthTestEnv, +} from "./test-wizard-helpers.js"; + +describe("applyAuthChoiceOpenAI", () => { + const lifecycle = createAuthTestLifecycle([ + "OPENCLAW_STATE_DIR", + "OPENCLAW_AGENT_DIR", + "PI_CODING_AGENT_DIR", + "OPENAI_API_KEY", + ]); + + async function setupTempState() { + const env = await setupAuthTestEnv("openclaw-openai-"); + lifecycle.setStateDir(env.stateDir); + return env.agentDir; + } + + afterEach(async () => { + await lifecycle.cleanup(); + }); + + it("writes env-backed OpenAI key as plaintext by default", async () => { + const agentDir = await setupTempState(); + process.env.OPENAI_API_KEY = "sk-openai-env"; + + const confirm = vi.fn(async () => true); + const text = vi.fn(async () => "unused"); + const prompter = createWizardPrompter({ confirm, text }, { defaultSelect: "plaintext" }); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoiceOpenAI({ + authChoice: "openai-api-key", + config: {}, + prompter, + runtime, + setDefaultModel: true, + }); + + expect(result).not.toBeNull(); + expect(result?.config.auth?.profiles?.["openai:default"]).toMatchObject({ + provider: "openai", + mode: "api_key", + }); + const defaultModel = result?.config.agents?.defaults?.model; + const primaryModel = typeof defaultModel === "string" ? 
defaultModel : defaultModel?.primary; + expect(primaryModel).toBe("openai/gpt-5.1-codex"); + expect(text).not.toHaveBeenCalled(); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(agentDir); + expect(parsed.profiles?.["openai:default"]?.key).toBe("sk-openai-env"); + expect(parsed.profiles?.["openai:default"]?.keyRef).toBeUndefined(); + }); + + it("writes env-backed OpenAI key as keyRef when secret-input-mode=ref", async () => { + const agentDir = await setupTempState(); + process.env.OPENAI_API_KEY = "sk-openai-env"; + + const confirm = vi.fn(async () => true); + const text = vi.fn(async () => "unused"); + const prompter = createWizardPrompter({ confirm, text }, { defaultSelect: "ref" }); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoiceOpenAI({ + authChoice: "openai-api-key", + config: {}, + prompter, + runtime, + setDefaultModel: true, + }); + + expect(result).not.toBeNull(); + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(agentDir); + expect(parsed.profiles?.["openai:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }); + expect(parsed.profiles?.["openai:default"]?.key).toBeUndefined(); + }); + + it("writes explicit token input into openai auth profile", async () => { + const agentDir = await setupTempState(); + + const prompter = createWizardPrompter({}, { defaultSelect: "" }); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoiceOpenAI({ + authChoice: "apiKey", + config: {}, + prompter, + runtime, + setDefaultModel: true, + opts: { + tokenProvider: "openai", + token: "sk-openai-token", + }, + }); + + expect(result).not.toBeNull(); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(agentDir); + expect(parsed.profiles?.["openai:default"]?.key).toBe("sk-openai-token"); + expect(parsed.profiles?.["openai:default"]?.keyRef).toBeUndefined(); + }); +}); diff --git 
a/src/commands/auth-choice.apply.openai.ts b/src/commands/auth-choice.apply.openai.ts index 2d1beaf041c..57059307920 100644 --- a/src/commands/auth-choice.apply.openai.ts +++ b/src/commands/auth-choice.apply.openai.ts @@ -1,15 +1,13 @@ -import { resolveEnvApiKey } from "../agents/model-auth.js"; -import { upsertSharedEnvVar } from "../infra/env-file.js"; +import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; import { - formatApiKeyPreview, - normalizeApiKeyInput, - validateApiKeyInput, -} from "./auth-choice.api-key.js"; -import { createAuthChoiceAgentModelNoter } from "./auth-choice.apply-helpers.js"; + createAuthChoiceAgentModelNoter, + ensureApiKeyFromOptionEnvOrPrompt, + normalizeSecretInputModeInput, +} from "./auth-choice.apply-helpers.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { applyDefaultModelChoice } from "./auth-choice.default-model.js"; import { isRemoteEnvironment } from "./oauth-env.js"; -import { applyAuthProfileConfig, writeOAuthCredentials } from "./onboard-auth.js"; +import { applyAuthProfileConfig, setOpenaiApiKey, writeOAuthCredentials } from "./onboard-auth.js"; import { openUrl } from "./onboard-helpers.js"; import { applyOpenAICodexModelDefault, @@ -25,6 +23,7 @@ import { export async function applyAuthChoiceOpenAI( params: ApplyAuthChoiceParams, ): Promise { + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); const noteAgentModel = createAuthChoiceAgentModelNoter(params); let authChoice = params.authChoice; if (authChoice === "apiKey" && params.opts?.tokenProvider === "openai") { @@ -51,48 +50,26 @@ export async function applyAuthChoiceOpenAI( return { config: nextConfig, agentModelOverride }; }; - const envKey = resolveEnvApiKey("openai"); - if (envKey) { - const useExisting = await params.prompter.confirm({ - message: `Use existing OPENAI_API_KEY (${envKey.source}, 
${formatApiKeyPreview(envKey.apiKey)})?`, - initialValue: true, - }); - if (useExisting) { - const result = upsertSharedEnvVar({ - key: "OPENAI_API_KEY", - value: envKey.apiKey, - }); - if (!process.env.OPENAI_API_KEY) { - process.env.OPENAI_API_KEY = envKey.apiKey; - } - await params.prompter.note( - `Copied OPENAI_API_KEY to ${result.path} for launchd compatibility.`, - "OpenAI API key", - ); - return await applyOpenAiDefaultModelChoice(); - } - } - - let key: string | undefined; - if (params.opts?.token && params.opts?.tokenProvider === "openai") { - key = params.opts.token; - } else { - key = await params.prompter.text({ - message: "Enter OpenAI API key", - validate: validateApiKeyInput, - }); - } - - const trimmed = normalizeApiKeyInput(String(key)); - const result = upsertSharedEnvVar({ - key: "OPENAI_API_KEY", - value: trimmed, + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.token, + tokenProvider: params.opts?.tokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, + expectedProviders: ["openai"], + provider: "openai", + envLabel: "OPENAI_API_KEY", + promptMessage: "Enter OpenAI API key", + normalize: normalizeApiKeyInput, + validate: validateApiKeyInput, + prompter: params.prompter, + setCredential: async (apiKey, mode) => + setOpenaiApiKey(apiKey, params.agentDir, { secretInputMode: mode }), + }); + nextConfig = applyAuthProfileConfig(nextConfig, { + profileId: "openai:default", + provider: "openai", + mode: "api_key", }); - process.env.OPENAI_API_KEY = trimmed; - await params.prompter.note( - `Saved OPENAI_API_KEY to ${result.path} for launchd compatibility.`, - "OpenAI API key", - ); return await applyOpenAiDefaultModelChoice(); } diff --git a/src/commands/auth-choice.apply.openrouter.ts b/src/commands/auth-choice.apply.openrouter.ts index bacbe1f290c..4cf01762615 100644 --- a/src/commands/auth-choice.apply.openrouter.ts +++ b/src/commands/auth-choice.apply.openrouter.ts @@ -1,11 +1,10 @@ import { 
ensureAuthProfileStore, resolveAuthProfileOrder } from "../agents/auth-profiles.js"; -import { resolveEnvApiKey } from "../agents/model-auth.js"; +import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; import { - formatApiKeyPreview, - normalizeApiKeyInput, - validateApiKeyInput, -} from "./auth-choice.api-key.js"; -import { createAuthChoiceAgentModelNoter } from "./auth-choice.apply-helpers.js"; + createAuthChoiceAgentModelNoter, + ensureApiKeyFromOptionEnvOrPrompt, + normalizeSecretInputModeInput, +} from "./auth-choice.apply-helpers.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { applyDefaultModelChoice } from "./auth-choice.default-model.js"; import { @@ -22,6 +21,7 @@ export async function applyAuthChoiceOpenRouter( let nextConfig = params.config; let agentModelOverride: string | undefined; const noteAgentModel = createAuthChoiceAgentModelNoter(params); + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); const store = ensureAuthProfileStore(params.agentDir, { allowKeychainPrompt: false }); const profileOrder = resolveAuthProfileOrder({ @@ -43,30 +43,28 @@ export async function applyAuthChoiceOpenRouter( } if (!hasCredential && params.opts?.token && params.opts?.tokenProvider === "openrouter") { - await setOpenrouterApiKey(normalizeApiKeyInput(params.opts.token), params.agentDir); + await setOpenrouterApiKey(normalizeApiKeyInput(params.opts.token), params.agentDir, { + secretInputMode: requestedSecretInputMode, + }); hasCredential = true; } if (!hasCredential) { - const envKey = resolveEnvApiKey("openrouter"); - if (envKey) { - const useExisting = await params.prompter.confirm({ - message: `Use existing OPENROUTER_API_KEY (${envKey.source}, ${formatApiKeyPreview(envKey.apiKey)})?`, - initialValue: true, - }); - if (useExisting) { - await setOpenrouterApiKey(envKey.apiKey, params.agentDir); - hasCredential = true; - } - } - } - - 
if (!hasCredential) { - const key = await params.prompter.text({ - message: "Enter OpenRouter API key", + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.token, + tokenProvider: params.opts?.tokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, + expectedProviders: ["openrouter"], + provider: "openrouter", + envLabel: "OPENROUTER_API_KEY", + promptMessage: "Enter OpenRouter API key", + normalize: normalizeApiKeyInput, validate: validateApiKeyInput, + prompter: params.prompter, + setCredential: async (apiKey, mode) => + setOpenrouterApiKey(apiKey, params.agentDir, { secretInputMode: mode }), }); - await setOpenrouterApiKey(normalizeApiKeyInput(String(key ?? "")), params.agentDir); hasCredential = true; } diff --git a/src/commands/auth-choice.apply.volcengine-byteplus.test.ts b/src/commands/auth-choice.apply.volcengine-byteplus.test.ts new file mode 100644 index 00000000000..c1d83bf7101 --- /dev/null +++ b/src/commands/auth-choice.apply.volcengine-byteplus.test.ts @@ -0,0 +1,187 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { applyAuthChoiceBytePlus } from "./auth-choice.apply.byteplus.js"; +import { applyAuthChoiceVolcengine } from "./auth-choice.apply.volcengine.js"; +import { + createAuthTestLifecycle, + createExitThrowingRuntime, + createWizardPrompter, + readAuthProfilesForAgent, + setupAuthTestEnv, +} from "./test-wizard-helpers.js"; + +describe("volcengine/byteplus auth choice", () => { + const lifecycle = createAuthTestLifecycle([ + "OPENCLAW_STATE_DIR", + "OPENCLAW_AGENT_DIR", + "PI_CODING_AGENT_DIR", + "VOLCANO_ENGINE_API_KEY", + "BYTEPLUS_API_KEY", + ]); + + async function setupTempState() { + const env = await setupAuthTestEnv("openclaw-volc-byte-"); + lifecycle.setStateDir(env.stateDir); + return env.agentDir; + } + + afterEach(async () => { + await lifecycle.cleanup(); + }); + + it("stores volcengine env key as plaintext by default", async () => { + const agentDir = await 
setupTempState(); + process.env.VOLCANO_ENGINE_API_KEY = "volc-env-key"; + + const prompter = createWizardPrompter( + { + confirm: vi.fn(async () => true), + text: vi.fn(async () => "unused"), + }, + { defaultSelect: "plaintext" }, + ); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoiceVolcengine({ + authChoice: "volcengine-api-key", + config: {}, + prompter, + runtime, + setDefaultModel: true, + }); + + expect(result).not.toBeNull(); + expect(result?.config.auth?.profiles?.["volcengine:default"]).toMatchObject({ + provider: "volcengine", + mode: "api_key", + }); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(agentDir); + expect(parsed.profiles?.["volcengine:default"]?.key).toBe("volc-env-key"); + expect(parsed.profiles?.["volcengine:default"]?.keyRef).toBeUndefined(); + }); + + it("stores volcengine env key as keyRef in ref mode", async () => { + const agentDir = await setupTempState(); + process.env.VOLCANO_ENGINE_API_KEY = "volc-env-key"; + + const prompter = createWizardPrompter( + { + confirm: vi.fn(async () => true), + text: vi.fn(async () => "unused"), + }, + { defaultSelect: "ref" }, + ); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoiceVolcengine({ + authChoice: "volcengine-api-key", + config: {}, + prompter, + runtime, + setDefaultModel: true, + }); + + expect(result).not.toBeNull(); + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(agentDir); + expect(parsed.profiles?.["volcengine:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "VOLCANO_ENGINE_API_KEY" }, + }); + expect(parsed.profiles?.["volcengine:default"]?.key).toBeUndefined(); + }); + + it("stores byteplus env key as plaintext by default", async () => { + const agentDir = await setupTempState(); + process.env.BYTEPLUS_API_KEY = "byte-env-key"; + + const prompter = createWizardPrompter( + { + confirm: vi.fn(async () => true), + text: 
vi.fn(async () => "unused"), + }, + { defaultSelect: "plaintext" }, + ); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoiceBytePlus({ + authChoice: "byteplus-api-key", + config: {}, + prompter, + runtime, + setDefaultModel: true, + }); + + expect(result).not.toBeNull(); + expect(result?.config.auth?.profiles?.["byteplus:default"]).toMatchObject({ + provider: "byteplus", + mode: "api_key", + }); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(agentDir); + expect(parsed.profiles?.["byteplus:default"]?.key).toBe("byte-env-key"); + expect(parsed.profiles?.["byteplus:default"]?.keyRef).toBeUndefined(); + }); + + it("stores byteplus env key as keyRef in ref mode", async () => { + const agentDir = await setupTempState(); + process.env.BYTEPLUS_API_KEY = "byte-env-key"; + + const prompter = createWizardPrompter( + { + confirm: vi.fn(async () => true), + text: vi.fn(async () => "unused"), + }, + { defaultSelect: "ref" }, + ); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoiceBytePlus({ + authChoice: "byteplus-api-key", + config: {}, + prompter, + runtime, + setDefaultModel: true, + }); + + expect(result).not.toBeNull(); + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(agentDir); + expect(parsed.profiles?.["byteplus:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "BYTEPLUS_API_KEY" }, + }); + expect(parsed.profiles?.["byteplus:default"]?.key).toBeUndefined(); + }); + + it("stores explicit volcengine key when env is not used", async () => { + const agentDir = await setupTempState(); + const prompter = createWizardPrompter( + { + confirm: vi.fn(async () => false), + text: vi.fn(async () => "volc-manual-key"), + }, + { defaultSelect: "" }, + ); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoiceVolcengine({ + authChoice: "volcengine-api-key", + config: {}, + prompter, + runtime, + 
setDefaultModel: true, + }); + + expect(result).not.toBeNull(); + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(agentDir); + expect(parsed.profiles?.["volcengine:default"]?.key).toBe("volc-manual-key"); + expect(parsed.profiles?.["volcengine:default"]?.keyRef).toBeUndefined(); + }); +}); diff --git a/src/commands/auth-choice.apply.volcengine.ts b/src/commands/auth-choice.apply.volcengine.ts index 0616dc177ad..c98f442ae4e 100644 --- a/src/commands/auth-choice.apply.volcengine.ts +++ b/src/commands/auth-choice.apply.volcengine.ts @@ -1,12 +1,11 @@ -import { resolveEnvApiKey } from "../agents/model-auth.js"; -import { upsertSharedEnvVar } from "../infra/env-file.js"; +import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; import { - formatApiKeyPreview, - normalizeApiKeyInput, - validateApiKeyInput, -} from "./auth-choice.api-key.js"; + ensureApiKeyFromOptionEnvOrPrompt, + normalizeSecretInputModeInput, +} from "./auth-choice.apply-helpers.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { applyPrimaryModel } from "./model-picker.js"; +import { applyAuthProfileConfig, setVolcengineApiKey } from "./onboard-auth.js"; /** Default model for Volcano Engine auth onboarding. 
*/ export const VOLCENGINE_DEFAULT_MODEL = "volcengine-plan/ark-code-latest"; @@ -18,54 +17,28 @@ export async function applyAuthChoiceVolcengine( return null; } - const envKey = resolveEnvApiKey("volcengine"); - if (envKey) { - const useExisting = await params.prompter.confirm({ - message: `Use existing VOLCANO_ENGINE_API_KEY (${envKey.source}, ${formatApiKeyPreview(envKey.apiKey)})?`, - initialValue: true, - }); - if (useExisting) { - const result = upsertSharedEnvVar({ - key: "VOLCANO_ENGINE_API_KEY", - value: envKey.apiKey, - }); - if (!process.env.VOLCANO_ENGINE_API_KEY) { - process.env.VOLCANO_ENGINE_API_KEY = envKey.apiKey; - } - await params.prompter.note( - `Copied VOLCANO_ENGINE_API_KEY to ${result.path} for launchd compatibility.`, - "Volcano Engine API Key", - ); - const configWithModel = applyPrimaryModel(params.config, VOLCENGINE_DEFAULT_MODEL); - return { - config: configWithModel, - agentModelOverride: VOLCENGINE_DEFAULT_MODEL, - }; - } - } - - let key: string | undefined; - if (params.opts?.volcengineApiKey) { - key = params.opts.volcengineApiKey; - } else { - key = await params.prompter.text({ - message: "Enter Volcano Engine API Key", - validate: validateApiKeyInput, - }); - } - - const trimmed = normalizeApiKeyInput(String(key)); - const result = upsertSharedEnvVar({ - key: "VOLCANO_ENGINE_API_KEY", - value: trimmed, + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.volcengineApiKey, + tokenProvider: "volcengine", + secretInputMode: requestedSecretInputMode, + config: params.config, + expectedProviders: ["volcengine"], + provider: "volcengine", + envLabel: "VOLCANO_ENGINE_API_KEY", + promptMessage: "Enter Volcano Engine API key", + normalize: normalizeApiKeyInput, + validate: validateApiKeyInput, + prompter: params.prompter, + setCredential: async (apiKey, mode) => + setVolcengineApiKey(apiKey, params.agentDir, { secretInputMode: mode 
}), }); - process.env.VOLCANO_ENGINE_API_KEY = trimmed; - await params.prompter.note( - `Saved VOLCANO_ENGINE_API_KEY to ${result.path} for launchd compatibility.`, - "Volcano Engine API Key", - ); - - const configWithModel = applyPrimaryModel(params.config, VOLCENGINE_DEFAULT_MODEL); + const configWithAuth = applyAuthProfileConfig(params.config, { + profileId: "volcengine:default", + provider: "volcengine", + mode: "api_key", + }); + const configWithModel = applyPrimaryModel(configWithAuth, VOLCENGINE_DEFAULT_MODEL); return { config: configWithModel, agentModelOverride: VOLCENGINE_DEFAULT_MODEL, diff --git a/src/commands/auth-choice.apply.xai.ts b/src/commands/auth-choice.apply.xai.ts index d925dc3872a..68e9ac651c3 100644 --- a/src/commands/auth-choice.apply.xai.ts +++ b/src/commands/auth-choice.apply.xai.ts @@ -1,10 +1,9 @@ -import { resolveEnvApiKey } from "../agents/model-auth.js"; +import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; import { - formatApiKeyPreview, - normalizeApiKeyInput, - validateApiKeyInput, -} from "./auth-choice.api-key.js"; -import { createAuthChoiceAgentModelNoter } from "./auth-choice.apply-helpers.js"; + createAuthChoiceAgentModelNoter, + ensureApiKeyFromOptionEnvOrPrompt, + normalizeSecretInputModeInput, +} from "./auth-choice.apply-helpers.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { applyDefaultModelChoice } from "./auth-choice.default-model.js"; import { @@ -25,35 +24,22 @@ export async function applyAuthChoiceXAI( let nextConfig = params.config; let agentModelOverride: string | undefined; const noteAgentModel = createAuthChoiceAgentModelNoter(params); - - let hasCredential = false; - const optsKey = params.opts?.xaiApiKey?.trim(); - if (optsKey) { - setXaiApiKey(normalizeApiKeyInput(optsKey), params.agentDir); - hasCredential = true; - } - - if (!hasCredential) { - const envKey = resolveEnvApiKey("xai"); - if (envKey) { - const 
useExisting = await params.prompter.confirm({ - message: `Use existing XAI_API_KEY (${envKey.source}, ${formatApiKeyPreview(envKey.apiKey)})?`, - initialValue: true, - }); - if (useExisting) { - setXaiApiKey(envKey.apiKey, params.agentDir); - hasCredential = true; - } - } - } - - if (!hasCredential) { - const key = await params.prompter.text({ - message: "Enter xAI API key", - validate: validateApiKeyInput, - }); - setXaiApiKey(normalizeApiKeyInput(String(key)), params.agentDir); - } + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.xaiApiKey, + tokenProvider: "xai", + secretInputMode: requestedSecretInputMode, + config: nextConfig, + expectedProviders: ["xai"], + provider: "xai", + envLabel: "XAI_API_KEY", + promptMessage: "Enter xAI API key", + normalize: normalizeApiKeyInput, + validate: validateApiKeyInput, + prompter: params.prompter, + setCredential: async (apiKey, mode) => + setXaiApiKey(apiKey, params.agentDir, { secretInputMode: mode }), + }); nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "xai:default", diff --git a/src/commands/auth-choice.test.ts b/src/commands/auth-choice.test.ts index 5a96c31650f..bfadf93f074 100644 --- a/src/commands/auth-choice.test.ts +++ b/src/commands/auth-choice.test.ts @@ -46,6 +46,7 @@ vi.mock("./zai-endpoint-detect.js", () => ({ type StoredAuthProfile = { key?: string; + keyRef?: { source: string; provider: string; id: string }; access?: string; refresh?: string; provider?: string; @@ -628,6 +629,11 @@ describe("applyAuthChoice", () => { envValue: string; profileId: string; provider: string; + opts?: { secretInputMode?: "ref" }; + expectEnvPrompt: boolean; + expectedTextCalls: number; + expectedKey?: string; + expectedKeyRef?: { source: "env"; provider: string; id: string }; expectedModel?: string; expectedModelPrefix?: string; }> = [ @@ -637,6 +643,9 @@ describe("applyAuthChoice", () => { envValue: 
"sk-synthetic-env", profileId: "synthetic:default", provider: "synthetic", + expectEnvPrompt: true, + expectedTextCalls: 0, + expectedKey: "sk-synthetic-env", expectedModelPrefix: "synthetic/", }, { @@ -645,6 +654,9 @@ describe("applyAuthChoice", () => { envValue: "sk-openrouter-test", profileId: "openrouter:default", provider: "openrouter", + expectEnvPrompt: true, + expectedTextCalls: 0, + expectedKey: "sk-openrouter-test", expectedModel: "openrouter/auto", }, { @@ -653,6 +665,21 @@ describe("applyAuthChoice", () => { envValue: "gateway-test-key", profileId: "vercel-ai-gateway:default", provider: "vercel-ai-gateway", + expectEnvPrompt: true, + expectedTextCalls: 0, + expectedKey: "gateway-test-key", + expectedModel: "vercel-ai-gateway/anthropic/claude-opus-4.6", + }, + { + authChoice: "ai-gateway-api-key", + envKey: "AI_GATEWAY_API_KEY", + envValue: "gateway-ref-key", + profileId: "vercel-ai-gateway:default", + provider: "vercel-ai-gateway", + opts: { secretInputMode: "ref" }, + expectEnvPrompt: false, + expectedTextCalls: 1, + expectedKeyRef: { source: "env", provider: "default", id: "AI_GATEWAY_API_KEY" }, expectedModel: "vercel-ai-gateway/anthropic/claude-opus-4.6", }, ]; @@ -673,14 +700,19 @@ describe("applyAuthChoice", () => { prompter, runtime, setDefaultModel: true, + opts: scenario.opts, }); - expect(confirm).toHaveBeenCalledWith( - expect.objectContaining({ - message: expect.stringContaining(scenario.envKey), - }), - ); - expect(text).not.toHaveBeenCalled(); + if (scenario.expectEnvPrompt) { + expect(confirm).toHaveBeenCalledWith( + expect.objectContaining({ + message: expect.stringContaining(scenario.envKey), + }), + ); + } else { + expect(confirm).not.toHaveBeenCalled(); + } + expect(text).toHaveBeenCalledTimes(scenario.expectedTextCalls); expect(result.config.auth?.profiles?.[scenario.profileId]).toMatchObject({ provider: scenario.provider, mode: "api_key", @@ -697,10 +729,80 @@ describe("applyAuthChoice", () => { ), ).toBe(true); } - expect((await 
readAuthProfile(scenario.profileId))?.key).toBe(scenario.envValue); + const profile = await readAuthProfile(scenario.profileId); + if (scenario.expectedKeyRef) { + expect(profile?.keyRef).toEqual(scenario.expectedKeyRef); + expect(profile?.key).toBeUndefined(); + } else { + expect(profile?.key).toBe(scenario.expectedKey); + expect(profile?.keyRef).toBeUndefined(); + } } }); + it("retries ref setup when provider preflight fails and can switch to env ref", async () => { + await setupTempState(); + process.env.OPENAI_API_KEY = "sk-openai-env"; + + const selectValues: Array<"provider" | "env" | "filemain"> = ["provider", "filemain", "env"]; + const select = vi.fn(async (params: Parameters[0]) => { + const next = selectValues[0]; + if (next && params.options.some((option) => option.value === next)) { + selectValues.shift(); + return next as never; + } + return (params.options[0]?.value ?? "env") as never; + }); + const text = vi + .fn() + .mockResolvedValueOnce("/providers/openai/apiKey") + .mockResolvedValueOnce("OPENAI_API_KEY"); + const note = vi.fn(async () => undefined); + + const prompter = createPrompter({ + select, + text, + note, + confirm: vi.fn(async () => true), + }); + const runtime = createExitThrowingRuntime(); + + const result = await applyAuthChoice({ + authChoice: "openai-api-key", + config: { + secrets: { + providers: { + filemain: { + source: "file", + path: "/tmp/openclaw-missing-secrets.json", + mode: "json", + }, + }, + }, + }, + prompter, + runtime, + setDefaultModel: false, + opts: { secretInputMode: "ref" }, + }); + + expect(result.config.auth?.profiles?.["openai:default"]).toMatchObject({ + provider: "openai", + mode: "api_key", + }); + expect(note).toHaveBeenCalledWith( + expect.stringContaining("Could not validate provider reference"), + "Reference check failed", + ); + expect(note).toHaveBeenCalledWith( + expect.stringContaining("Validated environment variable OPENAI_API_KEY."), + "Reference validated", + ); + expect(await 
readAuthProfile("openai:default")).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }); + }); + it("keeps existing default model for explicit provider keys when setDefaultModel=false", async () => { const scenarios: Array<{ authChoice: "xai-api-key" | "opencode-zen"; @@ -916,12 +1018,15 @@ describe("applyAuthChoice", () => { textValues: string[]; confirmValue: boolean; opts?: { - cloudflareAiGatewayAccountId: string; - cloudflareAiGatewayGatewayId: string; - cloudflareAiGatewayApiKey: string; + secretInputMode?: "ref"; + cloudflareAiGatewayAccountId?: string; + cloudflareAiGatewayGatewayId?: string; + cloudflareAiGatewayApiKey?: string; }; expectEnvPrompt: boolean; - expectedKey: string; + expectedTextCalls: number; + expectedKey?: string; + expectedKeyRef?: { source: string; provider: string; id: string }; expectedMetadata: { accountId: string; gatewayId: string }; }> = [ { @@ -929,12 +1034,28 @@ describe("applyAuthChoice", () => { textValues: ["cf-account-id", "cf-gateway-id"], confirmValue: true, expectEnvPrompt: true, + expectedTextCalls: 2, expectedKey: "cf-gateway-test-key", expectedMetadata: { accountId: "cf-account-id", gatewayId: "cf-gateway-id", }, }, + { + envGatewayKey: "cf-gateway-ref-key", + textValues: ["cf-account-id-ref", "cf-gateway-id-ref"], + confirmValue: true, + opts: { + secretInputMode: "ref", + }, + expectEnvPrompt: false, + expectedTextCalls: 3, + expectedKeyRef: { source: "env", provider: "default", id: "CLOUDFLARE_AI_GATEWAY_API_KEY" }, + expectedMetadata: { + accountId: "cf-account-id-ref", + gatewayId: "cf-gateway-id-ref", + }, + }, { textValues: [], confirmValue: false, @@ -944,6 +1065,7 @@ describe("applyAuthChoice", () => { cloudflareAiGatewayApiKey: "cf-direct-key", }, expectEnvPrompt: false, + expectedTextCalls: 0, expectedKey: "cf-direct-key", expectedMetadata: { accountId: "acc-direct", @@ -983,7 +1105,7 @@ describe("applyAuthChoice", () => { } else { 
expect(confirm).not.toHaveBeenCalled(); } - expect(text).toHaveBeenCalledTimes(scenario.textValues.length); + expect(text).toHaveBeenCalledTimes(scenario.expectedTextCalls); expect(result.config.auth?.profiles?.["cloudflare-ai-gateway:default"]).toMatchObject({ provider: "cloudflare-ai-gateway", mode: "api_key", @@ -993,7 +1115,11 @@ describe("applyAuthChoice", () => { ); const profile = await readAuthProfile("cloudflare-ai-gateway:default"); - expect(profile?.key).toBe(scenario.expectedKey); + if (scenario.expectedKeyRef) { + expect(profile?.keyRef).toEqual(scenario.expectedKeyRef); + } else { + expect(profile?.key).toBe(scenario.expectedKey); + } expect(profile?.metadata).toEqual(scenario.expectedMetadata); } delete process.env.CLOUDFLARE_AI_GATEWAY_API_KEY; diff --git a/src/commands/channel-test-helpers.ts b/src/commands/channel-test-helpers.ts index fd7e6f36278..65745a55d5e 100644 --- a/src/commands/channel-test-helpers.ts +++ b/src/commands/channel-test-helpers.ts @@ -6,6 +6,9 @@ import { telegramPlugin } from "../../extensions/telegram/src/channel.js"; import { whatsappPlugin } from "../../extensions/whatsapp/src/channel.js"; import { setActivePluginRegistry } from "../plugins/runtime.js"; import { createTestRegistry } from "../test-utils/channel-plugins.js"; +import type { ChannelChoice } from "./onboard-types.js"; +import { getChannelOnboardingAdapter } from "./onboarding/registry.js"; +import type { ChannelOnboardingAdapter } from "./onboarding/types.js"; export function setDefaultChannelPluginRegistryForTests(): void { const channels = [ @@ -18,3 +21,24 @@ export function setDefaultChannelPluginRegistryForTests(): void { ] as unknown as Parameters[0]; setActivePluginRegistry(createTestRegistry(channels)); } + +export function patchChannelOnboardingAdapter( + channel: ChannelChoice, + patch: Pick, +): () => void { + const adapter = getChannelOnboardingAdapter(channel); + if (!adapter) { + throw new Error(`missing onboarding adapter for ${channel}`); + } + 
const keys = Object.keys(patch) as K[]; + const previous = {} as Pick; + for (const key of keys) { + previous[key] = adapter[key]; + adapter[key] = patch[key]; + } + return () => { + for (const key of keys) { + adapter[key] = previous[key]; + } + }; +} diff --git a/src/commands/channels.adds-non-default-telegram-account.test.ts b/src/commands/channels.adds-non-default-telegram-account.test.ts index 0187675788d..3df9fc11061 100644 --- a/src/commands/channels.adds-non-default-telegram-account.test.ts +++ b/src/commands/channels.adds-non-default-telegram-account.test.ts @@ -66,6 +66,96 @@ describe("channels command", () => { expect(next.channels?.telegram?.accounts?.alerts?.botToken).toBe("123:abc"); }); + it("moves single-account telegram config into accounts.default when adding non-default", async () => { + configMocks.readConfigFileSnapshot.mockResolvedValue({ + ...baseConfigSnapshot, + config: { + channels: { + telegram: { + enabled: true, + botToken: "legacy-token", + dmPolicy: "allowlist", + allowFrom: ["111"], + groupPolicy: "allowlist", + streaming: "partial", + }, + }, + }, + }); + + await channelsAddCommand( + { channel: "telegram", account: "alerts", token: "alerts-token" }, + runtime, + { hasFlags: true }, + ); + + const next = configMocks.writeConfigFile.mock.calls[0]?.[0] as { + channels?: { + telegram?: { + botToken?: string; + dmPolicy?: string; + allowFrom?: string[]; + groupPolicy?: string; + streaming?: string; + accounts?: Record< + string, + { + botToken?: string; + dmPolicy?: string; + allowFrom?: string[]; + groupPolicy?: string; + streaming?: string; + } + >; + }; + }; + }; + expect(next.channels?.telegram?.accounts?.default).toEqual({ + botToken: "legacy-token", + dmPolicy: "allowlist", + allowFrom: ["111"], + groupPolicy: "allowlist", + streaming: "partial", + }); + expect(next.channels?.telegram?.botToken).toBeUndefined(); + expect(next.channels?.telegram?.dmPolicy).toBeUndefined(); + 
expect(next.channels?.telegram?.allowFrom).toBeUndefined(); + expect(next.channels?.telegram?.groupPolicy).toBeUndefined(); + expect(next.channels?.telegram?.streaming).toBeUndefined(); + expect(next.channels?.telegram?.accounts?.alerts?.botToken).toBe("alerts-token"); + }); + + it("seeds accounts.default for env-only single-account telegram config when adding non-default", async () => { + configMocks.readConfigFileSnapshot.mockResolvedValue({ + ...baseConfigSnapshot, + config: { + channels: { + telegram: { + enabled: true, + }, + }, + }, + }); + + await channelsAddCommand( + { channel: "telegram", account: "alerts", token: "alerts-token" }, + runtime, + { hasFlags: true }, + ); + + const next = configMocks.writeConfigFile.mock.calls[0]?.[0] as { + channels?: { + telegram?: { + enabled?: boolean; + accounts?: Record; + }; + }; + }; + expect(next.channels?.telegram?.enabled).toBe(true); + expect(next.channels?.telegram?.accounts?.default).toEqual({}); + expect(next.channels?.telegram?.accounts?.alerts?.botToken).toBe("alerts-token"); + }); + it("adds a default slack account with tokens", async () => { configMocks.readConfigFileSnapshot.mockResolvedValue({ ...baseConfigSnapshot }); await channelsAddCommand( diff --git a/src/commands/channels/add.ts b/src/commands/channels/add.ts index a23fb2428e2..882e7f16ca5 100644 --- a/src/commands/channels/add.ts +++ b/src/commands/channels/add.ts @@ -1,6 +1,7 @@ import { resolveAgentWorkspaceDir, resolveDefaultAgentId } from "../../agents/agent-scope.js"; import { listChannelPluginCatalogEntries } from "../../channels/plugins/catalog.js"; import { getChannelPlugin, normalizeChannelId } from "../../channels/plugins/index.js"; +import { moveSingleAccountChannelSectionToDefaultAccount } from "../../channels/plugins/setup-helpers.js"; import type { ChannelId, ChannelSetupInput } from "../../channels/plugins/types.js"; import { writeConfigFile, type OpenClawConfig } from "../../config/config.js"; import { DEFAULT_ACCOUNT_ID, 
normalizeAccountId } from "../../routing/session-key.js"; @@ -8,6 +9,8 @@ import { defaultRuntime, type RuntimeEnv } from "../../runtime.js"; import { resolveTelegramAccount } from "../../telegram/accounts.js"; import { deleteTelegramUpdateOffset } from "../../telegram/update-offset-store.js"; import { createClackPrompter } from "../../wizard/clack-prompter.js"; +import { applyAgentBindings, describeBinding } from "../agents.bindings.js"; +import { buildAgentSummaries } from "../agents.config.js"; import { setupChannels } from "../onboard-channels.js"; import type { ChannelChoice } from "../onboard-types.js"; import { @@ -111,6 +114,68 @@ export async function channelsAddCommand( } } + const bindTargets = selection + .map((channel) => ({ + channel, + accountId: accountIds[channel]?.trim(), + })) + .filter( + ( + value, + ): value is { + channel: ChannelChoice; + accountId: string; + } => Boolean(value.accountId), + ); + if (bindTargets.length > 0) { + const bindNow = await prompter.confirm({ + message: "Bind configured channel accounts to agents now?", + initialValue: true, + }); + if (bindNow) { + const agentSummaries = buildAgentSummaries(nextConfig); + const defaultAgentId = resolveDefaultAgentId(nextConfig); + for (const target of bindTargets) { + const targetAgentId = await prompter.select({ + message: `Route ${target.channel} account "${target.accountId}" to agent`, + options: agentSummaries.map((agent) => ({ + value: agent.id, + label: agent.isDefault ? 
`${agent.id} (default)` : agent.id, + })), + initialValue: defaultAgentId, + }); + const bindingResult = applyAgentBindings(nextConfig, [ + { + agentId: targetAgentId, + match: { channel: target.channel, accountId: target.accountId }, + }, + ]); + nextConfig = bindingResult.config; + if (bindingResult.added.length > 0 || bindingResult.updated.length > 0) { + await prompter.note( + [ + ...bindingResult.added.map((binding) => `Added: ${describeBinding(binding)}`), + ...bindingResult.updated.map((binding) => `Updated: ${describeBinding(binding)}`), + ].join("\n"), + "Routing bindings", + ); + } + if (bindingResult.conflicts.length > 0) { + await prompter.note( + [ + "Skipped bindings already claimed by another agent:", + ...bindingResult.conflicts.map( + (conflict) => + `- ${describeBinding(conflict.binding)} (agent=${conflict.existingAgentId})`, + ), + ].join("\n"), + "Routing bindings", + ); + } + } + } + } + await writeConfigFile(nextConfig); await prompter.outro("Channels updated."); return; @@ -153,9 +218,6 @@ export async function channelsAddCommand( runtime.exit(1); return; } - const accountId = - plugin.setup.resolveAccountId?.({ cfg: nextConfig, accountId: opts.account }) ?? - normalizeAccountId(opts.account); const useEnv = opts.useEnv === true; const initialSyncLimit = typeof opts.initialSyncLimit === "number" @@ -199,6 +261,12 @@ export async function channelsAddCommand( dmAllowlist, autoDiscoverChannels: opts.autoDiscoverChannels, }; + const accountId = + plugin.setup.resolveAccountId?.({ + cfg: nextConfig, + accountId: opts.account, + input, + }) ?? normalizeAccountId(opts.account); const validationError = plugin.setup.validateInput?.({ cfg: nextConfig, @@ -216,6 +284,13 @@ export async function channelsAddCommand( ? 
resolveTelegramAccount({ cfg: nextConfig, accountId }).token.trim() : ""; + if (accountId !== DEFAULT_ACCOUNT_ID) { + nextConfig = moveSingleAccountChannelSectionToDefaultAccount({ + cfg: nextConfig, + channelKey: channel, + }); + } + nextConfig = applyChannelAccountConfig({ cfg: nextConfig, channel, diff --git a/src/commands/configure.wizard.ts b/src/commands/configure.wizard.ts index e96983461ba..5639b5e6d07 100644 --- a/src/commands/configure.wizard.ts +++ b/src/commands/configure.wizard.ts @@ -1,3 +1,5 @@ +import fsPromises from "node:fs/promises"; +import nodePath from "node:path"; import { formatCliCommand } from "../cli/command-format.js"; import type { OpenClawConfig } from "../config/config.js"; import { readConfigFileSnapshot, resolveGatewayPort, writeConfigFile } from "../config/config.js"; @@ -332,6 +334,32 @@ export async function runConfigureWizard( runtime, ); workspaceDir = resolveUserPath(String(workspaceInput ?? "").trim() || DEFAULT_WORKSPACE); + if (!snapshot.exists) { + const indicators = ["MEMORY.md", "memory", ".git"].map((name) => + nodePath.join(workspaceDir, name), + ); + const hasExistingContent = ( + await Promise.all( + indicators.map(async (candidate) => { + try { + await fsPromises.access(candidate); + return true; + } catch { + return false; + } + }), + ) + ).some(Boolean); + if (hasExistingContent) { + note( + [ + `Existing workspace detected at ${workspaceDir}`, + "Existing files are preserved. 
Missing templates may be created, never overwritten.", + ].join("\n"), + "Existing workspace", + ); + } + } nextConfig = { ...nextConfig, agents: { diff --git a/src/commands/doctor-auth.hints.test.ts b/src/commands/doctor-auth.hints.test.ts new file mode 100644 index 00000000000..f660a4e82a2 --- /dev/null +++ b/src/commands/doctor-auth.hints.test.ts @@ -0,0 +1,28 @@ +import { describe, expect, it } from "vitest"; +import { resolveUnusableProfileHint } from "./doctor-auth.js"; + +describe("resolveUnusableProfileHint", () => { + it("returns billing guidance for disabled billing profiles", () => { + expect(resolveUnusableProfileHint({ kind: "disabled", reason: "billing" })).toBe( + "Top up credits (provider billing) or switch provider.", + ); + }); + + it("returns credential guidance for permanent auth disables", () => { + expect(resolveUnusableProfileHint({ kind: "disabled", reason: "auth_permanent" })).toBe( + "Refresh or replace credentials, then retry.", + ); + }); + + it("falls back to cooldown guidance for non-billing disable reasons", () => { + expect(resolveUnusableProfileHint({ kind: "disabled", reason: "unknown" })).toBe( + "Wait for cooldown or switch provider.", + ); + }); + + it("returns cooldown guidance for cooldown windows", () => { + expect(resolveUnusableProfileHint({ kind: "cooldown" })).toBe( + "Wait for cooldown or switch provider.", + ); + }); +}); diff --git a/src/commands/doctor-auth.ts b/src/commands/doctor-auth.ts index a12ab384a20..f408dc43f93 100644 --- a/src/commands/doctor-auth.ts +++ b/src/commands/doctor-auth.ts @@ -206,6 +206,21 @@ type AuthIssue = { remainingMs?: number; }; +export function resolveUnusableProfileHint(params: { + kind: "cooldown" | "disabled"; + reason?: string; +}): string { + if (params.kind === "disabled") { + if (params.reason === "billing") { + return "Top up credits (provider billing) or switch provider."; + } + if (params.reason === "auth_permanent" || params.reason === "auth") { + return "Refresh or replace 
credentials, then retry."; + } + } + return "Wait for cooldown or switch provider."; +} + function formatAuthIssueHint(issue: AuthIssue): string | null { if (issue.provider === "anthropic" && issue.profileId === CLAUDE_CLI_PROFILE_ID) { return `Deprecated profile. Use ${formatCliCommand("openclaw models auth setup-token")} or ${formatCliCommand( @@ -245,13 +260,14 @@ export async function noteAuthProfileHealth(params: { } const stats = store.usageStats?.[profileId]; const remaining = formatRemainingShort(until - now); - const kind = - typeof stats?.disabledUntil === "number" && now < stats.disabledUntil - ? `disabled${stats.disabledReason ? `:${stats.disabledReason}` : ""}` - : "cooldown"; - const hint = kind.startsWith("disabled:billing") - ? "Top up credits (provider billing) or switch provider." - : "Wait for cooldown or switch provider."; + const disabledActive = typeof stats?.disabledUntil === "number" && now < stats.disabledUntil; + const kind = disabledActive + ? `disabled${stats.disabledReason ? `:${stats.disabledReason}` : ""}` + : "cooldown"; + const hint = resolveUnusableProfileHint({ + kind: disabledActive ? "disabled" : "cooldown", + reason: stats?.disabledReason, + }); out.push(`- ${profileId}: ${kind} (${remaining})${hint ? 
` — ${hint}` : ""}`); } return out; diff --git a/src/commands/doctor-config-flow.missing-default-account-bindings.integration.test.ts b/src/commands/doctor-config-flow.missing-default-account-bindings.integration.test.ts new file mode 100644 index 00000000000..dae204ede43 --- /dev/null +++ b/src/commands/doctor-config-flow.missing-default-account-bindings.integration.test.ts @@ -0,0 +1,56 @@ +import { describe, expect, it, vi } from "vitest"; +import { withEnvAsync } from "../test-utils/env.js"; +import { runDoctorConfigWithInput } from "./doctor-config-flow.test-utils.js"; + +const { noteSpy } = vi.hoisted(() => ({ + noteSpy: vi.fn(), +})); + +vi.mock("../terminal/note.js", () => ({ + note: noteSpy, +})); + +vi.mock("./doctor-legacy-config.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + normalizeCompatibilityConfigValues: (cfg: unknown) => ({ + config: cfg, + changes: [], + }), + }; +}); + +import { loadAndMaybeMigrateDoctorConfig } from "./doctor-config-flow.js"; + +describe("doctor missing default account binding warning", () => { + it("emits a doctor warning when named accounts have no valid account-scoped bindings", async () => { + await withEnvAsync( + { + TELEGRAM_BOT_TOKEN: undefined, + TELEGRAM_BOT_TOKEN_FILE: undefined, + }, + async () => { + await runDoctorConfigWithInput({ + config: { + channels: { + telegram: { + accounts: { + alerts: {}, + work: {}, + }, + }, + }, + bindings: [{ agentId: "ops", match: { channel: "telegram" } }], + }, + run: loadAndMaybeMigrateDoctorConfig, + }); + }, + ); + + expect(noteSpy).toHaveBeenCalledWith( + expect.stringContaining("channels.telegram: accounts.default is missing"), + "Doctor warnings", + ); + }); +}); diff --git a/src/commands/doctor-config-flow.missing-default-account-bindings.test.ts b/src/commands/doctor-config-flow.missing-default-account-bindings.test.ts new file mode 100644 index 00000000000..6a47ab1f962 --- /dev/null +++ 
b/src/commands/doctor-config-flow.missing-default-account-bindings.test.ts @@ -0,0 +1,89 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { collectMissingDefaultAccountBindingWarnings } from "./doctor-config-flow.js"; + +describe("collectMissingDefaultAccountBindingWarnings", () => { + it("warns when named accounts exist without default and no valid binding exists", () => { + const cfg: OpenClawConfig = { + channels: { + telegram: { + accounts: { + alerts: { botToken: "a" }, + work: { botToken: "w" }, + }, + }, + }, + bindings: [{ agentId: "ops", match: { channel: "telegram" } }], + }; + + const warnings = collectMissingDefaultAccountBindingWarnings(cfg); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain("channels.telegram"); + expect(warnings[0]).toContain("alerts, work"); + }); + + it("does not warn when an explicit account binding exists", () => { + const cfg: OpenClawConfig = { + channels: { + telegram: { + accounts: { + alerts: { botToken: "a" }, + }, + }, + }, + bindings: [{ agentId: "ops", match: { channel: "telegram", accountId: "alerts" } }], + }; + + expect(collectMissingDefaultAccountBindingWarnings(cfg)).toEqual([]); + }); + + it("warns when bindings cover only a subset of configured accounts", () => { + const cfg: OpenClawConfig = { + channels: { + telegram: { + accounts: { + alerts: { botToken: "a" }, + work: { botToken: "w" }, + }, + }, + }, + bindings: [{ agentId: "ops", match: { channel: "telegram", accountId: "alerts" } }], + }; + + const warnings = collectMissingDefaultAccountBindingWarnings(cfg); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain("subset"); + expect(warnings[0]).toContain("Uncovered accounts: work"); + }); + + it("does not warn when wildcard account binding exists", () => { + const cfg: OpenClawConfig = { + channels: { + telegram: { + accounts: { + alerts: { botToken: "a" }, + }, + }, + }, + bindings: [{ agentId: "ops", 
match: { channel: "telegram", accountId: "*" } }], + }; + + expect(collectMissingDefaultAccountBindingWarnings(cfg)).toEqual([]); + }); + + it("does not warn when default account is present", () => { + const cfg: OpenClawConfig = { + channels: { + telegram: { + accounts: { + default: { botToken: "d" }, + alerts: { botToken: "a" }, + }, + }, + }, + bindings: [{ agentId: "ops", match: { channel: "telegram" } }], + }; + + expect(collectMissingDefaultAccountBindingWarnings(cfg)).toEqual([]); + }); +}); diff --git a/src/commands/doctor-config-flow.test.ts b/src/commands/doctor-config-flow.test.ts index d820cd10b89..d4b0327397d 100644 --- a/src/commands/doctor-config-flow.test.ts +++ b/src/commands/doctor-config-flow.test.ts @@ -26,14 +26,14 @@ type DiscordGuildRule = { }; type DiscordAccountRule = { - allowFrom: string[]; - dm: { allowFrom: string[]; groupChannels: string[] }; - execApprovals: { approvers: string[] }; - guilds: Record; + allowFrom?: string[]; + dm?: { allowFrom: string[]; groupChannels: string[] }; + execApprovals?: { approvers: string[] }; + guilds?: Record; }; type RepairedDiscordPolicy = { - allowFrom: string[]; + allowFrom?: string[]; dm: { allowFrom: string[]; groupChannels: string[] }; execApprovals: { approvers: string[] }; guilds: Record; @@ -186,21 +186,23 @@ describe("doctor config flow", () => { const cfg = result.cfg as unknown as { channels: { telegram: { - allowFrom: string[]; - groupAllowFrom: string[]; + allowFrom?: string[]; + groupAllowFrom?: string[]; groups: Record< string, { allowFrom: string[]; topics: Record } >; - accounts: Record; + accounts: Record; }; }; }; - expect(cfg.channels.telegram.allowFrom).toEqual(["111"]); - expect(cfg.channels.telegram.groupAllowFrom).toEqual(["222"]); + expect(cfg.channels.telegram.allowFrom).toBeUndefined(); + expect(cfg.channels.telegram.groupAllowFrom).toBeUndefined(); expect(cfg.channels.telegram.groups["-100123"].allowFrom).toEqual(["333"]); 
expect(cfg.channels.telegram.groups["-100123"].topics["99"].allowFrom).toEqual(["444"]); expect(cfg.channels.telegram.accounts.alerts.allowFrom).toEqual(["444"]); + expect(cfg.channels.telegram.accounts.default.allowFrom).toEqual(["111"]); + expect(cfg.channels.telegram.accounts.default.groupAllowFrom).toEqual(["222"]); } finally { vi.unstubAllGlobals(); } @@ -259,10 +261,23 @@ describe("doctor config flow", () => { }); const cfg = result.cfg as unknown as { - channels: { discord: RepairedDiscordPolicy }; + channels: { + discord: Omit & { + allowFrom?: string[]; + accounts: Record & { + default: { allowFrom: string[] }; + work: { + allowFrom: string[]; + dm: { allowFrom: string[]; groupChannels: string[] }; + execApprovals: { approvers: string[] }; + guilds: Record; + }; + }; + }; + }; }; - expect(cfg.channels.discord.allowFrom).toEqual(["123"]); + expect(cfg.channels.discord.allowFrom).toBeUndefined(); expect(cfg.channels.discord.dm.allowFrom).toEqual(["456"]); expect(cfg.channels.discord.dm.groupChannels).toEqual(["789"]); expect(cfg.channels.discord.execApprovals.approvers).toEqual(["321"]); @@ -270,6 +285,7 @@ describe("doctor config flow", () => { expect(cfg.channels.discord.guilds["100"].roles).toEqual(["222"]); expect(cfg.channels.discord.guilds["100"].channels.general.users).toEqual(["333"]); expect(cfg.channels.discord.guilds["100"].channels.general.roles).toEqual(["444"]); + expect(cfg.channels.discord.accounts.default.allowFrom).toEqual(["123"]); expect(cfg.channels.discord.accounts.work.allowFrom).toEqual(["555"]); expect(cfg.channels.discord.accounts.work.dm.allowFrom).toEqual(["666"]); expect(cfg.channels.discord.accounts.work.dm.groupChannels).toEqual(["777"]); @@ -285,6 +301,35 @@ describe("doctor config flow", () => { }); }); + it("does not restore top-level allowFrom when config is intentionally default-account scoped", async () => { + const result = await runDoctorConfigWithInput({ + repair: true, + config: { + channels: { + discord: { + 
accounts: { + default: { token: "discord-default-token", allowFrom: ["123"] }, + work: { token: "discord-work-token" }, + }, + }, + }, + }, + run: loadAndMaybeMigrateDoctorConfig, + }); + + const cfg = result.cfg as { + channels: { + discord: { + allowFrom?: string[]; + accounts: Record; + }; + }; + }; + + expect(cfg.channels.discord.allowFrom).toBeUndefined(); + expect(cfg.channels.discord.accounts.default.allowFrom).toEqual(["123"]); + }); + it('adds allowFrom ["*"] when dmPolicy="open" and allowFrom is missing on repair', async () => { const result = await runDoctorConfigWithInput({ repair: true, @@ -407,6 +452,50 @@ describe("doctor config flow", () => { expect(cfg.channels.discord.accounts.work.allowFrom).toEqual(["*"]); }); + it('repairs dmPolicy="allowlist" by restoring allowFrom from pairing store on repair', async () => { + const result = await withTempHome(async (home) => { + const configDir = path.join(home, ".openclaw"); + const credentialsDir = path.join(configDir, "credentials"); + await fs.mkdir(credentialsDir, { recursive: true }); + await fs.writeFile( + path.join(configDir, "openclaw.json"), + JSON.stringify( + { + channels: { + telegram: { + botToken: "fake-token", + dmPolicy: "allowlist", + }, + }, + }, + null, + 2, + ), + "utf-8", + ); + await fs.writeFile( + path.join(credentialsDir, "telegram-allowFrom.json"), + JSON.stringify({ version: 1, allowFrom: ["12345"] }, null, 2), + "utf-8", + ); + return await loadAndMaybeMigrateDoctorConfig({ + options: { nonInteractive: true, repair: true }, + confirm: async () => false, + }); + }); + + const cfg = result.cfg as { + channels: { + telegram: { + dmPolicy: string; + allowFrom: string[]; + }; + }; + }; + expect(cfg.channels.telegram.dmPolicy).toBe("allowlist"); + expect(cfg.channels.telegram.allowFrom).toEqual(["12345"]); + }); + it("migrates legacy toolsBySender keys to typed id entries on repair", async () => { const result = await runDoctorConfigWithInput({ repair: true, diff --git 
a/src/commands/doctor-config-flow.ts b/src/commands/doctor-config-flow.ts index f4a7e4132a8..5c62a8c2516 100644 --- a/src/commands/doctor-config-flow.ts +++ b/src/commands/doctor-config-flow.ts @@ -1,6 +1,7 @@ import fs from "node:fs/promises"; import path from "node:path"; import type { ZodIssue } from "zod"; +import { normalizeChatChannelId } from "../channels/registry.js"; import { isNumericTelegramUserId, normalizeTelegramAllowFromEntry, @@ -27,6 +28,8 @@ import { isTrustedSafeBinPath, normalizeTrustedSafeBinDirs, } from "../infra/exec-safe-bin-trust.js"; +import { readChannelAllowFromStore } from "../pairing/pairing-store.js"; +import { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "../routing/session-key.js"; import { isDiscordMutableAllowEntry, isGoogleChatMutableAllowEntry, @@ -38,7 +41,7 @@ import { import { listTelegramAccountIds, resolveTelegramAccount } from "../telegram/accounts.js"; import { note } from "../terminal/note.js"; import { isRecord, resolveHomeDir } from "../utils.js"; -import { normalizeLegacyConfigValues } from "./doctor-legacy-config.js"; +import { normalizeCompatibilityConfigValues } from "./doctor-legacy-config.js"; import type { DoctorOptions } from "./doctor-prompter.js"; import { autoMigrateLegacyStateDir } from "./doctor-state-migrations.js"; @@ -207,6 +210,103 @@ function asObjectRecord(value: unknown): Record | null { return value as Record; } +function normalizeBindingChannelKey(raw?: string | null): string { + const normalized = normalizeChatChannelId(raw); + if (normalized) { + return normalized; + } + return (raw ?? "").trim().toLowerCase(); +} + +export function collectMissingDefaultAccountBindingWarnings(cfg: OpenClawConfig): string[] { + const channels = asObjectRecord(cfg.channels); + if (!channels) { + return []; + } + + const bindings = Array.isArray(cfg.bindings) ? 
cfg.bindings : []; + const warnings: string[] = []; + + for (const [channelKey, rawChannel] of Object.entries(channels)) { + const channel = asObjectRecord(rawChannel); + if (!channel) { + continue; + } + const accounts = asObjectRecord(channel.accounts); + if (!accounts) { + continue; + } + + const normalizedAccountIds = Array.from( + new Set( + Object.keys(accounts) + .map((accountId) => normalizeAccountId(accountId)) + .filter(Boolean), + ), + ); + if (normalizedAccountIds.length === 0 || normalizedAccountIds.includes(DEFAULT_ACCOUNT_ID)) { + continue; + } + const accountIdSet = new Set(normalizedAccountIds); + const channelPattern = normalizeBindingChannelKey(channelKey); + + let hasWildcardBinding = false; + const coveredAccountIds = new Set(); + for (const binding of bindings) { + const bindingRecord = asObjectRecord(binding); + if (!bindingRecord) { + continue; + } + const match = asObjectRecord(bindingRecord.match); + if (!match) { + continue; + } + + const matchChannel = + typeof match.channel === "string" ? normalizeBindingChannelKey(match.channel) : ""; + if (!matchChannel || matchChannel !== channelPattern) { + continue; + } + + const rawAccountId = typeof match.accountId === "string" ? match.accountId.trim() : ""; + if (!rawAccountId) { + continue; + } + if (rawAccountId === "*") { + hasWildcardBinding = true; + continue; + } + const normalizedBindingAccountId = normalizeAccountId(rawAccountId); + if (accountIdSet.has(normalizedBindingAccountId)) { + coveredAccountIds.add(normalizedBindingAccountId); + } + } + + if (hasWildcardBinding) { + continue; + } + + const uncoveredAccountIds = normalizedAccountIds.filter( + (accountId) => !coveredAccountIds.has(accountId), + ); + if (uncoveredAccountIds.length === 0) { + continue; + } + if (coveredAccountIds.size > 0) { + warnings.push( + `- channels.${channelKey}: accounts.default is missing and account bindings only cover a subset of configured accounts. Uncovered accounts: ${uncoveredAccountIds.join(", ")}. 
Add bindings[].match.accountId for uncovered accounts (or "*"), or add channels.${channelKey}.accounts.default.`, + ); + continue; + } + + warnings.push( + `- channels.${channelKey}: accounts.default is missing and no valid account-scoped binding exists for configured accounts (${normalizedAccountIds.join(", ")}). Channel-only bindings (no accountId) match only default. Add bindings[].match.accountId for one of these accounts (or "*"), or add channels.${channelKey}.accounts.default.`, + ); + } + + return warnings; +} + function collectTelegramAccountScopes( cfg: OpenClawConfig, ): Array<{ prefix: string; account: Record }> { @@ -996,6 +1096,243 @@ function maybeRepairOpenPolicyAllowFrom(cfg: OpenClawConfig): { return { config: next, changes }; } +function hasAllowFromEntries(list?: Array) { + return Array.isArray(list) && list.map((v) => String(v).trim()).filter(Boolean).length > 0; +} + +async function maybeRepairAllowlistPolicyAllowFrom(cfg: OpenClawConfig): Promise<{ + config: OpenClawConfig; + changes: string[]; +}> { + const channels = cfg.channels; + if (!channels || typeof channels !== "object") { + return { config: cfg, changes: [] }; + } + + type AllowFromMode = "topOnly" | "topOrNested" | "nestedOnly"; + + const resolveAllowFromMode = (channelName: string): AllowFromMode => { + if (channelName === "googlechat") { + return "nestedOnly"; + } + if (channelName === "discord" || channelName === "slack") { + return "topOrNested"; + } + return "topOnly"; + }; + + const next = structuredClone(cfg); + const changes: string[] = []; + + const applyRecoveredAllowFrom = (params: { + account: Record; + allowFrom: string[]; + mode: AllowFromMode; + prefix: string; + }) => { + const count = params.allowFrom.length; + const noun = count === 1 ? "entry" : "entries"; + + if (params.mode === "nestedOnly") { + const dmEntry = params.account.dm; + const dm = + dmEntry && typeof dmEntry === "object" && !Array.isArray(dmEntry) + ? 
(dmEntry as Record) + : {}; + dm.allowFrom = params.allowFrom; + params.account.dm = dm; + changes.push( + `- ${params.prefix}.dm.allowFrom: restored ${count} sender ${noun} from pairing store (dmPolicy="allowlist").`, + ); + return; + } + + if (params.mode === "topOrNested") { + const dmEntry = params.account.dm; + const dm = + dmEntry && typeof dmEntry === "object" && !Array.isArray(dmEntry) + ? (dmEntry as Record) + : undefined; + const nestedAllowFrom = dm?.allowFrom as Array | undefined; + if (dm && !Array.isArray(params.account.allowFrom) && Array.isArray(nestedAllowFrom)) { + dm.allowFrom = params.allowFrom; + changes.push( + `- ${params.prefix}.dm.allowFrom: restored ${count} sender ${noun} from pairing store (dmPolicy="allowlist").`, + ); + return; + } + } + + params.account.allowFrom = params.allowFrom; + changes.push( + `- ${params.prefix}.allowFrom: restored ${count} sender ${noun} from pairing store (dmPolicy="allowlist").`, + ); + }; + + const recoverAllowFromForAccount = async (params: { + channelName: string; + account: Record; + accountId?: string; + prefix: string; + }) => { + const dmEntry = params.account.dm; + const dm = + dmEntry && typeof dmEntry === "object" && !Array.isArray(dmEntry) + ? (dmEntry as Record) + : undefined; + const dmPolicy = + (params.account.dmPolicy as string | undefined) ?? (dm?.policy as string | undefined); + if (dmPolicy !== "allowlist") { + return; + } + + const topAllowFrom = params.account.allowFrom as Array | undefined; + const nestedAllowFrom = dm?.allowFrom as Array | undefined; + if (hasAllowFromEntries(topAllowFrom) || hasAllowFromEntries(nestedAllowFrom)) { + return; + } + + const normalizedChannelId = (normalizeChatChannelId(params.channelName) ?? 
params.channelName) + .trim() + .toLowerCase(); + if (!normalizedChannelId) { + return; + } + const normalizedAccountId = normalizeAccountId(params.accountId) || DEFAULT_ACCOUNT_ID; + const fromStore = await readChannelAllowFromStore( + normalizedChannelId, + process.env, + normalizedAccountId, + ).catch(() => []); + const recovered = Array.from(new Set(fromStore.map((entry) => String(entry).trim()))).filter( + Boolean, + ); + if (recovered.length === 0) { + return; + } + + applyRecoveredAllowFrom({ + account: params.account, + allowFrom: recovered, + mode: resolveAllowFromMode(params.channelName), + prefix: params.prefix, + }); + }; + + const nextChannels = next.channels as Record>; + for (const [channelName, channelConfig] of Object.entries(nextChannels)) { + if (!channelConfig || typeof channelConfig !== "object") { + continue; + } + await recoverAllowFromForAccount({ + channelName, + account: channelConfig, + prefix: `channels.${channelName}`, + }); + + const accounts = channelConfig.accounts as Record> | undefined; + if (!accounts || typeof accounts !== "object") { + continue; + } + for (const [accountId, accountConfig] of Object.entries(accounts)) { + if (!accountConfig || typeof accountConfig !== "object") { + continue; + } + await recoverAllowFromForAccount({ + channelName, + account: accountConfig, + accountId, + prefix: `channels.${channelName}.accounts.${accountId}`, + }); + } + } + + if (changes.length === 0) { + return { config: cfg, changes: [] }; + } + return { config: next, changes }; +} + +/** + * Scan all channel configs for dmPolicy="allowlist" without any allowFrom entries. + * This configuration blocks all DMs because no sender can match the empty + * allowlist. Common after upgrades that remove external allowlist + * file support. 
+ */ +function detectEmptyAllowlistPolicy(cfg: OpenClawConfig): string[] { + const channels = cfg.channels; + if (!channels || typeof channels !== "object") { + return []; + } + + const warnings: string[] = []; + + const checkAccount = ( + account: Record, + prefix: string, + parent?: Record, + ) => { + const dmEntry = account.dm; + const dm = + dmEntry && typeof dmEntry === "object" && !Array.isArray(dmEntry) + ? (dmEntry as Record) + : undefined; + const parentDmEntry = parent?.dm; + const parentDm = + parentDmEntry && typeof parentDmEntry === "object" && !Array.isArray(parentDmEntry) + ? (parentDmEntry as Record) + : undefined; + const dmPolicy = + (account.dmPolicy as string | undefined) ?? + (dm?.policy as string | undefined) ?? + (parent?.dmPolicy as string | undefined) ?? + (parentDm?.policy as string | undefined) ?? + undefined; + + if (dmPolicy !== "allowlist") { + return; + } + + const topAllowFrom = + (account.allowFrom as Array | undefined) ?? + (parent?.allowFrom as Array | undefined); + const nestedAllowFrom = dm?.allowFrom as Array | undefined; + const parentNestedAllowFrom = parentDm?.allowFrom as Array | undefined; + const effectiveAllowFrom = topAllowFrom ?? nestedAllowFrom ?? parentNestedAllowFrom; + + if (hasAllowFromEntries(effectiveAllowFrom)) { + return; + } + + warnings.push( + `- ${prefix}.dmPolicy is "allowlist" but allowFrom is empty — all DMs will be blocked. 
Add sender IDs to ${prefix}.allowFrom, or run "${formatCliCommand("openclaw doctor --fix")}" to auto-migrate from pairing store when entries exist.`, + ); + }; + + for (const [channelName, channelConfig] of Object.entries( + channels as Record>, + )) { + if (!channelConfig || typeof channelConfig !== "object") { + continue; + } + checkAccount(channelConfig, `channels.${channelName}`); + + const accounts = channelConfig.accounts; + if (accounts && typeof accounts === "object") { + for (const [accountId, account] of Object.entries( + accounts as Record>, + )) { + if (!account || typeof account !== "object") { + continue; + } + checkAccount(account, `channels.${channelName}.accounts.${accountId}`, channelConfig); + } + } + } + + return warnings; +} + type ExecSafeBinCoverageHit = { scopePath: string; bin: string; @@ -1375,7 +1712,7 @@ export async function loadAndMaybeMigrateDoctorConfig(params: { if (snapshot.legacyIssues.length > 0) { note( snapshot.legacyIssues.map((issue) => `- ${issue.path}: ${issue.message}`).join("\n"), - "Legacy config keys detected", + "Compatibility config keys detected", ); const { config: migrated, changes } = migrateLegacyConfig(snapshot.parsed); if (changes.length > 0) { @@ -1386,18 +1723,18 @@ export async function loadAndMaybeMigrateDoctorConfig(params: { pendingChanges = pendingChanges || changes.length > 0; } if (shouldRepair) { - // Legacy migration (2026-01-02, commit: 16420e5b) — normalize per-provider allowlists; move WhatsApp gating into channels.whatsapp.allowFrom. + // Compatibility migration (2026-01-02, commit: 16420e5b) — normalize per-provider allowlists; move WhatsApp gating into channels.whatsapp.allowFrom. 
if (migrated) { cfg = migrated; } } else { fixHints.push( - `Run "${formatCliCommand("openclaw doctor --fix")}" to apply legacy migrations.`, + `Run "${formatCliCommand("openclaw doctor --fix")}" to apply compatibility migrations.`, ); } } - const normalized = normalizeLegacyConfigValues(candidate); + const normalized = normalizeCompatibilityConfigValues(candidate); if (normalized.changes.length > 0) { note(normalized.changes.join("\n"), "Doctor changes"); candidate = normalized.config; @@ -1421,6 +1758,12 @@ export async function loadAndMaybeMigrateDoctorConfig(params: { } } + const missingDefaultAccountBindingWarnings = + collectMissingDefaultAccountBindingWarnings(candidate); + if (missingDefaultAccountBindingWarnings.length > 0) { + note(missingDefaultAccountBindingWarnings.join("\n"), "Doctor warnings"); + } + if (shouldRepair) { const repair = await maybeRepairTelegramAllowFromUsernames(candidate); if (repair.changes.length > 0) { @@ -1446,6 +1789,19 @@ export async function loadAndMaybeMigrateDoctorConfig(params: { cfg = allowFromRepair.config; } + const allowlistRepair = await maybeRepairAllowlistPolicyAllowFrom(candidate); + if (allowlistRepair.changes.length > 0) { + note(allowlistRepair.changes.join("\n"), "Doctor changes"); + candidate = allowlistRepair.config; + pendingChanges = true; + cfg = allowlistRepair.config; + } + + const emptyAllowlistWarnings = detectEmptyAllowlistPolicy(candidate); + if (emptyAllowlistWarnings.length > 0) { + note(emptyAllowlistWarnings.join("\n"), "Doctor warnings"); + } + const toolsBySenderRepair = maybeRepairLegacyToolsBySenderKeys(candidate); if (toolsBySenderRepair.changes.length > 0) { note(toolsBySenderRepair.changes.join("\n"), "Doctor changes"); @@ -1498,6 +1854,11 @@ export async function loadAndMaybeMigrateDoctorConfig(params: { ); } + const emptyAllowlistWarnings = detectEmptyAllowlistPolicy(candidate); + if (emptyAllowlistWarnings.length > 0) { + note(emptyAllowlistWarnings.join("\n"), "Doctor warnings"); + } + 
const toolsBySenderHits = scanLegacyToolsBySenderKeys(candidate); if (toolsBySenderHits.length > 0) { const sample = toolsBySenderHits[0]; diff --git a/src/commands/doctor-legacy-config.migrations.test.ts b/src/commands/doctor-legacy-config.migrations.test.ts index a626371c8e3..e364d1b7168 100644 --- a/src/commands/doctor-legacy-config.migrations.test.ts +++ b/src/commands/doctor-legacy-config.migrations.test.ts @@ -2,9 +2,9 @@ import fs from "node:fs"; import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it } from "vitest"; -import { normalizeLegacyConfigValues } from "./doctor-legacy-config.js"; +import { normalizeCompatibilityConfigValues } from "./doctor-legacy-config.js"; -describe("normalizeLegacyConfigValues", () => { +describe("normalizeCompatibilityConfigValues", () => { let previousOauthDir: string | undefined; let tempOauthDir: string | undefined; @@ -15,7 +15,7 @@ describe("normalizeLegacyConfigValues", () => { const expectNoWhatsAppConfigForLegacyAuth = (setup?: () => void) => { setup?.(); - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ messages: { ackReaction: "👀", ackReactionScope: "group-mentions" }, }); expect(res.config.channels?.whatsapp).toBeUndefined(); @@ -41,7 +41,7 @@ describe("normalizeLegacyConfigValues", () => { }); it("does not add whatsapp config when missing and no auth exists", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ messages: { ackReaction: "👀" }, }); @@ -50,7 +50,7 @@ describe("normalizeLegacyConfigValues", () => { }); it("copies legacy ack reaction when whatsapp config exists", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ messages: { ackReaction: "👀", ackReactionScope: "group-mentions" }, channels: { whatsapp: {} }, }); @@ -91,7 +91,7 @@ describe("normalizeLegacyConfigValues", () => { try { writeCreds(customDir); - 
const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ messages: { ackReaction: "👀", ackReactionScope: "group-mentions" }, channels: { whatsapp: { accounts: { work: { authDir: customDir } } } }, }); @@ -107,7 +107,7 @@ describe("normalizeLegacyConfigValues", () => { }); it("migrates Slack dm.policy/dm.allowFrom to dmPolicy/allowFrom aliases", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ channels: { slack: { dm: { enabled: true, policy: "open", allowFrom: ["*"] }, @@ -125,7 +125,7 @@ describe("normalizeLegacyConfigValues", () => { }); it("migrates Discord account dm.policy/dm.allowFrom to dmPolicy/allowFrom aliases", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ channels: { discord: { accounts: { @@ -147,7 +147,7 @@ describe("normalizeLegacyConfigValues", () => { }); it("migrates Discord streaming boolean alias to streaming enum", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ channels: { discord: { streaming: true, @@ -164,14 +164,16 @@ describe("normalizeLegacyConfigValues", () => { expect(res.config.channels?.discord?.streamMode).toBeUndefined(); expect(res.config.channels?.discord?.accounts?.work?.streaming).toBe("off"); expect(res.config.channels?.discord?.accounts?.work?.streamMode).toBeUndefined(); - expect(res.changes).toEqual([ + expect(res.changes).toContain( "Normalized channels.discord.streaming boolean → enum (partial).", + ); + expect(res.changes).toContain( "Normalized channels.discord.accounts.work.streaming boolean → enum (off).", - ]); + ); }); it("migrates Discord legacy streamMode into streaming enum", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ channels: { discord: { streaming: false, @@ -189,7 +191,7 @@ describe("normalizeLegacyConfigValues", () => { }); it("migrates Telegram 
streamMode into streaming enum", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ channels: { telegram: { streamMode: "block", @@ -205,7 +207,7 @@ describe("normalizeLegacyConfigValues", () => { }); it("migrates Slack legacy streaming keys to unified config", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ channels: { slack: { streaming: false, @@ -223,8 +225,46 @@ describe("normalizeLegacyConfigValues", () => { ]); }); + it("moves missing default account from single-account top-level config when named accounts already exist", () => { + const res = normalizeCompatibilityConfigValues({ + channels: { + telegram: { + enabled: true, + botToken: "legacy-token", + dmPolicy: "allowlist", + allowFrom: ["123"], + groupPolicy: "allowlist", + streaming: "partial", + accounts: { + alerts: { + enabled: true, + botToken: "alerts-token", + }, + }, + }, + }, + }); + + expect(res.config.channels?.telegram?.accounts?.default).toEqual({ + botToken: "legacy-token", + dmPolicy: "allowlist", + allowFrom: ["123"], + groupPolicy: "allowlist", + streaming: "partial", + }); + expect(res.config.channels?.telegram?.botToken).toBeUndefined(); + expect(res.config.channels?.telegram?.dmPolicy).toBeUndefined(); + expect(res.config.channels?.telegram?.allowFrom).toBeUndefined(); + expect(res.config.channels?.telegram?.groupPolicy).toBeUndefined(); + expect(res.config.channels?.telegram?.streaming).toBeUndefined(); + expect(res.config.channels?.telegram?.accounts?.alerts?.botToken).toBe("alerts-token"); + expect(res.changes).toContain( + "Moved channels.telegram single-account top-level values into channels.telegram.accounts.default.", + ); + }); + it("migrates browser ssrfPolicy allowPrivateNetwork to dangerouslyAllowPrivateNetwork", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ browser: { ssrfPolicy: { allowPrivateNetwork: true, @@ 
-242,7 +282,7 @@ describe("normalizeLegacyConfigValues", () => { }); it("normalizes conflicting browser SSRF alias keys without changing effective behavior", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ browser: { ssrfPolicy: { allowPrivateNetwork: true, diff --git a/src/commands/doctor-legacy-config.test.ts b/src/commands/doctor-legacy-config.test.ts index 38e51757b21..acc256c13b5 100644 --- a/src/commands/doctor-legacy-config.test.ts +++ b/src/commands/doctor-legacy-config.test.ts @@ -1,9 +1,9 @@ import { describe, expect, it } from "vitest"; -import { normalizeLegacyConfigValues } from "./doctor-legacy-config.js"; +import { normalizeCompatibilityConfigValues } from "./doctor-legacy-config.js"; -describe("normalizeLegacyConfigValues preview streaming aliases", () => { +describe("normalizeCompatibilityConfigValues preview streaming aliases", () => { it("normalizes telegram boolean streaming aliases to enum", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ channels: { telegram: { streaming: false, @@ -17,7 +17,7 @@ describe("normalizeLegacyConfigValues preview streaming aliases", () => { }); it("normalizes discord boolean streaming aliases to enum", () => { - const res = normalizeLegacyConfigValues({ + const res = normalizeCompatibilityConfigValues({ channels: { discord: { streaming: true, diff --git a/src/commands/doctor-legacy-config.ts b/src/commands/doctor-legacy-config.ts index 6f84067ca62..4d8117bd841 100644 --- a/src/commands/doctor-legacy-config.ts +++ b/src/commands/doctor-legacy-config.ts @@ -1,3 +1,4 @@ +import { shouldMoveSingleAccountChannelKey } from "../channels/plugins/setup-helpers.js"; import type { OpenClawConfig } from "../config/config.js"; import { resolveDiscordPreviewStreamMode, @@ -5,8 +6,9 @@ import { resolveSlackStreamingMode, resolveTelegramPreviewStreamMode, } from "../config/discord-preview-streaming.js"; +import { 
DEFAULT_ACCOUNT_ID } from "../routing/session-key.js"; -export function normalizeLegacyConfigValues(cfg: OpenClawConfig): { +export function normalizeCompatibilityConfigValues(cfg: OpenClawConfig): { config: OpenClawConfig; changes: string[]; } { @@ -289,9 +291,80 @@ export function normalizeLegacyConfigValues(cfg: OpenClawConfig): { } }; + const seedMissingDefaultAccountsFromSingleAccountBase = () => { + const channels = next.channels as Record | undefined; + if (!channels) { + return; + } + + let channelsChanged = false; + const nextChannels = { ...channels }; + for (const [channelId, rawChannel] of Object.entries(channels)) { + if (!isRecord(rawChannel)) { + continue; + } + const rawAccounts = rawChannel.accounts; + if (!isRecord(rawAccounts)) { + continue; + } + const accountKeys = Object.keys(rawAccounts); + if (accountKeys.length === 0) { + continue; + } + const hasDefault = accountKeys.some((key) => key.trim().toLowerCase() === DEFAULT_ACCOUNT_ID); + if (hasDefault) { + continue; + } + + const keysToMove = Object.entries(rawChannel) + .filter( + ([key, value]) => + key !== "accounts" && + key !== "enabled" && + value !== undefined && + shouldMoveSingleAccountChannelKey({ channelKey: channelId, key }), + ) + .map(([key]) => key); + if (keysToMove.length === 0) { + continue; + } + + const defaultAccount: Record = {}; + for (const key of keysToMove) { + const value = rawChannel[key]; + defaultAccount[key] = value && typeof value === "object" ? 
structuredClone(value) : value; + } + const nextChannel: Record = { + ...rawChannel, + }; + for (const key of keysToMove) { + delete nextChannel[key]; + } + nextChannel.accounts = { + ...rawAccounts, + [DEFAULT_ACCOUNT_ID]: defaultAccount, + }; + + nextChannels[channelId] = nextChannel; + channelsChanged = true; + changes.push( + `Moved channels.${channelId} single-account top-level values into channels.${channelId}.accounts.default.`, + ); + } + + if (!channelsChanged) { + return; + } + next = { + ...next, + channels: nextChannels as OpenClawConfig["channels"], + }; + }; + normalizeProvider("telegram"); normalizeProvider("slack"); normalizeProvider("discord"); + seedMissingDefaultAccountsFromSingleAccountBase(); const normalizeBrowserSsrFPolicyAlias = () => { const rawBrowser = next.browser; diff --git a/src/commands/doctor-security.ts b/src/commands/doctor-security.ts index dc06f6396f3..d1672c2ea75 100644 --- a/src/commands/doctor-security.ts +++ b/src/commands/doctor-security.ts @@ -90,6 +90,7 @@ export async function noteSecurityWarnings(cfg: OpenClawConfig) { const warnDmPolicy = async (params: { label: string; provider: ChannelId; + accountId: string; dmPolicy: string; allowFrom?: Array | null; policyPath?: string; @@ -101,6 +102,7 @@ export async function noteSecurityWarnings(cfg: OpenClawConfig) { const policyPath = params.policyPath ?? `${params.allowFromPath}policy`; const { hasWildcard, allowCount, isMultiUserDm } = await resolveDmAllowState({ provider: params.provider, + accountId: params.accountId, allowFrom: params.allowFrom, normalizeEntry: params.normalizeEntry, }); @@ -158,6 +160,7 @@ export async function noteSecurityWarnings(cfg: OpenClawConfig) { await warnDmPolicy({ label: plugin.meta.label ?? 
plugin.id, provider: plugin.id, + accountId: defaultAccountId, dmPolicy: dmPolicy.policy, allowFrom: dmPolicy.allowFrom, policyPath: dmPolicy.policyPath, diff --git a/src/commands/doctor-state-integrity.test.ts b/src/commands/doctor-state-integrity.test.ts index ba889d28bdf..dd33786c32d 100644 --- a/src/commands/doctor-state-integrity.test.ts +++ b/src/commands/doctor-state-integrity.test.ts @@ -168,7 +168,34 @@ describe("doctor state integrity oauth dir checks", () => { expect(text).toContain("recent sessions are missing transcripts"); expect(text).toMatch(/openclaw sessions --store ".*sessions\.json"/); expect(text).toMatch(/openclaw sessions cleanup --store ".*sessions\.json" --dry-run/); + expect(text).toMatch( + /openclaw sessions cleanup --store ".*sessions\.json" --enforce --fix-missing/, + ); expect(text).not.toContain("--active"); expect(text).not.toContain(" ls "); }); + + it("ignores slash-routing sessions for recent missing transcript warnings", async () => { + const cfg: OpenClawConfig = {}; + setupSessionState(cfg, process.env, process.env.HOME ?? 
""); + const storePath = resolveStorePath(cfg.session?.store, { agentId: "main" }); + fs.writeFileSync( + storePath, + JSON.stringify( + { + "agent:main:telegram:slash:6790081233": { + sessionId: "missing-slash-transcript", + updatedAt: Date.now(), + }, + }, + null, + 2, + ), + ); + + await noteStateIntegrity(cfg, { confirmSkipInNonInteractive: vi.fn(async () => false) }); + + const text = stateIntegrityText(); + expect(text).not.toContain("recent sessions are missing transcripts"); + }); }); diff --git a/src/commands/doctor-state-integrity.ts b/src/commands/doctor-state-integrity.ts index 2e31da8e76a..1e599f0f4af 100644 --- a/src/commands/doctor-state-integrity.ts +++ b/src/commands/doctor-state-integrity.ts @@ -16,6 +16,7 @@ import { resolveStorePath, } from "../config/sessions.js"; import { resolveRequiredHomeDir } from "../infra/home-dir.js"; +import { parseAgentSessionKey } from "../sessions/session-key-utils.js"; import { note } from "../terminal/note.js"; import { shortenHomePath } from "../utils.js"; @@ -165,6 +166,15 @@ function hasPairingPolicy(value: unknown): boolean { return false; } +function isSlashRoutingSessionKey(sessionKey: string): boolean { + const raw = sessionKey.trim().toLowerCase(); + if (!raw) { + return false; + } + const scoped = parseAgentSessionKey(raw)?.rest ?? 
raw; + return /^[^:]+:slash:[^:]+(?:$|:)/.test(scoped); +} + function shouldRequireOAuthDir(cfg: OpenClawConfig, env: NodeJS.ProcessEnv): boolean { if (env.OPENCLAW_OAUTH_DIR?.trim()) { return true; @@ -413,7 +423,8 @@ export async function noteStateIntegrity( return bUpdated - aUpdated; }) .slice(0, 5); - const missing = recent.filter(([, entry]) => { + const recentTranscriptCandidates = recent.filter(([key]) => !isSlashRoutingSessionKey(key)); + const missing = recentTranscriptCandidates.filter(([, entry]) => { const sessionId = entry.sessionId; if (!sessionId) { return false; @@ -424,9 +435,10 @@ export async function noteStateIntegrity( if (missing.length > 0) { warnings.push( [ - `- ${missing.length}/${recent.length} recent sessions are missing transcripts.`, + `- ${missing.length}/${recentTranscriptCandidates.length} recent sessions are missing transcripts.`, ` Verify sessions in store: ${formatCliCommand(`openclaw sessions --store "${absoluteStorePath}"`)}`, ` Preview cleanup impact: ${formatCliCommand(`openclaw sessions cleanup --store "${absoluteStorePath}" --dry-run`)}`, + ` Prune missing entries: ${formatCliCommand(`openclaw sessions cleanup --store "${absoluteStorePath}" --enforce --fix-missing`)}`, ].join("\n"), ); } diff --git a/src/commands/models.list.auth-sync.test.ts b/src/commands/models.list.auth-sync.test.ts index 75eb98cc09d..b97de4eba1d 100644 --- a/src/commands/models.list.auth-sync.test.ts +++ b/src/commands/models.list.auth-sync.test.ts @@ -100,7 +100,7 @@ describe("models list auth-profile sync", () => { const openrouter = await runModelsListAndGetProvider("openrouter/"); expect(openrouter?.available).toBe(true); - expect(await pathExists(authPath)).toBe(true); + expect(await pathExists(authPath)).toBe(false); }); }); diff --git a/src/commands/models.list.test.ts b/src/commands/models.list.test.ts index da64561de3f..1469effeff1 100644 --- a/src/commands/models.list.test.ts +++ b/src/commands/models.list.test.ts @@ -6,9 +6,6 @@ let 
toModelRow: typeof import("./models/list.registry.js").toModelRow; const loadConfig = vi.fn(); const ensureOpenClawModelsJson = vi.fn().mockResolvedValue(undefined); -const ensurePiAuthJsonFromAuthProfiles = vi - .fn() - .mockResolvedValue({ wrote: false, authPath: "/tmp/openclaw-agent/auth.json" }); const resolveOpenClawAgentDir = vi.fn().mockReturnValue("/tmp/openclaw-agent"); const ensureAuthProfileStore = vi.fn().mockReturnValue({ version: 1, profiles: {} }); const listProfilesForProvider = vi.fn().mockReturnValue([]); @@ -38,10 +35,6 @@ vi.mock("../agents/models-config.js", () => ({ ensureOpenClawModelsJson, })); -vi.mock("../agents/pi-auth-json.js", () => ({ - ensurePiAuthJsonFromAuthProfiles, -})); - vi.mock("../agents/agent-paths.js", () => ({ resolveOpenClawAgentDir, })); @@ -121,7 +114,6 @@ beforeEach(() => { modelRegistryState.getAllError = undefined; modelRegistryState.getAvailableError = undefined; listProfilesForProvider.mockReturnValue([]); - ensurePiAuthJsonFromAuthProfiles.mockClear(); }); afterEach(() => { @@ -223,13 +215,12 @@ describe("models list/status", () => { ({ loadModelRegistry, toModelRow } = await import("./models/list.registry.js")); }); - it("models list syncs auth-profiles into auth.json before availability checks", async () => { + it("models list runs model discovery without auth.json sync", async () => { setDefaultZaiRegistry(); const runtime = makeRuntime(); await modelsListCommand({ all: true, json: true }, runtime); - - expect(ensurePiAuthJsonFromAuthProfiles).toHaveBeenCalledWith("/tmp/openclaw-agent"); + expect(runtime.error).not.toHaveBeenCalled(); }); it("models list outputs canonical zai key for configured z.ai model", async () => { diff --git a/src/commands/models/list.auth-overview.test.ts b/src/commands/models/list.auth-overview.test.ts new file mode 100644 index 00000000000..bc23ff9351c --- /dev/null +++ b/src/commands/models/list.auth-overview.test.ts @@ -0,0 +1,24 @@ +import { describe, expect, it } from "vitest"; 
+import { resolveProviderAuthOverview } from "./list.auth-overview.js"; + +describe("resolveProviderAuthOverview", () => { + it("does not throw when token profile only has tokenRef", () => { + const overview = resolveProviderAuthOverview({ + provider: "github-copilot", + cfg: {}, + store: { + version: 1, + profiles: { + "github-copilot:default": { + type: "token", + provider: "github-copilot", + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, + }, + }, + } as never, + modelsPath: "/tmp/models.json", + }); + + expect(overview.profiles.labels[0]).toContain("token:ref(env:GITHUB_TOKEN)"); + }); +}); diff --git a/src/commands/models/list.auth-overview.ts b/src/commands/models/list.auth-overview.ts index 49159e93af6..0fc2f9828c5 100644 --- a/src/commands/models/list.auth-overview.ts +++ b/src/commands/models/list.auth-overview.ts @@ -12,6 +12,22 @@ import { shortenHomePath } from "../../utils.js"; import { maskApiKey } from "./list.format.js"; import type { ProviderAuthOverview } from "./list.types.js"; +function formatProfileSecretLabel(params: { + value: string | undefined; + ref: { source: string; id: string } | undefined; + kind: "api-key" | "token"; +}): string { + const value = typeof params.value === "string" ? params.value.trim() : ""; + if (value) { + return params.kind === "token" ? `token:${maskApiKey(value)}` : maskApiKey(value); + } + if (params.ref) { + const refLabel = `ref(${params.ref.source}:${params.ref.id})`; + return params.kind === "token" ? `token:${refLabel}` : refLabel; + } + return params.kind === "token" ? "token:missing" : "missing"; +} + export function resolveProviderAuthOverview(params: { provider: string; cfg: OpenClawConfig; @@ -40,10 +56,24 @@ export function resolveProviderAuthOverview(params: { return `${profileId}=missing`; } if (profile.type === "api_key") { - return withUnusableSuffix(`${profileId}=${maskApiKey(profile.key ?? 
"")}`, profileId); + return withUnusableSuffix( + `${profileId}=${formatProfileSecretLabel({ + value: profile.key, + ref: profile.keyRef, + kind: "api-key", + })}`, + profileId, + ); } if (profile.type === "token") { - return withUnusableSuffix(`${profileId}=token:${maskApiKey(profile.token)}`, profileId); + return withUnusableSuffix( + `${profileId}=${formatProfileSecretLabel({ + value: profile.token, + ref: profile.tokenRef, + kind: "token", + })}`, + profileId, + ); } const display = resolveAuthProfileDisplayLabel({ cfg, store, profileId }); const suffix = diff --git a/src/commands/models/list.list-command.ts b/src/commands/models/list.list-command.ts index dc195985706..11ebae8f16d 100644 --- a/src/commands/models/list.list-command.ts +++ b/src/commands/models/list.list-command.ts @@ -1,7 +1,7 @@ import type { Api, Model } from "@mariozechner/pi-ai"; +import type { ModelRegistry } from "@mariozechner/pi-coding-agent"; import { resolveForwardCompatModel } from "../../agents/model-forward-compat.js"; import { parseModelRef } from "../../agents/model-selection.js"; -import type { ModelRegistry } from "../../agents/pi-model-discovery.js"; import type { RuntimeEnv } from "../../runtime.js"; import { resolveConfiguredEntries } from "./list.configured.js"; import { formatErrorWithStack } from "./list.errors.js"; diff --git a/src/commands/models/list.probe.test.ts b/src/commands/models/list.probe.test.ts new file mode 100644 index 00000000000..55c5ef064f3 --- /dev/null +++ b/src/commands/models/list.probe.test.ts @@ -0,0 +1,22 @@ +import { describe, expect, it } from "vitest"; +import { mapFailoverReasonToProbeStatus } from "./list.probe.js"; + +describe("mapFailoverReasonToProbeStatus", () => { + it("maps auth_permanent to auth", () => { + expect(mapFailoverReasonToProbeStatus("auth_permanent")).toBe("auth"); + }); + + it("keeps existing failover reason mappings", () => { + expect(mapFailoverReasonToProbeStatus("auth")).toBe("auth"); + 
expect(mapFailoverReasonToProbeStatus("rate_limit")).toBe("rate_limit"); + expect(mapFailoverReasonToProbeStatus("billing")).toBe("billing"); + expect(mapFailoverReasonToProbeStatus("timeout")).toBe("timeout"); + expect(mapFailoverReasonToProbeStatus("format")).toBe("format"); + }); + + it("falls back to unknown for unrecognized values", () => { + expect(mapFailoverReasonToProbeStatus(undefined)).toBe("unknown"); + expect(mapFailoverReasonToProbeStatus(null)).toBe("unknown"); + expect(mapFailoverReasonToProbeStatus("model_not_found")).toBe("unknown"); + }); +}); diff --git a/src/commands/models/list.probe.ts b/src/commands/models/list.probe.ts index 60b38316117..ef48564df88 100644 --- a/src/commands/models/list.probe.ts +++ b/src/commands/models/list.probe.ts @@ -82,11 +82,13 @@ export type AuthProbeOptions = { maxTokens: number; }; -const toStatus = (reason?: string | null): AuthProbeStatus => { +export function mapFailoverReasonToProbeStatus(reason?: string | null): AuthProbeStatus { if (!reason) { return "unknown"; } - if (reason === "auth") { + if (reason === "auth" || reason === "auth_permanent") { + // Keep probe output backward-compatible: permanent auth failures still + // surface in the auth bucket instead of showing as unknown. 
return "auth"; } if (reason === "rate_limit") { @@ -102,7 +104,7 @@ const toStatus = (reason?: string | null): AuthProbeStatus => { return "format"; } return "unknown"; -}; +} function buildCandidateMap(modelCandidates: string[]): Map { const map = new Map(); @@ -346,7 +348,7 @@ async function probeTarget(params: { label: target.label, source: target.source, mode: target.mode, - status: toStatus(described.reason), + status: mapFailoverReasonToProbeStatus(described.reason), error: redactSecrets(described.message), latencyMs: Date.now() - start, }; diff --git a/src/commands/models/list.registry.ts b/src/commands/models/list.registry.ts index 23cef29485c..012b4eafb07 100644 --- a/src/commands/models/list.registry.ts +++ b/src/commands/models/list.registry.ts @@ -1,4 +1,5 @@ import type { Api, Model } from "@mariozechner/pi-ai"; +import type { ModelRegistry } from "@mariozechner/pi-coding-agent"; import { resolveOpenClawAgentDir } from "../../agents/agent-paths.js"; import type { AuthProfileStore } from "../../agents/auth-profiles.js"; import { listProfilesForProvider } from "../../agents/auth-profiles.js"; @@ -8,8 +9,6 @@ import { resolveEnvApiKey, } from "../../agents/model-auth.js"; import { ensureOpenClawModelsJson } from "../../agents/models-config.js"; -import { ensurePiAuthJsonFromAuthProfiles } from "../../agents/pi-auth-json.js"; -import type { ModelRegistry } from "../../agents/pi-model-discovery.js"; import { discoverAuthStorage, discoverModels } from "../../agents/pi-model-discovery.js"; import type { OpenClawConfig } from "../../config/config.js"; import { @@ -98,7 +97,6 @@ function loadAvailableModels(registry: ModelRegistry): Model[] { export async function loadModelRegistry(cfg: OpenClawConfig) { await ensureOpenClawModelsJson(cfg); const agentDir = resolveOpenClawAgentDir(); - await ensurePiAuthJsonFromAuthProfiles(agentDir); const authStorage = discoverAuthStorage(agentDir); const registry = discoverModels(authStorage, agentDir); const models = 
registry.getAll(); diff --git a/src/commands/models/list.status.test.ts b/src/commands/models/list.status.test.ts index b99cacc1cd4..a2563b09f08 100644 --- a/src/commands/models/list.status.test.ts +++ b/src/commands/models/list.status.test.ts @@ -229,6 +229,35 @@ describe("modelsStatusCommand auth overview", () => { ).toBe(true); }); + it("does not emit raw short api-key values in JSON labels", async () => { + const localRuntime = createRuntime(); + const shortSecret = "abc123"; + const originalProfiles = { ...mocks.store.profiles }; + mocks.store.profiles = { + ...mocks.store.profiles, + "openai:default": { + type: "api_key", + provider: "openai", + key: shortSecret, + }, + }; + + try { + await modelsStatusCommand({ json: true }, localRuntime as never); + const payload = JSON.parse(String((localRuntime.log as Mock).mock.calls[0]?.[0])); + const providers = payload.auth.providers as Array<{ + provider: string; + profiles: { labels: string[] }; + }>; + const openai = providers.find((p) => p.provider === "openai"); + const labels = openai?.profiles.labels ?? []; + expect(labels.join(" ")).toContain("..."); + expect(labels.join(" ")).not.toContain(shortSecret); + } finally { + mocks.store.profiles = originalProfiles; + } + }); + it("uses agent overrides and reports sources", async () => { const localRuntime = createRuntime(); await withAgentScopeOverrides( diff --git a/src/commands/onboard-auth.config-minimax.ts b/src/commands/onboard-auth.config-minimax.ts index 6314a641dbb..90a3c58883a 100644 --- a/src/commands/onboard-auth.config-minimax.ts +++ b/src/commands/onboard-auth.config-minimax.ts @@ -181,6 +181,7 @@ function applyMinimaxApiProviderConfigWithBaseUrl( ...existingProviderRest, baseUrl: params.baseUrl, api: "anthropic-messages", + authHeader: true, ...(normalizedApiKey?.trim() ? { apiKey: normalizedApiKey } : {}), models: mergedModels.length > 0 ? 
mergedModels : [apiModel], }; diff --git a/src/commands/onboard-auth.credentials.test.ts b/src/commands/onboard-auth.credentials.test.ts new file mode 100644 index 00000000000..48ccc9954f6 --- /dev/null +++ b/src/commands/onboard-auth.credentials.test.ts @@ -0,0 +1,168 @@ +import { afterEach, describe, expect, it } from "vitest"; +import { + setByteplusApiKey, + setCloudflareAiGatewayConfig, + setMoonshotApiKey, + setOpenaiApiKey, + setVolcengineApiKey, +} from "./onboard-auth.js"; +import { + createAuthTestLifecycle, + readAuthProfilesForAgent, + setupAuthTestEnv, +} from "./test-wizard-helpers.js"; + +describe("onboard auth credentials secret refs", () => { + const lifecycle = createAuthTestLifecycle([ + "OPENCLAW_STATE_DIR", + "OPENCLAW_AGENT_DIR", + "PI_CODING_AGENT_DIR", + "MOONSHOT_API_KEY", + "OPENAI_API_KEY", + "CLOUDFLARE_AI_GATEWAY_API_KEY", + "VOLCANO_ENGINE_API_KEY", + "BYTEPLUS_API_KEY", + ]); + + afterEach(async () => { + await lifecycle.cleanup(); + }); + + it("keeps env-backed moonshot key as plaintext by default", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-"); + lifecycle.setStateDir(env.stateDir); + process.env.MOONSHOT_API_KEY = "sk-moonshot-env"; + + await setMoonshotApiKey("sk-moonshot-env"); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + expect(parsed.profiles?.["moonshot:default"]).toMatchObject({ + key: "sk-moonshot-env", + }); + expect(parsed.profiles?.["moonshot:default"]?.keyRef).toBeUndefined(); + }); + + it("stores env-backed moonshot key as keyRef when secret-input-mode=ref", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-ref-"); + lifecycle.setStateDir(env.stateDir); + process.env.MOONSHOT_API_KEY = "sk-moonshot-env"; + + await setMoonshotApiKey("sk-moonshot-env", env.agentDir, { secretInputMode: "ref" }); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + 
expect(parsed.profiles?.["moonshot:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" }, + }); + expect(parsed.profiles?.["moonshot:default"]?.key).toBeUndefined(); + }); + + it("stores ${ENV} moonshot input as keyRef even when env value is unset", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-inline-ref-"); + lifecycle.setStateDir(env.stateDir); + + await setMoonshotApiKey("${MOONSHOT_API_KEY}"); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + expect(parsed.profiles?.["moonshot:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" }, + }); + expect(parsed.profiles?.["moonshot:default"]?.key).toBeUndefined(); + }); + + it("keeps plaintext moonshot key when no env ref applies", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-plaintext-"); + lifecycle.setStateDir(env.stateDir); + process.env.MOONSHOT_API_KEY = "sk-moonshot-other"; + + await setMoonshotApiKey("sk-moonshot-plaintext"); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + expect(parsed.profiles?.["moonshot:default"]).toMatchObject({ + key: "sk-moonshot-plaintext", + }); + expect(parsed.profiles?.["moonshot:default"]?.keyRef).toBeUndefined(); + }); + + it("preserves cloudflare metadata when storing keyRef", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-cloudflare-"); + lifecycle.setStateDir(env.stateDir); + process.env.CLOUDFLARE_AI_GATEWAY_API_KEY = "cf-secret"; + + await setCloudflareAiGatewayConfig("account-1", "gateway-1", "cf-secret", env.agentDir, { + secretInputMode: "ref", + }); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + expect(parsed.profiles?.["cloudflare-ai-gateway:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: 
"CLOUDFLARE_AI_GATEWAY_API_KEY" }, + metadata: { accountId: "account-1", gatewayId: "gateway-1" }, + }); + expect(parsed.profiles?.["cloudflare-ai-gateway:default"]?.key).toBeUndefined(); + }); + + it("keeps env-backed openai key as plaintext by default", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-openai-"); + lifecycle.setStateDir(env.stateDir); + process.env.OPENAI_API_KEY = "sk-openai-env"; + + await setOpenaiApiKey("sk-openai-env"); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + expect(parsed.profiles?.["openai:default"]).toMatchObject({ + key: "sk-openai-env", + }); + expect(parsed.profiles?.["openai:default"]?.keyRef).toBeUndefined(); + }); + + it("stores env-backed openai key as keyRef in ref mode", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-openai-ref-"); + lifecycle.setStateDir(env.stateDir); + process.env.OPENAI_API_KEY = "sk-openai-env"; + + await setOpenaiApiKey("sk-openai-env", env.agentDir, { secretInputMode: "ref" }); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + expect(parsed.profiles?.["openai:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }); + expect(parsed.profiles?.["openai:default"]?.key).toBeUndefined(); + }); + + it("stores env-backed volcengine and byteplus keys as keyRef in ref mode", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-volc-byte-"); + lifecycle.setStateDir(env.stateDir); + process.env.VOLCANO_ENGINE_API_KEY = "volcengine-secret"; + process.env.BYTEPLUS_API_KEY = "byteplus-secret"; + + await setVolcengineApiKey("volcengine-secret", env.agentDir, { secretInputMode: "ref" }); + await setByteplusApiKey("byteplus-secret", env.agentDir, { secretInputMode: "ref" }); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + + 
expect(parsed.profiles?.["volcengine:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "VOLCANO_ENGINE_API_KEY" }, + }); + expect(parsed.profiles?.["volcengine:default"]?.key).toBeUndefined(); + + expect(parsed.profiles?.["byteplus:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "BYTEPLUS_API_KEY" }, + }); + expect(parsed.profiles?.["byteplus:default"]?.key).toBeUndefined(); + }); +}); diff --git a/src/commands/onboard-auth.credentials.ts b/src/commands/onboard-auth.credentials.ts index 5d003d48bd1..2cf9c25b689 100644 --- a/src/commands/onboard-auth.credentials.ts +++ b/src/commands/onboard-auth.credentials.ts @@ -4,13 +4,100 @@ import type { OAuthCredentials } from "@mariozechner/pi-ai"; import { resolveOpenClawAgentDir } from "../agents/agent-paths.js"; import { upsertAuthProfile } from "../agents/auth-profiles.js"; import { resolveStateDir } from "../config/paths.js"; +import { + coerceSecretRef, + DEFAULT_SECRET_PROVIDER_ALIAS, + type SecretInput, + type SecretRef, +} from "../config/types.secrets.js"; import { KILOCODE_DEFAULT_MODEL_REF } from "../providers/kilocode-shared.js"; +import { PROVIDER_ENV_VARS } from "../secrets/provider-env-vars.js"; +import { normalizeSecretInput } from "../utils/normalize-secret-input.js"; +import type { SecretInputMode } from "./onboard-types.js"; export { CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF } from "../agents/cloudflare-ai-gateway.js"; export { MISTRAL_DEFAULT_MODEL_REF, XAI_DEFAULT_MODEL_REF } from "./onboard-auth.models.js"; export { KILOCODE_DEFAULT_MODEL_REF }; const resolveAuthAgentDir = (agentDir?: string) => agentDir ?? 
resolveOpenClawAgentDir(); +const ENV_REF_PATTERN = /^\$\{([A-Z][A-Z0-9_]*)\}$/; + +export type ApiKeyStorageOptions = { + secretInputMode?: SecretInputMode; +}; + +function buildEnvSecretRef(id: string): SecretRef { + return { source: "env", provider: DEFAULT_SECRET_PROVIDER_ALIAS, id }; +} + +function parseEnvSecretRef(value: string): SecretRef | null { + const match = ENV_REF_PATTERN.exec(value); + if (!match) { + return null; + } + return buildEnvSecretRef(match[1]); +} + +function resolveProviderDefaultEnvSecretRef(provider: string): SecretRef { + const envVars = PROVIDER_ENV_VARS[provider]; + const envVar = envVars?.find((candidate) => candidate.trim().length > 0); + if (!envVar) { + throw new Error( + `Provider "${provider}" does not have a default env var mapping for secret-input-mode=ref.`, + ); + } + return buildEnvSecretRef(envVar); +} + +function resolveApiKeySecretInput( + provider: string, + input: SecretInput, + options?: ApiKeyStorageOptions, +): SecretInput { + const coercedRef = coerceSecretRef(input); + if (coercedRef) { + return coercedRef; + } + const normalized = normalizeSecretInput(input); + const inlineEnvRef = parseEnvSecretRef(normalized); + if (inlineEnvRef) { + return inlineEnvRef; + } + if (options?.secretInputMode === "ref") { + return resolveProviderDefaultEnvSecretRef(provider); + } + return normalized; +} + +function buildApiKeyCredential( + provider: string, + input: SecretInput, + metadata?: Record, + options?: ApiKeyStorageOptions, +): { + type: "api_key"; + provider: string; + key?: string; + keyRef?: SecretRef; + metadata?: Record; +} { + const secretInput = resolveApiKeySecretInput(provider, input, options); + if (typeof secretInput === "string") { + return { + type: "api_key", + provider, + key: secretInput, + ...(metadata ? { metadata } : {}), + }; + } + return { + type: "api_key", + provider, + keyRef: secretInput, + ...(metadata ? 
{ metadata } : {}), + }; +} + export type WriteOAuthCredentialsOptions = { syncSiblingAgents?: boolean; }; @@ -112,98 +199,131 @@ export async function writeOAuthCredentials( return profileId; } -export async function setAnthropicApiKey(key: string, agentDir?: string) { +export async function setAnthropicApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { // Write to resolved agent dir so gateway finds credentials on startup. upsertAuthProfile({ profileId: "anthropic:default", - credential: { - type: "api_key", - provider: "anthropic", - key, - }, + credential: buildApiKeyCredential("anthropic", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setGeminiApiKey(key: string, agentDir?: string) { +export async function setOpenaiApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { + upsertAuthProfile({ + profileId: "openai:default", + credential: buildApiKeyCredential("openai", key, undefined, options), + agentDir: resolveAuthAgentDir(agentDir), + }); +} + +export async function setGeminiApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { // Write to resolved agent dir so gateway finds credentials on startup. upsertAuthProfile({ profileId: "google:default", - credential: { - type: "api_key", - provider: "google", - key, - }, + credential: buildApiKeyCredential("google", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } export async function setMinimaxApiKey( - key: string, + key: SecretInput, agentDir?: string, profileId: string = "minimax:default", + options?: ApiKeyStorageOptions, ) { const provider = profileId.split(":")[0] ?? "minimax"; // Write to resolved agent dir so gateway finds credentials on startup. 
upsertAuthProfile({ profileId, - credential: { - type: "api_key", - provider, - key, - }, + credential: buildApiKeyCredential(provider, key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setMoonshotApiKey(key: string, agentDir?: string) { +export async function setMoonshotApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { // Write to resolved agent dir so gateway finds credentials on startup. upsertAuthProfile({ profileId: "moonshot:default", - credential: { - type: "api_key", - provider: "moonshot", - key, - }, + credential: buildApiKeyCredential("moonshot", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setKimiCodingApiKey(key: string, agentDir?: string) { +export async function setKimiCodingApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { // Write to resolved agent dir so gateway finds credentials on startup. upsertAuthProfile({ profileId: "kimi-coding:default", - credential: { - type: "api_key", - provider: "kimi-coding", - key, - }, + credential: buildApiKeyCredential("kimi-coding", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setSyntheticApiKey(key: string, agentDir?: string) { +export async function setVolcengineApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { + upsertAuthProfile({ + profileId: "volcengine:default", + credential: buildApiKeyCredential("volcengine", key, undefined, options), + agentDir: resolveAuthAgentDir(agentDir), + }); +} + +export async function setByteplusApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { + upsertAuthProfile({ + profileId: "byteplus:default", + credential: buildApiKeyCredential("byteplus", key, undefined, options), + agentDir: resolveAuthAgentDir(agentDir), + }); +} + +export async function setSyntheticApiKey( + key: 
SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { // Write to resolved agent dir so gateway finds credentials on startup. upsertAuthProfile({ profileId: "synthetic:default", - credential: { - type: "api_key", - provider: "synthetic", - key, - }, + credential: buildApiKeyCredential("synthetic", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setVeniceApiKey(key: string, agentDir?: string) { +export async function setVeniceApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { // Write to resolved agent dir so gateway finds credentials on startup. upsertAuthProfile({ profileId: "venice:default", - credential: { - type: "api_key", - provider: "venice", - key, - }, + credential: buildApiKeyCredential("venice", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } @@ -216,41 +336,41 @@ export const TOGETHER_DEFAULT_MODEL_REF = "together/moonshotai/Kimi-K2.5"; export const LITELLM_DEFAULT_MODEL_REF = "litellm/claude-opus-4-6"; export const VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF = "vercel-ai-gateway/anthropic/claude-opus-4.6"; -export async function setZaiApiKey(key: string, agentDir?: string) { +export async function setZaiApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { // Write to resolved agent dir so gateway finds credentials on startup. 
upsertAuthProfile({ profileId: "zai:default", - credential: { - type: "api_key", - provider: "zai", - key, - }, + credential: buildApiKeyCredential("zai", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setXiaomiApiKey(key: string, agentDir?: string) { +export async function setXiaomiApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "xiaomi:default", - credential: { - type: "api_key", - provider: "xiaomi", - key, - }, + credential: buildApiKeyCredential("xiaomi", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setOpenrouterApiKey(key: string, agentDir?: string) { +export async function setOpenrouterApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { // Never persist the literal "undefined" (e.g. when prompt returns undefined and caller used String(key)). - const safeKey = key === "undefined" ? "" : key; + const safeKey = typeof key === "string" && key === "undefined" ? 
"" : key; upsertAuthProfile({ profileId: "openrouter:default", - credential: { - type: "api_key", - provider: "openrouter", - key: safeKey, - }, + credential: buildApiKeyCredential("openrouter", safeKey, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } @@ -258,131 +378,127 @@ export async function setOpenrouterApiKey(key: string, agentDir?: string) { export async function setCloudflareAiGatewayConfig( accountId: string, gatewayId: string, - apiKey: string, + apiKey: SecretInput, agentDir?: string, + options?: ApiKeyStorageOptions, ) { const normalizedAccountId = accountId.trim(); const normalizedGatewayId = gatewayId.trim(); - const normalizedKey = apiKey.trim(); upsertAuthProfile({ profileId: "cloudflare-ai-gateway:default", - credential: { - type: "api_key", - provider: "cloudflare-ai-gateway", - key: normalizedKey, - metadata: { + credential: buildApiKeyCredential( + "cloudflare-ai-gateway", + apiKey, + { accountId: normalizedAccountId, gatewayId: normalizedGatewayId, }, - }, + options, + ), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setLitellmApiKey(key: string, agentDir?: string) { +export async function setLitellmApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "litellm:default", - credential: { - type: "api_key", - provider: "litellm", - key, - }, + credential: buildApiKeyCredential("litellm", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setVercelAiGatewayApiKey(key: string, agentDir?: string) { +export async function setVercelAiGatewayApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "vercel-ai-gateway:default", - credential: { - type: "api_key", - provider: "vercel-ai-gateway", - key, - }, + credential: buildApiKeyCredential("vercel-ai-gateway", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export 
async function setOpencodeZenApiKey(key: string, agentDir?: string) { +export async function setOpencodeZenApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "opencode:default", - credential: { - type: "api_key", - provider: "opencode", - key, - }, + credential: buildApiKeyCredential("opencode", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setTogetherApiKey(key: string, agentDir?: string) { +export async function setTogetherApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "together:default", - credential: { - type: "api_key", - provider: "together", - key, - }, + credential: buildApiKeyCredential("together", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setHuggingfaceApiKey(key: string, agentDir?: string) { +export async function setHuggingfaceApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "huggingface:default", - credential: { - type: "api_key", - provider: "huggingface", - key, - }, + credential: buildApiKeyCredential("huggingface", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export function setQianfanApiKey(key: string, agentDir?: string) { +export function setQianfanApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "qianfan:default", - credential: { - type: "api_key", - provider: "qianfan", - key, - }, + credential: buildApiKeyCredential("qianfan", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export function setXaiApiKey(key: string, agentDir?: string) { +export function setXaiApiKey(key: SecretInput, agentDir?: string, options?: ApiKeyStorageOptions) { upsertAuthProfile({ profileId: "xai:default", - credential: { - type: 
"api_key", - provider: "xai", - key, - }, + credential: buildApiKeyCredential("xai", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setMistralApiKey(key: string, agentDir?: string) { +export async function setMistralApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "mistral:default", - credential: { - type: "api_key", - provider: "mistral", - key, - }, + credential: buildApiKeyCredential("mistral", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } -export async function setKilocodeApiKey(key: string, agentDir?: string) { +export async function setKilocodeApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { upsertAuthProfile({ profileId: "kilocode:default", - credential: { - type: "api_key", - provider: "kilocode", - key, - }, + credential: buildApiKeyCredential("kilocode", key, undefined, options), agentDir: resolveAuthAgentDir(agentDir), }); } diff --git a/src/commands/onboard-auth.test.ts b/src/commands/onboard-auth.test.ts index e8671fa1a0d..65c886b2926 100644 --- a/src/commands/onboard-auth.test.ts +++ b/src/commands/onboard-auth.test.ts @@ -8,6 +8,7 @@ import { resolveAgentModelFallbackValues, resolveAgentModelPrimaryValue, } from "../config/model-input.js"; +import type { ModelApi } from "../config/types.models.js"; import { applyAuthProfileConfig, applyLitellmProviderConfig, @@ -45,7 +46,7 @@ import { function createLegacyProviderConfig(params: { providerId: string; - api: "anthropic-messages" | "openai-completions" | "openai-responses"; + api: ModelApi; modelId?: string; modelName?: string; baseUrl?: string; @@ -365,6 +366,7 @@ describe("applyMinimaxApiConfig", () => { expect(cfg.models?.providers?.minimax).toMatchObject({ baseUrl: "https://api.minimax.io/anthropic", api: "anthropic-messages", + authHeader: true, }); }); @@ -404,6 +406,7 @@ describe("applyMinimaxApiConfig", () => { ); 
expect(cfg.models?.providers?.minimax?.baseUrl).toBe("https://api.minimax.io/anthropic"); expect(cfg.models?.providers?.minimax?.api).toBe("anthropic-messages"); + expect(cfg.models?.providers?.minimax?.authHeader).toBe(true); expect(cfg.models?.providers?.minimax?.apiKey).toBe("old-key"); expect(cfg.models?.providers?.minimax?.models.map((m) => m.id)).toEqual([ "old-model", diff --git a/src/commands/onboard-auth.ts b/src/commands/onboard-auth.ts index de506df0bb5..13d2cf75bf0 100644 --- a/src/commands/onboard-auth.ts +++ b/src/commands/onboard-auth.ts @@ -61,8 +61,10 @@ export { KILOCODE_DEFAULT_MODEL_REF, LITELLM_DEFAULT_MODEL_REF, OPENROUTER_DEFAULT_MODEL_REF, + setOpenaiApiKey, setAnthropicApiKey, setCloudflareAiGatewayConfig, + setByteplusApiKey, setQianfanApiKey, setGeminiApiKey, setKilocodeApiKey, @@ -79,6 +81,7 @@ export { setVeniceApiKey, setVercelAiGatewayApiKey, setXiaomiApiKey, + setVolcengineApiKey, setZaiApiKey, setXaiApiKey, writeOAuthCredentials, diff --git a/src/commands/onboard-channels.test.ts b/src/commands/onboard-channels.test.ts index d6c0669e4fd..cd146b82c09 100644 --- a/src/commands/onboard-channels.test.ts +++ b/src/commands/onboard-channels.test.ts @@ -3,7 +3,10 @@ import type { OpenClawConfig } from "../config/config.js"; import { createEmptyPluginRegistry } from "../plugins/registry.js"; import { setActivePluginRegistry } from "../plugins/runtime.js"; import type { WizardPrompter } from "../wizard/prompts.js"; -import { setDefaultChannelPluginRegistryForTests } from "./channel-test-helpers.js"; +import { + patchChannelOnboardingAdapter, + setDefaultChannelPluginRegistryForTests, +} from "./channel-test-helpers.js"; import { setupChannels } from "./onboard-channels.js"; import { createExitThrowingRuntime, createWizardPrompter } from "./test-wizard-helpers.js"; @@ -249,4 +252,307 @@ describe("setupChannels", () => { expect(select).toHaveBeenCalledWith(expect.objectContaining({ message: "Select a channel" })); 
expect(multiselect).not.toHaveBeenCalled(); }); + + it("uses configureInteractive skip without mutating selection/account state", async () => { + const select = vi.fn(async ({ message }: { message: string }) => { + if (message === "Select channel (QuickStart)") { + return "telegram"; + } + return "__done__"; + }); + const selection = vi.fn(); + const onAccountId = vi.fn(); + const configureInteractive = vi.fn(async () => "skip" as const); + const restore = patchChannelOnboardingAdapter("telegram", { + getStatus: vi.fn(async ({ cfg }) => ({ + channel: "telegram", + configured: Boolean(cfg.channels?.telegram?.botToken), + statusLines: [], + })), + configureInteractive, + }); + const { multiselect, text } = createUnexpectedPromptGuards(); + + const prompter = createPrompter({ + select: select as unknown as WizardPrompter["select"], + multiselect, + text, + }); + + const runtime = createExitThrowingRuntime(); + try { + const cfg = await setupChannels({} as OpenClawConfig, runtime, prompter, { + skipConfirm: true, + quickstartDefaults: true, + onSelection: selection, + onAccountId, + }); + + expect(configureInteractive).toHaveBeenCalledWith( + expect.objectContaining({ configured: false, label: expect.any(String) }), + ); + expect(selection).toHaveBeenCalledWith([]); + expect(onAccountId).not.toHaveBeenCalled(); + expect(cfg.channels?.telegram?.botToken).toBeUndefined(); + } finally { + restore(); + } + }); + + it("applies configureInteractive result cfg/account updates", async () => { + const select = vi.fn(async ({ message }: { message: string }) => { + if (message === "Select channel (QuickStart)") { + return "telegram"; + } + return "__done__"; + }); + const selection = vi.fn(); + const onAccountId = vi.fn(); + const configureInteractive = vi.fn(async ({ cfg }: { cfg: OpenClawConfig }) => ({ + cfg: { + ...cfg, + channels: { + ...cfg.channels, + telegram: { ...cfg.channels?.telegram, botToken: "new-token" }, + }, + } as OpenClawConfig, + accountId: "acct-1", + })); + 
const configure = vi.fn(async () => { + throw new Error("configure should not be called when configureInteractive is present"); + }); + const restore = patchChannelOnboardingAdapter("telegram", { + getStatus: vi.fn(async ({ cfg }) => ({ + channel: "telegram", + configured: Boolean(cfg.channels?.telegram?.botToken), + statusLines: [], + })), + configureInteractive, + configure, + }); + const { multiselect, text } = createUnexpectedPromptGuards(); + + const prompter = createPrompter({ + select: select as unknown as WizardPrompter["select"], + multiselect, + text, + }); + + const runtime = createExitThrowingRuntime(); + try { + const cfg = await setupChannels({} as OpenClawConfig, runtime, prompter, { + skipConfirm: true, + quickstartDefaults: true, + onSelection: selection, + onAccountId, + }); + + expect(configureInteractive).toHaveBeenCalledTimes(1); + expect(configure).not.toHaveBeenCalled(); + expect(selection).toHaveBeenCalledWith(["telegram"]); + expect(onAccountId).toHaveBeenCalledWith("telegram", "acct-1"); + expect(cfg.channels?.telegram?.botToken).toBe("new-token"); + } finally { + restore(); + } + }); + + it("uses configureWhenConfigured when channel is already configured", async () => { + const select = vi.fn(async ({ message }: { message: string }) => { + if (message === "Select channel (QuickStart)") { + return "telegram"; + } + return "__done__"; + }); + const selection = vi.fn(); + const onAccountId = vi.fn(); + const configureWhenConfigured = vi.fn(async ({ cfg }: { cfg: OpenClawConfig }) => ({ + cfg: { + ...cfg, + channels: { + ...cfg.channels, + telegram: { ...cfg.channels?.telegram, botToken: "updated-token" }, + }, + } as OpenClawConfig, + accountId: "acct-2", + })); + const configure = vi.fn(async () => { + throw new Error( + "configure should not be called when configureWhenConfigured handles updates", + ); + }); + const restore = patchChannelOnboardingAdapter("telegram", { + getStatus: vi.fn(async ({ cfg }) => ({ + channel: "telegram", + 
configured: Boolean(cfg.channels?.telegram?.botToken), + statusLines: [], + })), + configureInteractive: undefined, + configureWhenConfigured, + configure, + }); + const { multiselect, text } = createUnexpectedPromptGuards(); + + const prompter = createPrompter({ + select: select as unknown as WizardPrompter["select"], + multiselect, + text, + }); + + const runtime = createExitThrowingRuntime(); + try { + const cfg = await setupChannels( + { + channels: { + telegram: { + botToken: "old-token", + }, + }, + } as OpenClawConfig, + runtime, + prompter, + { + skipConfirm: true, + quickstartDefaults: true, + onSelection: selection, + onAccountId, + }, + ); + + expect(configureWhenConfigured).toHaveBeenCalledTimes(1); + expect(configureWhenConfigured).toHaveBeenCalledWith( + expect.objectContaining({ configured: true, label: expect.any(String) }), + ); + expect(configure).not.toHaveBeenCalled(); + expect(selection).toHaveBeenCalledWith(["telegram"]); + expect(onAccountId).toHaveBeenCalledWith("telegram", "acct-2"); + expect(cfg.channels?.telegram?.botToken).toBe("updated-token"); + } finally { + restore(); + } + }); + + it("respects configureWhenConfigured skip without mutating selection or account state", async () => { + const select = vi.fn(async ({ message }: { message: string }) => { + if (message === "Select channel (QuickStart)") { + return "telegram"; + } + throw new Error(`unexpected select prompt: ${message}`); + }); + const selection = vi.fn(); + const onAccountId = vi.fn(); + const configureWhenConfigured = vi.fn(async () => "skip" as const); + const configure = vi.fn(async () => { + throw new Error("configure should not run when configureWhenConfigured handles skip"); + }); + const restore = patchChannelOnboardingAdapter("telegram", { + getStatus: vi.fn(async ({ cfg }) => ({ + channel: "telegram", + configured: Boolean(cfg.channels?.telegram?.botToken), + statusLines: [], + })), + configureInteractive: undefined, + configureWhenConfigured, + configure, + }); + 
const { multiselect, text } = createUnexpectedPromptGuards(); + + const prompter = createPrompter({ + select: select as unknown as WizardPrompter["select"], + multiselect, + text, + }); + + const runtime = createExitThrowingRuntime(); + try { + const cfg = await setupChannels( + { + channels: { + telegram: { + botToken: "old-token", + }, + }, + } as OpenClawConfig, + runtime, + prompter, + { + skipConfirm: true, + quickstartDefaults: true, + onSelection: selection, + onAccountId, + }, + ); + + expect(configureWhenConfigured).toHaveBeenCalledWith( + expect.objectContaining({ configured: true, label: expect.any(String) }), + ); + expect(configure).not.toHaveBeenCalled(); + expect(selection).toHaveBeenCalledWith([]); + expect(onAccountId).not.toHaveBeenCalled(); + expect(cfg.channels?.telegram?.botToken).toBe("old-token"); + } finally { + restore(); + } + }); + + it("prefers configureInteractive over configureWhenConfigured when both hooks exist", async () => { + const select = vi.fn(async ({ message }: { message: string }) => { + if (message === "Select channel (QuickStart)") { + return "telegram"; + } + throw new Error(`unexpected select prompt: ${message}`); + }); + const selection = vi.fn(); + const onAccountId = vi.fn(); + const configureInteractive = vi.fn(async () => "skip" as const); + const configureWhenConfigured = vi.fn(async () => { + throw new Error("configureWhenConfigured should not run when configureInteractive exists"); + }); + const restore = patchChannelOnboardingAdapter("telegram", { + getStatus: vi.fn(async ({ cfg }) => ({ + channel: "telegram", + configured: Boolean(cfg.channels?.telegram?.botToken), + statusLines: [], + })), + configureInteractive, + configureWhenConfigured, + }); + const { multiselect, text } = createUnexpectedPromptGuards(); + + const prompter = createPrompter({ + select: select as unknown as WizardPrompter["select"], + multiselect, + text, + }); + + const runtime = createExitThrowingRuntime(); + try { + await setupChannels( + 
{ + channels: { + telegram: { + botToken: "old-token", + }, + }, + } as OpenClawConfig, + runtime, + prompter, + { + skipConfirm: true, + quickstartDefaults: true, + onSelection: selection, + onAccountId, + }, + ); + + expect(configureInteractive).toHaveBeenCalledWith( + expect.objectContaining({ configured: true, label: expect.any(String) }), + ); + expect(configureWhenConfigured).not.toHaveBeenCalled(); + expect(selection).toHaveBeenCalledWith([]); + expect(onAccountId).not.toHaveBeenCalled(); + } finally { + restore(); + } + }); }); diff --git a/src/commands/onboard-channels.ts b/src/commands/onboard-channels.ts index 32510c29f39..6e79379e1f1 100644 --- a/src/commands/onboard-channels.ts +++ b/src/commands/onboard-channels.ts @@ -27,7 +27,9 @@ import { listChannelOnboardingAdapters, } from "./onboarding/registry.js"; import type { + ChannelOnboardingConfiguredResult, ChannelOnboardingDmPolicy, + ChannelOnboardingResult, ChannelOnboardingStatus, SetupChannelsOptions, } from "./onboarding/types.js"; @@ -488,6 +490,26 @@ export async function setupChannels( return true; }; + const applyOnboardingResult = async (channel: ChannelChoice, result: ChannelOnboardingResult) => { + next = result.cfg; + if (result.accountId) { + recordAccount(channel, result.accountId); + } + addSelection(channel); + await refreshStatus(channel); + }; + + const applyCustomOnboardingResult = async ( + channel: ChannelChoice, + result: ChannelOnboardingConfiguredResult, + ) => { + if (result === "skip") { + return false; + } + await applyOnboardingResult(channel, result); + return true; + }; + const configureChannel = async (channel: ChannelChoice) => { const adapter = getChannelOnboardingAdapter(channel); if (!adapter) { @@ -503,17 +525,29 @@ export async function setupChannels( shouldPromptAccountIds, forceAllowFrom: forceAllowFromChannels.has(channel), }); - next = result.cfg; - if (result.accountId) { - recordAccount(channel, result.accountId); - } - addSelection(channel); - await 
refreshStatus(channel); + await applyOnboardingResult(channel, result); }; const handleConfiguredChannel = async (channel: ChannelChoice, label: string) => { const plugin = getChannelPlugin(channel); const adapter = getChannelOnboardingAdapter(channel); + if (adapter?.configureWhenConfigured) { + const custom = await adapter.configureWhenConfigured({ + cfg: next, + runtime, + prompter, + options, + accountOverrides, + shouldPromptAccountIds, + forceAllowFrom: forceAllowFromChannels.has(channel), + configured: true, + label, + }); + if (!(await applyCustomOnboardingResult(channel, custom))) { + return; + } + return; + } const supportsDisable = Boolean( options?.allowDisable && (plugin?.config.setAccountEnabled || adapter?.disable), ); @@ -615,9 +649,27 @@ export async function setupChannels( } const plugin = getChannelPlugin(channel); + const adapter = getChannelOnboardingAdapter(channel); const label = plugin?.meta.label ?? catalogEntry?.meta.label ?? channel; const status = statusByChannel.get(channel); const configured = status?.configured ?? 
false; + if (adapter?.configureInteractive) { + const custom = await adapter.configureInteractive({ + cfg: next, + runtime, + prompter, + options, + accountOverrides, + shouldPromptAccountIds, + forceAllowFrom: forceAllowFromChannels.has(channel), + configured, + label, + }); + if (!(await applyCustomOnboardingResult(channel, custom))) { + return; + } + return; + } if (configured) { await handleConfiguredChannel(channel, label); return; diff --git a/src/commands/onboard-custom.test.ts b/src/commands/onboard-custom.test.ts index c79c30daff2..55be1b89dc3 100644 --- a/src/commands/onboard-custom.test.ts +++ b/src/commands/onboard-custom.test.ts @@ -78,48 +78,49 @@ function expectOpenAiCompatResult(params: { describe("promptCustomApiConfig", () => { afterEach(() => { vi.unstubAllGlobals(); + vi.unstubAllEnvs(); vi.useRealTimers(); }); it("handles openai flow and saves alias", async () => { const prompter = createTestPrompter({ text: ["http://localhost:11434/v1", "", "llama3", "custom", "local"], - select: ["openai"], + select: ["plaintext", "openai"], }); stubFetchSequence([{ ok: true }]); const result = await runPromptCustomApi(prompter); - expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 1, result }); + expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 2, result }); expect(result.config.agents?.defaults?.models?.["custom/llama3"]?.alias).toBe("local"); }); it("retries when verification fails", async () => { const prompter = createTestPrompter({ text: ["http://localhost:11434/v1", "", "bad-model", "good-model", "custom", ""], - select: ["openai", "model"], + select: ["plaintext", "openai", "model"], }); stubFetchSequence([{ ok: false, status: 400 }, { ok: true }]); await runPromptCustomApi(prompter); expect(prompter.text).toHaveBeenCalledTimes(6); - expect(prompter.select).toHaveBeenCalledTimes(2); + expect(prompter.select).toHaveBeenCalledTimes(3); }); it("detects openai compatibility when unknown", async () => { const prompter = 
createTestPrompter({ text: ["https://example.com/v1", "test-key", "detected-model", "custom", "alias"], - select: ["unknown"], + select: ["plaintext", "unknown"], }); stubFetchSequence([{ ok: true }]); const result = await runPromptCustomApi(prompter); - expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 1, result }); + expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 2, result }); }); it("uses expanded max_tokens for openai verification probes", async () => { const prompter = createTestPrompter({ text: ["https://example.com/v1", "test-key", "detected-model", "custom", "alias"], - select: ["openai"], + select: ["plaintext", "openai"], }); const fetchMock = stubFetchSequence([{ ok: true }]); @@ -133,7 +134,7 @@ describe("promptCustomApiConfig", () => { it("uses expanded max_tokens for anthropic verification probes", async () => { const prompter = createTestPrompter({ text: ["https://example.com", "test-key", "detected-model", "custom", "alias"], - select: ["unknown"], + select: ["plaintext", "unknown"], }); const fetchMock = stubFetchSequence([{ ok: false, status: 404 }, { ok: true }]); @@ -156,7 +157,7 @@ describe("promptCustomApiConfig", () => { "custom", "", ], - select: ["unknown", "baseUrl"], + select: ["plaintext", "unknown", "baseUrl", "plaintext"], }); stubFetchSequence([{ ok: false, status: 404 }, { ok: false, status: 404 }, { ok: true }]); await runPromptCustomApi(prompter); @@ -170,7 +171,7 @@ describe("promptCustomApiConfig", () => { it("renames provider id when baseUrl differs", async () => { const prompter = createTestPrompter({ text: ["http://localhost:11434/v1", "", "llama3", "custom", ""], - select: ["openai"], + select: ["plaintext", "openai"], }); stubFetchSequence([{ ok: true }]); const result = await runPromptCustomApi(prompter, { @@ -204,7 +205,7 @@ describe("promptCustomApiConfig", () => { vi.useFakeTimers(); const prompter = createTestPrompter({ text: ["http://localhost:11434/v1", "", "slow-model", "fast-model", 
"custom", ""], - select: ["openai", "model"], + select: ["plaintext", "openai", "model"], }); const fetchMock = vi @@ -224,6 +225,65 @@ describe("promptCustomApiConfig", () => { expect(prompter.text).toHaveBeenCalledTimes(6); }); + + it("stores env SecretRef for custom provider when selected", async () => { + vi.stubEnv("CUSTOM_PROVIDER_API_KEY", "test-env-key"); + const prompter = createTestPrompter({ + text: ["https://example.com/v1", "CUSTOM_PROVIDER_API_KEY", "detected-model", "custom", ""], + select: ["ref", "env", "openai"], + }); + const fetchMock = stubFetchSequence([{ ok: true }]); + + const result = await runPromptCustomApi(prompter); + + expect(result.config.models?.providers?.custom?.apiKey).toEqual({ + source: "env", + provider: "default", + id: "CUSTOM_PROVIDER_API_KEY", + }); + const firstCall = fetchMock.mock.calls[0]?.[1] as + | { headers?: Record } + | undefined; + expect(firstCall?.headers?.Authorization).toBe("Bearer test-env-key"); + }); + + it("re-prompts source after provider ref preflight fails and succeeds with env ref", async () => { + vi.stubEnv("CUSTOM_PROVIDER_API_KEY", "test-env-key"); + const prompter = createTestPrompter({ + text: [ + "https://example.com/v1", + "/providers/custom/apiKey", + "CUSTOM_PROVIDER_API_KEY", + "detected-model", + "custom", + "", + ], + select: ["ref", "provider", "filemain", "env", "openai"], + }); + stubFetchSequence([{ ok: true }]); + + const result = await runPromptCustomApi(prompter, { + secrets: { + providers: { + filemain: { + source: "file", + path: "/tmp/openclaw-missing-provider.json", + mode: "json", + }, + }, + }, + }); + + expect(prompter.note).toHaveBeenCalledWith( + expect.stringContaining("Could not validate provider reference"), + "Reference check failed", + ); + expect(result.config.models?.providers?.custom?.apiKey).toEqual({ + source: "env", + provider: "default", + id: "CUSTOM_PROVIDER_API_KEY", + }); + }); }); describe("applyCustomApiConfig", () => { diff --git 
a/src/commands/onboard-custom.ts b/src/commands/onboard-custom.ts index a00471701b2..11b7fcc75da 100644 --- a/src/commands/onboard-custom.ts +++ b/src/commands/onboard-custom.ts @@ -2,11 +2,18 @@ import { DEFAULT_PROVIDER } from "../agents/defaults.js"; import { buildModelAliasIndex, modelKey } from "../agents/model-selection.js"; import type { OpenClawConfig } from "../config/config.js"; import type { ModelProviderConfig } from "../config/types.models.js"; +import { isSecretRef, type SecretInput } from "../config/types.secrets.js"; import type { RuntimeEnv } from "../runtime.js"; import { fetchWithTimeout } from "../utils/fetch-timeout.js"; +import { + normalizeSecretInput, + normalizeOptionalSecretInput, +} from "../utils/normalize-secret-input.js"; import type { WizardPrompter } from "../wizard/prompts.js"; +import { ensureApiKeyFromEnvOrPrompt } from "./auth-choice.apply-helpers.js"; import { applyPrimaryModel } from "./model-picker.js"; import { normalizeAlias } from "./models/shared.js"; +import type { SecretInputMode } from "./onboard-types.js"; const DEFAULT_OLLAMA_BASE_URL = "http://127.0.0.1:11434/v1"; const DEFAULT_CONTEXT_WINDOW = 4096; @@ -62,7 +69,7 @@ export type ApplyCustomApiConfigParams = { baseUrl: string; modelId: string; compatibility: CustomApiCompatibility; - apiKey?: string; + apiKey?: SecretInput; providerId?: string; alias?: string; }; @@ -245,6 +252,13 @@ type VerificationResult = { error?: unknown; }; +function normalizeOptionalProviderApiKey(value: unknown): SecretInput | undefined { + if (isSecretRef(value)) { + return value; + } + return normalizeOptionalSecretInput(value); +} + function resolveVerificationEndpoint(params: { baseUrl: string; modelId: string; @@ -337,8 +351,10 @@ async function requestAnthropicVerification(params: { async function promptBaseUrlAndKey(params: { prompter: WizardPrompter; + config: OpenClawConfig; + secretInputMode?: SecretInputMode; initialBaseUrl?: string; -}): Promise<{ baseUrl: string; apiKey: string 
}> { +}): Promise<{ baseUrl: string; apiKey?: SecretInput; resolvedApiKey: string }> { const baseUrlInput = await params.prompter.text({ message: "API Base URL", initialValue: params.initialBaseUrl ?? DEFAULT_OLLAMA_BASE_URL, @@ -352,12 +368,27 @@ async function promptBaseUrlAndKey(params: { } }, }); - const apiKeyInput = await params.prompter.text({ - message: "API Key (leave blank if not required)", - placeholder: "sk-...", - initialValue: "", + const baseUrl = baseUrlInput.trim(); + const providerHint = buildEndpointIdFromUrl(baseUrl) || "custom"; + let apiKeyInput: SecretInput | undefined; + const resolvedApiKey = await ensureApiKeyFromEnvOrPrompt({ + config: params.config, + provider: providerHint, + envLabel: "CUSTOM_API_KEY", + promptMessage: "API Key (leave blank if not required)", + normalize: normalizeSecretInput, + validate: () => undefined, + prompter: params.prompter, + secretInputMode: params.secretInputMode, + setCredential: async (apiKey) => { + apiKeyInput = apiKey; + }, }); - return { baseUrl: baseUrlInput.trim(), apiKey: apiKeyInput.trim() }; + return { + baseUrl, + apiKey: normalizeOptionalProviderApiKey(apiKeyInput), + resolvedApiKey: normalizeSecretInput(resolvedApiKey), + }; } type CustomApiRetryChoice = "baseUrl" | "model" | "both"; @@ -385,22 +416,27 @@ async function promptCustomApiModelId(prompter: WizardPrompter): Promise async function applyCustomApiRetryChoice(params: { prompter: WizardPrompter; + config: OpenClawConfig; + secretInputMode?: SecretInputMode; retryChoice: CustomApiRetryChoice; - current: { baseUrl: string; apiKey: string; modelId: string }; -}): Promise<{ baseUrl: string; apiKey: string; modelId: string }> { - let { baseUrl, apiKey, modelId } = params.current; + current: { baseUrl: string; apiKey?: SecretInput; resolvedApiKey: string; modelId: string }; +}): Promise<{ baseUrl: string; apiKey?: SecretInput; resolvedApiKey: string; modelId: string }> { + let { baseUrl, apiKey, resolvedApiKey, modelId } = params.current; if 
(params.retryChoice === "baseUrl" || params.retryChoice === "both") { const retryInput = await promptBaseUrlAndKey({ prompter: params.prompter, + config: params.config, + secretInputMode: params.secretInputMode, initialBaseUrl: baseUrl, }); baseUrl = retryInput.baseUrl; apiKey = retryInput.apiKey; + resolvedApiKey = retryInput.resolvedApiKey; } if (params.retryChoice === "model" || params.retryChoice === "both") { modelId = await promptCustomApiModelId(params.prompter); } - return { baseUrl, apiKey, modelId }; + return { baseUrl, apiKey, resolvedApiKey, modelId }; } function resolveProviderApi( @@ -541,7 +577,8 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom const mergedModels = hasModel ? existingModels : [...existingModels, nextModel]; const { apiKey: existingApiKey, ...existingProviderRest } = existingProvider ?? {}; const normalizedApiKey = - params.apiKey?.trim() || (existingApiKey ? existingApiKey.trim() : undefined); + normalizeOptionalProviderApiKey(params.apiKey) ?? 
+ normalizeOptionalProviderApiKey(existingApiKey); let config: OpenClawConfig = { ...params.config, @@ -595,12 +632,18 @@ export async function promptCustomApiConfig(params: { prompter: WizardPrompter; runtime: RuntimeEnv; config: OpenClawConfig; + secretInputMode?: SecretInputMode; }): Promise { const { prompter, runtime, config } = params; - const baseInput = await promptBaseUrlAndKey({ prompter }); + const baseInput = await promptBaseUrlAndKey({ + prompter, + config, + secretInputMode: params.secretInputMode, + }); let baseUrl = baseInput.baseUrl; let apiKey = baseInput.apiKey; + let resolvedApiKey = baseInput.resolvedApiKey; const compatibilityChoice = await prompter.select({ message: "Endpoint compatibility", @@ -620,13 +663,21 @@ export async function promptCustomApiConfig(params: { let verifiedFromProbe = false; if (!compatibility) { const probeSpinner = prompter.progress("Detecting endpoint type..."); - const openaiProbe = await requestOpenAiVerification({ baseUrl, apiKey, modelId }); + const openaiProbe = await requestOpenAiVerification({ + baseUrl, + apiKey: resolvedApiKey, + modelId, + }); if (openaiProbe.ok) { probeSpinner.stop("Detected OpenAI-compatible endpoint."); compatibility = "openai"; verifiedFromProbe = true; } else { - const anthropicProbe = await requestAnthropicVerification({ baseUrl, apiKey, modelId }); + const anthropicProbe = await requestAnthropicVerification({ + baseUrl, + apiKey: resolvedApiKey, + modelId, + }); if (anthropicProbe.ok) { probeSpinner.stop("Detected Anthropic-compatible endpoint."); compatibility = "anthropic"; @@ -638,10 +689,12 @@ export async function promptCustomApiConfig(params: { "Endpoint detection", ); const retryChoice = await promptCustomApiRetryChoice(prompter); - ({ baseUrl, apiKey, modelId } = await applyCustomApiRetryChoice({ + ({ baseUrl, apiKey, resolvedApiKey, modelId } = await applyCustomApiRetryChoice({ prompter, + config, + secretInputMode: params.secretInputMode, retryChoice, - current: { baseUrl, 
apiKey, modelId }, + current: { baseUrl, apiKey, resolvedApiKey, modelId }, })); continue; } @@ -655,8 +708,8 @@ export async function promptCustomApiConfig(params: { const verifySpinner = prompter.progress("Verifying..."); const result = compatibility === "anthropic" - ? await requestAnthropicVerification({ baseUrl, apiKey, modelId }) - : await requestOpenAiVerification({ baseUrl, apiKey, modelId }); + ? await requestAnthropicVerification({ baseUrl, apiKey: resolvedApiKey, modelId }) + : await requestOpenAiVerification({ baseUrl, apiKey: resolvedApiKey, modelId }); if (result.ok) { verifySpinner.stop("Verification successful."); break; @@ -667,10 +720,12 @@ export async function promptCustomApiConfig(params: { verifySpinner.stop(`Verification failed: ${formatVerificationError(result.error)}`); } const retryChoice = await promptCustomApiRetryChoice(prompter); - ({ baseUrl, apiKey, modelId } = await applyCustomApiRetryChoice({ + ({ baseUrl, apiKey, resolvedApiKey, modelId } = await applyCustomApiRetryChoice({ prompter, + config, + secretInputMode: params.secretInputMode, retryChoice, - current: { baseUrl, apiKey, modelId }, + current: { baseUrl, apiKey, resolvedApiKey, modelId }, })); if (compatibilityChoice === "unknown") { compatibility = null; diff --git a/src/commands/onboard-non-interactive.provider-auth.test.ts b/src/commands/onboard-non-interactive.provider-auth.test.ts index 1bca5a57ec3..077b2c6d672 100644 --- a/src/commands/onboard-non-interactive.provider-auth.test.ts +++ b/src/commands/onboard-non-interactive.provider-auth.test.ts @@ -48,7 +48,7 @@ type ProviderAuthConfigSnapshot = { { baseUrl?: string; api?: string; - apiKey?: string; + apiKey?: string | { source?: string; id?: string }; models?: Array<{ id?: string }>; } >; @@ -145,6 +145,14 @@ async function runCustomLocalNonInteractive( } async function readCustomLocalProviderApiKey(configPath: string): Promise { + const cfg = await readJsonFile(configPath); + const apiKey = 
cfg.models?.providers?.[CUSTOM_LOCAL_PROVIDER_ID]?.apiKey; + return typeof apiKey === "string" ? apiKey : undefined; +} + +async function readCustomLocalProviderApiKeyInput( + configPath: string, +): Promise { const cfg = await readJsonFile(configPath); return cfg.models?.providers?.[CUSTOM_LOCAL_PROVIDER_ID]?.apiKey; } @@ -349,6 +357,121 @@ describe("onboard (non-interactive): provider auth", () => { }); }); + it.each([ + { + name: "anthropic", + prefix: "openclaw-onboard-ref-flag-anthropic-", + authChoice: "apiKey", + optionKey: "anthropicApiKey", + flagName: "--anthropic-api-key", + envVar: "ANTHROPIC_API_KEY", + }, + { + name: "openai", + prefix: "openclaw-onboard-ref-flag-openai-", + authChoice: "openai-api-key", + optionKey: "openaiApiKey", + flagName: "--openai-api-key", + envVar: "OPENAI_API_KEY", + }, + { + name: "openrouter", + prefix: "openclaw-onboard-ref-flag-openrouter-", + authChoice: "openrouter-api-key", + optionKey: "openrouterApiKey", + flagName: "--openrouter-api-key", + envVar: "OPENROUTER_API_KEY", + }, + { + name: "xai", + prefix: "openclaw-onboard-ref-flag-xai-", + authChoice: "xai-api-key", + optionKey: "xaiApiKey", + flagName: "--xai-api-key", + envVar: "XAI_API_KEY", + }, + { + name: "volcengine", + prefix: "openclaw-onboard-ref-flag-volcengine-", + authChoice: "volcengine-api-key", + optionKey: "volcengineApiKey", + flagName: "--volcengine-api-key", + envVar: "VOLCANO_ENGINE_API_KEY", + }, + { + name: "byteplus", + prefix: "openclaw-onboard-ref-flag-byteplus-", + authChoice: "byteplus-api-key", + optionKey: "byteplusApiKey", + flagName: "--byteplus-api-key", + envVar: "BYTEPLUS_API_KEY", + }, + ])( + "fails fast for $name when --secret-input-mode ref uses explicit key without env and does not leak the key", + async ({ prefix, authChoice, optionKey, flagName, envVar }) => { + await withOnboardEnv(prefix, async ({ runtime }) => { + const providedSecret = `${envVar.toLowerCase()}-should-not-leak`; + const options: Record = { + authChoice, + 
secretInputMode: "ref", + [optionKey]: providedSecret, + skipSkills: true, + }; + const envOverrides: Record = { + [envVar]: undefined, + }; + + await withEnvAsync(envOverrides, async () => { + let thrown: Error | undefined; + try { + await runNonInteractiveOnboardingWithDefaults(runtime, options); + } catch (error) { + thrown = error as Error; + } + expect(thrown).toBeDefined(); + const message = String(thrown?.message ?? ""); + expect(message).toContain( + `${flagName} cannot be used with --secret-input-mode ref unless ${envVar} is set in env.`, + ); + expect(message).toContain( + `Set ${envVar} in env and omit ${flagName}, or use --secret-input-mode plaintext.`, + ); + expect(message).not.toContain(providedSecret); + }); + }); + }, + ); + + it("stores the detected env alias as keyRef for opencode ref mode", async () => { + await withOnboardEnv("openclaw-onboard-ref-opencode-alias-", async ({ runtime }) => { + await withEnvAsync( + { + OPENCODE_API_KEY: undefined, + OPENCODE_ZEN_API_KEY: "opencode-zen-env-key", + }, + async () => { + await runNonInteractiveOnboardingWithDefaults(runtime, { + authChoice: "opencode-zen", + secretInputMode: "ref", + skipSkills: true, + }); + + const store = ensureAuthProfileStore(); + const profile = store.profiles["opencode:default"]; + expect(profile?.type).toBe("api_key"); + if (profile?.type === "api_key") { + expect(profile.key).toBeUndefined(); + expect(profile.keyRef).toEqual({ + source: "env", + provider: "default", + id: "OPENCODE_ZEN_API_KEY", + }); + } + }, + ); + }); + }); + it("rejects vLLM auth choice in non-interactive mode", async () => { await withOnboardEnv("openclaw-onboard-vllm-non-interactive-", async ({ runtime }) => { await expect( @@ -508,6 +631,49 @@ describe("onboard (non-interactive): provider auth", () => { ); }); + it("stores CUSTOM_API_KEY env ref for non-interactive custom provider auth in ref mode", async () => { + await withOnboardEnv( + "openclaw-onboard-custom-provider-env-ref-", + async ({ 
configPath, runtime }) => { + process.env.CUSTOM_API_KEY = "custom-env-key"; + await runCustomLocalNonInteractive(runtime, { + secretInputMode: "ref", + }); + expect(await readCustomLocalProviderApiKeyInput(configPath)).toEqual({ + source: "env", + provider: "default", + id: "CUSTOM_API_KEY", + }); + }, + ); + }); + + it("fails fast for custom provider ref mode when --custom-api-key is set but CUSTOM_API_KEY env is missing", async () => { + await withOnboardEnv("openclaw-onboard-custom-provider-ref-flag-", async ({ runtime }) => { + const providedSecret = "custom-inline-key-should-not-leak"; + await withEnvAsync({ CUSTOM_API_KEY: undefined }, async () => { + let thrown: Error | undefined; + try { + await runCustomLocalNonInteractive(runtime, { + secretInputMode: "ref", + customApiKey: providedSecret, + }); + } catch (error) { + thrown = error as Error; + } + expect(thrown).toBeDefined(); + const message = String(thrown?.message ?? ""); + expect(message).toContain( + "--custom-api-key cannot be used with --secret-input-mode ref unless CUSTOM_API_KEY is set in env.", + ); + expect(message).toContain( + "Set CUSTOM_API_KEY in env and omit --custom-api-key, or use --secret-input-mode plaintext.", + ); + expect(message).not.toContain(providedSecret); + }); + }); + }); + it("uses matching profile fallback for non-interactive custom provider auth", async () => { await withOnboardEnv( "openclaw-onboard-custom-provider-profile-fallback-", diff --git a/src/commands/onboard-non-interactive/api-keys.ts b/src/commands/onboard-non-interactive/api-keys.ts index 11fda28352c..e55943e22d5 100644 --- a/src/commands/onboard-non-interactive/api-keys.ts +++ b/src/commands/onboard-non-interactive/api-keys.ts @@ -7,9 +7,18 @@ import { resolveEnvApiKey } from "../../agents/model-auth.js"; import type { OpenClawConfig } from "../../config/config.js"; import type { RuntimeEnv } from "../../runtime.js"; import { normalizeOptionalSecretInput } from "../../utils/normalize-secret-input.js"; 
+import type { SecretInputMode } from "../onboard-types.js"; export type NonInteractiveApiKeySource = "flag" | "env" | "profile"; +function parseEnvVarNameFromSourceLabel(source: string | undefined): string | undefined { + if (!source) { + return undefined; + } + const match = /^(?:shell env: |env: )([A-Z][A-Z0-9_]*)$/.exec(source.trim()); + return match?.[1]; +} + async function resolveApiKeyFromProfiles(params: { provider: string; cfg: OpenClawConfig; @@ -50,23 +59,49 @@ export async function resolveNonInteractiveApiKey(params: { agentDir?: string; allowProfile?: boolean; required?: boolean; -}): Promise<{ key: string; source: NonInteractiveApiKeySource } | null> { + secretInputMode?: SecretInputMode; +}): Promise<{ key: string; source: NonInteractiveApiKeySource; envVarName?: string } | null> { const flagKey = normalizeOptionalSecretInput(params.flagValue); + const envResolved = resolveEnvApiKey(params.provider); + const explicitEnvVar = params.envVarName?.trim(); + const explicitEnvKey = explicitEnvVar + ? normalizeOptionalSecretInput(process.env[explicitEnvVar]) + : undefined; + const resolvedEnvKey = envResolved?.apiKey ?? explicitEnvKey; + const resolvedEnvVarName = parseEnvVarNameFromSourceLabel(envResolved?.source) ?? 
explicitEnvVar; + + if (params.secretInputMode === "ref") { + if (!resolvedEnvKey && flagKey) { + params.runtime.error( + [ + `${params.flagName} cannot be used with --secret-input-mode ref unless ${params.envVar} is set in env.`, + `Set ${params.envVar} in env and omit ${params.flagName}, or use --secret-input-mode plaintext.`, + ].join("\n"), + ); + params.runtime.exit(1); + return null; + } + if (resolvedEnvKey) { + if (!resolvedEnvVarName) { + params.runtime.error( + [ + `--secret-input-mode ref requires an explicit environment variable for provider "${params.provider}".`, + `Set ${params.envVar} in env and retry, or use --secret-input-mode plaintext.`, + ].join("\n"), + ); + params.runtime.exit(1); + return null; + } + return { key: resolvedEnvKey, source: "env", envVarName: resolvedEnvVarName }; + } + } + if (flagKey) { return { key: flagKey, source: "flag" }; } - const envResolved = resolveEnvApiKey(params.provider); - if (envResolved?.apiKey) { - return { key: envResolved.apiKey, source: "env" }; - } - - const explicitEnvVar = params.envVarName?.trim(); - if (explicitEnvVar) { - const explicitEnvKey = normalizeOptionalSecretInput(process.env[explicitEnvVar]); - if (explicitEnvKey) { - return { key: explicitEnvKey, source: "env" }; - } + if (resolvedEnvKey) { + return { key: resolvedEnvKey, source: "env", envVarName: resolvedEnvVarName }; } if (params.allowProfile ?? 
true) { diff --git a/src/commands/onboard-non-interactive/local/auth-choice.ts b/src/commands/onboard-non-interactive/local/auth-choice.ts index 9f9ce49a581..54a38d84412 100644 --- a/src/commands/onboard-non-interactive/local/auth-choice.ts +++ b/src/commands/onboard-non-interactive/local/auth-choice.ts @@ -2,10 +2,11 @@ import { upsertAuthProfile } from "../../../agents/auth-profiles.js"; import { normalizeProviderId } from "../../../agents/model-selection.js"; import { parseDurationMs } from "../../../cli/parse-duration.js"; import type { OpenClawConfig } from "../../../config/config.js"; -import { upsertSharedEnvVar } from "../../../infra/env-file.js"; +import type { SecretInput } from "../../../config/types.secrets.js"; import type { RuntimeEnv } from "../../../runtime.js"; -import { shortenHomePath } from "../../../utils.js"; +import { resolveDefaultSecretProviderAlias } from "../../../secrets/ref-contract.js"; import { normalizeSecretInput } from "../../../utils/normalize-secret-input.js"; +import { normalizeSecretInputModeInput } from "../../auth-choice.apply-helpers.js"; import { buildTokenProfileId, validateAnthropicSetupToken } from "../../auth-token.js"; import { applyGoogleGeminiModelDefault } from "../../google-gemini-model-default.js"; import { applyPrimaryModel } from "../../model-picker.js"; @@ -34,6 +35,7 @@ import { applyZaiConfig, setAnthropicApiKey, setCloudflareAiGatewayConfig, + setByteplusApiKey, setQianfanApiKey, setGeminiApiKey, setKilocodeApiKey, @@ -42,9 +44,11 @@ import { setMistralApiKey, setMinimaxApiKey, setMoonshotApiKey, + setOpenaiApiKey, setOpencodeZenApiKey, setOpenrouterApiKey, setSyntheticApiKey, + setVolcengineApiKey, setXaiApiKey, setVeniceApiKey, setTogetherApiKey, @@ -64,6 +68,10 @@ import { applyOpenAIConfig } from "../../openai-model-default.js"; import { detectZaiEndpoint } from "../../zai-endpoint-detect.js"; import { resolveNonInteractiveApiKey } from "../api-keys.js"; +type ResolvedNonInteractiveApiKey = NonNullable< 
+ Awaited> +>; + export async function applyNonInteractiveAuthChoice(params: { nextConfig: OpenClawConfig; authChoice: AuthChoice; @@ -73,6 +81,59 @@ export async function applyNonInteractiveAuthChoice(params: { }): Promise { const { authChoice, opts, runtime, baseConfig } = params; let nextConfig = params.nextConfig; + const requestedSecretInputMode = normalizeSecretInputModeInput(opts.secretInputMode); + if (opts.secretInputMode && !requestedSecretInputMode) { + runtime.error('Invalid --secret-input-mode. Use "plaintext" or "ref".'); + runtime.exit(1); + return null; + } + const apiKeyStorageOptions = requestedSecretInputMode + ? { secretInputMode: requestedSecretInputMode } + : undefined; + const toStoredSecretInput = (resolved: ResolvedNonInteractiveApiKey): SecretInput | null => { + if (requestedSecretInputMode !== "ref") { + return resolved.key; + } + if (resolved.source !== "env") { + return resolved.key; + } + if (!resolved.envVarName) { + runtime.error( + [ + `Unable to determine which environment variable to store as a ref for provider "${authChoice}".`, + "Set an explicit provider env var and retry, or use --secret-input-mode plaintext.", + ].join("\n"), + ); + runtime.exit(1); + return null; + } + return { + source: "env", + provider: resolveDefaultSecretProviderAlias(baseConfig, "env", { + preferFirstProviderForSource: true, + }), + id: resolved.envVarName, + }; + }; + const resolveApiKey = (input: Parameters[0]) => + resolveNonInteractiveApiKey({ + ...input, + secretInputMode: requestedSecretInputMode, + }); + const maybeSetResolvedApiKey = async ( + resolved: ResolvedNonInteractiveApiKey, + setter: (value: SecretInput) => Promise | void, + ): Promise => { + if (resolved.source === "profile") { + return true; + } + const stored = toStoredSecretInput(resolved); + if (!stored) { + return false; + } + await setter(stored); + return true; + }; if (authChoice === "claude-cli" || authChoice === "codex-cli") { runtime.error( @@ -108,7 +169,7 @@ export async 
function applyNonInteractiveAuthChoice(params: { } if (authChoice === "apiKey") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "anthropic", cfg: baseConfig, flagValue: opts.anthropicApiKey, @@ -119,8 +180,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setAnthropicApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setAnthropicApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } return applyAuthProfileConfig(nextConfig, { profileId: "anthropic:default", @@ -185,7 +250,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "gemini-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "google", cfg: baseConfig, flagValue: opts.geminiApiKey, @@ -196,8 +261,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setGeminiApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setGeminiApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "google:default", @@ -214,7 +283,7 @@ export async function applyNonInteractiveAuthChoice(params: { authChoice === "zai-global" || authChoice === "zai-cn" ) { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "zai", cfg: baseConfig, flagValue: opts.zaiApiKey, @@ -225,8 +294,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setZaiApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setZaiApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + 
return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "zai:default", @@ -263,7 +336,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "xiaomi-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "xiaomi", cfg: baseConfig, flagValue: opts.xiaomiApiKey, @@ -274,8 +347,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setXiaomiApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setXiaomiApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "xiaomi:default", @@ -286,7 +363,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "xai-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "xai", cfg: baseConfig, flagValue: opts.xaiApiKey, @@ -297,8 +374,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - setXaiApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setXaiApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "xai:default", @@ -309,7 +390,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "mistral-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "mistral", cfg: baseConfig, flagValue: opts.mistralApiKey, @@ -320,8 +401,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setMistralApiKey(resolved.key); + if ( + !(await 
maybeSetResolvedApiKey(resolved, (value) => + setMistralApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "mistral:default", @@ -332,7 +417,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "volcengine-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "volcengine", cfg: baseConfig, flagValue: opts.volcengineApiKey, @@ -343,19 +428,23 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - const result = upsertSharedEnvVar({ - key: "VOLCANO_ENGINE_API_KEY", - value: resolved.key, - }); - process.env.VOLCANO_ENGINE_API_KEY = resolved.key; - runtime.log(`Saved VOLCANO_ENGINE_API_KEY to ${shortenHomePath(result.path)}`); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setVolcengineApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } + nextConfig = applyAuthProfileConfig(nextConfig, { + profileId: "volcengine:default", + provider: "volcengine", + mode: "api_key", + }); return applyPrimaryModel(nextConfig, "volcengine-plan/ark-code-latest"); } if (authChoice === "byteplus-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "byteplus", cfg: baseConfig, flagValue: opts.byteplusApiKey, @@ -366,19 +455,23 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - const result = upsertSharedEnvVar({ - key: "BYTEPLUS_API_KEY", - value: resolved.key, - }); - process.env.BYTEPLUS_API_KEY = resolved.key; - runtime.log(`Saved BYTEPLUS_API_KEY to ${shortenHomePath(result.path)}`); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setByteplusApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } + 
nextConfig = applyAuthProfileConfig(nextConfig, { + profileId: "byteplus:default", + provider: "byteplus", + mode: "api_key", + }); return applyPrimaryModel(nextConfig, "byteplus-plan/ark-code-latest"); } if (authChoice === "qianfan-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "qianfan", cfg: baseConfig, flagValue: opts.qianfanApiKey, @@ -389,8 +482,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - setQianfanApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setQianfanApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "qianfan:default", @@ -401,27 +498,34 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "openai-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "openai", cfg: baseConfig, flagValue: opts.openaiApiKey, flagName: "--openai-api-key", envVar: "OPENAI_API_KEY", runtime, - allowProfile: false, }); if (!resolved) { return null; } - const key = resolved.key; - const result = upsertSharedEnvVar({ key: "OPENAI_API_KEY", value: key }); - process.env.OPENAI_API_KEY = key; - runtime.log(`Saved OPENAI_API_KEY to ${shortenHomePath(result.path)}`); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setOpenaiApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; + } + nextConfig = applyAuthProfileConfig(nextConfig, { + profileId: "openai:default", + provider: "openai", + mode: "api_key", + }); return applyOpenAIConfig(nextConfig); } if (authChoice === "openrouter-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "openrouter", cfg: baseConfig, flagValue: opts.openrouterApiKey, @@ -432,8 
+536,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setOpenrouterApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setOpenrouterApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "openrouter:default", @@ -444,7 +552,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "kilocode-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "kilocode", cfg: baseConfig, flagValue: opts.kilocodeApiKey, @@ -455,8 +563,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setKilocodeApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setKilocodeApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "kilocode:default", @@ -467,7 +579,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "litellm-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "litellm", cfg: baseConfig, flagValue: opts.litellmApiKey, @@ -478,8 +590,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setLitellmApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setLitellmApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "litellm:default", @@ -490,7 +606,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "ai-gateway-api-key") { - const resolved = 
await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "vercel-ai-gateway", cfg: baseConfig, flagValue: opts.aiGatewayApiKey, @@ -501,8 +617,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setVercelAiGatewayApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setVercelAiGatewayApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "vercel-ai-gateway:default", @@ -525,7 +645,7 @@ export async function applyNonInteractiveAuthChoice(params: { runtime.exit(1); return null; } - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "cloudflare-ai-gateway", cfg: baseConfig, flagValue: opts.cloudflareAiGatewayApiKey, @@ -537,7 +657,17 @@ export async function applyNonInteractiveAuthChoice(params: { return null; } if (resolved.source !== "profile") { - await setCloudflareAiGatewayConfig(accountId, gatewayId, resolved.key); + const stored = toStoredSecretInput(resolved); + if (!stored) { + return null; + } + await setCloudflareAiGatewayConfig( + accountId, + gatewayId, + stored, + undefined, + apiKeyStorageOptions, + ); } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "cloudflare-ai-gateway:default", @@ -553,7 +683,7 @@ export async function applyNonInteractiveAuthChoice(params: { const applyMoonshotApiKeyChoice = async ( applyConfig: (cfg: OpenClawConfig) => OpenClawConfig, ): Promise => { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "moonshot", cfg: baseConfig, flagValue: opts.moonshotApiKey, @@ -564,8 +694,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setMoonshotApiKey(resolved.key); + if ( + !(await 
maybeSetResolvedApiKey(resolved, (value) => + setMoonshotApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "moonshot:default", @@ -584,7 +718,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "kimi-code-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "kimi-coding", cfg: baseConfig, flagValue: opts.kimiCodeApiKey, @@ -595,8 +729,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setKimiCodingApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setKimiCodingApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "kimi-coding:default", @@ -607,7 +745,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "synthetic-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "synthetic", cfg: baseConfig, flagValue: opts.syntheticApiKey, @@ -618,8 +756,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setSyntheticApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setSyntheticApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "synthetic:default", @@ -630,7 +772,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "venice-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "venice", cfg: baseConfig, flagValue: opts.veniceApiKey, @@ -641,8 +783,12 @@ export async function 
applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setVeniceApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setVeniceApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "venice:default", @@ -661,7 +807,7 @@ export async function applyNonInteractiveAuthChoice(params: { const isCn = authChoice === "minimax-api-key-cn"; const providerId = isCn ? "minimax-cn" : "minimax"; const profileId = `${providerId}:default`; - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: providerId, cfg: baseConfig, flagValue: opts.minimaxApiKey, @@ -672,8 +818,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setMinimaxApiKey(resolved.key, undefined, profileId); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setMinimaxApiKey(value, undefined, profileId, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId, @@ -692,7 +842,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "opencode-zen") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "opencode", cfg: baseConfig, flagValue: opts.opencodeZenApiKey, @@ -703,8 +853,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setOpencodeZenApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setOpencodeZenApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "opencode:default", @@ -715,7 +869,7 @@ export async function 
applyNonInteractiveAuthChoice(params: { } if (authChoice === "together-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "together", cfg: baseConfig, flagValue: opts.togetherApiKey, @@ -726,8 +880,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setTogetherApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setTogetherApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "together:default", @@ -738,7 +896,7 @@ export async function applyNonInteractiveAuthChoice(params: { } if (authChoice === "huggingface-api-key") { - const resolved = await resolveNonInteractiveApiKey({ + const resolved = await resolveApiKey({ provider: "huggingface", cfg: baseConfig, flagValue: opts.huggingfaceApiKey, @@ -749,8 +907,12 @@ export async function applyNonInteractiveAuthChoice(params: { if (!resolved) { return null; } - if (resolved.source !== "profile") { - await setHuggingfaceApiKey(resolved.key); + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setHuggingfaceApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; } nextConfig = applyAuthProfileConfig(nextConfig, { profileId: "huggingface:default", @@ -774,7 +936,7 @@ export async function applyNonInteractiveAuthChoice(params: { baseUrl: customAuth.baseUrl, providerId: customAuth.providerId, }); - const resolvedCustomApiKey = await resolveNonInteractiveApiKey({ + const resolvedCustomApiKey = await resolveApiKey({ provider: resolvedProviderId.providerId, cfg: baseConfig, flagValue: customAuth.apiKey, @@ -784,12 +946,24 @@ export async function applyNonInteractiveAuthChoice(params: { runtime, required: false, }); + let customApiKeyInput: SecretInput | undefined; + if (resolvedCustomApiKey) { + if (requestedSecretInputMode === 
"ref") { + const stored = toStoredSecretInput(resolvedCustomApiKey); + if (!stored) { + return null; + } + customApiKeyInput = stored; + } else { + customApiKeyInput = resolvedCustomApiKey.key; + } + } const result = applyCustomApiConfig({ config: nextConfig, baseUrl: customAuth.baseUrl, modelId: customAuth.modelId, compatibility: customAuth.compatibility, - apiKey: resolvedCustomApiKey?.key, + apiKey: customApiKeyInput, providerId: customAuth.providerId, }); if (result.providerIdRenamedFrom && result.providerId) { diff --git a/src/commands/onboard-types.ts b/src/commands/onboard-types.ts index fa655752b1f..fee12d392bb 100644 --- a/src/commands/onboard-types.ts +++ b/src/commands/onboard-types.ts @@ -87,6 +87,7 @@ export type NodeManagerChoice = "npm" | "pnpm" | "bun"; export type ChannelChoice = ChannelId; // Legacy alias (pre-rename). export type ProviderChoice = ChannelChoice; +export type SecretInputMode = "plaintext" | "ref"; export type OnboardOptions = { mode?: OnboardMode; @@ -97,6 +98,7 @@ export type OnboardOptions = { /** Required for non-interactive onboarding; skips the interactive risk prompt when true. */ acceptRisk?: boolean; reset?: boolean; + resetScope?: ResetScope; authChoice?: AuthChoice; /** Used when `authChoice=token` in non-interactive mode. */ tokenProvider?: string; @@ -106,6 +108,8 @@ export type OnboardOptions = { tokenProfileId?: string; /** Used when `authChoice=token` in non-interactive mode. */ tokenExpiresIn?: string; + /** API key persistence mode for onboarding flows (default: plaintext). 
*/ + secretInputMode?: SecretInputMode; anthropicApiKey?: string; openaiApiKey?: string; mistralApiKey?: string; diff --git a/src/commands/onboard.test.ts b/src/commands/onboard.test.ts new file mode 100644 index 00000000000..4fa6b04cc12 --- /dev/null +++ b/src/commands/onboard.test.ts @@ -0,0 +1,141 @@ +import path from "node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import type { RuntimeEnv } from "../runtime.js"; + +const mocks = vi.hoisted(() => ({ + runInteractiveOnboarding: vi.fn(async () => {}), + runNonInteractiveOnboarding: vi.fn(async () => {}), + readConfigFileSnapshot: vi.fn(async () => ({ exists: false, valid: false, config: {} })), + handleReset: vi.fn(async () => {}), +})); + +vi.mock("./onboard-interactive.js", () => ({ + runInteractiveOnboarding: mocks.runInteractiveOnboarding, +})); + +vi.mock("./onboard-non-interactive.js", () => ({ + runNonInteractiveOnboarding: mocks.runNonInteractiveOnboarding, +})); + +vi.mock("../config/config.js", () => ({ + readConfigFileSnapshot: mocks.readConfigFileSnapshot, +})); + +vi.mock("./onboard-helpers.js", () => ({ + DEFAULT_WORKSPACE: "~/.openclaw/workspace", + handleReset: mocks.handleReset, +})); + +const { onboardCommand } = await import("./onboard.js"); + +function makeRuntime(): RuntimeEnv { + return { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn() as unknown as RuntimeEnv["exit"], + }; +} + +describe("onboardCommand", () => { + afterEach(() => { + vi.clearAllMocks(); + mocks.readConfigFileSnapshot.mockResolvedValue({ exists: false, valid: false, config: {} }); + }); + + it("fails fast for invalid secret-input-mode before onboarding starts", async () => { + const runtime = makeRuntime(); + + await onboardCommand( + { + secretInputMode: "invalid" as never, + }, + runtime, + ); + + expect(runtime.error).toHaveBeenCalledWith( + 'Invalid --secret-input-mode. 
Use "plaintext" or "ref".', + ); + expect(runtime.exit).toHaveBeenCalledWith(1); + expect(mocks.runInteractiveOnboarding).not.toHaveBeenCalled(); + expect(mocks.runNonInteractiveOnboarding).not.toHaveBeenCalled(); + }); + + it("defaults --reset to config+creds+sessions scope", async () => { + const runtime = makeRuntime(); + + await onboardCommand( + { + reset: true, + }, + runtime, + ); + + expect(mocks.handleReset).toHaveBeenCalledWith( + "config+creds+sessions", + expect.any(String), + runtime, + ); + }); + + it("uses configured default workspace for --reset when --workspace is not provided", async () => { + const runtime = makeRuntime(); + mocks.readConfigFileSnapshot.mockResolvedValue({ + exists: true, + valid: true, + config: { + agents: { + defaults: { + workspace: "/tmp/openclaw-custom-workspace", + }, + }, + }, + }); + + await onboardCommand( + { + reset: true, + }, + runtime, + ); + + expect(mocks.handleReset).toHaveBeenCalledWith( + "config+creds+sessions", + path.resolve("/tmp/openclaw-custom-workspace"), + runtime, + ); + }); + + it("accepts explicit --reset-scope full", async () => { + const runtime = makeRuntime(); + + await onboardCommand( + { + reset: true, + resetScope: "full", + }, + runtime, + ); + + expect(mocks.handleReset).toHaveBeenCalledWith("full", expect.any(String), runtime); + }); + + it("fails fast for invalid --reset-scope", async () => { + const runtime = makeRuntime(); + + await onboardCommand( + { + reset: true, + resetScope: "invalid" as never, + }, + runtime, + ); + + expect(runtime.error).toHaveBeenCalledWith( + 'Invalid --reset-scope. 
Use "config", "config+creds+sessions", or "full".', + ); + expect(runtime.exit).toHaveBeenCalledWith(1); + expect(mocks.handleReset).not.toHaveBeenCalled(); + expect(mocks.runInteractiveOnboarding).not.toHaveBeenCalled(); + expect(mocks.runNonInteractiveOnboarding).not.toHaveBeenCalled(); + }); +}); diff --git a/src/commands/onboard.ts b/src/commands/onboard.ts index 2ddcb309cb0..1901d70e08f 100644 --- a/src/commands/onboard.ts +++ b/src/commands/onboard.ts @@ -8,7 +8,9 @@ import { isDeprecatedAuthChoice, normalizeLegacyOnboardAuthChoice } from "./auth import { DEFAULT_WORKSPACE, handleReset } from "./onboard-helpers.js"; import { runInteractiveOnboarding } from "./onboard-interactive.js"; import { runNonInteractiveOnboarding } from "./onboard-non-interactive.js"; -import type { OnboardOptions } from "./onboard-types.js"; +import type { OnboardOptions, ResetScope } from "./onboard-types.js"; + +const VALID_RESET_SCOPES = new Set(["config", "config+creds+sessions", "full"]); export async function onboardCommand(opts: OnboardOptions, runtime: RuntimeEnv = defaultRuntime) { assertSupportedRuntime(runtime); @@ -35,6 +37,21 @@ export async function onboardCommand(opts: OnboardOptions, runtime: RuntimeEnv = normalizedAuthChoice === opts.authChoice && flow === opts.flow ? opts : { ...opts, authChoice: normalizedAuthChoice, flow }; + if ( + normalizedOpts.secretInputMode && + normalizedOpts.secretInputMode !== "plaintext" && + normalizedOpts.secretInputMode !== "ref" + ) { + runtime.error('Invalid --secret-input-mode. Use "plaintext" or "ref".'); + runtime.exit(1); + return; + } + + if (normalizedOpts.resetScope && !VALID_RESET_SCOPES.has(normalizedOpts.resetScope)) { + runtime.error('Invalid --reset-scope. 
Use "config", "config+creds+sessions", or "full".'); + runtime.exit(1); + return; + } if (normalizedOpts.nonInteractive && normalizedOpts.acceptRisk !== true) { runtime.error( @@ -53,7 +70,8 @@ export async function onboardCommand(opts: OnboardOptions, runtime: RuntimeEnv = const baseConfig = snapshot.valid ? snapshot.config : {}; const workspaceDefault = normalizedOpts.workspace ?? baseConfig.agents?.defaults?.workspace ?? DEFAULT_WORKSPACE; - await handleReset("full", resolveUserPath(workspaceDefault), runtime); + const resetScope: ResetScope = normalizedOpts.resetScope ?? "config+creds+sessions"; + await handleReset(resetScope, resolveUserPath(workspaceDefault), runtime); } if (process.platform === "win32") { diff --git a/src/commands/sessions-cleanup.test.ts b/src/commands/sessions-cleanup.test.ts index 31ece2c3501..6dc9556cae2 100644 --- a/src/commands/sessions-cleanup.test.ts +++ b/src/commands/sessions-cleanup.test.ts @@ -7,6 +7,8 @@ const mocks = vi.hoisted(() => ({ resolveSessionStoreTargets: vi.fn(), resolveMaintenanceConfig: vi.fn(), loadSessionStore: vi.fn(), + resolveSessionFilePath: vi.fn(), + resolveSessionFilePathOptions: vi.fn(), pruneStaleEntries: vi.fn(), capEntryCount: vi.fn(), updateSessionStore: vi.fn(), @@ -24,6 +26,8 @@ vi.mock("./session-store-targets.js", () => ({ vi.mock("../config/sessions.js", () => ({ resolveMaintenanceConfig: mocks.resolveMaintenanceConfig, loadSessionStore: mocks.loadSessionStore, + resolveSessionFilePath: mocks.resolveSessionFilePath, + resolveSessionFilePathOptions: mocks.resolveSessionFilePathOptions, pruneStaleEntries: mocks.pruneStaleEntries, capEntryCount: mocks.capEntryCount, updateSessionStore: mocks.updateSessionStore, @@ -74,8 +78,12 @@ describe("sessionsCleanupCommand", () => { return 0; }, ); + mocks.resolveSessionFilePathOptions.mockReturnValue({}); + mocks.resolveSessionFilePath.mockImplementation( + (sessionId: string) => `/missing/${sessionId}.jsonl`, + ); mocks.capEntryCount.mockImplementation(() => 
0); - mocks.updateSessionStore.mockResolvedValue(undefined); + mocks.updateSessionStore.mockResolvedValue(0); mocks.enforceSessionDiskBudget.mockResolvedValue({ totalBytesBefore: 1000, totalBytesAfter: 700, @@ -130,6 +138,7 @@ describe("sessionsCleanupCommand", () => { overBudget: true, }, }); + return 0; }, ); @@ -196,6 +205,29 @@ describe("sessionsCleanupCommand", () => { ); }); + it("counts missing transcript entries when --fix-missing is enabled in dry-run", async () => { + mocks.enforceSessionDiskBudget.mockResolvedValue(null); + mocks.loadSessionStore.mockReturnValue({ + missing: { sessionId: "missing-transcript", updatedAt: 1 }, + }); + + const { runtime, logs } = makeRuntime(); + await sessionsCleanupCommand( + { + json: true, + dryRun: true, + fixMissing: true, + }, + runtime, + ); + + expect(logs).toHaveLength(1); + const payload = JSON.parse(logs[0] ?? "{}") as Record; + expect(payload.beforeCount).toBe(1); + expect(payload.afterCount).toBe(0); + expect(payload.missing).toBe(1); + }); + it("renders a dry-run action table with keep/prune actions", async () => { mocks.enforceSessionDiskBudget.mockResolvedValue(null); mocks.loadSessionStore.mockReturnValue({ diff --git a/src/commands/sessions-cleanup.ts b/src/commands/sessions-cleanup.ts index d09d986aea0..151fa531e04 100644 --- a/src/commands/sessions-cleanup.ts +++ b/src/commands/sessions-cleanup.ts @@ -1,7 +1,10 @@ +import fs from "node:fs"; import { loadConfig } from "../config/config.js"; import { capEntryCount, enforceSessionDiskBudget, + resolveSessionFilePath, + resolveSessionFilePathOptions, loadSessionStore, pruneStaleEntries, resolveMaintenanceConfig, @@ -33,9 +36,15 @@ export type SessionsCleanupOptions = { enforce?: boolean; activeKey?: string; json?: boolean; + fixMissing?: boolean; }; -type SessionCleanupAction = "keep" | "prune-stale" | "cap-overflow" | "evict-budget"; +type SessionCleanupAction = + | "keep" + | "prune-missing" + | "prune-stale" + | "cap-overflow" + | "evict-budget"; const 
ACTION_PAD = 12; @@ -50,6 +59,7 @@ type SessionCleanupSummary = { dryRun: boolean; beforeCount: number; afterCount: number; + missing: number; pruned: number; capped: number; diskBudget: Awaited>; @@ -60,10 +70,14 @@ type SessionCleanupSummary = { function resolveSessionCleanupAction(params: { key: string; + missingKeys: Set; staleKeys: Set; cappedKeys: Set; budgetEvictedKeys: Set; }): SessionCleanupAction { + if (params.missingKeys.has(params.key)) { + return "prune-missing"; + } if (params.staleKeys.has(params.key)) { return "prune-stale"; } @@ -84,6 +98,9 @@ function formatCleanupActionCell(action: SessionCleanupAction, rich: boolean): s if (action === "keep") { return theme.muted(label); } + if (action === "prune-missing") { + return theme.error(label); + } if (action === "prune-stale") { return theme.warn(label); } @@ -95,6 +112,7 @@ function formatCleanupActionCell(action: SessionCleanupAction, rich: boolean): s function buildActionRows(params: { beforeStore: Record; + missingKeys: Set; staleKeys: Set; cappedKeys: Set; budgetEvictedKeys: Set; @@ -103,6 +121,7 @@ function buildActionRows(params: { ...row, action: resolveSessionCleanupAction({ key: row.key, + missingKeys: params.missingKeys, staleKeys: params.staleKeys, cappedKeys: params.cappedKeys, budgetEvictedKeys: params.budgetEvictedKeys, @@ -110,17 +129,52 @@ function buildActionRows(params: { })); } +function pruneMissingTranscriptEntries(params: { + store: Record; + storePath: string; + onPruned?: (key: string) => void; +}): number { + const sessionPathOpts = resolveSessionFilePathOptions({ + storePath: params.storePath, + }); + let removed = 0; + for (const [key, entry] of Object.entries(params.store)) { + if (!entry?.sessionId) { + continue; + } + const transcriptPath = resolveSessionFilePath(entry.sessionId, entry, sessionPathOpts); + if (!fs.existsSync(transcriptPath)) { + delete params.store[key]; + removed += 1; + params.onPruned?.(key); + } + } + return removed; +} + async function 
previewStoreCleanup(params: { target: SessionStoreTarget; mode: "warn" | "enforce"; dryRun: boolean; activeKey?: string; + fixMissing?: boolean; }) { const maintenance = resolveMaintenanceConfig(); const beforeStore = loadSessionStore(params.target.storePath, { skipCache: true }); const previewStore = structuredClone(beforeStore); const staleKeys = new Set(); const cappedKeys = new Set(); + const missingKeys = new Set(); + const missing = + params.fixMissing === true + ? pruneMissingTranscriptEntries({ + store: previewStore, + storePath: params.target.storePath, + onPruned: (key) => { + missingKeys.add(key); + }, + }) + : 0; const pruned = pruneStaleEntries(previewStore, maintenance.pruneAfterMs, { log: false, onPruned: ({ key }) => { @@ -151,6 +205,7 @@ async function previewStoreCleanup(params: { const beforeCount = Object.keys(beforeStore).length; const afterPreviewCount = Object.keys(previewStore).length; const wouldMutate = + missing > 0 || pruned > 0 || capped > 0 || Boolean((diskBudget?.removedEntries ?? 0) > 0 || (diskBudget?.removedFiles ?? 
0) > 0); @@ -162,6 +217,7 @@ async function previewStoreCleanup(params: { dryRun: params.dryRun, beforeCount, afterCount: afterPreviewCount, + missing, pruned, capped, diskBudget, @@ -175,6 +231,7 @@ async function previewStoreCleanup(params: { staleKeys, cappedKeys, budgetEvictedKeys, + missingKeys, }), }; } @@ -196,6 +253,7 @@ function renderStoreDryRunPlan(params: { params.runtime.log( `Entries: ${params.summary.beforeCount} -> ${params.summary.afterCount} (remove ${params.summary.beforeCount - params.summary.afterCount})`, ); + params.runtime.log(`Would prune missing transcripts: ${params.summary.missing}`); params.runtime.log(`Would prune stale: ${params.summary.pruned}`); params.runtime.log(`Would cap overflow: ${params.summary.capped}`); if (params.summary.diskBudget) { @@ -256,6 +314,7 @@ export async function sessionsCleanupCommand(opts: SessionsCleanupOptions, runti mode, dryRun: Boolean(opts.dryRun), activeKey: opts.activeKey, + fixMissing: Boolean(opts.fixMissing), }); previewResults.push(result); } @@ -303,10 +362,16 @@ export async function sessionsCleanupCommand(opts: SessionsCleanupOptions, runti const appliedReportRef: { current: SessionMaintenanceApplyReport | null } = { current: null, }; - await updateSessionStore( + const missingApplied = await updateSessionStore( target.storePath, - async () => { - // Maintenance runs in saveSessionStoreUnlocked(); no direct store mutation needed here. 
+ async (store) => { + if (!opts.fixMissing) { + return 0; + } + return pruneMissingTranscriptEntries({ + store, + storePath: target.storePath, + }); }, { activeSessionKey: opts.activeKey, @@ -331,6 +396,7 @@ export async function sessionsCleanupCommand(opts: SessionsCleanupOptions, runti dryRun: false, beforeCount: 0, afterCount: 0, + missing: 0, pruned: 0, capped: 0, diskBudget: null, @@ -347,10 +413,12 @@ export async function sessionsCleanupCommand(opts: SessionsCleanupOptions, runti dryRun: false, beforeCount: appliedReport.beforeCount, afterCount: appliedReport.afterCount, + missing: missingApplied, pruned: appliedReport.pruned, capped: appliedReport.capped, diskBudget: appliedReport.diskBudget, wouldMutate: + missingApplied > 0 || appliedReport.pruned > 0 || appliedReport.capped > 0 || Boolean( diff --git a/src/config/config.allowlist-requires-allowfrom.test.ts b/src/config/config.allowlist-requires-allowfrom.test.ts new file mode 100644 index 00000000000..5f1a4749008 --- /dev/null +++ b/src/config/config.allowlist-requires-allowfrom.test.ts @@ -0,0 +1,147 @@ +import { describe, expect, it } from "vitest"; +import { validateConfigObject } from "./config.js"; + +describe('dmPolicy="allowlist" requires non-empty effective allowFrom', () => { + it('rejects telegram dmPolicy="allowlist" without allowFrom', () => { + const res = validateConfigObject({ + channels: { telegram: { dmPolicy: "allowlist", botToken: "fake" } }, + }); + expect(res.ok).toBe(false); + if (!res.ok) { + expect(res.issues.some((i) => i.path.includes("channels.telegram.allowFrom"))).toBe(true); + } + }); + + it('rejects signal dmPolicy="allowlist" without allowFrom', () => { + const res = validateConfigObject({ + channels: { signal: { dmPolicy: "allowlist" } }, + }); + expect(res.ok).toBe(false); + if (!res.ok) { + expect(res.issues.some((i) => i.path.includes("channels.signal.allowFrom"))).toBe(true); + } + }); + + it('rejects discord dmPolicy="allowlist" without allowFrom', () => { + const 
res = validateConfigObject({ + channels: { discord: { dmPolicy: "allowlist" } }, + }); + expect(res.ok).toBe(false); + if (!res.ok) { + expect( + res.issues.some((i) => i.path.includes("channels.discord") && i.path.includes("allowFrom")), + ).toBe(true); + } + }); + + it('rejects whatsapp dmPolicy="allowlist" without allowFrom', () => { + const res = validateConfigObject({ + channels: { whatsapp: { dmPolicy: "allowlist" } }, + }); + expect(res.ok).toBe(false); + if (!res.ok) { + expect(res.issues.some((i) => i.path.includes("channels.whatsapp.allowFrom"))).toBe(true); + } + }); + + it('accepts dmPolicy="pairing" without allowFrom', () => { + const res = validateConfigObject({ + channels: { telegram: { dmPolicy: "pairing", botToken: "fake" } }, + }); + expect(res.ok).toBe(true); + }); +}); + +describe('account dmPolicy="allowlist" uses inherited allowFrom', () => { + it("accepts telegram account allowlist when parent allowFrom exists", () => { + const res = validateConfigObject({ + channels: { + telegram: { + allowFrom: ["12345"], + accounts: { bot1: { dmPolicy: "allowlist", botToken: "fake" } }, + }, + }, + }); + expect(res.ok).toBe(true); + }); + + it("rejects telegram account allowlist when neither account nor parent has allowFrom", () => { + const res = validateConfigObject({ + channels: { telegram: { accounts: { bot1: { dmPolicy: "allowlist", botToken: "fake" } } } }, + }); + expect(res.ok).toBe(false); + if (!res.ok) { + expect( + res.issues.some((i) => i.path.includes("channels.telegram.accounts.bot1.allowFrom")), + ).toBe(true); + } + }); + + it("accepts signal account allowlist when parent allowFrom exists", () => { + const res = validateConfigObject({ + channels: { + signal: { allowFrom: ["+15550001111"], accounts: { work: { dmPolicy: "allowlist" } } }, + }, + }); + expect(res.ok).toBe(true); + }); + + it("accepts discord account allowlist when parent allowFrom exists", () => { + const res = validateConfigObject({ + channels: { + discord: { allowFrom: 
["123456789"], accounts: { work: { dmPolicy: "allowlist" } } }, + }, + }); + expect(res.ok).toBe(true); + }); + + it("accepts slack account allowlist when parent allowFrom exists", () => { + const res = validateConfigObject({ + channels: { + slack: { + allowFrom: ["U123"], + botToken: "xoxb-top", + appToken: "xapp-top", + accounts: { + work: { dmPolicy: "allowlist", botToken: "xoxb-work", appToken: "xapp-work" }, + }, + }, + }, + }); + expect(res.ok).toBe(true); + }); + + it("accepts whatsapp account allowlist when parent allowFrom exists", () => { + const res = validateConfigObject({ + channels: { + whatsapp: { allowFrom: ["+15550001111"], accounts: { work: { dmPolicy: "allowlist" } } }, + }, + }); + expect(res.ok).toBe(true); + }); + + it("accepts imessage account allowlist when parent allowFrom exists", () => { + const res = validateConfigObject({ + channels: { + imessage: { allowFrom: ["alice"], accounts: { work: { dmPolicy: "allowlist" } } }, + }, + }); + expect(res.ok).toBe(true); + }); + + it("accepts irc account allowlist when parent allowFrom exists", () => { + const res = validateConfigObject({ + channels: { irc: { allowFrom: ["nick"], accounts: { work: { dmPolicy: "allowlist" } } } }, + }); + expect(res.ok).toBe(true); + }); + + it("accepts bluebubbles account allowlist when parent allowFrom exists", () => { + const res = validateConfigObject({ + channels: { + bluebubbles: { allowFrom: ["sender"], accounts: { work: { dmPolicy: "allowlist" } } }, + }, + }); + expect(res.ok).toBe(true); + }); +}); diff --git a/src/config/config.plugin-validation.test.ts b/src/config/config.plugin-validation.test.ts index d9e6b3190e1..02542eac39b 100644 --- a/src/config/config.plugin-validation.test.ts +++ b/src/config/config.plugin-validation.test.ts @@ -65,17 +65,18 @@ describe("config plugin validation", () => { } }); - it("rejects missing plugin ids in entries", async () => { + it("warns for missing plugin ids in entries instead of failing validation", async () => { 
const home = await createCaseHome(); const res = validateInHome(home, { agents: { list: [{ id: "pi" }] }, plugins: { enabled: false, entries: { "missing-plugin": { enabled: true } } }, }); - expect(res.ok).toBe(false); - if (!res.ok) { - expect(res.issues).toContainEqual({ + expect(res.ok).toBe(true); + if (res.ok) { + expect(res.warnings).toContainEqual({ path: "plugins.entries.missing-plugin", - message: "plugin not found: missing-plugin", + message: + "plugin not found: missing-plugin (stale config entry ignored; remove it from plugins config)", }); } }); @@ -234,4 +235,32 @@ describe("config plugin validation", () => { }); } }); + + it("accepts heartbeat directPolicy enum values", async () => { + const home = await createCaseHome(); + const res = validateInHome(home, { + agents: { + defaults: { heartbeat: { target: "last", directPolicy: "block" } }, + list: [{ id: "pi", heartbeat: { directPolicy: "allow" } }], + }, + }); + expect(res.ok).toBe(true); + }); + + it("rejects invalid heartbeat directPolicy values", async () => { + const home = await createCaseHome(); + const res = validateInHome(home, { + agents: { + defaults: { heartbeat: { directPolicy: "maybe" } }, + list: [{ id: "pi" }], + }, + }); + expect(res.ok).toBe(false); + if (!res.ok) { + const hasIssue = res.issues.some( + (issue) => issue.path === "agents.defaults.heartbeat.directPolicy", + ); + expect(hasIssue).toBe(true); + } + }); }); diff --git a/src/config/config.secrets-schema.test.ts b/src/config/config.secrets-schema.test.ts new file mode 100644 index 00000000000..56b0f2e06e3 --- /dev/null +++ b/src/config/config.secrets-schema.test.ts @@ -0,0 +1,180 @@ +import { describe, expect, it } from "vitest"; +import { validateConfigObjectRaw } from "./validation.js"; + +describe("config secret refs schema", () => { + it("accepts top-level secrets sources and model apiKey refs", () => { + const result = validateConfigObjectRaw({ + secrets: { + providers: { + default: { source: "env" }, + filemain: { + 
source: "file", + path: "~/.openclaw/secrets.json", + mode: "json", + timeoutMs: 10_000, + }, + vault: { + source: "exec", + command: "/usr/local/bin/openclaw-secret-resolver", + args: ["resolve"], + allowSymlinkCommand: true, + }, + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }); + + expect(result.ok).toBe(true); + }); + + it("accepts openai-codex-responses as a model api value", () => { + const result = validateConfigObjectRaw({ + models: { + providers: { + "openai-codex": { + baseUrl: "https://chatgpt.com/backend-api", + api: "openai-codex-responses", + models: [{ id: "gpt-5.3-codex", name: "gpt-5.3-codex" }], + }, + }, + }, + }); + + expect(result.ok).toBe(true); + }); + + it("accepts googlechat serviceAccount refs", () => { + const result = validateConfigObjectRaw({ + channels: { + googlechat: { + serviceAccountRef: { + source: "file", + provider: "filemain", + id: "/channels/googlechat/serviceAccount", + }, + }, + }, + }); + + expect(result.ok).toBe(true); + }); + + it("accepts skills entry apiKey refs", () => { + const result = validateConfigObjectRaw({ + skills: { + entries: { + "review-pr": { + enabled: true, + apiKey: { source: "env", provider: "default", id: "SKILL_REVIEW_PR_API_KEY" }, + }, + }, + }, + }); + + expect(result.ok).toBe(true); + }); + + it('accepts file refs with id "value" for singleValue mode providers', () => { + const result = validateConfigObjectRaw({ + secrets: { + providers: { + rawfile: { + source: "file", + path: "~/.openclaw/token.txt", + mode: "singleValue", + }, + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "file", provider: "rawfile", id: "value" }, + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }); + + expect(result.ok).toBe(true); + }); + + it("rejects invalid secret ref id", 
() => { + const result = validateConfigObjectRaw({ + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "bad id with spaces" }, + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }); + + expect(result.ok).toBe(false); + if (!result.ok) { + expect( + result.issues.some((issue) => issue.path.includes("models.providers.openai.apiKey")), + ).toBe(true); + } + }); + + it("rejects env refs that are not env var names", () => { + const result = validateConfigObjectRaw({ + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "/providers/openai/apiKey" }, + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }); + + expect(result.ok).toBe(false); + if (!result.ok) { + expect( + result.issues.some( + (issue) => + issue.path.includes("models.providers.openai.apiKey") && + issue.message.includes("Env secret reference id"), + ), + ).toBe(true); + } + }); + + it("rejects file refs that are not absolute JSON pointers", () => { + const result = validateConfigObjectRaw({ + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "file", provider: "default", id: "providers/openai/apiKey" }, + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }); + + expect(result.ok).toBe(false); + if (!result.ok) { + expect( + result.issues.some( + (issue) => + issue.path.includes("models.providers.openai.apiKey") && + issue.message.includes("absolute JSON pointer"), + ), + ).toBe(true); + } + }); +}); diff --git a/src/config/config.ts b/src/config/config.ts index a20d9495b00..df667d498b1 100644 --- a/src/config/config.ts +++ b/src/config/config.ts @@ -1,11 +1,14 @@ export { clearConfigCache, + clearRuntimeConfigSnapshot, createConfigIO, + getRuntimeConfigSnapshot, loadConfig, parseConfigJson5, readConfigFileSnapshot, readConfigFileSnapshotForWrite, resolveConfigSnapshotHash, + 
setRuntimeConfigSnapshot, writeConfigFile, } from "./io.js"; export { migrateLegacyConfig } from "./legacy-migrate.js"; diff --git a/src/config/includes.test.ts b/src/config/includes.test.ts index 38360642ee3..71ebb3e3870 100644 --- a/src/config/includes.test.ts +++ b/src/config/includes.test.ts @@ -5,6 +5,7 @@ import { describe, expect, it } from "vitest"; import { CircularIncludeError, ConfigIncludeError, + MAX_INCLUDE_FILE_BYTES, deepMerge, type IncludeResolver, resolveConfigIncludes, @@ -629,7 +630,7 @@ describe("security: path traversal protection (CWE-22)", () => { "{ logging: { redactSensitive: 'tools' } }\n", "utf-8", ); - await fs.symlink(realRoot, linkRoot); + await fs.symlink(realRoot, linkRoot, process.platform === "win32" ? "junction" : undefined); const result = resolveConfigIncludes( { $include: "./includes/extra.json5" }, @@ -640,5 +641,55 @@ describe("security: path traversal protection (CWE-22)", () => { await fs.rm(tempRoot, { recursive: true, force: true }); } }); + + it("rejects include files that are hardlinked aliases", async () => { + if (process.platform === "win32") { + return; + } + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-includes-hardlink-")); + try { + const configDir = path.join(tempRoot, "config"); + const outsideDir = path.join(tempRoot, "outside"); + await fs.mkdir(configDir, { recursive: true }); + await fs.mkdir(outsideDir, { recursive: true }); + const includePath = path.join(configDir, "extra.json5"); + const outsidePath = path.join(outsideDir, "secret.json5"); + await fs.writeFile(outsidePath, '{"logging":{"redactSensitive":"tools"}}\n', "utf-8"); + try { + await fs.link(outsidePath, includePath); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + expect(() => + resolveConfigIncludes( + { $include: "./extra.json5" }, + path.join(configDir, "openclaw.json"), + ), + ).toThrow(/security checks|hardlink/i); + } finally { + await fs.rm(tempRoot, { 
recursive: true, force: true }); + } + }); + + it("rejects oversized include files", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-includes-big-")); + try { + const configDir = path.join(tempRoot, "config"); + await fs.mkdir(configDir, { recursive: true }); + const includePath = path.join(configDir, "big.json5"); + const payload = "a".repeat(MAX_INCLUDE_FILE_BYTES + 1); + await fs.writeFile(includePath, `{"blob":"${payload}"}`, "utf-8"); + + expect(() => + resolveConfigIncludes({ $include: "./big.json5" }, path.join(configDir, "openclaw.json")), + ).toThrow(/security checks|max/i); + } finally { + await fs.rm(tempRoot, { recursive: true, force: true }); + } + }); }); }); diff --git a/src/config/includes.ts b/src/config/includes.ts index c9a14a36397..9486aabdf1f 100644 --- a/src/config/includes.ts +++ b/src/config/includes.ts @@ -13,12 +13,14 @@ import fs from "node:fs"; import path from "node:path"; import JSON5 from "json5"; +import { canUseBoundaryFileOpen, openBoundaryFileSync } from "../infra/boundary-file-read.js"; import { isPathInside } from "../security/scan-paths.js"; import { isPlainObject } from "../utils.js"; import { isBlockedObjectKey } from "./prototype-keys.js"; export const INCLUDE_KEY = "$include"; export const MAX_INCLUDE_DEPTH = 10; +export const MAX_INCLUDE_FILE_BYTES = 2 * 1024 * 1024; // ============================================================================ // Types @@ -26,9 +28,18 @@ export const MAX_INCLUDE_DEPTH = 10; export type IncludeResolver = { readFile: (path: string) => string; + readFileWithGuards?: (params: IncludeFileReadParams) => string; parseJson: (raw: string) => unknown; }; +export type IncludeFileReadParams = { + includePath: string; + resolvedPath: string; + rootRealDir: string; + ioFs?: typeof fs; + maxBytes?: number; +}; + // ============================================================================ // Errors // 
============================================================================ @@ -227,8 +238,18 @@ class IncludeProcessor { private readFile(includePath: string, resolvedPath: string): string { try { + if (this.resolver.readFileWithGuards) { + return this.resolver.readFileWithGuards({ + includePath, + resolvedPath, + rootRealDir: this.rootRealDir, + }); + } return this.resolver.readFile(resolvedPath); } catch (err) { + if (err instanceof ConfigIncludeError) { + throw err; + } throw new ConfigIncludeError( `Failed to read include file: ${includePath} (resolved: ${resolvedPath})`, includePath, @@ -265,12 +286,51 @@ function safeRealpath(target: string): string { } } +export function readConfigIncludeFileWithGuards(params: IncludeFileReadParams): string { + const ioFs = params.ioFs ?? fs; + const maxBytes = params.maxBytes ?? MAX_INCLUDE_FILE_BYTES; + if (!canUseBoundaryFileOpen(ioFs)) { + return ioFs.readFileSync(params.resolvedPath, "utf-8"); + } + + const opened = openBoundaryFileSync({ + absolutePath: params.resolvedPath, + rootPath: params.rootRealDir, + rootRealPath: params.rootRealDir, + boundaryLabel: "config directory", + skipLexicalRootCheck: true, + maxBytes, + ioFs, + }); + if (!opened.ok) { + if (opened.reason === "validation") { + throw new ConfigIncludeError( + `Include file failed security checks (regular file, max ${maxBytes} bytes, no hardlinks): ${params.includePath}`, + params.includePath, + ); + } + throw new ConfigIncludeError( + `Failed to read include file: ${params.includePath} (resolved: ${params.resolvedPath})`, + params.includePath, + opened.error instanceof Error ? 
opened.error : undefined, + ); + } + + try { + return ioFs.readFileSync(opened.fd, "utf-8"); + } finally { + ioFs.closeSync(opened.fd); + } +} + // ============================================================================ // Public API // ============================================================================ const defaultResolver: IncludeResolver = { readFile: (p) => fs.readFileSync(p, "utf-8"), + readFileWithGuards: ({ includePath, resolvedPath, rootRealDir }) => + readConfigIncludeFileWithGuards({ includePath, resolvedPath, rootRealDir }), parseJson: (raw) => JSON5.parse(raw), }; diff --git a/src/config/io.runtime-snapshot-write.test.ts b/src/config/io.runtime-snapshot-write.test.ts new file mode 100644 index 00000000000..0a37de08aaa --- /dev/null +++ b/src/config/io.runtime-snapshot-write.test.ts @@ -0,0 +1,64 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempHome } from "./home-env.test-harness.js"; +import { + clearConfigCache, + clearRuntimeConfigSnapshot, + loadConfig, + setRuntimeConfigSnapshot, + writeConfigFile, +} from "./io.js"; +import type { OpenClawConfig } from "./types.js"; + +describe("runtime config snapshot writes", () => { + it("preserves source secret refs when writeConfigFile receives runtime-resolved config", async () => { + await withTempHome("openclaw-config-runtime-write-", async (home) => { + const configPath = path.join(home, ".openclaw", "openclaw.json"); + const sourceConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + models: [], + }, + }, + }, + }; + const runtimeConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-runtime-resolved", + models: [], + }, + }, + }, + }; + + await fs.mkdir(path.dirname(configPath), { recursive: true }); + await 
fs.writeFile(configPath, `${JSON.stringify(sourceConfig, null, 2)}\n`, "utf8"); + + try { + setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); + expect(loadConfig().models?.providers?.openai?.apiKey).toBe("sk-runtime-resolved"); + + await writeConfigFile(loadConfig()); + + const persisted = JSON.parse(await fs.readFile(configPath, "utf8")) as { + models?: { providers?: { openai?: { apiKey?: unknown } } }; + }; + expect(persisted.models?.providers?.openai?.apiKey).toEqual({ + source: "env", + provider: "default", + id: "OPENAI_API_KEY", + }); + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); + }); +}); diff --git a/src/config/io.ts b/src/config/io.ts index c74992c4938..136ea5eae6c 100644 --- a/src/config/io.ts +++ b/src/config/io.ts @@ -34,7 +34,11 @@ import { resolveConfigEnvVars, } from "./env-substitution.js"; import { applyConfigEnvVars } from "./env-vars.js"; -import { ConfigIncludeError, resolveConfigIncludes } from "./includes.js"; +import { + ConfigIncludeError, + readConfigIncludeFileWithGuards, + resolveConfigIncludes, +} from "./includes.js"; import { findLegacyConfigIssues } from "./legacy.js"; import { applyMergePatch } from "./merge-patch.js"; import { normalizeExecSafeBinProfilesInConfig } from "./normalize-exec-safe-bin.js"; @@ -634,6 +638,13 @@ function resolveConfigIncludesForRead( ): unknown { return resolveConfigIncludes(parsed, configPath, { readFile: (candidate) => deps.fs.readFileSync(candidate, "utf-8"), + readFileWithGuards: ({ includePath, resolvedPath, rootRealDir }) => + readConfigIncludeFileWithGuards({ + includePath, + resolvedPath, + rootRealDir, + ioFs: deps.fs, + }), parseJson: (raw) => deps.json5.parse(raw), }); } @@ -1032,6 +1043,13 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) { try { const resolvedIncludes = resolveConfigIncludes(snapshot.parsed, configPath, { readFile: (candidate) => deps.fs.readFileSync(candidate, "utf-8"), + readFileWithGuards: ({ includePath, resolvedPath, 
rootRealDir }) => + readConfigIncludeFileWithGuards({ + includePath, + resolvedPath, + rootRealDir, + ioFs: deps.fs, + }), parseJson: (raw) => deps.json5.parse(raw), }); const collected = new Map(); @@ -1280,6 +1298,8 @@ let configCache: { expiresAt: number; config: OpenClawConfig; } | null = null; +let runtimeConfigSnapshot: OpenClawConfig | null = null; +let runtimeConfigSourceSnapshot: OpenClawConfig | null = null; function resolveConfigCacheMs(env: NodeJS.ProcessEnv): number { const raw = env.OPENCLAW_CONFIG_CACHE_MS?.trim(); @@ -1307,7 +1327,29 @@ export function clearConfigCache(): void { configCache = null; } +export function setRuntimeConfigSnapshot( + config: OpenClawConfig, + sourceConfig?: OpenClawConfig, +): void { + runtimeConfigSnapshot = config; + runtimeConfigSourceSnapshot = sourceConfig ?? null; + clearConfigCache(); +} + +export function clearRuntimeConfigSnapshot(): void { + runtimeConfigSnapshot = null; + runtimeConfigSourceSnapshot = null; + clearConfigCache(); +} + +export function getRuntimeConfigSnapshot(): OpenClawConfig | null { + return runtimeConfigSnapshot; +} + export function loadConfig(): OpenClawConfig { + if (runtimeConfigSnapshot) { + return runtimeConfigSnapshot; + } const io = createConfigIO(); const configPath = io.configPath; const now = Date.now(); @@ -1344,9 +1386,14 @@ export async function writeConfigFile( options: ConfigWriteOptions = {}, ): Promise { const io = createConfigIO(); + let nextCfg = cfg; + if (runtimeConfigSnapshot && runtimeConfigSourceSnapshot) { + const runtimePatch = createMergePatch(runtimeConfigSnapshot, cfg); + nextCfg = coerceConfig(applyMergePatch(runtimeConfigSourceSnapshot, runtimePatch)); + } const sameConfigPath = options.expectedConfigPath === undefined || options.expectedConfigPath === io.configPath; - await io.writeConfigFile(cfg, { + await io.writeConfigFile(nextCfg, { envSnapshotForRestore: sameConfigPath ? 
options.envSnapshotForRestore : undefined, unsetPaths: options.unsetPaths, }); diff --git a/src/config/plugin-auto-enable.test.ts b/src/config/plugin-auto-enable.test.ts index 1c289b17fde..ebe2a859f4b 100644 --- a/src/config/plugin-auto-enable.test.ts +++ b/src/config/plugin-auto-enable.test.ts @@ -141,6 +141,34 @@ describe("applyPluginAutoEnable", () => { expect(result.config.plugins?.entries?.["google-gemini-cli-auth"]?.enabled).toBe(true); }); + it("auto-enables acpx plugin when ACP is configured", () => { + const result = applyPluginAutoEnable({ + config: { + acp: { + enabled: true, + }, + }, + env: {}, + }); + + expect(result.config.plugins?.entries?.acpx?.enabled).toBe(true); + expect(result.changes.join("\n")).toContain("ACP runtime configured, enabled automatically."); + }); + + it("does not auto-enable acpx when a different ACP backend is configured", () => { + const result = applyPluginAutoEnable({ + config: { + acp: { + enabled: true, + backend: "custom-runtime", + }, + }, + env: {}, + }); + + expect(result.config.plugins?.entries?.acpx?.enabled).toBeUndefined(); + }); + it("skips when plugins are globally disabled", () => { const result = applyPluginAutoEnable({ config: { diff --git a/src/config/plugin-auto-enable.ts b/src/config/plugin-auto-enable.ts index 554e96843bc..eccb6f980ed 100644 --- a/src/config/plugin-auto-enable.ts +++ b/src/config/plugin-auto-enable.ts @@ -354,6 +354,16 @@ function resolveConfiguredPlugins( }); } } + const backendRaw = + typeof cfg.acp?.backend === "string" ? 
cfg.acp.backend.trim().toLowerCase() : ""; + const acpConfigured = + cfg.acp?.enabled === true || cfg.acp?.dispatch?.enabled === true || backendRaw === "acpx"; + if (acpConfigured && (!backendRaw || backendRaw === "acpx")) { + changes.push({ + pluginId: "acpx", + reason: "ACP runtime configured", + }); + } return changes; } diff --git a/src/config/redact-snapshot.test.ts b/src/config/redact-snapshot.test.ts index ee3dc62b421..8d353c4e2d6 100644 --- a/src/config/redact-snapshot.test.ts +++ b/src/config/redact-snapshot.test.ts @@ -95,7 +95,6 @@ describe("redactConfigSnapshot", () => { }, shortSecret: { token: "short" }, }); - const result = redactConfigSnapshot(snapshot); const cfg = result.config as typeof snapshot.config; @@ -112,6 +111,46 @@ describe("redactConfigSnapshot", () => { expect(cfg.shortSecret.token).toBe(REDACTED_SENTINEL); }); + it("redacts googlechat serviceAccount object payloads", () => { + const snapshot = makeSnapshot({ + channels: { + googlechat: { + serviceAccount: { + type: "service_account", + client_email: "bot@example.iam.gserviceaccount.com", + private_key: "-----BEGIN PRIVATE KEY-----secret-----END PRIVATE KEY-----", + }, + }, + }, + }); + + const result = redactConfigSnapshot(snapshot); + const channels = result.config.channels as Record>; + expect(channels.googlechat.serviceAccount).toBe(REDACTED_SENTINEL); + }); + + it("redacts object-valued apiKey refs in model providers", () => { + const snapshot = makeSnapshot({ + models: { + providers: { + openai: { + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + baseUrl: "https://api.openai.com", + }, + }, + }, + }); + + const result = redactConfigSnapshot(snapshot); + const models = result.config.models as Record>>; + expect(models.providers.openai.apiKey).toEqual({ + source: REDACTED_SENTINEL, + provider: REDACTED_SENTINEL, + id: REDACTED_SENTINEL, + }); + expect(models.providers.openai.baseUrl).toBe("https://api.openai.com"); + }); + it("preserves non-sensitive 
fields", () => { const snapshot = makeSnapshot({ ui: { seamColor: "#0088cc" }, diff --git a/src/config/redact-snapshot.ts b/src/config/redact-snapshot.ts index 91b2e76f990..b9ebeac84bf 100644 --- a/src/config/redact-snapshot.ts +++ b/src/config/redact-snapshot.ts @@ -17,6 +17,31 @@ function isEnvVarPlaceholder(value: string): boolean { return ENV_VAR_PLACEHOLDER_PATTERN.test(value.trim()); } +function isWholeObjectSensitivePath(path: string): boolean { + const lowered = path.toLowerCase(); + return lowered.endsWith("serviceaccount") || lowered.endsWith("serviceaccountref"); +} + +function collectSensitiveStrings(value: unknown, values: string[]): void { + if (typeof value === "string") { + if (!isEnvVarPlaceholder(value)) { + values.push(value); + } + return; + } + if (Array.isArray(value)) { + for (const item of value) { + collectSensitiveStrings(item, values); + } + return; + } + if (value && typeof value === "object") { + for (const item of Object.values(value as Record)) { + collectSensitiveStrings(item, values); + } + } +} + function isExplicitlyNonSensitivePath(hints: ConfigUiHints | undefined, paths: string[]): boolean { if (!hints) { return false; @@ -149,7 +174,19 @@ function redactObjectWithLookup( result[key] = REDACTED_SENTINEL; values.push(value); } else if (typeof value === "object" && value !== null) { - result[key] = redactObjectWithLookup(value, lookup, candidate, values, hints); + if (hints[candidate]?.sensitive === true && !Array.isArray(value)) { + collectSensitiveStrings(value, values); + result[key] = REDACTED_SENTINEL; + } else { + result[key] = redactObjectWithLookup(value, lookup, candidate, values, hints); + } + } else if ( + hints[candidate]?.sensitive === true && + value !== undefined && + value !== null + ) { + // Keep primitives at explicitly-sensitive paths fully redacted. 
+ result[key] = REDACTED_SENTINEL; } break; } @@ -221,6 +258,16 @@ function redactObjectGuessing( ) { result[key] = REDACTED_SENTINEL; values.push(value); + } else if ( + !isExplicitlyNonSensitivePath(hints, [dotPath, wildcardPath]) && + isSensitivePath(dotPath) && + isWholeObjectSensitivePath(dotPath) && + value && + typeof value === "object" && + !Array.isArray(value) + ) { + collectSensitiveStrings(value, values); + result[key] = REDACTED_SENTINEL; } else if (typeof value === "object" && value !== null) { result[key] = redactObjectGuessing(value, dotPath, values, hints); } else { diff --git a/src/config/schema.help.ts b/src/config/schema.help.ts index e5fcb3aa6b7..e07e3ea6bd1 100644 --- a/src/config/schema.help.ts +++ b/src/config/schema.help.ts @@ -153,6 +153,28 @@ export const FIELD_HELP: Record = { "Use this legacy ElevenLabs API key for Talk mode only during migration, and keep secrets in env-backed storage. Prefer talk.providers.elevenlabs.apiKey (fallback: ELEVENLABS_API_KEY).", "talk.interruptOnSpeech": "If true (default), stop assistant speech when the user starts speaking in Talk mode. Keep enabled for conversational turn-taking.", + acp: "ACP runtime controls for enabling dispatch, selecting backends, constraining allowed agent targets, and tuning streamed turn projection behavior.", + "acp.enabled": + "Global ACP feature gate. Keep disabled unless ACP runtime + policy are configured.", + "acp.dispatch.enabled": + "Independent dispatch gate for ACP session turns. Disable to keep ACP commands available while blocking ACP turn execution.", + "acp.backend": + "Default ACP runtime backend id (for example: acpx). Must match a registered ACP runtime plugin backend.", + "acp.defaultAgent": + "Fallback ACP target agent id used when ACP spawns do not specify an explicit target.", + "acp.allowedAgents": + "Allowlist of ACP target agent ids permitted for ACP runtime sessions. 
Empty means no additional allowlist restriction.", + "acp.maxConcurrentSessions": + "Maximum concurrently active ACP sessions across this gateway process.", + "acp.stream": "ACP streaming projection controls for chunk sizing and coalescer flush timing.", + "acp.stream.coalesceIdleMs": + "Coalescer idle flush window in milliseconds for ACP streamed text before block replies are emitted.", + "acp.stream.maxChunkChars": + "Maximum chunk size for ACP streamed block projection before splitting into multiple block replies.", + "acp.runtime.ttlMinutes": + "Idle runtime TTL in minutes for ACP session workers before eligible cleanup.", + "acp.runtime.installCommand": + "Optional operator install/setup command shown by `/acp install` and `/acp doctor` when ACP backend wiring is missing.", "agents.list.*.skills": "Optional allowlist of skills for this agent (omit = all skills; empty = no skills).", "agents.list[].skills": @@ -330,7 +352,7 @@ export const FIELD_HELP: Record = { "gateway.nodes.allowCommands": "Extra node.invoke commands to allow beyond the gateway defaults (array of command strings). Enabling dangerous commands here is a security-sensitive override and is flagged by `openclaw security audit`.", "gateway.nodes.denyCommands": - "Commands to block even if present in node claims or default allowlist.", + "Node command names to block even if present in node claims or default allowlist (exact command-name matching only, e.g. `system.run`; does not inspect shell text inside that command).", nodeHost: "Node host controls for features exposed from this gateway node to other nodes or clients. Keep defaults unless you intentionally proxy local capabilities across your node network.", "nodeHost.browserProxy": @@ -608,7 +630,7 @@ export const FIELD_HELP: Record = { models: "Model catalog root for provider definitions, merge/replace behavior, and optional Bedrock discovery integration. 
Keep provider definitions explicit and validated before relying on production failover paths.", "models.mode": - 'Controls provider catalog behavior: "merge" keeps built-ins and overlays your custom providers, while "replace" uses only your configured providers. Keep "merge" unless you intentionally want a strict custom list.', + 'Controls provider catalog behavior: "merge" keeps built-ins and overlays your custom providers, while "replace" uses only your configured providers. In "merge", matching provider IDs preserve non-empty agent models.json apiKey/baseUrl values and fall back to config when agent values are empty or missing.', "models.providers": "Provider map keyed by provider ID containing connection/auth settings and concrete model definitions. Use stable provider keys so references from agents and tooling remain portable across environments.", "models.providers.*.baseUrl": @@ -908,6 +930,10 @@ export const FIELD_HELP: Record = { "User-prompt template used for the pre-compaction memory flush turn when generating memory candidates. Use this only when you need custom extraction instructions beyond the default memory flush behavior.", "agents.defaults.compaction.memoryFlush.systemPrompt": "System-prompt override for the pre-compaction memory flush turn to control extraction style and safety constraints. 
Use carefully so custom instructions do not reduce memory quality or leak sensitive context.", + "agents.defaults.embeddedPi": + "Embedded Pi runner hardening controls for how workspace-local Pi settings are trusted and applied in OpenClaw sessions.", + "agents.defaults.embeddedPi.projectSettingsPolicy": + 'How embedded Pi handles workspace-local `.pi/config/settings.json`: "sanitize" (default) strips shellPath/shellCommandPrefix, "ignore" disables project settings entirely, and "trusted" applies project settings as-is.', "agents.defaults.humanDelay.mode": 'Delay style for block replies ("off", "natural", "custom").', "agents.defaults.humanDelay.minMs": "Minimum delay in ms for custom humanDelay (default: 800).", "agents.defaults.humanDelay.maxMs": "Maximum delay in ms for custom humanDelay (default: 2500).", @@ -973,6 +999,8 @@ export const FIELD_HELP: Record = { "Controls interval for repeated typing indicators while replies are being prepared in typing-capable channels. Increase to reduce chatty updates or decrease for more active typing feedback.", "session.typingMode": 'Controls typing behavior timing: "never", "instant", "thinking", or "message" based emission points. Keep conservative modes in high-volume channels to avoid unnecessary typing noise.', + "session.parentForkMaxTokens": + "Maximum parent-session token count allowed for thread/session inheritance forking. If the parent exceeds this, OpenClaw starts a fresh thread session instead of forking; set 0 to disable this protection.", "session.mainKey": 'Overrides the canonical main session key used for continuity when dmScope or routing logic points to "main". Use a stable value only if you intentionally need custom session anchoring.', "session.sendPolicy": @@ -1236,6 +1264,10 @@ export const FIELD_HELP: Record = { "Shows degraded/error heartbeat alerts when true so operator channels surface problems promptly. 
Keep enabled in production so broken channel states are visible.", "channels.defaults.heartbeat.useIndicator": "Enables concise indicator-style heartbeat rendering instead of verbose status text where supported. Use indicator mode for dense dashboards with many active channels.", + "agents.defaults.heartbeat.directPolicy": + 'Controls whether heartbeat delivery may target direct/DM chats: "allow" (default) permits DM delivery and "block" suppresses direct-target sends.', + "agents.list.*.heartbeat.directPolicy": + 'Per-agent override for heartbeat direct/DM delivery policy; use "block" for agents that should only send heartbeat alerts to non-DM destinations.', "channels.telegram.configWrites": "Allow Telegram to write config in response to channel events/commands (default: true).", "channels.telegram.botToken": @@ -1358,6 +1390,8 @@ export const FIELD_HELP: Record = { "Auto-unfocus TTL in hours for Discord thread-bound sessions (/focus and spawned thread sessions). Set 0 to disable (default: 24). Overrides session.threadBindings.ttlHours when set.", "channels.discord.threadBindings.spawnSubagentSessions": "Allow subagent spawns with thread=true to auto-create and bind Discord threads (default: false; opt-in). Set true to enable thread-bound subagent spawns for this account/channel.", + "channels.discord.threadBindings.spawnAcpSessions": + "Allow /acp spawn to auto-create and bind Discord threads for ACP sessions (default: false; opt-in). Set true to enable thread-bound ACP spawns for this account/channel.", "channels.discord.ui.components.accentColor": "Accent color for Discord component containers (hex). 
Set per account via channels.discord.accounts..ui.components.accentColor.", "channels.discord.voice.enabled": diff --git a/src/config/schema.hints.test.ts b/src/config/schema.hints.test.ts index dec154d0485..41ac8b1aa5d 100644 --- a/src/config/schema.hints.test.ts +++ b/src/config/schema.hints.test.ts @@ -133,6 +133,7 @@ describe("mapSensitivePaths", () => { expect(hints["agents.defaults.memorySearch.remote.apiKey"]?.sensitive).toBe(true); expect(hints["agents.list[].memorySearch.remote.apiKey"]?.sensitive).toBe(true); expect(hints["channels.discord.accounts.*.token"]?.sensitive).toBe(true); + expect(hints["channels.googlechat.serviceAccount"]?.sensitive).toBe(true); expect(hints["gateway.auth.token"]?.sensitive).toBe(true); expect(hints["skills.entries.*.apiKey"]?.sensitive).toBe(true); }); diff --git a/src/config/schema.hints.ts b/src/config/schema.hints.ts index 06fa93efea5..05b31d695b3 100644 --- a/src/config/schema.hints.ts +++ b/src/config/schema.hints.ts @@ -109,7 +109,13 @@ const NORMALIZED_SENSITIVE_KEY_WHITELIST_SUFFIXES = SENSITIVE_KEY_WHITELIST_SUFF suffix.toLowerCase(), ); -const SENSITIVE_PATTERNS = [/token$/i, /password/i, /secret/i, /api.?key/i]; +const SENSITIVE_PATTERNS = [ + /token$/i, + /password/i, + /secret/i, + /api.?key/i, + /serviceaccount(?:ref)?$/i, +]; function isWhitelistedSensitivePath(path: string): boolean { const lowerPath = path.toLowerCase(); diff --git a/src/config/schema.labels.ts b/src/config/schema.labels.ts index 7a12e9293ba..5372bb9cccc 100644 --- a/src/config/schema.labels.ts +++ b/src/config/schema.labels.ts @@ -359,6 +359,18 @@ export const FIELD_LABELS: Record = { "auth.profiles": "Auth Profiles", "auth.order": "Auth Profile Order", "auth.cooldowns": "Auth Cooldowns", + acp: "ACP", + "acp.enabled": "ACP Enabled", + "acp.dispatch.enabled": "ACP Dispatch Enabled", + "acp.backend": "ACP Backend", + "acp.defaultAgent": "ACP Default Agent", + "acp.allowedAgents": "ACP Allowed Agents", + "acp.maxConcurrentSessions": "ACP Max 
Concurrent Sessions", + "acp.stream": "ACP Stream", + "acp.stream.coalesceIdleMs": "ACP Stream Coalesce Idle (ms)", + "acp.stream.maxChunkChars": "ACP Stream Max Chunk Chars", + "acp.runtime.ttlMinutes": "ACP Runtime TTL (minutes)", + "acp.runtime.installCommand": "ACP Runtime Install Command", models: "Models", "models.mode": "Model Catalog Mode", "models.providers": "Model Providers", @@ -402,6 +414,10 @@ export const FIELD_LABELS: Record = { "Compaction Memory Flush Soft Threshold", "agents.defaults.compaction.memoryFlush.prompt": "Compaction Memory Flush Prompt", "agents.defaults.compaction.memoryFlush.systemPrompt": "Compaction Memory Flush System Prompt", + "agents.defaults.embeddedPi": "Embedded Pi", + "agents.defaults.embeddedPi.projectSettingsPolicy": "Embedded Pi Project Settings Policy", + "agents.defaults.heartbeat.directPolicy": "Heartbeat Direct Policy", + "agents.list.*.heartbeat.directPolicy": "Heartbeat Direct Policy", "agents.defaults.heartbeat.suppressToolErrorWarnings": "Heartbeat Suppress Tool Error Warnings", "agents.defaults.sandbox.browser.network": "Sandbox Browser Network", "agents.defaults.sandbox.browser.cdpSourceRange": "Sandbox Browser CDP Source Port Range", @@ -455,6 +471,7 @@ export const FIELD_LABELS: Record = { "session.store": "Session Store Path", "session.typingIntervalSeconds": "Session Typing Interval (seconds)", "session.typingMode": "Session Typing Mode", + "session.parentForkMaxTokens": "Session Parent Fork Max Tokens", "session.mainKey": "Session Main Key", "session.sendPolicy": "Session Send Policy", "session.sendPolicy.default": "Session Send Policy Default Action", @@ -672,6 +689,7 @@ export const FIELD_LABELS: Record = { "channels.discord.threadBindings.enabled": "Discord Thread Binding Enabled", "channels.discord.threadBindings.ttlHours": "Discord Thread Binding TTL (hours)", "channels.discord.threadBindings.spawnSubagentSessions": "Discord Thread-Bound Subagent Spawn", + 
"channels.discord.threadBindings.spawnAcpSessions": "Discord Thread-Bound ACP Spawn", "channels.discord.ui.components.accentColor": "Discord Component Accent Color", "channels.discord.intents.presence": "Discord Presence Intent", "channels.discord.intents.guildMembers": "Discord Guild Members Intent", diff --git a/src/config/schema.test.ts b/src/config/schema.test.ts index 98a6065cb31..804286219ac 100644 --- a/src/config/schema.test.ts +++ b/src/config/schema.test.ts @@ -7,9 +7,11 @@ describe("config schema", () => { const schema = res.schema as { properties?: Record }; expect(schema.properties?.gateway).toBeTruthy(); expect(schema.properties?.agents).toBeTruthy(); + expect(schema.properties?.acp).toBeTruthy(); expect(schema.properties?.$schema).toBeUndefined(); expect(res.uiHints.gateway?.label).toBe("Gateway"); expect(res.uiHints["gateway.auth.token"]?.sensitive).toBe(true); + expect(res.uiHints["channels.discord.threadBindings.spawnAcpSessions"]?.label).toBeTruthy(); expect(res.version).toBeTruthy(); expect(res.generatedAt).toBeTruthy(); }); diff --git a/src/config/sessions/types.ts b/src/config/sessions/types.ts index e0077267742..c62ab8ff966 100644 --- a/src/config/sessions/types.ts +++ b/src/config/sessions/types.ts @@ -22,6 +22,49 @@ export type SessionOrigin = { threadId?: string | number; }; +export type SessionAcpIdentitySource = "ensure" | "status" | "event"; + +export type SessionAcpIdentityState = "pending" | "resolved"; + +export type SessionAcpIdentity = { + state: SessionAcpIdentityState; + acpxRecordId?: string; + acpxSessionId?: string; + agentSessionId?: string; + source: SessionAcpIdentitySource; + lastUpdatedAt: number; +}; + +export type SessionAcpMeta = { + backend: string; + agent: string; + runtimeSessionName: string; + identity?: SessionAcpIdentity; + mode: "persistent" | "oneshot"; + runtimeOptions?: AcpSessionRuntimeOptions; + cwd?: string; + state: "idle" | "running" | "error"; + lastActivityAt: number; + lastError?: string; +}; + 
+export type AcpSessionRuntimeOptions = { + /** + * ACP runtime mode set via session/set_mode (for example: "plan", "normal", "auto"). + */ + runtimeMode?: string; + /** ACP runtime config option: model id. */ + model?: string; + /** Working directory override for ACP session turns. */ + cwd?: string; + /** ACP runtime config option: permission profile id. */ + permissionProfile?: string; + /** ACP runtime config option: per-turn timeout in seconds. */ + timeoutSeconds?: number; + /** Backend-specific option bag mapped through session/set_config_option. */ + backendExtras?: Record; +}; + export type SessionEntry = { /** * Last delivered heartbeat payload (used to suppress duplicate heartbeat notifications). @@ -41,6 +84,14 @@ export type SessionEntry = { spawnDepth?: number; systemSent?: boolean; abortedLastRun?: boolean; + /** + * Session-level stop cutoff captured when /stop is received. + * Messages at/before this boundary are skipped to avoid replaying + * queued pre-stop backlog. + */ + abortCutoffMessageSid?: string; + /** Epoch ms cutoff paired with abortCutoffMessageSid when available. 
*/ + abortCutoffTimestamp?: number; chatType?: SessionChatType; thinkingLevel?: string; verboseLevel?: string; @@ -112,6 +163,7 @@ export type SessionEntry = { lastThreadId?: string | number; skillsSnapshot?: SessionSkillSnapshot; systemPromptReport?: SessionSystemPromptReport; + acp?: SessionAcpMeta; }; function normalizeRuntimeField(value: string | undefined): string | undefined { diff --git a/src/config/telegram-webhook-port.test.ts b/src/config/telegram-webhook-port.test.ts index c7dd79237fd..80fdf3a5ce8 100644 --- a/src/config/telegram-webhook-port.test.ts +++ b/src/config/telegram-webhook-port.test.ts @@ -15,7 +15,7 @@ describe("Telegram webhookPort config", () => { expect(res.ok).toBe(true); }); - it("rejects non-positive webhookPort", () => { + it("accepts webhookPort set to 0 for ephemeral port binding", () => { const res = validateConfigObject({ channels: { telegram: { @@ -25,6 +25,19 @@ describe("Telegram webhookPort config", () => { }, }, }); + expect(res.ok).toBe(true); + }); + + it("rejects negative webhookPort", () => { + const res = validateConfigObject({ + channels: { + telegram: { + webhookUrl: "https://example.com/telegram-webhook", + webhookSecret: "secret", + webhookPort: -1, + }, + }, + }); expect(res.ok).toBe(false); if (!res.ok) { expect(res.issues.some((issue) => issue.path === "channels.telegram.webhookPort")).toBe(true); diff --git a/src/config/types.acp.ts b/src/config/types.acp.ts new file mode 100644 index 00000000000..f69971ced93 --- /dev/null +++ b/src/config/types.acp.ts @@ -0,0 +1,31 @@ +export type AcpDispatchConfig = { + /** Master switch for ACP turn dispatch in the reply pipeline. */ + enabled?: boolean; +}; + +export type AcpStreamConfig = { + /** Coalescer idle flush window in milliseconds for ACP streamed text. */ + coalesceIdleMs?: number; + /** Maximum text size per streamed chunk. */ + maxChunkChars?: number; +}; + +export type AcpRuntimeConfig = { + /** Idle runtime TTL in minutes for ACP session workers. 
*/ + ttlMinutes?: number; + /** Optional operator install/setup command shown by `/acp install` and `/acp doctor`. */ + installCommand?: string; +}; + +export type AcpConfig = { + /** Global ACP runtime gate. */ + enabled?: boolean; + dispatch?: AcpDispatchConfig; + /** Backend id registered by ACP runtime plugin (for example: acpx). */ + backend?: string; + defaultAgent?: string; + allowedAgents?: string[]; + maxConcurrentSessions?: number; + stream?: AcpStreamConfig; + runtime?: AcpRuntimeConfig; +}; diff --git a/src/config/types.agent-defaults.ts b/src/config/types.agent-defaults.ts index e8eac685086..38cbea44588 100644 --- a/src/config/types.agent-defaults.ts +++ b/src/config/types.agent-defaults.ts @@ -158,6 +158,16 @@ export type AgentDefaultsConfig = { contextPruning?: AgentContextPruningConfig; /** Compaction tuning and pre-compaction memory flush behavior. */ compaction?: AgentCompactionConfig; + /** Embedded Pi runner hardening and compatibility controls. */ + embeddedPi?: { + /** + * How embedded Pi should trust workspace-local `.pi/config/settings.json`. + * - sanitize (default): apply project settings except shellPath/shellCommandPrefix + * - ignore: ignore project settings entirely + * - trusted: trust project settings as-is + */ + projectSettingsPolicy?: "trusted" | "sanitize" | "ignore"; + }; /** Vector memory search configuration (per-agent overrides supported). */ memorySearch?: MemorySearchConfig; /** Default thinking level when no /think directive is present. */ @@ -213,6 +223,8 @@ export type AgentDefaultsConfig = { session?: string; /** Delivery target ("last", "none", or a channel id). */ target?: "last" | "none" | ChannelId; + /** Direct/DM delivery policy. Default: "allow". */ + directPolicy?: "allow" | "block"; /** Optional delivery override (E.164 for WhatsApp, chat id for Telegram). Supports :topic:NNN suffix for Telegram topics. */ to?: string; /** Optional account id for multi-account channels. 
*/ diff --git a/src/config/types.base.ts b/src/config/types.base.ts index cb1b926b53f..676767fc901 100644 --- a/src/config/types.base.ts +++ b/src/config/types.base.ts @@ -112,6 +112,12 @@ export type SessionConfig = { store?: string; typingIntervalSeconds?: number; typingMode?: TypingMode; + /** + * Max parent transcript token count allowed for thread/session forking. + * If parent totalTokens is above this value, OpenClaw skips parent fork and + * starts a fresh thread session instead. Set to 0 to disable this guard. + */ + parentForkMaxTokens?: number; mainKey?: string; sendPolicy?: SessionSendPolicyConfig; agentToAgent?: { diff --git a/src/config/types.discord.ts b/src/config/types.discord.ts index 1b43ddeb48b..b5b414153b5 100644 --- a/src/config/types.discord.ts +++ b/src/config/types.discord.ts @@ -163,6 +163,11 @@ export type DiscordThreadBindingsConfig = { * threads for subagent sessions. Default: false (opt-in). */ spawnSubagentSessions?: boolean; + /** + * Allow `/acp spawn` to auto-create + bind Discord threads for ACP + * sessions. Default: false (opt-in). + */ + spawnAcpSessions?: boolean; }; export type DiscordSlashCommandConfig = { diff --git a/src/config/types.googlechat.ts b/src/config/types.googlechat.ts index 070bf379b3b..091c4f0f271 100644 --- a/src/config/types.googlechat.ts +++ b/src/config/types.googlechat.ts @@ -5,6 +5,7 @@ import type { ReplyToMode, } from "./types.base.js"; import type { DmConfig } from "./types.messages.js"; +import type { SecretRef } from "./types.secrets.js"; export type GoogleChatDmConfig = { /** If false, ignore all incoming Google Chat DMs. Default: true. */ @@ -63,8 +64,10 @@ export type GoogleChatAccountConfig = { defaultTo?: string; /** Per-space configuration keyed by space id or name. */ groups?: Record; - /** Service account JSON (inline string or object). */ - serviceAccount?: string | Record; + /** Service account JSON (inline string, object, or secret reference). 
*/ + serviceAccount?: string | Record | SecretRef; + /** Explicit secret reference for service account JSON. */ + serviceAccountRef?: SecretRef; /** Service account JSON file path. */ serviceAccountFile?: string; /** Webhook audience type (app-url or project-number). */ diff --git a/src/config/types.models.ts b/src/config/types.models.ts index ebc81f54bdd..252e635e856 100644 --- a/src/config/types.models.ts +++ b/src/config/types.models.ts @@ -1,11 +1,17 @@ -export type ModelApi = - | "openai-completions" - | "openai-responses" - | "anthropic-messages" - | "google-generative-ai" - | "github-copilot" - | "bedrock-converse-stream" - | "ollama"; +import type { SecretInput } from "./types.secrets.js"; + +export const MODEL_APIS = [ + "openai-completions", + "openai-responses", + "openai-codex-responses", + "anthropic-messages", + "google-generative-ai", + "github-copilot", + "bedrock-converse-stream", + "ollama", +] as const; + +export type ModelApi = (typeof MODEL_APIS)[number]; export type ModelCompatConfig = { supportsStore?: boolean; @@ -43,7 +49,7 @@ export type ModelDefinitionConfig = { export type ModelProviderConfig = { baseUrl: string; - apiKey?: string; + apiKey?: SecretInput; auth?: ModelProviderAuthMode; api?: ModelApi; headers?: Record; diff --git a/src/config/types.openclaw.ts b/src/config/types.openclaw.ts index 5b6b2240235..f3374083de8 100644 --- a/src/config/types.openclaw.ts +++ b/src/config/types.openclaw.ts @@ -1,3 +1,4 @@ +import type { AcpConfig } from "./types.acp.js"; import type { AgentBinding, AgentsConfig } from "./types.agents.js"; import type { ApprovalsConfig } from "./types.approvals.js"; import type { AuthConfig } from "./types.auth.js"; @@ -22,6 +23,7 @@ import type { import type { ModelsConfig } from "./types.models.js"; import type { NodeHostConfig } from "./types.node-host.js"; import type { PluginsConfig } from "./types.plugins.js"; +import type { SecretsConfig } from "./types.secrets.js"; import type { SkillsConfig } from 
"./types.skills.js"; import type { ToolsConfig } from "./types.tools.js"; @@ -33,6 +35,7 @@ export type OpenClawConfig = { lastTouchedAt?: string; }; auth?: AuthConfig; + acp?: AcpConfig; env?: { /** Opt-in: import missing secrets from a login shell environment (exec `$SHELL -l -c 'env -0'`). */ shellEnv?: { @@ -86,6 +89,7 @@ export type OpenClawConfig = { avatar?: string; }; }; + secrets?: SecretsConfig; skills?: SkillsConfig; plugins?: PluginsConfig; models?: ModelsConfig; diff --git a/src/config/types.secrets.ts b/src/config/types.secrets.ts new file mode 100644 index 00000000000..5f009f79e5a --- /dev/null +++ b/src/config/types.secrets.ts @@ -0,0 +1,151 @@ +export type SecretRefSource = "env" | "file" | "exec"; + +/** + * Stable identifier for a secret in a configured source. + * Examples: + * - env source: provider "default", id "OPENAI_API_KEY" + * - file source: provider "mounted-json", id "/providers/openai/apiKey" + * - exec source: provider "vault", id "openai/api-key" + */ +export type SecretRef = { + source: SecretRefSource; + provider: string; + id: string; +}; + +export type SecretInput = string | SecretRef; +export const DEFAULT_SECRET_PROVIDER_ALIAS = "default"; +const ENV_SECRET_TEMPLATE_RE = /^\$\{([A-Z][A-Z0-9_]{0,127})\}$/; + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null && !Array.isArray(value); +} + +export function isSecretRef(value: unknown): value is SecretRef { + if (!isRecord(value)) { + return false; + } + if (Object.keys(value).length !== 3) { + return false; + } + return ( + (value.source === "env" || value.source === "file" || value.source === "exec") && + typeof value.provider === "string" && + value.provider.trim().length > 0 && + typeof value.id === "string" && + value.id.trim().length > 0 + ); +} + +function isLegacySecretRefWithoutProvider( + value: unknown, +): value is { source: SecretRefSource; id: string } { + if (!isRecord(value)) { + return false; + } + return ( + 
(value.source === "env" || value.source === "file" || value.source === "exec") && + typeof value.id === "string" && + value.id.trim().length > 0 && + value.provider === undefined + ); +} + +export function parseEnvTemplateSecretRef( + value: unknown, + provider = DEFAULT_SECRET_PROVIDER_ALIAS, +): SecretRef | null { + if (typeof value !== "string") { + return null; + } + const match = ENV_SECRET_TEMPLATE_RE.exec(value.trim()); + if (!match) { + return null; + } + return { + source: "env", + provider: provider.trim() || DEFAULT_SECRET_PROVIDER_ALIAS, + id: match[1], + }; +} + +export function coerceSecretRef( + value: unknown, + defaults?: { + env?: string; + file?: string; + exec?: string; + }, +): SecretRef | null { + if (isSecretRef(value)) { + return value; + } + if (isLegacySecretRefWithoutProvider(value)) { + const provider = + value.source === "env" + ? (defaults?.env ?? DEFAULT_SECRET_PROVIDER_ALIAS) + : value.source === "file" + ? (defaults?.file ?? DEFAULT_SECRET_PROVIDER_ALIAS) + : (defaults?.exec ?? DEFAULT_SECRET_PROVIDER_ALIAS); + return { + source: value.source, + provider, + id: value.id, + }; + } + const envTemplate = parseEnvTemplateSecretRef(value, defaults?.env); + if (envTemplate) { + return envTemplate; + } + return null; +} + +export type EnvSecretProviderConfig = { + source: "env"; + /** Optional env var allowlist (exact names). 
*/ + allowlist?: string[]; +}; + +export type FileSecretProviderMode = "singleValue" | "json"; + +export type FileSecretProviderConfig = { + source: "file"; + path: string; + mode?: FileSecretProviderMode; + timeoutMs?: number; + maxBytes?: number; +}; + +export type ExecSecretProviderConfig = { + source: "exec"; + command: string; + args?: string[]; + timeoutMs?: number; + noOutputTimeoutMs?: number; + maxOutputBytes?: number; + jsonOnly?: boolean; + env?: Record; + passEnv?: string[]; + trustedDirs?: string[]; + allowInsecurePath?: boolean; + allowSymlinkCommand?: boolean; +}; + +export type SecretProviderConfig = + | EnvSecretProviderConfig + | FileSecretProviderConfig + | ExecSecretProviderConfig; + +export type SecretsConfig = { + providers?: Record; + defaults?: { + env?: string; + file?: string; + exec?: string; + }; + resolution?: { + maxProviderConcurrency?: number; + maxRefsPerProvider?: number; + maxBatchBytes?: number; + }; +}; diff --git a/src/config/types.skills.ts b/src/config/types.skills.ts index 0b14893b8be..c09523ba459 100644 --- a/src/config/types.skills.ts +++ b/src/config/types.skills.ts @@ -1,6 +1,8 @@ +import type { SecretInput } from "./types.secrets.js"; + export type SkillConfig = { enabled?: boolean; - apiKey?: string; + apiKey?: SecretInput; env?: Record; config?: Record; }; diff --git a/src/config/types.ts b/src/config/types.ts index 4260dd43931..50ee48c9b54 100644 --- a/src/config/types.ts +++ b/src/config/types.ts @@ -2,6 +2,7 @@ export * from "./types.agent-defaults.js"; export * from "./types.agents.js"; +export * from "./types.acp.js"; export * from "./types.approvals.js"; export * from "./types.auth.js"; export * from "./types.base.js"; @@ -22,6 +23,7 @@ export * from "./types.msteams.js"; export * from "./types.plugins.js"; export * from "./types.queue.js"; export * from "./types.sandbox.js"; +export * from "./types.secrets.js"; export * from "./types.signal.js"; export * from "./types.skills.js"; export * from 
"./types.slack.js"; diff --git a/src/config/validation.ts b/src/config/validation.ts index 746f89ef0e4..fab6351254c 100644 --- a/src/config/validation.ts +++ b/src/config/validation.ts @@ -315,7 +315,11 @@ function validateConfigObjectWithPluginsBase( } const { registry, knownIds, normalizedPlugins } = ensureRegistry(); - const pushMissingPluginIssue = (path: string, pluginId: string) => { + const pushMissingPluginIssue = ( + path: string, + pluginId: string, + opts?: { warnOnly?: boolean }, + ) => { if (LEGACY_REMOVED_PLUGIN_IDS.has(pluginId)) { warnings.push({ path, @@ -323,6 +327,13 @@ function validateConfigObjectWithPluginsBase( }); return; } + if (opts?.warnOnly) { + warnings.push({ + path, + message: `plugin not found: ${pluginId} (stale config entry ignored; remove it from plugins config)`, + }); + return; + } issues.push({ path, message: `plugin not found: ${pluginId}`, @@ -335,7 +346,8 @@ function validateConfigObjectWithPluginsBase( if (entries && isRecord(entries)) { for (const pluginId of Object.keys(entries)) { if (!knownIds.has(pluginId)) { - pushMissingPluginIssue(`plugins.entries.${pluginId}`, pluginId); + // Keep gateway startup resilient when plugins are removed/renamed across upgrades. 
+ pushMissingPluginIssue(`plugins.entries.${pluginId}`, pluginId, { warnOnly: true }); } } } diff --git a/src/config/zod-schema.agent-defaults.ts b/src/config/zod-schema.agent-defaults.ts index aa39a70978b..3e304361396 100644 --- a/src/config/zod-schema.agent-defaults.ts +++ b/src/config/zod-schema.agent-defaults.ts @@ -96,6 +96,14 @@ export const AgentDefaultsSchema = z }) .strict() .optional(), + embeddedPi: z + .object({ + projectSettingsPolicy: z + .union([z.literal("trusted"), z.literal("sanitize"), z.literal("ignore")]) + .optional(), + }) + .strict() + .optional(), thinkingDefault: z .union([ z.literal("off"), diff --git a/src/config/zod-schema.agent-runtime.ts b/src/config/zod-schema.agent-runtime.ts index c477cc1743b..9df0776b956 100644 --- a/src/config/zod-schema.agent-runtime.ts +++ b/src/config/zod-schema.agent-runtime.ts @@ -26,6 +26,7 @@ export const HeartbeatSchema = z session: z.string().optional(), includeReasoning: z.boolean().optional(), target: z.string().optional(), + directPolicy: z.union([z.literal("allow"), z.literal("block")]).optional(), to: z.string().optional(), accountId: z.string().optional(), prompt: z.string().optional(), diff --git a/src/config/zod-schema.core.ts b/src/config/zod-schema.core.ts index d99ebe3b907..711faf5e90c 100644 --- a/src/config/zod-schema.core.ts +++ b/src/config/zod-schema.core.ts @@ -1,18 +1,187 @@ +import path from "node:path"; import { z } from "zod"; import { isSafeExecutableValue } from "../infra/exec-safety.js"; +import { isValidFileSecretRefId } from "../secrets/ref-contract.js"; +import { MODEL_APIS } from "./types.models.js"; import { createAllowDenyChannelRulesSchema } from "./zod-schema.allowdeny.js"; import { sensitive } from "./zod-schema.sensitive.js"; -export const ModelApiSchema = z.union([ - z.literal("openai-completions"), - z.literal("openai-responses"), - z.literal("anthropic-messages"), - z.literal("google-generative-ai"), - z.literal("github-copilot"), - 
z.literal("bedrock-converse-stream"), - z.literal("ollama"), +const ENV_SECRET_REF_ID_PATTERN = /^[A-Z][A-Z0-9_]{0,127}$/; +const SECRET_PROVIDER_ALIAS_PATTERN = /^[a-z][a-z0-9_-]{0,63}$/; +const EXEC_SECRET_REF_ID_PATTERN = /^[A-Za-z0-9][A-Za-z0-9._:/-]{0,255}$/; +const WINDOWS_ABS_PATH_PATTERN = /^[A-Za-z]:[\\/]/; +const WINDOWS_UNC_PATH_PATTERN = /^\\\\[^\\]+\\[^\\]+/; + +function isAbsolutePath(value: string): boolean { + return ( + path.isAbsolute(value) || + WINDOWS_ABS_PATH_PATTERN.test(value) || + WINDOWS_UNC_PATH_PATTERN.test(value) + ); +} + +const EnvSecretRefSchema = z + .object({ + source: z.literal("env"), + provider: z + .string() + .regex( + SECRET_PROVIDER_ALIAS_PATTERN, + 'Secret reference provider must match /^[a-z][a-z0-9_-]{0,63}$/ (example: "default").', + ), + id: z + .string() + .regex( + ENV_SECRET_REF_ID_PATTERN, + 'Env secret reference id must match /^[A-Z][A-Z0-9_]{0,127}$/ (example: "OPENAI_API_KEY").', + ), + }) + .strict(); + +const FileSecretRefSchema = z + .object({ + source: z.literal("file"), + provider: z + .string() + .regex( + SECRET_PROVIDER_ALIAS_PATTERN, + 'Secret reference provider must match /^[a-z][a-z0-9_-]{0,63}$/ (example: "default").', + ), + id: z + .string() + .refine( + isValidFileSecretRefId, + 'File secret reference id must be an absolute JSON pointer (example: "/providers/openai/apiKey"), or "value" for singleValue mode.', + ), + }) + .strict(); + +const ExecSecretRefSchema = z + .object({ + source: z.literal("exec"), + provider: z + .string() + .regex( + SECRET_PROVIDER_ALIAS_PATTERN, + 'Secret reference provider must match /^[a-z][a-z0-9_-]{0,63}$/ (example: "default").', + ), + id: z + .string() + .regex( + EXEC_SECRET_REF_ID_PATTERN, + 'Exec secret reference id must match /^[A-Za-z0-9][A-Za-z0-9._:/-]{0,255}$/ (example: "vault/openai/api-key").', + ), + }) + .strict(); + +export const SecretRefSchema = z.discriminatedUnion("source", [ + EnvSecretRefSchema, + FileSecretRefSchema, + ExecSecretRefSchema, ]); 
+export const SecretInputSchema = z.union([z.string(), SecretRefSchema]); + +const SecretsEnvProviderSchema = z + .object({ + source: z.literal("env"), + allowlist: z.array(z.string().regex(ENV_SECRET_REF_ID_PATTERN)).max(256).optional(), + }) + .strict(); + +const SecretsFileProviderSchema = z + .object({ + source: z.literal("file"), + path: z.string().min(1), + mode: z.union([z.literal("singleValue"), z.literal("json")]).optional(), + timeoutMs: z.number().int().positive().max(120000).optional(), + maxBytes: z + .number() + .int() + .positive() + .max(20 * 1024 * 1024) + .optional(), + }) + .strict(); + +const SecretsExecProviderSchema = z + .object({ + source: z.literal("exec"), + command: z + .string() + .min(1) + .refine((value) => isSafeExecutableValue(value), "secrets.providers.*.command is unsafe.") + .refine( + (value) => isAbsolutePath(value), + "secrets.providers.*.command must be an absolute path.", + ), + args: z.array(z.string().max(1024)).max(128).optional(), + timeoutMs: z.number().int().positive().max(120000).optional(), + noOutputTimeoutMs: z.number().int().positive().max(120000).optional(), + maxOutputBytes: z + .number() + .int() + .positive() + .max(20 * 1024 * 1024) + .optional(), + jsonOnly: z.boolean().optional(), + env: z.record(z.string(), z.string()).optional(), + passEnv: z.array(z.string().regex(ENV_SECRET_REF_ID_PATTERN)).max(128).optional(), + trustedDirs: z + .array( + z + .string() + .min(1) + .refine((value) => isAbsolutePath(value), "trustedDirs entries must be absolute paths."), + ) + .max(64) + .optional(), + allowInsecurePath: z.boolean().optional(), + allowSymlinkCommand: z.boolean().optional(), + }) + .strict(); + +export const SecretProviderSchema = z.discriminatedUnion("source", [ + SecretsEnvProviderSchema, + SecretsFileProviderSchema, + SecretsExecProviderSchema, +]); + +export const SecretsConfigSchema = z + .object({ + providers: z + .object({ + // Keep this as a record so users can define multiple providers per source. 
+ }) + .catchall(SecretProviderSchema) + .optional(), + defaults: z + .object({ + env: z.string().regex(SECRET_PROVIDER_ALIAS_PATTERN).optional(), + file: z.string().regex(SECRET_PROVIDER_ALIAS_PATTERN).optional(), + exec: z.string().regex(SECRET_PROVIDER_ALIAS_PATTERN).optional(), + }) + .strict() + .optional(), + resolution: z + .object({ + maxProviderConcurrency: z.number().int().positive().max(16).optional(), + maxRefsPerProvider: z.number().int().positive().max(4096).optional(), + maxBatchBytes: z + .number() + .int() + .positive() + .max(5 * 1024 * 1024) + .optional(), + }) + .strict() + .optional(), + }) + .strict() + .optional(); + +export const ModelApiSchema = z.enum(MODEL_APIS); + export const ModelCompatSchema = z .object({ supportsStore: z.boolean().optional(), @@ -58,7 +227,7 @@ export const ModelDefinitionSchema = z export const ModelProviderSchema = z .object({ baseUrl: z.string().min(1), - apiKey: z.string().optional().register(sensitive), + apiKey: SecretInputSchema.optional().register(sensitive), auth: z .union([z.literal("api-key"), z.literal("aws-sdk"), z.literal("oauth"), z.literal("token")]) .optional(), @@ -342,6 +511,32 @@ export const requireOpenAllowFrom = (params: { }); }; +/** + * Validate that dmPolicy="allowlist" has a non-empty allowFrom array. + * Without this, all DMs are silently dropped because the allowlist is empty + * and no senders can match. 
+ */ +export const requireAllowlistAllowFrom = (params: { + policy?: string; + allowFrom?: Array; + ctx: z.RefinementCtx; + path: Array; + message: string; +}) => { + if (params.policy !== "allowlist") { + return; + } + const allow = normalizeAllowFrom(params.allowFrom); + if (allow.length > 0) { + return; + } + params.ctx.addIssue({ + code: z.ZodIssueCode.custom, + path: params.path, + message: params.message, + }); +}; + export const MSTeamsReplyStyleSchema = z.enum(["thread", "top-level"]); export const RetryConfigSchema = z diff --git a/src/config/zod-schema.providers-core.ts b/src/config/zod-schema.providers-core.ts index 806eb8f89ce..5c69682123e 100644 --- a/src/config/zod-schema.providers-core.ts +++ b/src/config/zod-schema.providers-core.ts @@ -25,9 +25,11 @@ import { MarkdownConfigSchema, MSTeamsReplyStyleSchema, ProviderCommandsSchema, + SecretRefSchema, ReplyToModeSchema, RetryConfigSchema, TtsConfigSchema, + requireAllowlistAllowFrom, requireOpenAllowFrom, } from "./zod-schema.core.js"; import { sensitive } from "./zod-schema.sensitive.js"; @@ -165,11 +167,39 @@ export const TelegramAccountSchemaBase = z .strict() .optional(), proxy: z.string().optional(), - webhookUrl: z.string().optional(), - webhookSecret: z.string().optional().register(sensitive), - webhookPath: z.string().optional(), - webhookHost: z.string().optional(), - webhookPort: z.number().int().positive().optional(), + webhookUrl: z + .string() + .optional() + .describe( + "Public HTTPS webhook URL registered with Telegram for inbound updates. This must be internet-reachable and requires channels.telegram.webhookSecret.", + ), + webhookSecret: z + .string() + .optional() + .describe( + "Secret token sent to Telegram during webhook registration and verified on inbound webhook requests. 
Telegram returns this value for verification; this is not the gateway auth token and not the bot token.", + ) + .register(sensitive), + webhookPath: z + .string() + .optional() + .describe( + "Local webhook route path served by the gateway listener. Defaults to /telegram-webhook.", + ), + webhookHost: z + .string() + .optional() + .describe( + "Local bind host for the webhook listener. Defaults to 127.0.0.1; keep loopback unless you intentionally expose direct ingress.", + ), + webhookPort: z + .number() + .int() + .nonnegative() + .optional() + .describe( + "Local bind port for the webhook listener. Defaults to 8787; set to 0 to let the OS assign an ephemeral port.", + ), actions: z .object({ reactions: z.boolean().optional(), @@ -190,14 +220,10 @@ export const TelegramAccountSchemaBase = z export const TelegramAccountSchema = TelegramAccountSchemaBase.superRefine((value, ctx) => { normalizeTelegramStreamingConfig(value); - requireOpenAllowFrom({ - policy: value.dmPolicy, - allowFrom: value.allowFrom, - ctx, - path: ["allowFrom"], - message: - 'channels.telegram.dmPolicy="open" requires channels.telegram.allowFrom to include "*"', - }); + // Account-level schemas skip allowFrom validation because accounts inherit + // allowFrom from the parent channel config at runtime (resolveTelegramAccount + // shallow-merges top-level and account values in src/telegram/accounts.ts). + // Validation is enforced at the top-level TelegramConfigSchema instead. 
validateTelegramCustomCommands(value, ctx); }); @@ -213,8 +239,42 @@ export const TelegramConfigSchema = TelegramAccountSchemaBase.extend({ message: 'channels.telegram.dmPolicy="open" requires channels.telegram.allowFrom to include "*"', }); + requireAllowlistAllowFrom({ + policy: value.dmPolicy, + allowFrom: value.allowFrom, + ctx, + path: ["allowFrom"], + message: + 'channels.telegram.dmPolicy="allowlist" requires channels.telegram.allowFrom to contain at least one sender ID', + }); validateTelegramCustomCommands(value, ctx); + if (value.accounts) { + for (const [accountId, account] of Object.entries(value.accounts)) { + if (!account) { + continue; + } + const effectivePolicy = account.dmPolicy ?? value.dmPolicy; + const effectiveAllowFrom = account.allowFrom ?? value.allowFrom; + requireOpenAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.telegram.accounts.*.dmPolicy="open" requires channels.telegram.accounts.*.allowFrom (or channels.telegram.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.telegram.accounts.*.dmPolicy="allowlist" requires channels.telegram.accounts.*.allowFrom (or channels.telegram.allowFrom) to contain at least one sender ID', + }); + } + } + const baseWebhookUrl = typeof value.webhookUrl === "string" ? value.webhookUrl.trim() : ""; const baseWebhookSecret = typeof value.webhookSecret === "string" ? value.webhookSecret.trim() : ""; @@ -235,6 +295,27 @@ export const TelegramConfigSchema = TelegramAccountSchemaBase.extend({ if (account.enabled === false) { continue; } + const effectiveDmPolicy = account.dmPolicy ?? value.dmPolicy; + const effectiveAllowFrom = Array.isArray(account.allowFrom) + ? 
account.allowFrom + : value.allowFrom; + requireOpenAllowFrom({ + policy: effectiveDmPolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.telegram.accounts.*.dmPolicy="open" requires channels.telegram.allowFrom or channels.telegram.accounts.*.allowFrom to include "*"', + }); + requireAllowlistAllowFrom({ + policy: effectiveDmPolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.telegram.accounts.*.dmPolicy="allowlist" requires channels.telegram.allowFrom or channels.telegram.accounts.*.allowFrom to contain at least one sender ID', + }); + const accountWebhookUrl = typeof account.webhookUrl === "string" ? account.webhookUrl.trim() : ""; if (!accountWebhookUrl) { @@ -405,6 +486,7 @@ export const DiscordAccountSchema = z enabled: z.boolean().optional(), ttlHours: z.number().nonnegative().optional(), spawnSubagentSessions: z.boolean().optional(), + spawnAcpSessions: z.boolean().optional(), }) .strict() .optional(), @@ -466,22 +548,62 @@ export const DiscordAccountSchema = z }); } - const dmPolicy = value.dmPolicy ?? value.dm?.policy ?? "pairing"; - const allowFrom = value.allowFrom ?? value.dm?.allowFrom; - const allowFromPath = - value.allowFrom !== undefined ? (["allowFrom"] as const) : (["dm", "allowFrom"] as const); - requireOpenAllowFrom({ - policy: dmPolicy, - allowFrom, - ctx, - path: [...allowFromPath], - message: - 'channels.discord.dmPolicy="open" requires channels.discord.allowFrom (or channels.discord.dm.allowFrom) to include "*"', - }); + // DM allowlist validation is enforced at DiscordConfigSchema so account entries + // can inherit top-level allowFrom via runtime shallow merge. }); export const DiscordConfigSchema = DiscordAccountSchema.extend({ accounts: z.record(z.string(), DiscordAccountSchema.optional()).optional(), +}).superRefine((value, ctx) => { + const dmPolicy = value.dmPolicy ?? value.dm?.policy ?? 
"pairing"; + const allowFrom = value.allowFrom ?? value.dm?.allowFrom; + const allowFromPath = + value.allowFrom !== undefined ? (["allowFrom"] as const) : (["dm", "allowFrom"] as const); + requireOpenAllowFrom({ + policy: dmPolicy, + allowFrom, + ctx, + path: [...allowFromPath], + message: + 'channels.discord.dmPolicy="open" requires channels.discord.allowFrom (or channels.discord.dm.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: dmPolicy, + allowFrom, + ctx, + path: [...allowFromPath], + message: + 'channels.discord.dmPolicy="allowlist" requires channels.discord.allowFrom (or channels.discord.dm.allowFrom) to contain at least one sender ID', + }); + + if (!value.accounts) { + return; + } + for (const [accountId, account] of Object.entries(value.accounts)) { + if (!account) { + continue; + } + const effectivePolicy = + account.dmPolicy ?? account.dm?.policy ?? value.dmPolicy ?? value.dm?.policy ?? "pairing"; + const effectiveAllowFrom = + account.allowFrom ?? account.dm?.allowFrom ?? value.allowFrom ?? 
value.dm?.allowFrom; + requireOpenAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.discord.accounts.*.dmPolicy="open" requires channels.discord.accounts.*.allowFrom (or channels.discord.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.discord.accounts.*.dmPolicy="allowlist" requires channels.discord.accounts.*.allowFrom (or channels.discord.allowFrom) to contain at least one sender ID', + }); + } }); export const GoogleChatDmSchema = z @@ -500,6 +622,14 @@ export const GoogleChatDmSchema = z message: 'channels.googlechat.dm.policy="open" requires channels.googlechat.dm.allowFrom to include "*"', }); + requireAllowlistAllowFrom({ + policy: value.policy, + allowFrom: value.allowFrom, + ctx, + path: ["allowFrom"], + message: + 'channels.googlechat.dm.policy="allowlist" requires channels.googlechat.dm.allowFrom to contain at least one sender ID', + }); }); export const GoogleChatGroupSchema = z @@ -525,7 +655,11 @@ export const GoogleChatAccountSchema = z groupAllowFrom: z.array(z.union([z.string(), z.number()])).optional(), groups: z.record(z.string(), GoogleChatGroupSchema.optional()).optional(), defaultTo: z.string().optional(), - serviceAccount: z.union([z.string(), z.record(z.string(), z.unknown())]).optional(), + serviceAccount: z + .union([z.string(), z.record(z.string(), z.unknown()), SecretRefSchema]) + .optional() + .register(sensitive), + serviceAccountRef: SecretRefSchema.optional().register(sensitive), serviceAccountFile: z.string().optional(), audienceType: z.enum(["app-url", "project-number"]).optional(), audience: z.string().optional(), @@ -669,21 +803,11 @@ export const SlackAccountSchema = z ackReaction: z.string().optional(), }) .strict() - .superRefine((value, ctx) => { + .superRefine((value) => { 
normalizeSlackStreamingConfig(value); - const dmPolicy = value.dmPolicy ?? value.dm?.policy ?? "pairing"; - const allowFrom = value.allowFrom ?? value.dm?.allowFrom; - const allowFromPath = - value.allowFrom !== undefined ? (["allowFrom"] as const) : (["dm", "allowFrom"] as const); - requireOpenAllowFrom({ - policy: dmPolicy, - allowFrom, - ctx, - path: [...allowFromPath], - message: - 'channels.slack.dmPolicy="open" requires channels.slack.allowFrom (or channels.slack.dm.allowFrom) to include "*"', - }); + // DM allowlist validation is enforced at SlackConfigSchema so account entries + // can inherit top-level allowFrom via runtime shallow merge. }); export const SlackConfigSchema = SlackAccountSchema.safeExtend({ @@ -693,6 +817,27 @@ export const SlackConfigSchema = SlackAccountSchema.safeExtend({ groupPolicy: GroupPolicySchema.optional().default("allowlist"), accounts: z.record(z.string(), SlackAccountSchema.optional()).optional(), }).superRefine((value, ctx) => { + const dmPolicy = value.dmPolicy ?? value.dm?.policy ?? "pairing"; + const allowFrom = value.allowFrom ?? value.dm?.allowFrom; + const allowFromPath = + value.allowFrom !== undefined ? (["allowFrom"] as const) : (["dm", "allowFrom"] as const); + requireOpenAllowFrom({ + policy: dmPolicy, + allowFrom, + ctx, + path: [...allowFromPath], + message: + 'channels.slack.dmPolicy="open" requires channels.slack.allowFrom (or channels.slack.dm.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: dmPolicy, + allowFrom, + ctx, + path: [...allowFromPath], + message: + 'channels.slack.dmPolicy="allowlist" requires channels.slack.allowFrom (or channels.slack.dm.allowFrom) to contain at least one sender ID', + }); + const baseMode = value.mode ?? "socket"; if (baseMode === "http" && !value.signingSecret) { ctx.addIssue({ @@ -712,6 +857,26 @@ export const SlackConfigSchema = SlackAccountSchema.safeExtend({ continue; } const accountMode = account.mode ?? 
baseMode; + const effectivePolicy = + account.dmPolicy ?? account.dm?.policy ?? value.dmPolicy ?? value.dm?.policy ?? "pairing"; + const effectiveAllowFrom = + account.allowFrom ?? account.dm?.allowFrom ?? value.allowFrom ?? value.dm?.allowFrom; + requireOpenAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.slack.accounts.*.dmPolicy="open" requires channels.slack.accounts.*.allowFrom (or channels.slack.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.slack.accounts.*.dmPolicy="allowlist" requires channels.slack.accounts.*.allowFrom (or channels.slack.allowFrom) to contain at least one sender ID', + }); if (accountMode !== "http") { continue; } @@ -772,15 +937,10 @@ export const SignalAccountSchemaBase = z }) .strict(); -export const SignalAccountSchema = SignalAccountSchemaBase.superRefine((value, ctx) => { - requireOpenAllowFrom({ - policy: value.dmPolicy, - allowFrom: value.allowFrom, - ctx, - path: ["allowFrom"], - message: 'channels.signal.dmPolicy="open" requires channels.signal.allowFrom to include "*"', - }); -}); +// Account-level schemas skip allowFrom validation because accounts inherit +// allowFrom from the parent channel config at runtime. +// Validation is enforced at the top-level SignalConfigSchema instead. 
+export const SignalAccountSchema = SignalAccountSchemaBase; export const SignalConfigSchema = SignalAccountSchemaBase.extend({ accounts: z.record(z.string(), SignalAccountSchema.optional()).optional(), @@ -792,6 +952,41 @@ export const SignalConfigSchema = SignalAccountSchemaBase.extend({ path: ["allowFrom"], message: 'channels.signal.dmPolicy="open" requires channels.signal.allowFrom to include "*"', }); + requireAllowlistAllowFrom({ + policy: value.dmPolicy, + allowFrom: value.allowFrom, + ctx, + path: ["allowFrom"], + message: + 'channels.signal.dmPolicy="allowlist" requires channels.signal.allowFrom to contain at least one sender ID', + }); + + if (!value.accounts) { + return; + } + for (const [accountId, account] of Object.entries(value.accounts)) { + if (!account) { + continue; + } + const effectivePolicy = account.dmPolicy ?? value.dmPolicy; + const effectiveAllowFrom = account.allowFrom ?? value.allowFrom; + requireOpenAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.signal.accounts.*.dmPolicy="open" requires channels.signal.accounts.*.allowFrom (or channels.signal.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.signal.accounts.*.dmPolicy="allowlist" requires channels.signal.accounts.*.allowFrom (or channels.signal.allowFrom) to contain at least one sender ID', + }); + } }); export const IrcGroupSchema = z @@ -864,6 +1059,14 @@ function refineIrcAllowFromAndNickserv(value: IrcBaseConfig, ctx: z.RefinementCt path: ["allowFrom"], message: 'channels.irc.dmPolicy="open" requires channels.irc.allowFrom to include "*"', }); + requireAllowlistAllowFrom({ + policy: value.dmPolicy, + allowFrom: value.allowFrom, + ctx, + path: ["allowFrom"], + message: + 'channels.irc.dmPolicy="allowlist" requires channels.irc.allowFrom 
to contain at least one sender ID', + }); if (value.nickserv?.register && !value.nickserv.registerEmail?.trim()) { ctx.addIssue({ code: z.ZodIssueCode.custom, @@ -873,14 +1076,50 @@ function refineIrcAllowFromAndNickserv(value: IrcBaseConfig, ctx: z.RefinementCt } } +// Account-level schemas skip allowFrom validation because accounts inherit +// allowFrom from the parent channel config at runtime. +// Validation is enforced at the top-level IrcConfigSchema instead. export const IrcAccountSchema = IrcAccountSchemaBase.superRefine((value, ctx) => { - refineIrcAllowFromAndNickserv(value, ctx); + // Only validate nickserv at account level, not allowFrom (inherited from parent). + if (value.nickserv?.register && !value.nickserv.registerEmail?.trim()) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + path: ["nickserv", "registerEmail"], + message: "channels.irc.nickserv.register=true requires channels.irc.nickserv.registerEmail", + }); + } }); export const IrcConfigSchema = IrcAccountSchemaBase.extend({ accounts: z.record(z.string(), IrcAccountSchema.optional()).optional(), }).superRefine((value, ctx) => { refineIrcAllowFromAndNickserv(value, ctx); + if (!value.accounts) { + return; + } + for (const [accountId, account] of Object.entries(value.accounts)) { + if (!account) { + continue; + } + const effectivePolicy = account.dmPolicy ?? value.dmPolicy; + const effectiveAllowFrom = account.allowFrom ?? 
value.allowFrom; + requireOpenAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.irc.accounts.*.dmPolicy="open" requires channels.irc.accounts.*.allowFrom (or channels.irc.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.irc.accounts.*.dmPolicy="allowlist" requires channels.irc.accounts.*.allowFrom (or channels.irc.allowFrom) to contain at least one sender ID', + }); + } }); export const IMessageAccountSchemaBase = z @@ -936,16 +1175,10 @@ export const IMessageAccountSchemaBase = z }) .strict(); -export const IMessageAccountSchema = IMessageAccountSchemaBase.superRefine((value, ctx) => { - requireOpenAllowFrom({ - policy: value.dmPolicy, - allowFrom: value.allowFrom, - ctx, - path: ["allowFrom"], - message: - 'channels.imessage.dmPolicy="open" requires channels.imessage.allowFrom to include "*"', - }); -}); +// Account-level schemas skip allowFrom validation because accounts inherit +// allowFrom from the parent channel config at runtime. +// Validation is enforced at the top-level IMessageConfigSchema instead. 
+export const IMessageAccountSchema = IMessageAccountSchemaBase; export const IMessageConfigSchema = IMessageAccountSchemaBase.extend({ accounts: z.record(z.string(), IMessageAccountSchema.optional()).optional(), @@ -958,6 +1191,41 @@ export const IMessageConfigSchema = IMessageAccountSchemaBase.extend({ message: 'channels.imessage.dmPolicy="open" requires channels.imessage.allowFrom to include "*"', }); + requireAllowlistAllowFrom({ + policy: value.dmPolicy, + allowFrom: value.allowFrom, + ctx, + path: ["allowFrom"], + message: + 'channels.imessage.dmPolicy="allowlist" requires channels.imessage.allowFrom to contain at least one sender ID', + }); + + if (!value.accounts) { + return; + } + for (const [accountId, account] of Object.entries(value.accounts)) { + if (!account) { + continue; + } + const effectivePolicy = account.dmPolicy ?? value.dmPolicy; + const effectiveAllowFrom = account.allowFrom ?? value.allowFrom; + requireOpenAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.imessage.accounts.*.dmPolicy="open" requires channels.imessage.accounts.*.allowFrom (or channels.imessage.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.imessage.accounts.*.dmPolicy="allowlist" requires channels.imessage.accounts.*.allowFrom (or channels.imessage.allowFrom) to contain at least one sender ID', + }); + } }); const BlueBubblesAllowFromEntry = z.union([z.string(), z.number()]); @@ -1017,15 +1285,10 @@ export const BlueBubblesAccountSchemaBase = z }) .strict(); -export const BlueBubblesAccountSchema = BlueBubblesAccountSchemaBase.superRefine((value, ctx) => { - requireOpenAllowFrom({ - policy: value.dmPolicy, - allowFrom: value.allowFrom, - ctx, - path: ["allowFrom"], - message: 'channels.bluebubbles.accounts.*.dmPolicy="open" 
requires allowFrom to include "*"', - }); -}); +// Account-level schemas skip allowFrom validation because accounts inherit +// allowFrom from the parent channel config at runtime. +// Validation is enforced at the top-level BlueBubblesConfigSchema instead. +export const BlueBubblesAccountSchema = BlueBubblesAccountSchemaBase; export const BlueBubblesConfigSchema = BlueBubblesAccountSchemaBase.extend({ accounts: z.record(z.string(), BlueBubblesAccountSchema.optional()).optional(), @@ -1039,6 +1302,41 @@ export const BlueBubblesConfigSchema = BlueBubblesAccountSchemaBase.extend({ message: 'channels.bluebubbles.dmPolicy="open" requires channels.bluebubbles.allowFrom to include "*"', }); + requireAllowlistAllowFrom({ + policy: value.dmPolicy, + allowFrom: value.allowFrom, + ctx, + path: ["allowFrom"], + message: + 'channels.bluebubbles.dmPolicy="allowlist" requires channels.bluebubbles.allowFrom to contain at least one sender ID', + }); + + if (!value.accounts) { + return; + } + for (const [accountId, account] of Object.entries(value.accounts)) { + if (!account) { + continue; + } + const effectivePolicy = account.dmPolicy ?? value.dmPolicy; + const effectiveAllowFrom = account.allowFrom ?? 
value.allowFrom; + requireOpenAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.bluebubbles.accounts.*.dmPolicy="open" requires channels.bluebubbles.accounts.*.allowFrom (or channels.bluebubbles.allowFrom) to include "*"', + }); + requireAllowlistAllowFrom({ + policy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.bluebubbles.accounts.*.dmPolicy="allowlist" requires channels.bluebubbles.accounts.*.allowFrom (or channels.bluebubbles.allowFrom) to contain at least one sender ID', + }); + } }); export const MSTeamsChannelSchema = z @@ -1110,4 +1408,12 @@ export const MSTeamsConfigSchema = z message: 'channels.msteams.dmPolicy="open" requires channels.msteams.allowFrom to include "*"', }); + requireAllowlistAllowFrom({ + policy: value.dmPolicy, + allowFrom: value.allowFrom, + ctx, + path: ["allowFrom"], + message: + 'channels.msteams.dmPolicy="allowlist" requires channels.msteams.allowFrom to contain at least one sender ID', + }); }); diff --git a/src/config/zod-schema.providers-whatsapp.ts b/src/config/zod-schema.providers-whatsapp.ts index 4387ed1abb5..b8ff2938abb 100644 --- a/src/config/zod-schema.providers-whatsapp.ts +++ b/src/config/zod-schema.providers-whatsapp.ts @@ -63,6 +63,7 @@ function enforceOpenDmPolicyAllowFromStar(params: { allowFrom: unknown; ctx: z.RefinementCtx; message: string; + path?: Array; }) { if (params.dmPolicy !== "open") { return; @@ -75,7 +76,30 @@ function enforceOpenDmPolicyAllowFromStar(params: { } params.ctx.addIssue({ code: z.ZodIssueCode.custom, - path: ["allowFrom"], + path: params.path ?? 
["allowFrom"], + message: params.message, + }); +} + +function enforceAllowlistDmPolicyAllowFrom(params: { + dmPolicy: unknown; + allowFrom: unknown; + ctx: z.RefinementCtx; + message: string; + path?: Array; +}) { + if (params.dmPolicy !== "allowlist") { + return; + } + const allow = (Array.isArray(params.allowFrom) ? params.allowFrom : []) + .map((v) => String(v).trim()) + .filter(Boolean); + if (allow.length > 0) { + return; + } + params.ctx.addIssue({ + code: z.ZodIssueCode.custom, + path: params.path ?? ["allowFrom"], message: params.message, }); } @@ -86,16 +110,7 @@ export const WhatsAppAccountSchema = WhatsAppSharedSchema.extend({ /** Override auth directory for this WhatsApp account (Baileys multi-file auth state). */ authDir: z.string().optional(), mediaMaxMb: z.number().int().positive().optional(), -}) - .strict() - .superRefine((value, ctx) => { - enforceOpenDmPolicyAllowFromStar({ - dmPolicy: value.dmPolicy, - allowFrom: value.allowFrom, - ctx, - message: 'channels.whatsapp.accounts.*.dmPolicy="open" requires allowFrom to include "*"', - }); - }); +}).strict(); export const WhatsAppConfigSchema = WhatsAppSharedSchema.extend({ accounts: z.record(z.string(), WhatsAppAccountSchema.optional()).optional(), @@ -118,4 +133,37 @@ export const WhatsAppConfigSchema = WhatsAppSharedSchema.extend({ message: 'channels.whatsapp.dmPolicy="open" requires channels.whatsapp.allowFrom to include "*"', }); + enforceAllowlistDmPolicyAllowFrom({ + dmPolicy: value.dmPolicy, + allowFrom: value.allowFrom, + ctx, + message: + 'channels.whatsapp.dmPolicy="allowlist" requires channels.whatsapp.allowFrom to contain at least one sender ID', + }); + if (!value.accounts) { + return; + } + for (const [accountId, account] of Object.entries(value.accounts)) { + if (!account) { + continue; + } + const effectivePolicy = account.dmPolicy ?? value.dmPolicy; + const effectiveAllowFrom = account.allowFrom ?? 
value.allowFrom; + enforceOpenDmPolicyAllowFromStar({ + dmPolicy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.whatsapp.accounts.*.dmPolicy="open" requires channels.whatsapp.accounts.*.allowFrom (or channels.whatsapp.allowFrom) to include "*"', + }); + enforceAllowlistDmPolicyAllowFrom({ + dmPolicy: effectivePolicy, + allowFrom: effectiveAllowFrom, + ctx, + path: ["accounts", accountId, "allowFrom"], + message: + 'channels.whatsapp.accounts.*.dmPolicy="allowlist" requires channels.whatsapp.accounts.*.allowFrom (or channels.whatsapp.allowFrom) to contain at least one sender ID', + }); + } }); diff --git a/src/config/zod-schema.session-maintenance-extensions.test.ts b/src/config/zod-schema.session-maintenance-extensions.test.ts index 6efe8b39907..deb86999934 100644 --- a/src/config/zod-schema.session-maintenance-extensions.test.ts +++ b/src/config/zod-schema.session-maintenance-extensions.test.ts @@ -14,6 +14,19 @@ describe("SessionSchema maintenance extensions", () => { ).not.toThrow(); }); + it("accepts parentForkMaxTokens including 0 to disable the guard", () => { + expect(() => SessionSchema.parse({ parentForkMaxTokens: 100_000 })).not.toThrow(); + expect(() => SessionSchema.parse({ parentForkMaxTokens: 0 })).not.toThrow(); + }); + + it("rejects negative parentForkMaxTokens", () => { + expect(() => + SessionSchema.parse({ + parentForkMaxTokens: -1, + }), + ).toThrow(/parentForkMaxTokens/i); + }); + it("accepts disabling reset archive cleanup", () => { expect(() => SessionSchema.parse({ diff --git a/src/config/zod-schema.session.ts b/src/config/zod-schema.session.ts index 5af707b2804..de23c50846e 100644 --- a/src/config/zod-schema.session.ts +++ b/src/config/zod-schema.session.ts @@ -52,6 +52,7 @@ export const SessionSchema = z store: z.string().optional(), typingIntervalSeconds: z.number().int().positive().optional(), typingMode: TypingModeSchema.optional(), + parentForkMaxTokens: 
z.number().int().nonnegative().optional(), mainKey: z.string().optional(), sendPolicy: SessionSendPolicySchema.optional(), agentToAgent: z diff --git a/src/config/zod-schema.ts b/src/config/zod-schema.ts index 6ea3bd00287..e072c1fd968 100644 --- a/src/config/zod-schema.ts +++ b/src/config/zod-schema.ts @@ -4,7 +4,12 @@ import { parseDurationMs } from "../cli/parse-duration.js"; import { ToolsSchema } from "./zod-schema.agent-runtime.js"; import { AgentsSchema, AudioSchema, BindingsSchema, BroadcastSchema } from "./zod-schema.agents.js"; import { ApprovalsSchema } from "./zod-schema.approvals.js"; -import { HexColorSchema, ModelsConfigSchema } from "./zod-schema.core.js"; +import { + HexColorSchema, + ModelsConfigSchema, + SecretInputSchema, + SecretsConfigSchema, +} from "./zod-schema.core.js"; import { HookMappingSchema, HooksGmailSchema, InternalHooksSchema } from "./zod-schema.hooks.js"; import { InstallRecordShape } from "./zod-schema.installs.js"; import { ChannelsSchema } from "./zod-schema.providers.js"; @@ -289,6 +294,7 @@ export const OpenClawSchema = z }) .strict() .optional(), + secrets: SecretsConfigSchema, auth: z .object({ profiles: z @@ -316,6 +322,36 @@ export const OpenClawSchema = z }) .strict() .optional(), + acp: z + .object({ + enabled: z.boolean().optional(), + dispatch: z + .object({ + enabled: z.boolean().optional(), + }) + .strict() + .optional(), + backend: z.string().optional(), + defaultAgent: z.string().optional(), + allowedAgents: z.array(z.string()).optional(), + maxConcurrentSessions: z.number().int().positive().optional(), + stream: z + .object({ + coalesceIdleMs: z.number().int().nonnegative().optional(), + maxChunkChars: z.number().int().positive().optional(), + }) + .strict() + .optional(), + runtime: z + .object({ + ttlMinutes: z.number().int().positive().optional(), + installCommand: z.string().optional(), + }) + .strict() + .optional(), + }) + .strict() + .optional(), models: ModelsConfigSchema, nodeHost: NodeHostSchema, 
agents: AgentsSchema, @@ -691,7 +727,7 @@ export const OpenClawSchema = z z .object({ enabled: z.boolean().optional(), - apiKey: z.string().optional().register(sensitive), + apiKey: SecretInputSchema.optional().register(sensitive), env: z.record(z.string(), z.string()).optional(), config: z.record(z.string(), z.unknown()).optional(), }) diff --git a/src/cron/delivery.test.ts b/src/cron/delivery.test.ts index 6eaa5c66707..495e99d0039 100644 --- a/src/cron/delivery.test.ts +++ b/src/cron/delivery.test.ts @@ -54,4 +54,22 @@ describe("resolveCronDeliveryPlan", () => { expect(plan.channel).toBeUndefined(); expect(plan.to).toBe("https://example.invalid/cron"); }); + + it("threads delivery.accountId when explicitly configured", () => { + const plan = resolveCronDeliveryPlan( + makeJob({ + delivery: { + mode: "announce", + channel: "telegram", + to: "123", + accountId: " bot-a ", + }, + }), + ); + expect(plan.mode).toBe("announce"); + expect(plan.requested).toBe(true); + expect(plan.channel).toBe("telegram"); + expect(plan.to).toBe("123"); + expect(plan.accountId).toBe("bot-a"); + }); }); diff --git a/src/cron/delivery.ts b/src/cron/delivery.ts index 377cdb49b2f..9022d09fd5f 100644 --- a/src/cron/delivery.ts +++ b/src/cron/delivery.ts @@ -4,6 +4,7 @@ export type CronDeliveryPlan = { mode: CronDeliveryMode; channel?: CronMessageChannel; to?: string; + accountId?: string; source: "delivery" | "payload"; requested: boolean; }; @@ -27,6 +28,14 @@ function normalizeTo(value: unknown): string | undefined { return trimmed ? trimmed : undefined; } +function normalizeAccountId(value: unknown): string | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed ? trimmed : undefined; +} + export function resolveCronDeliveryPlan(job: CronJob): CronDeliveryPlan { const payload = job.payload.kind === "agentTurn" ? 
job.payload : null; const delivery = job.delivery; @@ -50,6 +59,9 @@ export function resolveCronDeliveryPlan(job: CronJob): CronDeliveryPlan { (delivery as { channel?: unknown } | undefined)?.channel, ); const deliveryTo = normalizeTo((delivery as { to?: unknown } | undefined)?.to); + const deliveryAccountId = normalizeAccountId( + (delivery as { accountId?: unknown } | undefined)?.accountId, + ); const channel = deliveryChannel ?? payloadChannel ?? "last"; const to = deliveryTo ?? payloadTo; @@ -59,6 +71,7 @@ export function resolveCronDeliveryPlan(job: CronJob): CronDeliveryPlan { mode: resolvedMode, channel: resolvedMode === "announce" ? channel : undefined, to, + accountId: deliveryAccountId, source: "delivery", requested: resolvedMode === "announce", }; diff --git a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts index 7d2dc3cf07a..01a407692e0 100644 --- a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts +++ b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts @@ -56,6 +56,7 @@ async function expectBestEffortTelegramNotDelivered( expect(res.status).toBe("ok"); expect(res.delivered).toBe(false); + expect(res.deliveryAttempted).toBe(true); expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(1); }); @@ -287,6 +288,33 @@ describe("runCronIsolatedAgentTurn", () => { }); }); + it("marks attempted when announce delivery reports false and best-effort is enabled", async () => { + await withTempCronHome(async (home) => { + const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" }); + const deps = createCliDeps(); + mockAgentPayloads([{ text: "hello from cron" }]); + vi.mocked(runSubagentAnnounceFlow).mockResolvedValueOnce(false); + + const res = 
await runTelegramAnnounceTurn({ + home, + storePath, + deps, + delivery: { + mode: "announce", + channel: "telegram", + to: "123", + bestEffort: true, + }, + }); + + expect(res.status).toBe("ok"); + expect(res.delivered).toBe(false); + expect(res.deliveryAttempted).toBe(true); + expect(runSubagentAnnounceFlow).toHaveBeenCalledTimes(1); + expect(deps.sendMessageTelegram).not.toHaveBeenCalled(); + }); + }); + it("ignores structured direct delivery failures when best-effort is enabled", async () => { await expectBestEffortTelegramNotDelivered({ text: "hello from cron", diff --git a/src/cron/isolated-agent/delivery-dispatch.ts b/src/cron/isolated-agent/delivery-dispatch.ts index 697c0e2b8a8..b071f63172d 100644 --- a/src/cron/isolated-agent/delivery-dispatch.ts +++ b/src/cron/isolated-agent/delivery-dispatch.ts @@ -8,6 +8,7 @@ import { resolveAgentMainSessionKey } from "../../config/sessions.js"; import { deliverOutboundPayloads } from "../../infra/outbound/deliver.js"; import { resolveAgentOutboundIdentity } from "../../infra/outbound/identity.js"; import { resolveOutboundSessionRoute } from "../../infra/outbound/outbound-session.js"; +import { buildOutboundSessionContext } from "../../infra/outbound/session-context.js"; import { logWarn } from "../../logger.js"; import type { CronJob, CronRunTelemetry } from "../types.js"; import type { DeliveryTargetResolution } from "./delivery-target.js"; @@ -117,6 +118,7 @@ type DispatchCronDeliveryParams = { export type DispatchCronDeliveryState = { result?: RunCronAgentTurnResult; delivered: boolean; + deliveryAttempted: boolean; summary?: string; outputText?: string; synthesizedText?: string; @@ -134,6 +136,7 @@ export async function dispatchCronDelivery( // `true` means we confirmed at least one outbound send reached the target. // Keep this strict so timer fallback can safely decide whether to wake main. 
let delivered = params.skipMessagingToolDelivery; + let deliveryAttempted = params.skipMessagingToolDelivery; const failDeliveryTarget = (error: string) => params.withRunSession({ status: "error", @@ -141,6 +144,7 @@ export async function dispatchCronDelivery( errorKind: "delivery-target", summary, outputText, + deliveryAttempted, ...params.telemetry, }); @@ -162,9 +166,16 @@ export async function dispatchCronDelivery( return params.withRunSession({ status: "error", error: params.abortReason(), + deliveryAttempted, ...params.telemetry, }); } + deliveryAttempted = true; + const deliverySession = buildOutboundSessionContext({ + cfg: params.cfgWithAgentDefaults, + agentId: params.agentId, + sessionKey: params.agentSessionKey, + }); const deliveryResults = await deliverOutboundPayloads({ cfg: params.cfgWithAgentDefaults, channel: delivery.channel, @@ -172,7 +183,7 @@ export async function dispatchCronDelivery( accountId: delivery.accountId, threadId: delivery.threadId, payloads: payloadsForDelivery, - agentId: params.agentId, + session: deliverySession, identity, bestEffort: params.deliveryBestEffort, deps: createOutboundSendDeps(params.deps), @@ -187,6 +198,7 @@ export async function dispatchCronDelivery( summary, outputText, error: String(err), + deliveryAttempted, ...params.telemetry, }); } @@ -277,9 +289,11 @@ export async function dispatchCronDelivery( return params.withRunSession({ status: "error", error: params.abortReason(), + deliveryAttempted, ...params.telemetry, }); } + deliveryAttempted = true; const didAnnounce = await runSubagentAnnounceFlow({ childSessionKey: params.agentSessionKey, childRunId: `${params.job.id}:${params.runSessionId}:${params.runStartedAt}`, @@ -315,6 +329,7 @@ export async function dispatchCronDelivery( summary, outputText, error: message, + deliveryAttempted, ...params.telemetry, }); } @@ -327,6 +342,7 @@ export async function dispatchCronDelivery( summary, outputText, error: String(err), + deliveryAttempted, ...params.telemetry, }); 
} @@ -345,6 +361,7 @@ export async function dispatchCronDelivery( return { result: failDeliveryTarget(params.resolvedDelivery.error.message), delivered, + deliveryAttempted, summary, outputText, synthesizedText, @@ -357,9 +374,11 @@ export async function dispatchCronDelivery( status: "ok", summary, outputText, + deliveryAttempted, ...params.telemetry, }), delivered, + deliveryAttempted, summary, outputText, synthesizedText, @@ -383,6 +402,7 @@ export async function dispatchCronDelivery( return { result: directResult, delivered, + deliveryAttempted, summary, outputText, synthesizedText, @@ -395,6 +415,7 @@ export async function dispatchCronDelivery( return { result: announceResult, delivered, + deliveryAttempted, summary, outputText, synthesizedText, @@ -406,6 +427,7 @@ export async function dispatchCronDelivery( return { delivered, + deliveryAttempted, summary, outputText, synthesizedText, diff --git a/src/cron/isolated-agent/delivery-target.test.ts b/src/cron/isolated-agent/delivery-target.test.ts index ad1df42bb47..b28239adda8 100644 --- a/src/cron/isolated-agent/delivery-target.test.ts +++ b/src/cron/isolated-agent/delivery-target.test.ts @@ -299,4 +299,39 @@ describe("resolveDeliveryTarget", () => { expect(result.to).toBe("987654"); expect(result.ok).toBe(true); }); + + it("explicit delivery.accountId overrides session-derived accountId", async () => { + setMainSessionEntry({ + sessionId: "sess-5", + updatedAt: 1000, + lastChannel: "telegram", + lastTo: "chat-999", + lastAccountId: "default", + }); + + const result = await resolveDeliveryTarget(makeCfg({ bindings: [] }), AGENT_ID, { + channel: "telegram", + to: "chat-999", + accountId: "bot-b", + }); + + expect(result.ok).toBe(true); + expect(result.accountId).toBe("bot-b"); + }); + + it("explicit delivery.accountId overrides bindings-derived accountId", async () => { + setMainSessionEntry(undefined); + const cfg = makeCfg({ + bindings: [{ agentId: AGENT_ID, match: { channel: "telegram", accountId: "bound" } 
}], + }); + + const result = await resolveDeliveryTarget(cfg, AGENT_ID, { + channel: "telegram", + to: "chat-777", + accountId: "explicit", + }); + + expect(result.ok).toBe(true); + expect(result.accountId).toBe("explicit"); + }); }); diff --git a/src/cron/isolated-agent/delivery-target.ts b/src/cron/isolated-agent/delivery-target.ts index 0aa26188120..a8b4bc7d7fb 100644 --- a/src/cron/isolated-agent/delivery-target.ts +++ b/src/cron/isolated-agent/delivery-target.ts @@ -13,7 +13,7 @@ import { } from "../../infra/outbound/targets.js"; import { readChannelAllowFromStoreSync } from "../../pairing/pairing-store.js"; import { buildChannelAccountBindings } from "../../routing/bindings.js"; -import { normalizeAgentId } from "../../routing/session-key.js"; +import { normalizeAccountId, normalizeAgentId } from "../../routing/session-key.js"; import { resolveWhatsAppAccount } from "../../web/accounts.js"; import { normalizeWhatsAppTarget } from "../../whatsapp/normalize.js"; @@ -43,6 +43,7 @@ export async function resolveDeliveryTarget( channel?: "last" | ChannelId; to?: string; sessionKey?: string; + accountId?: string; }, ): Promise { const requestedChannel = typeof jobPayload.channel === "string" ? jobPayload.channel : "last"; @@ -114,6 +115,11 @@ export async function resolveDeliveryTarget( } } + // Explicit delivery account should override inferred session/binding account. + if (jobPayload.accountId) { + accountId = jobPayload.accountId; + } + // Carry threadId when it was explicitly set (from :topic: parsing or config) // or when delivering to the same recipient as the session's last conversation. // Session-derived threadIds are dropped when the target differs to prevent @@ -154,13 +160,15 @@ export async function resolveDeliveryTarget( let allowFromOverride: string[] | undefined; if (channel === "whatsapp") { - const configuredAllowFromRaw = resolveWhatsAppAccount({ cfg, accountId }).allowFrom ?? 
[]; + const resolvedAccountId = normalizeAccountId(accountId); + const configuredAllowFromRaw = + resolveWhatsAppAccount({ cfg, accountId: resolvedAccountId }).allowFrom ?? []; const configuredAllowFrom = configuredAllowFromRaw .map((entry) => String(entry).trim()) .filter((entry) => entry && entry !== "*") .map((entry) => normalizeWhatsAppTarget(entry)) .filter((entry): entry is string => Boolean(entry)); - const storeAllowFrom = readChannelAllowFromStoreSync("whatsapp", process.env, accountId) + const storeAllowFrom = readChannelAllowFromStoreSync("whatsapp", process.env, resolvedAccountId) .map((entry) => normalizeWhatsAppTarget(entry)) .filter((entry): entry is string => Boolean(entry)); allowFromOverride = [...new Set([...configuredAllowFrom, ...storeAllowFrom])]; diff --git a/src/cron/isolated-agent/run.session-key.test.ts b/src/cron/isolated-agent/run.session-key.test.ts new file mode 100644 index 00000000000..20391b4142b --- /dev/null +++ b/src/cron/isolated-agent/run.session-key.test.ts @@ -0,0 +1,28 @@ +import { describe, expect, it } from "vitest"; +import { resolveCronAgentSessionKey } from "./session-key.js"; + +describe("resolveCronAgentSessionKey", () => { + it("builds an agent-scoped key for legacy aliases", () => { + expect(resolveCronAgentSessionKey({ sessionKey: "main", agentId: "main" })).toBe( + "agent:main:main", + ); + }); + + it("preserves canonical agent keys instead of prefixing twice", () => { + expect(resolveCronAgentSessionKey({ sessionKey: "agent:main:main", agentId: "main" })).toBe( + "agent:main:main", + ); + }); + + it("normalizes canonical keys to lowercase before reuse", () => { + expect( + resolveCronAgentSessionKey({ sessionKey: "AGENT:Main:Hook:Webhook:42", agentId: "x" }), + ).toBe("agent:main:hook:webhook:42"); + }); + + it("keeps hook keys scoped under the target agent", () => { + expect(resolveCronAgentSessionKey({ sessionKey: "hook:webhook:42", agentId: "main" })).toBe( + "agent:main:hook:webhook:42", + ); + }); +}); diff 
--git a/src/cron/isolated-agent/run.skill-filter.test.ts b/src/cron/isolated-agent/run.skill-filter.test.ts index 02d986819d9..2b6e4bbf7be 100644 --- a/src/cron/isolated-agent/run.skill-filter.test.ts +++ b/src/cron/isolated-agent/run.skill-filter.test.ts @@ -6,6 +6,13 @@ import { runWithModelFallback } from "../../agents/model-fallback.js"; const buildWorkspaceSkillSnapshotMock = vi.fn(); const resolveAgentConfigMock = vi.fn(); const resolveAgentSkillsFilterMock = vi.fn(); +const getModelRefStatusMock = vi.fn().mockReturnValue({ allowed: false }); +const isCliProviderMock = vi.fn().mockReturnValue(false); +const resolveAllowedModelRefMock = vi.fn(); +const resolveConfiguredModelRefMock = vi.fn(); +const resolveHooksGmailModelMock = vi.fn(); +const resolveThinkingDefaultMock = vi.fn(); +const logWarnMock = vi.fn(); vi.mock("../../agents/agent-scope.js", () => ({ resolveAgentConfig: resolveAgentConfigMock, @@ -36,14 +43,12 @@ vi.mock("../../agents/model-selection.js", async (importOriginal) => { const actual = await importOriginal(); return { ...actual, - getModelRefStatus: vi.fn().mockReturnValue({ allowed: false }), - isCliProvider: vi.fn().mockReturnValue(false), - resolveAllowedModelRef: vi - .fn() - .mockReturnValue({ ref: { provider: "openai", model: "gpt-4" } }), - resolveConfiguredModelRef: vi.fn().mockReturnValue({ provider: "openai", model: "gpt-4" }), - resolveHooksGmailModel: vi.fn().mockReturnValue(null), - resolveThinkingDefault: vi.fn().mockReturnValue(undefined), + getModelRefStatus: getModelRefStatusMock, + isCliProvider: isCliProviderMock, + resolveAllowedModelRef: resolveAllowedModelRefMock, + resolveConfiguredModelRef: resolveConfiguredModelRefMock, + resolveHooksGmailModel: resolveHooksGmailModelMock, + resolveThinkingDefault: resolveThinkingDefaultMock, }; }); @@ -138,7 +143,7 @@ vi.mock("../../infra/skills-remote.js", () => ({ })); vi.mock("../../logger.js", () => ({ - logWarn: vi.fn(), + logWarn: (...args: unknown[]) => logWarnMock(...args), 
})); vi.mock("../../security/external-content.js", () => ({ @@ -222,6 +227,13 @@ describe("runCronIsolatedAgentTurn — skill filter", () => { }); resolveAgentConfigMock.mockReturnValue(undefined); resolveAgentSkillsFilterMock.mockReturnValue(undefined); + resolveConfiguredModelRefMock.mockReturnValue({ provider: "openai", model: "gpt-4" }); + resolveAllowedModelRefMock.mockReturnValue({ ref: { provider: "openai", model: "gpt-4" } }); + resolveHooksGmailModelMock.mockReturnValue(null); + resolveThinkingDefaultMock.mockReturnValue(undefined); + getModelRefStatusMock.mockReturnValue({ allowed: false }); + isCliProviderMock.mockReturnValue(false); + logWarnMock.mockReset(); // Fresh session object per test — prevents mutation leaking between tests resolveCronSessionMock.mockReturnValue({ storePath: "/tmp/store.json", @@ -408,5 +420,78 @@ describe("runCronIsolatedAgentTurn — skill filter", () => { it("preserves defaults when agent overrides primary in object form", async () => { await expectPrimaryOverridePreservesDefaults({ primary: "anthropic/claude-sonnet-4-5" }); }); + + it("applies payload.model override when model is allowed", async () => { + resolveAllowedModelRefMock.mockReturnValueOnce({ + ref: { provider: "anthropic", model: "claude-sonnet-4-6" }, + }); + + const result = await runCronIsolatedAgentTurn( + makeParams({ + job: makeJob({ + payload: { kind: "agentTurn", message: "test", model: "anthropic/claude-sonnet-4-6" }, + }), + }), + ); + + expect(result.status).toBe("ok"); + expect(logWarnMock).not.toHaveBeenCalled(); + expect(runWithModelFallbackMock).toHaveBeenCalledOnce(); + const runParams = runWithModelFallbackMock.mock.calls[0][0]; + expect(runParams.provider).toBe("anthropic"); + expect(runParams.model).toBe("claude-sonnet-4-6"); + }); + + it("falls back to agent defaults when payload.model is not allowed", async () => { + resolveAllowedModelRefMock.mockReturnValueOnce({ + error: "model not allowed: anthropic/claude-sonnet-4-6", + }); + + const result 
= await runCronIsolatedAgentTurn( + makeParams({ + cfg: { + agents: { + defaults: { + model: { primary: "openai-codex/gpt-5.3-codex", fallbacks: defaultFallbacks }, + }, + }, + }, + job: makeJob({ + payload: { kind: "agentTurn", message: "test", model: "anthropic/claude-sonnet-4-6" }, + }), + }), + ); + + expect(result.status).toBe("ok"); + expect(logWarnMock).toHaveBeenCalledWith( + "cron: payload.model 'anthropic/claude-sonnet-4-6' not allowed, falling back to agent defaults", + ); + expect(runWithModelFallbackMock).toHaveBeenCalledOnce(); + const callCfg = runWithModelFallbackMock.mock.calls[0][0].cfg; + const model = callCfg?.agents?.defaults?.model as + | { primary?: string; fallbacks?: string[] } + | undefined; + expect(model?.primary).toBe("openai-codex/gpt-5.3-codex"); + expect(model?.fallbacks).toEqual(defaultFallbacks); + }); + + it("returns an error when payload.model is invalid", async () => { + resolveAllowedModelRefMock.mockReturnValueOnce({ + error: "invalid model: openai/", + }); + + const result = await runCronIsolatedAgentTurn( + makeParams({ + job: makeJob({ + payload: { kind: "agentTurn", message: "test", model: "openai/" }, + }), + }), + ); + + expect(result.status).toBe("error"); + expect(result.error).toBe("invalid model: openai/"); + expect(logWarnMock).not.toHaveBeenCalled(); + expect(runWithModelFallbackMock).not.toHaveBeenCalled(); + }); }); }); diff --git a/src/cron/isolated-agent/run.ts b/src/cron/isolated-agent/run.ts index dd5c28ae616..41ed8765522 100644 --- a/src/cron/isolated-agent/run.ts +++ b/src/cron/isolated-agent/run.ts @@ -40,7 +40,7 @@ import { import type { AgentDefaultsConfig } from "../../config/types.js"; import { registerAgentRunContext } from "../../infra/agent-events.js"; import { logWarn } from "../../logger.js"; -import { buildAgentMainSessionKey, normalizeAgentId } from "../../routing/session-key.js"; +import { normalizeAgentId } from "../../routing/session-key.js"; import { buildSafeExternalPrompt, 
detectSuspiciousPatterns, @@ -63,6 +63,7 @@ import { pickSummaryFromPayloads, resolveHeartbeatAckMaxChars, } from "./helpers.js"; +import { resolveCronAgentSessionKey } from "./session-key.js"; import { resolveCronSession } from "./session.js"; import { resolveCronSkillsSnapshot } from "./skills-snapshot.js"; @@ -77,6 +78,12 @@ export type RunCronAgentTurnResult = { * messages. See: https://github.com/openclaw/openclaw/issues/15692 */ delivered?: boolean; + /** + * `true` when cron attempted announce/direct delivery for this run. + * This is tracked separately from `delivered` because some announce paths + * cannot guarantee a final delivery ack synchronously. + */ + deliveryAttempted?: boolean; } & CronRunOutcome & CronRunTelemetry; @@ -136,10 +143,7 @@ export async function runCronIsolatedAgentTurn(params: { }; const baseSessionKey = (params.sessionKey?.trim() || `cron:${params.job.id}`).trim(); - const agentSessionKey = buildAgentMainSessionKey({ - agentId, - mainKey: baseSessionKey, - }); + const agentSessionKey = resolveCronAgentSessionKey({ sessionKey: baseSessionKey, agentId }); const workspaceDirRaw = resolveAgentWorkspaceDir(params.cfg, agentId); const agentDir = resolveAgentDir(params.cfg, agentId); @@ -198,10 +202,17 @@ export async function runCronIsolatedAgentTurn(params: { defaultModel: resolvedDefault.model, }); if ("error" in resolvedOverride) { - return { status: "error", error: resolvedOverride.error }; + if (resolvedOverride.error.startsWith("model not allowed:")) { + logWarn( + `cron: payload.model '${modelOverride}' not allowed, falling back to agent defaults`, + ); + } else { + return { status: "error", error: resolvedOverride.error }; + } + } else { + provider = resolvedOverride.ref.provider; + model = resolvedOverride.ref.model; } - provider = resolvedOverride.ref.provider; - model = resolvedOverride.ref.model; } const now = Date.now(); const cronSession = resolveCronSession({ @@ -307,6 +318,7 @@ export async function 
runCronIsolatedAgentTurn(params: { channel: deliveryPlan.channel ?? "last", to: deliveryPlan.to, sessionKey: params.job.sessionKey, + accountId: deliveryPlan.accountId, }); const { formattedTime, timeLine } = resolveCronStyleNow(params.cfg, now); @@ -557,7 +569,7 @@ export async function runCronIsolatedAgentTurn(params: { const embeddedRunError = hasErrorPayload ? (lastErrorPayloadText ?? "cron isolated run returned an error payload") : undefined; - const resolveRunOutcome = (params?: { delivered?: boolean }) => + const resolveRunOutcome = (params?: { delivered?: boolean; deliveryAttempted?: boolean }) => withRunSession({ status: hasErrorPayload ? "error" : "ok", ...(hasErrorPayload @@ -566,6 +578,7 @@ export async function runCronIsolatedAgentTurn(params: { summary, outputText, delivered: params?.delivered, + deliveryAttempted: params?.deliveryAttempted, ...telemetry, }); @@ -611,14 +624,23 @@ export async function runCronIsolatedAgentTurn(params: { withRunSession, }); if (deliveryResult.result) { + const resultWithDeliveryMeta: RunCronAgentTurnResult = { + ...deliveryResult.result, + deliveryAttempted: + deliveryResult.result.deliveryAttempted ?? 
deliveryResult.deliveryAttempted, + }; if (!hasErrorPayload || deliveryResult.result.status !== "ok") { - return deliveryResult.result; + return resultWithDeliveryMeta; } - return resolveRunOutcome({ delivered: deliveryResult.result.delivered }); + return resolveRunOutcome({ + delivered: deliveryResult.result.delivered, + deliveryAttempted: resultWithDeliveryMeta.deliveryAttempted, + }); } const delivered = deliveryResult.delivered; + const deliveryAttempted = deliveryResult.deliveryAttempted; summary = deliveryResult.summary; outputText = deliveryResult.outputText; - return resolveRunOutcome({ delivered }); + return resolveRunOutcome({ delivered, deliveryAttempted }); } diff --git a/src/cron/isolated-agent/session-key.ts b/src/cron/isolated-agent/session-key.ts new file mode 100644 index 00000000000..230b858fd88 --- /dev/null +++ b/src/cron/isolated-agent/session-key.ts @@ -0,0 +1,13 @@ +import { toAgentStoreSessionKey } from "../../routing/session-key.js"; + +export function resolveCronAgentSessionKey(params: { + sessionKey: string; + agentId: string; + mainKey?: string | undefined; +}): string { + return toAgentStoreSessionKey({ + agentId: params.agentId, + requestKey: params.sessionKey.trim(), + mainKey: params.mainKey, + }); +} diff --git a/src/cron/service.issue-regressions.test.ts b/src/cron/service.issue-regressions.test.ts index 7515b110250..469ff498019 100644 --- a/src/cron/service.issue-regressions.test.ts +++ b/src/cron/service.issue-regressions.test.ts @@ -9,7 +9,7 @@ import { CronService } from "./service.js"; import { createDeferred, createRunningCronServiceState } from "./service.test-harness.js"; import { computeJobNextRunAtMs } from "./service/jobs.js"; import { createCronServiceState, type CronEvent } from "./service/state.js"; -import { executeJobCore, onTimer, runMissedJobs } from "./service/timer.js"; +import { DEFAULT_JOB_TIMEOUT_MS, executeJobCore, onTimer, runMissedJobs } from "./service/timer.js"; import type { CronJob, CronJobState } from 
"./types.js"; const noopLogger = { @@ -838,6 +838,58 @@ describe("Cron issue regressions", () => { expect(job?.state.lastStatus).toBe("ok"); }); + it("does not time out agentTurn jobs at the default 10-minute safety window", async () => { + const store = await makeStorePath(); + const scheduledAt = Date.parse("2026-02-15T13:00:00.000Z"); + + const cronJob = createIsolatedRegressionJob({ + id: "agentturn-default-safety-window", + name: "agentturn default safety window", + scheduledAt, + schedule: { kind: "at", at: new Date(scheduledAt).toISOString() }, + payload: { kind: "agentTurn", message: "work" }, + state: { nextRunAtMs: scheduledAt }, + }); + await writeCronJobs(store.storePath, [cronJob]); + + let now = scheduledAt; + const deferredRun = createDeferred<{ status: "ok"; summary: string }>(); + const runIsolatedAgentJob = vi.fn(async ({ abortSignal }: { abortSignal?: AbortSignal }) => { + const result = await deferredRun.promise; + if (abortSignal?.aborted) { + return { status: "error" as const, error: String(abortSignal.reason) }; + } + now += 5; + return result; + }); + const state = createCronServiceState({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + nowMs: () => now, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob, + }); + + const timerPromise = onTimer(state); + let settled = false; + void timerPromise.finally(() => { + settled = true; + }); + + await vi.advanceTimersByTimeAsync(DEFAULT_JOB_TIMEOUT_MS + 1_000); + await Promise.resolve(); + expect(settled).toBe(false); + + deferredRun.resolve({ status: "ok", summary: "done" }); + await timerPromise; + + const job = state.store?.jobs.find((entry) => entry.id === "agentturn-default-safety-window"); + expect(job?.state.lastStatus).toBe("ok"); + expect(job?.state.lastError).toBeUndefined(); + }); + it("aborts isolated runs when cron timeout fires", async () => { vi.useRealTimers(); const store = await makeStorePath(); diff --git 
a/src/cron/service.runs-one-shot-main-job-disables-it.test.ts b/src/cron/service.runs-one-shot-main-job-disables-it.test.ts index 027a464357d..37079addef0 100644 --- a/src/cron/service.runs-one-shot-main-job-disables-it.test.ts +++ b/src/cron/service.runs-one-shot-main-job-disables-it.test.ts @@ -625,6 +625,28 @@ describe("CronService", () => { await store.cleanup(); }); + it("does not post isolated summary to main when announce delivery was attempted", async () => { + const runIsolatedAgentJob = vi.fn(async () => ({ + status: "ok" as const, + summary: "done", + delivered: false, + deliveryAttempted: true, + })); + const { store, cron, enqueueSystemEvent, requestHeartbeatNow, events } = + await createIsolatedAnnounceHarness(runIsolatedAgentJob); + await runIsolatedAnnounceJobAndWait({ + cron, + events, + name: "weekly attempted", + status: "ok", + }); + expect(runIsolatedAgentJob).toHaveBeenCalledTimes(1); + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); + cron.stop(); + await store.cleanup(); + }); + it("migrates legacy payload.provider to payload.channel on load", async () => { const rawJob = createLegacyDeliveryMigrationJob({ id: "legacy-1", diff --git a/src/cron/service/state.ts b/src/cron/service/state.ts index 19b139b3703..3ad9cc1f591 100644 --- a/src/cron/service/state.ts +++ b/src/cron/service/state.ts @@ -80,6 +80,11 @@ export type CronServiceDeps = { * https://github.com/openclaw/openclaw/issues/15692 */ delivered?: boolean; + /** + * `true` when announce/direct delivery was attempted for this run, even + * if the final per-message ack status is uncertain. 
+ */ + deliveryAttempted?: boolean; } & CronRunOutcome & CronRunTelemetry >; diff --git a/src/cron/service/timeout-policy.test.ts b/src/cron/service/timeout-policy.test.ts new file mode 100644 index 00000000000..69ca6aa46c3 --- /dev/null +++ b/src/cron/service/timeout-policy.test.ts @@ -0,0 +1,49 @@ +import { describe, expect, it } from "vitest"; +import type { CronJob } from "../types.js"; +import { + AGENT_TURN_SAFETY_TIMEOUT_MS, + DEFAULT_JOB_TIMEOUT_MS, + resolveCronJobTimeoutMs, +} from "./timeout-policy.js"; + +function makeJob(payload: CronJob["payload"]): CronJob { + const sessionTarget = payload.kind === "agentTurn" ? "isolated" : "main"; + return { + id: "job-1", + name: "job", + createdAtMs: 0, + updatedAtMs: 0, + enabled: true, + schedule: { kind: "every", everyMs: 60_000 }, + sessionTarget, + wakeMode: "next-heartbeat", + payload, + state: {}, + }; +} + +describe("timeout-policy", () => { + it("uses default timeout for non-agent jobs", () => { + const timeout = resolveCronJobTimeoutMs(makeJob({ kind: "systemEvent", text: "hello" })); + expect(timeout).toBe(DEFAULT_JOB_TIMEOUT_MS); + }); + + it("uses expanded safety timeout for agentTurn jobs without explicit timeout", () => { + const timeout = resolveCronJobTimeoutMs(makeJob({ kind: "agentTurn", message: "hi" })); + expect(timeout).toBe(AGENT_TURN_SAFETY_TIMEOUT_MS); + }); + + it("disables timeout when timeoutSeconds <= 0", () => { + const timeout = resolveCronJobTimeoutMs( + makeJob({ kind: "agentTurn", message: "hi", timeoutSeconds: 0 }), + ); + expect(timeout).toBeUndefined(); + }); + + it("applies explicit timeoutSeconds when positive", () => { + const timeout = resolveCronJobTimeoutMs( + makeJob({ kind: "agentTurn", message: "hi", timeoutSeconds: 1.9 }), + ); + expect(timeout).toBe(1_900); + }); +}); diff --git a/src/cron/service/timeout-policy.ts b/src/cron/service/timeout-policy.ts new file mode 100644 index 00000000000..7b03b8bda52 --- /dev/null +++ b/src/cron/service/timeout-policy.ts @@ -0,0 
+1,25 @@ +import type { CronJob } from "../types.js"; + +/** + * Maximum wall-clock time for a single job execution. Acts as a safety net + * on top of per-provider/per-agent timeouts to prevent one stuck job from + * wedging the entire cron lane. + */ +export const DEFAULT_JOB_TIMEOUT_MS = 10 * 60_000; // 10 minutes + +/** + * Agent turns can legitimately run much longer than generic cron jobs. + * Use a larger safety ceiling when no explicit timeout is set. + */ +export const AGENT_TURN_SAFETY_TIMEOUT_MS = 60 * 60_000; // 60 minutes + +export function resolveCronJobTimeoutMs(job: CronJob): number | undefined { + const configuredTimeoutMs = + job.payload.kind === "agentTurn" && typeof job.payload.timeoutSeconds === "number" + ? Math.floor(job.payload.timeoutSeconds * 1_000) + : undefined; + if (configuredTimeoutMs === undefined) { + return job.payload.kind === "agentTurn" ? AGENT_TURN_SAFETY_TIMEOUT_MS : DEFAULT_JOB_TIMEOUT_MS; + } + return configuredTimeoutMs <= 0 ? undefined : configuredTimeoutMs; +} diff --git a/src/cron/service/timer.ts b/src/cron/service/timer.ts index 34cdab97f5a..8267d4c970a 100644 --- a/src/cron/service/timer.ts +++ b/src/cron/service/timer.ts @@ -18,6 +18,9 @@ import { import { locked } from "./locked.js"; import type { CronEvent, CronServiceState } from "./state.js"; import { ensureLoaded, persist } from "./store.js"; +import { DEFAULT_JOB_TIMEOUT_MS, resolveCronJobTimeoutMs } from "./timeout-policy.js"; + +export { DEFAULT_JOB_TIMEOUT_MS } from "./timeout-policy.js"; const MAX_TIMER_DELAY_MS = 60_000; @@ -30,32 +33,15 @@ const MAX_TIMER_DELAY_MS = 60_000; */ const MIN_REFIRE_GAP_MS = 2_000; -/** - * Maximum wall-clock time for a single job execution. Acts as a safety net - * on top of the per-provider / per-agent timeouts to prevent one stuck job - * from wedging the entire cron lane. 
- */ -export const DEFAULT_JOB_TIMEOUT_MS = 10 * 60_000; // 10 minutes - type TimedCronRunOutcome = CronRunOutcome & CronRunTelemetry & { jobId: string; delivered?: boolean; + deliveryAttempted?: boolean; startedAt: number; endedAt: number; }; -function resolveCronJobTimeoutMs(job: CronJob): number | undefined { - const configuredTimeoutMs = - job.payload.kind === "agentTurn" && typeof job.payload.timeoutSeconds === "number" - ? Math.floor(job.payload.timeoutSeconds * 1_000) - : undefined; - if (configuredTimeoutMs === undefined) { - return DEFAULT_JOB_TIMEOUT_MS; - } - return configuredTimeoutMs <= 0 ? undefined : configuredTimeoutMs; -} - export async function executeJobCoreWithTimeout( state: CronServiceState, job: CronJob, @@ -606,7 +592,9 @@ export async function executeJobCore( state: CronServiceState, job: CronJob, abortSignal?: AbortSignal, -): Promise { +): Promise< + CronRunOutcome & CronRunTelemetry & { delivered?: boolean; deliveryAttempted?: boolean } +> { const resolveAbortError = () => ({ status: "error" as const, error: timeoutErrorMessage(), @@ -729,17 +717,22 @@ export async function executeJobCore( return { status: "error", error: timeoutErrorMessage() }; } - // Post a short summary back to the main session — but only when the - // isolated run did NOT already deliver its output to the target channel. - // When `res.delivered` is true the announce flow (or direct outbound - // delivery) already sent the result, so posting the summary to main - // would wake the main agent and cause a duplicate message. + // Post a short summary back to the main session only when announce + // delivery was requested and we are confident no outbound delivery path + // ran. If delivery was attempted but final ack is uncertain, suppress the + // main summary to avoid duplicate user-facing sends. 
// See: https://github.com/openclaw/openclaw/issues/15692 const summaryText = res.summary?.trim(); const deliveryPlan = resolveCronDeliveryPlan(job); const suppressMainSummary = res.status === "error" && res.errorKind === "delivery-target" && deliveryPlan.requested; - if (summaryText && deliveryPlan.requested && !res.delivered && !suppressMainSummary) { + if ( + summaryText && + deliveryPlan.requested && + !res.delivered && + res.deliveryAttempted !== true && + !suppressMainSummary + ) { const prefix = "Cron"; const label = res.status === "error" ? `${prefix} (error): ${summaryText}` : `${prefix}: ${summaryText}`; @@ -762,6 +755,7 @@ export async function executeJobCore( error: res.error, summary: res.summary, delivered: res.delivered, + deliveryAttempted: res.deliveryAttempted, sessionId: res.sessionId, sessionKey: res.sessionKey, model: res.model, diff --git a/src/cron/types.ts b/src/cron/types.ts index 837cba2168e..4480b22ae6b 100644 --- a/src/cron/types.ts +++ b/src/cron/types.ts @@ -22,6 +22,7 @@ export type CronDelivery = { mode: CronDeliveryMode; channel?: CronMessageChannel; to?: string; + accountId?: string; bestEffort?: boolean; }; diff --git a/src/daemon/launchd-plist.ts b/src/daemon/launchd-plist.ts index e685cd9941c..b292ff45974 100644 --- a/src/daemon/launchd-plist.ts +++ b/src/daemon/launchd-plist.ts @@ -106,5 +106,5 @@ export function buildLaunchAgentPlist({ ? 
`\n Comment\n ${plistEscape(comment.trim())}` : ""; const envXml = renderEnvDict(environment); - return `\n\n\n \n Label\n ${plistEscape(label)}\n ${commentXml}\n RunAtLoad\n \n KeepAlive\n \n ProgramArguments\n ${argsXml}\n \n ${workingDirXml}\n StandardOutPath\n ${plistEscape(stdoutPath)}\n StandardErrorPath\n ${plistEscape(stderrPath)}${envXml}\n \n\n`; + return `\n\n\n \n Label\n ${plistEscape(label)}\n ${commentXml}\n RunAtLoad\n \n KeepAlive\n \n ThrottleInterval\n 60\n ProgramArguments\n ${argsXml}\n \n ${workingDirXml}\n StandardOutPath\n ${plistEscape(stdoutPath)}\n StandardErrorPath\n ${plistEscape(stderrPath)}${envXml}\n \n\n`; } diff --git a/src/daemon/launchd.integration.test.ts b/src/daemon/launchd.integration.test.ts new file mode 100644 index 00000000000..8fcd4a4d896 --- /dev/null +++ b/src/daemon/launchd.integration.test.ts @@ -0,0 +1,145 @@ +import { spawnSync } from "node:child_process"; +import { randomUUID } from "node:crypto"; +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { PassThrough } from "node:stream"; +import { afterAll, beforeAll, describe, expect, it } from "vitest"; +import { + installLaunchAgent, + readLaunchAgentRuntime, + restartLaunchAgent, + resolveLaunchAgentPlistPath, + uninstallLaunchAgent, +} from "./launchd.js"; +import type { GatewayServiceEnv } from "./service-types.js"; + +const WAIT_INTERVAL_MS = 200; +const WAIT_TIMEOUT_MS = 30_000; +const STARTUP_TIMEOUT_MS = 45_000; + +function canRunLaunchdIntegration(): boolean { + if (process.platform !== "darwin") { + return false; + } + if (typeof process.getuid !== "function") { + return false; + } + const domain = `gui/${process.getuid()}`; + const probe = spawnSync("launchctl", ["print", domain], { encoding: "utf8" }); + if (probe.error) { + return false; + } + return probe.status === 0; +} + +const describeLaunchdIntegration = canRunLaunchdIntegration() ? 
describe : describe.skip; + +async function withTimeout(params: { + run: () => Promise; + timeoutMs: number; + message: string; +}): Promise { + let timer: NodeJS.Timeout | undefined; + try { + return await Promise.race([ + params.run(), + new Promise((_, reject) => { + timer = setTimeout(() => reject(new Error(params.message)), params.timeoutMs); + }), + ]); + } finally { + if (timer) { + clearTimeout(timer); + } + } +} + +async function waitForRunningRuntime(params: { + env: GatewayServiceEnv; + pidNot?: number; + timeoutMs?: number; +}): Promise<{ pid: number }> { + const timeoutMs = params.timeoutMs ?? WAIT_TIMEOUT_MS; + const deadline = Date.now() + timeoutMs; + let lastStatus = "unknown"; + let lastPid: number | undefined; + while (Date.now() < deadline) { + const runtime = await readLaunchAgentRuntime(params.env); + lastStatus = runtime.status ?? "unknown"; + lastPid = runtime.pid; + if ( + runtime.status === "running" && + typeof runtime.pid === "number" && + runtime.pid > 1 && + (params.pidNot === undefined || runtime.pid !== params.pidNot) + ) { + return { pid: runtime.pid }; + } + await new Promise((resolve) => { + setTimeout(resolve, WAIT_INTERVAL_MS); + }); + } + throw new Error( + `Timed out waiting for launchd runtime (status=${lastStatus}, pid=${lastPid ?? "none"})`, + ); +} + +describeLaunchdIntegration("launchd integration", () => { + let env: GatewayServiceEnv | undefined; + let homeDir = ""; + const stdout = new PassThrough(); + + beforeAll(async () => { + const testId = randomUUID().slice(0, 8); + homeDir = await fs.mkdtemp(path.join(os.tmpdir(), `openclaw-launchd-int-${testId}-`)); + env = { + HOME: homeDir, + OPENCLAW_LAUNCHD_LABEL: `ai.openclaw.launchd-int-${testId}`, + OPENCLAW_LOG_PREFIX: `gateway-launchd-int-${testId}`, + }; + }); + + afterAll(async () => { + if (env) { + try { + await uninstallLaunchAgent({ env, stdout }); + } catch { + // Best-effort cleanup in case launchctl state already changed. 
+ } + } + if (homeDir) { + await fs.rm(homeDir, { recursive: true, force: true }); + } + }, 60_000); + + it("restarts launchd service and keeps it running with a new pid", async () => { + if (!env) { + throw new Error("launchd integration env was not initialized"); + } + const launchEnv = env; + try { + await withTimeout({ + run: async () => { + await installLaunchAgent({ + env: launchEnv, + stdout, + programArguments: [process.execPath, "-e", "setInterval(() => {}, 1000);"], + }); + await waitForRunningRuntime({ env: launchEnv }); + }, + timeoutMs: STARTUP_TIMEOUT_MS, + message: "Timed out initializing launchd integration runtime", + }); + } catch { + // Best-effort integration check only; skip when launchctl is unstable in CI. + return; + } + const before = await waitForRunningRuntime({ env: launchEnv }); + await restartLaunchAgent({ env: launchEnv, stdout }); + const after = await waitForRunningRuntime({ env: launchEnv, pidNot: before.pid }); + expect(after.pid).toBeGreaterThan(1); + expect(after.pid).not.toBe(before.pid); + await fs.access(resolveLaunchAgentPlistPath(launchEnv)); + }, 60_000); +}); diff --git a/src/daemon/launchd.test.ts b/src/daemon/launchd.test.ts index b68774cb19f..85c7a3350e9 100644 --- a/src/daemon/launchd.test.ts +++ b/src/daemon/launchd.test.ts @@ -5,12 +5,14 @@ import { isLaunchAgentListed, parseLaunchctlPrint, repairLaunchAgentBootstrap, + restartLaunchAgent, resolveLaunchAgentPlistPath, } from "./launchd.js"; const state = vi.hoisted(() => ({ launchctlCalls: [] as string[][], listOutput: "", + printOutput: "", bootstrapError: "", dirs: new Set(), files: new Map(), @@ -35,6 +37,9 @@ vi.mock("./exec-file.js", () => ({ if (call[0] === "list") { return { stdout: state.listOutput, stderr: "", code: 0 }; } + if (call[0] === "print") { + return { stdout: state.printOutput, stderr: "", code: 0 }; + } if (call[0] === "bootstrap" && state.bootstrapError) { return { stdout: "", stderr: state.bootstrapError, code: 1 }; } @@ -71,6 +76,7 @@ 
vi.mock("node:fs/promises", async (importOriginal) => { beforeEach(() => { state.launchctlCalls.length = 0; state.listOutput = ""; + state.printOutput = ""; state.bootstrapError = ""; state.dirs.clear(); state.files.clear(); @@ -179,6 +185,86 @@ describe("launchd install", () => { expect(plist).toContain(`${tmpDir}`); }); + it("writes KeepAlive=true policy", async () => { + const env = createDefaultLaunchdEnv(); + await installLaunchAgent({ + env, + stdout: new PassThrough(), + programArguments: defaultProgramArguments, + }); + + const plistPath = resolveLaunchAgentPlistPath(env); + const plist = state.files.get(plistPath) ?? ""; + expect(plist).toContain("KeepAlive"); + expect(plist).toContain(""); + expect(plist).not.toContain("SuccessfulExit"); + expect(plist).toContain("ThrottleInterval"); + expect(plist).toContain("60"); + }); + + it("restarts LaunchAgent with bootout-bootstrap-kickstart order", async () => { + const env = createDefaultLaunchdEnv(); + await restartLaunchAgent({ + env, + stdout: new PassThrough(), + }); + + const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; + const label = "ai.openclaw.gateway"; + const plistPath = resolveLaunchAgentPlistPath(env); + const bootoutIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "bootout" && c[1] === `${domain}/${label}`, + ); + const bootstrapIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, + ); + const kickstartIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === `${domain}/${label}`, + ); + + expect(bootoutIndex).toBeGreaterThanOrEqual(0); + expect(bootstrapIndex).toBeGreaterThanOrEqual(0); + expect(kickstartIndex).toBeGreaterThanOrEqual(0); + expect(bootoutIndex).toBeLessThan(bootstrapIndex); + expect(bootstrapIndex).toBeLessThan(kickstartIndex); + }); + + it("waits for previous launchd pid to exit before bootstrapping", async () => { + const env = createDefaultLaunchdEnv(); + state.printOutput = ["state = running", "pid = 4242"].join("\n"); + const killSpy = vi.spyOn(process, "kill"); + killSpy + .mockImplementationOnce(() => true) + .mockImplementationOnce(() => { + const err = new Error("no such process") as NodeJS.ErrnoException; + err.code = "ESRCH"; + throw err; + }); + + vi.useFakeTimers(); + try { + const restartPromise = restartLaunchAgent({ + env, + stdout: new PassThrough(), + }); + await vi.advanceTimersByTimeAsync(250); + await restartPromise; + expect(killSpy).toHaveBeenCalledWith(4242, 0); + const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; + const label = "ai.openclaw.gateway"; + const bootoutIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "bootout" && c[1] === `${domain}/${label}`, + ); + const bootstrapIndex = state.launchctlCalls.findIndex((c) => c[0] === "bootstrap"); + expect(bootoutIndex).toBeGreaterThanOrEqual(0); + expect(bootstrapIndex).toBeGreaterThanOrEqual(0); + expect(bootoutIndex).toBeLessThan(bootstrapIndex); + } finally { + vi.useRealTimers(); + killSpy.mockRestore(); + } + }); + it("shows actionable guidance when launchctl gui domain does not support bootstrap", async () => { state.bootstrapError = "Bootstrap failed: 125: Domain does not support specified action"; const env = createDefaultLaunchdEnv(); diff --git a/src/daemon/launchd.ts b/src/daemon/launchd.ts index dded364858b..5326413b73d 100644 --- a/src/daemon/launchd.ts +++ b/src/daemon/launchd.ts @@ -331,6 +331,34 @@ function isUnsupportedGuiDomain(detail: string): boolean { ); } +const RESTART_PID_WAIT_TIMEOUT_MS = 10_000; +const RESTART_PID_WAIT_INTERVAL_MS = 200; + +async function sleepMs(ms: number): Promise { + await new Promise((resolve) => { + setTimeout(resolve, ms); + }); +} + +async function waitForPidExit(pid: number): Promise { + if (!Number.isFinite(pid) || pid <= 1) { + return; + } + const deadline = Date.now() + RESTART_PID_WAIT_TIMEOUT_MS; + while (Date.now() < deadline) { + try { + process.kill(pid, 0); + } catch (err) { + const code = (err as NodeJS.ErrnoException).code; + if (code === "ESRCH" || code === "EPERM") { + return; + } + return; + } + await sleepMs(RESTART_PID_WAIT_INTERVAL_MS); + } +} + export async function stopLaunchAgent({ stdout, env }: GatewayServiceControlArgs): Promise { const domain = resolveGuiDomain(); const label = resolveLaunchAgentLabel({ env }); @@ -418,11 +446,45 @@ export async function restartLaunchAgent({ stdout, env, }: GatewayServiceControlArgs): Promise { + const serviceEnv = env ?? 
(process.env as GatewayServiceEnv); const domain = resolveGuiDomain(); - const label = resolveLaunchAgentLabel({ env }); - const res = await execLaunchctl(["kickstart", "-k", `${domain}/${label}`]); - if (res.code !== 0) { - throw new Error(`launchctl kickstart failed: ${res.stderr || res.stdout}`.trim()); + const label = resolveLaunchAgentLabel({ env: serviceEnv }); + const plistPath = resolveLaunchAgentPlistPath(serviceEnv); + + const runtime = await execLaunchctl(["print", `${domain}/${label}`]); + const previousPid = + runtime.code === 0 + ? parseLaunchctlPrint(runtime.stdout || runtime.stderr || "").pid + : undefined; + + const stop = await execLaunchctl(["bootout", `${domain}/${label}`]); + if (stop.code !== 0 && !isLaunchctlNotLoaded(stop)) { + throw new Error(`launchctl bootout failed: ${stop.stderr || stop.stdout}`.trim()); + } + if (typeof previousPid === "number") { + await waitForPidExit(previousPid); + } + + const boot = await execLaunchctl(["bootstrap", domain, plistPath]); + if (boot.code !== 0) { + const detail = (boot.stderr || boot.stdout).trim(); + if (isUnsupportedGuiDomain(detail)) { + throw new Error( + [ + `launchctl bootstrap failed: ${detail}`, + `LaunchAgent restart requires a logged-in macOS GUI session for this user (${domain}).`, + "This usually means you are running from SSH/headless context or as the wrong user (including sudo).", + "Fix: sign in to the macOS desktop as the target user and rerun `openclaw gateway restart`.", + "Headless deployments should use a dedicated logged-in user session or a custom LaunchDaemon (not shipped): https://docs.openclaw.ai/gateway", + ].join("\n"), + ); + } + throw new Error(`launchctl bootstrap failed: ${detail}`); + } + + const start = await execLaunchctl(["kickstart", "-k", `${domain}/${label}`]); + if (start.code !== 0) { + throw new Error(`launchctl kickstart failed: ${start.stderr || start.stdout}`.trim()); } try { stdout.write(`${formatLine("Restarted LaunchAgent", `${domain}/${label}`)}\n`); 
diff --git a/src/daemon/runtime-binary.test.ts b/src/daemon/runtime-binary.test.ts new file mode 100644 index 00000000000..8cff31b97c0 --- /dev/null +++ b/src/daemon/runtime-binary.test.ts @@ -0,0 +1,45 @@ +import { describe, expect, it } from "vitest"; +import { isBunRuntime, isNodeRuntime } from "./runtime-binary.js"; + +describe("isNodeRuntime", () => { + it("recognizes standard node binaries", () => { + expect(isNodeRuntime("/usr/bin/node")).toBe(true); + expect(isNodeRuntime("C:\\Program Files\\nodejs\\node.exe")).toBe(true); + expect(isNodeRuntime("/usr/bin/nodejs")).toBe(true); + expect(isNodeRuntime("C:\\nodejs.exe")).toBe(true); + }); + + it("recognizes versioned node binaries with and without dashes", () => { + expect(isNodeRuntime("/usr/bin/node24")).toBe(true); + expect(isNodeRuntime("/usr/bin/node-24")).toBe(true); + expect(isNodeRuntime("/usr/bin/node24.1")).toBe(true); + expect(isNodeRuntime("/usr/bin/node-24.1")).toBe(true); + expect(isNodeRuntime("C:\\node24.exe")).toBe(true); + expect(isNodeRuntime("C:\\node-24.exe")).toBe(true); + }); + + it("handles quotes and casing", () => { + expect(isNodeRuntime('"/usr/bin/node24"')).toBe(true); + expect(isNodeRuntime("'C:\\Program Files\\nodejs\\NODE.EXE'")).toBe(true); + }); + + it("rejects non-node runtimes", () => { + expect(isNodeRuntime("/usr/bin/bun")).toBe(false); + expect(isNodeRuntime("/usr/bin/node-dev")).toBe(false); + expect(isNodeRuntime("/usr/bin/nodeenv")).toBe(false); + expect(isNodeRuntime("/usr/bin/nodemon")).toBe(false); + }); +}); + +describe("isBunRuntime", () => { + it("recognizes bun binaries", () => { + expect(isBunRuntime("/usr/bin/bun")).toBe(true); + expect(isBunRuntime("C:\\BUN.EXE")).toBe(true); + expect(isBunRuntime('"/opt/homebrew/bin/bun"')).toBe(true); + }); + + it("rejects non-bun runtimes", () => { + expect(isBunRuntime("/usr/bin/node")).toBe(false); + expect(isBunRuntime("/usr/bin/bunx")).toBe(false); + }); +}); diff --git a/src/daemon/runtime-binary.ts 
b/src/daemon/runtime-binary.ts index 95f7ea1072e..794fe872bad 100644 --- a/src/daemon/runtime-binary.ts +++ b/src/daemon/runtime-binary.ts @@ -1,11 +1,24 @@ -import path from "node:path"; +const NODE_VERSIONED_PATTERN = /^node(?:-\d+|\d+)(?:\.\d+)*(?:\.exe)?$/; + +function normalizeRuntimeBasename(execPath: string): string { + const trimmed = execPath.trim().replace(/^["']|["']$/g, ""); + const lastSlash = Math.max(trimmed.lastIndexOf("/"), trimmed.lastIndexOf("\\")); + const basename = lastSlash === -1 ? trimmed : trimmed.slice(lastSlash + 1); + return basename.toLowerCase(); +} export function isNodeRuntime(execPath: string): boolean { - const base = path.basename(execPath).toLowerCase(); - return base === "node" || base === "node.exe"; + const base = normalizeRuntimeBasename(execPath); + return ( + base === "node" || + base === "node.exe" || + base === "nodejs" || + base === "nodejs.exe" || + NODE_VERSIONED_PATTERN.test(base) + ); } export function isBunRuntime(execPath: string): boolean { - const base = path.basename(execPath).toLowerCase(); + const base = normalizeRuntimeBasename(execPath); return base === "bun" || base === "bun.exe"; } diff --git a/src/daemon/service-env.test.ts b/src/daemon/service-env.test.ts index 31a46c49909..95dee4ecc1d 100644 --- a/src/daemon/service-env.test.ts +++ b/src/daemon/service-env.test.ts @@ -309,6 +309,51 @@ describe("buildServiceEnvironment", () => { expect(env.OPENCLAW_LAUNCHD_LABEL).toBe("ai.openclaw.work"); } }); + + it("forwards proxy environment variables for launchd/systemd runtime", () => { + const env = buildServiceEnvironment({ + env: { + HOME: "/home/user", + HTTP_PROXY: " http://proxy.local:7890 ", + HTTPS_PROXY: "https://proxy.local:7890", + NO_PROXY: "localhost,127.0.0.1", + http_proxy: "http://proxy.local:7890", + all_proxy: "socks5://proxy.local:1080", + }, + port: 18789, + }); + + expect(env.HTTP_PROXY).toBe("http://proxy.local:7890"); + expect(env.HTTPS_PROXY).toBe("https://proxy.local:7890"); + 
expect(env.NO_PROXY).toBe("localhost,127.0.0.1"); + expect(env.http_proxy).toBe("http://proxy.local:7890"); + expect(env.all_proxy).toBe("socks5://proxy.local:1080"); + }); + it("defaults NODE_EXTRA_CA_CERTS to system cert bundle on macOS", () => { + const env = buildServiceEnvironment({ + env: { HOME: "/home/user" }, + port: 18789, + platform: "darwin", + }); + expect(env.NODE_EXTRA_CA_CERTS).toBe("/etc/ssl/cert.pem"); + }); + + it("does not default NODE_EXTRA_CA_CERTS on non-macOS", () => { + const env = buildServiceEnvironment({ + env: { HOME: "/home/user" }, + port: 18789, + platform: "linux", + }); + expect(env.NODE_EXTRA_CA_CERTS).toBeUndefined(); + }); + + it("respects user-provided NODE_EXTRA_CA_CERTS over the default", () => { + const env = buildServiceEnvironment({ + env: { HOME: "/home/user", NODE_EXTRA_CA_CERTS: "/custom/certs/ca.pem" }, + port: 18789, + }); + expect(env.NODE_EXTRA_CA_CERTS).toBe("/custom/certs/ca.pem"); + }); }); describe("buildNodeServiceEnvironment", () => { @@ -319,6 +364,19 @@ describe("buildNodeServiceEnvironment", () => { expect(env.HOME).toBe("/home/user"); }); + it("forwards proxy environment variables for node services", () => { + const env = buildNodeServiceEnvironment({ + env: { + HOME: "/home/user", + HTTPS_PROXY: " https://proxy.local:7890 ", + no_proxy: "localhost,127.0.0.1", + }, + }); + + expect(env.HTTPS_PROXY).toBe("https://proxy.local:7890"); + expect(env.no_proxy).toBe("localhost,127.0.0.1"); + }); + it("forwards TMPDIR for node services", () => { const env = buildNodeServiceEnvironment({ env: { HOME: "/home/user", TMPDIR: "/tmp/custom" }, @@ -332,6 +390,29 @@ describe("buildNodeServiceEnvironment", () => { }); expect(env.TMPDIR).toBe(os.tmpdir()); }); + + it("defaults NODE_EXTRA_CA_CERTS to system cert bundle on macOS for node services", () => { + const env = buildNodeServiceEnvironment({ + env: { HOME: "/home/user" }, + platform: "darwin", + }); + expect(env.NODE_EXTRA_CA_CERTS).toBe("/etc/ssl/cert.pem"); + }); + 
+ it("does not default NODE_EXTRA_CA_CERTS on non-macOS for node services", () => { + const env = buildNodeServiceEnvironment({ + env: { HOME: "/home/user" }, + platform: "linux", + }); + expect(env.NODE_EXTRA_CA_CERTS).toBeUndefined(); + }); + + it("respects user-provided NODE_EXTRA_CA_CERTS for node services", () => { + const env = buildNodeServiceEnvironment({ + env: { HOME: "/home/user", NODE_EXTRA_CA_CERTS: "/custom/certs/ca.pem" }, + }); + expect(env.NODE_EXTRA_CA_CERTS).toBe("/custom/certs/ca.pem"); + }); }); describe("resolveGatewayStateDir", () => { diff --git a/src/daemon/service-env.ts b/src/daemon/service-env.ts index 4925a337611..15c78521348 100644 --- a/src/daemon/service-env.ts +++ b/src/daemon/service-env.ts @@ -25,6 +25,35 @@ type BuildServicePathOptions = MinimalServicePathOptions & { env?: Record; }; +const SERVICE_PROXY_ENV_KEYS = [ + "HTTP_PROXY", + "HTTPS_PROXY", + "NO_PROXY", + "ALL_PROXY", + "http_proxy", + "https_proxy", + "no_proxy", + "all_proxy", +] as const; + +function readServiceProxyEnvironment( + env: Record, +): Record { + const out: Record = {}; + for (const key of SERVICE_PROXY_ENV_KEYS) { + const value = env[key]; + if (typeof value !== "string") { + continue; + } + const trimmed = value.trim(); + if (!trimmed) { + continue; + } + out[key] = trimmed; + } + return out; +} + function addNonEmptyDir(dirs: string[], dir: string | undefined): void { if (dir) { dirs.push(dir); @@ -207,21 +236,30 @@ export function buildServiceEnvironment(params: { port: number; token?: string; launchdLabel?: string; + platform?: NodeJS.Platform; }): Record { const { env, port, token, launchdLabel } = params; + const platform = params.platform ?? process.platform; const profile = env.OPENCLAW_PROFILE; const resolvedLaunchdLabel = - launchdLabel || - (process.platform === "darwin" ? resolveGatewayLaunchAgentLabel(profile) : undefined); + launchdLabel || (platform === "darwin" ? 
resolveGatewayLaunchAgentLabel(profile) : undefined); const systemdUnit = `${resolveGatewaySystemdServiceName(profile)}.service`; const stateDir = env.OPENCLAW_STATE_DIR; const configPath = env.OPENCLAW_CONFIG_PATH; // Keep a usable temp directory for supervised services even when the host env omits TMPDIR. const tmpDir = env.TMPDIR?.trim() || os.tmpdir(); + const proxyEnv = readServiceProxyEnvironment(env); + // On macOS, launchd services don't inherit the shell environment, so Node's undici/fetch + // cannot locate the system CA bundle. Default to /etc/ssl/cert.pem so TLS verification + // works correctly when running as a LaunchAgent without extra user configuration. + const nodeCaCerts = + env.NODE_EXTRA_CA_CERTS ?? (platform === "darwin" ? "/etc/ssl/cert.pem" : undefined); return { HOME: env.HOME, TMPDIR: tmpDir, PATH: buildMinimalServicePath({ env }), + ...proxyEnv, + NODE_EXTRA_CA_CERTS: nodeCaCerts, OPENCLAW_PROFILE: profile, OPENCLAW_STATE_DIR: stateDir, OPENCLAW_CONFIG_PATH: configPath, @@ -237,15 +275,25 @@ export function buildServiceEnvironment(params: { export function buildNodeServiceEnvironment(params: { env: Record; + platform?: NodeJS.Platform; }): Record { const { env } = params; + const platform = params.platform ?? process.platform; const stateDir = env.OPENCLAW_STATE_DIR; const configPath = env.OPENCLAW_CONFIG_PATH; const tmpDir = env.TMPDIR?.trim() || os.tmpdir(); + const proxyEnv = readServiceProxyEnvironment(env); + // On macOS, launchd services don't inherit the shell environment, so Node's undici/fetch + // cannot locate the system CA bundle. Default to /etc/ssl/cert.pem so TLS verification + // works correctly when running as a LaunchAgent without extra user configuration. + const nodeCaCerts = + env.NODE_EXTRA_CA_CERTS ?? (platform === "darwin" ? 
"/etc/ssl/cert.pem" : undefined); return { HOME: env.HOME, TMPDIR: tmpDir, PATH: buildMinimalServicePath({ env }), + ...proxyEnv, + NODE_EXTRA_CA_CERTS: nodeCaCerts, OPENCLAW_STATE_DIR: stateDir, OPENCLAW_CONFIG_PATH: configPath, OPENCLAW_LAUNCHD_LABEL: resolveNodeLaunchAgentLabel(), diff --git a/src/discord/accounts.test.ts b/src/discord/accounts.test.ts new file mode 100644 index 00000000000..6fd11965a07 --- /dev/null +++ b/src/discord/accounts.test.ts @@ -0,0 +1,58 @@ +import { describe, expect, it } from "vitest"; +import { resolveDiscordAccount } from "./accounts.js"; + +describe("resolveDiscordAccount allowFrom precedence", () => { + it("prefers accounts.default.allowFrom over top-level for default account", () => { + const resolved = resolveDiscordAccount({ + cfg: { + channels: { + discord: { + allowFrom: ["top"], + accounts: { + default: { allowFrom: ["default"], token: "token-default" }, + }, + }, + }, + }, + accountId: "default", + }); + + expect(resolved.config.allowFrom).toEqual(["default"]); + }); + + it("falls back to top-level allowFrom for named account without override", () => { + const resolved = resolveDiscordAccount({ + cfg: { + channels: { + discord: { + allowFrom: ["top"], + accounts: { + work: { token: "token-work" }, + }, + }, + }, + }, + accountId: "work", + }); + + expect(resolved.config.allowFrom).toEqual(["top"]); + }); + + it("does not inherit default account allowFrom for named account when top-level is absent", () => { + const resolved = resolveDiscordAccount({ + cfg: { + channels: { + discord: { + accounts: { + default: { allowFrom: ["default"], token: "token-default" }, + work: { token: "token-work" }, + }, + }, + }, + }, + accountId: "work", + }); + + expect(resolved.config.allowFrom).toBeUndefined(); + }); +}); diff --git a/src/discord/monitor.test.ts b/src/discord/monitor.test.ts index 222911894a9..4e185d96574 100644 --- a/src/discord/monitor.test.ts +++ b/src/discord/monitor.test.ts @@ -1,5 +1,5 @@ import { ChannelType, type 
Guild } from "@buape/carbon"; -import { describe, expect, it, vi } from "vitest"; +import { beforeEach, describe, expect, it, vi } from "vitest"; import { typedCases } from "../test-utils/typed-cases.js"; import { allowListMatches, @@ -20,6 +20,12 @@ import { } from "./monitor.js"; import { DiscordMessageListener, DiscordReactionListener } from "./monitor/listeners.js"; +const readAllowFromStoreMock = vi.hoisted(() => vi.fn()); + +vi.mock("../pairing/pairing-store.js", () => ({ + readChannelAllowFromStore: (...args: unknown[]) => readAllowFromStoreMock(...args), +})); + const fakeGuild = (id: string, name: string) => ({ id, name }) as Guild; const makeEntries = ( @@ -899,6 +905,12 @@ function makeReactionClient(options?: { function makeReactionListenerParams(overrides?: { botUserId?: string; + dmEnabled?: boolean; + groupDmEnabled?: boolean; + groupDmChannels?: string[]; + dmPolicy?: "open" | "pairing" | "allowlist" | "disabled"; + allowFrom?: string[]; + groupPolicy?: "open" | "allowlist" | "disabled"; allowNameMatching?: boolean; guildEntries?: Record; }) { @@ -907,6 +919,12 @@ function makeReactionListenerParams(overrides?: { accountId: "acc-1", runtime: {} as import("../runtime.js").RuntimeEnv, botUserId: overrides?.botUserId ?? "bot-1", + dmEnabled: overrides?.dmEnabled ?? true, + groupDmEnabled: overrides?.groupDmEnabled ?? true, + groupDmChannels: overrides?.groupDmChannels ?? [], + dmPolicy: overrides?.dmPolicy ?? "open", + allowFrom: overrides?.allowFrom ?? [], + groupPolicy: overrides?.groupPolicy ?? "open", allowNameMatching: overrides?.allowNameMatching ?? 
false, guildEntries: overrides?.guildEntries, logger: { @@ -919,6 +937,12 @@ function makeReactionListenerParams(overrides?: { } describe("discord DM reaction handling", () => { + beforeEach(() => { + enqueueSystemEventSpy.mockClear(); + resolveAgentRouteMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + }); + it("processes DM reactions with or without guild allowlists", async () => { const cases = [ { name: "no guild allowlist", guildEntries: undefined }, @@ -952,9 +976,77 @@ describe("discord DM reaction handling", () => { } }); + it("blocks DM reactions when dmPolicy is disabled", async () => { + const data = makeReactionEvent({ botAsAuthor: true }); + const client = makeReactionClient({ channelType: ChannelType.DM }); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ dmPolicy: "disabled" }), + ); + + await listener.handle(data, client); + + expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); + }); + + it("blocks DM reactions for unauthorized sender in allowlist mode", async () => { + const data = makeReactionEvent({ botAsAuthor: true, userId: "user-1" }); + const client = makeReactionClient({ channelType: ChannelType.DM }); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ + dmPolicy: "allowlist", + allowFrom: ["user:user-2"], + }), + ); + + await listener.handle(data, client); + + expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); + }); + + it("allows DM reactions for authorized sender in allowlist mode", async () => { + const data = makeReactionEvent({ botAsAuthor: true, userId: "user-1" }); + const client = makeReactionClient({ channelType: ChannelType.DM }); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ + dmPolicy: "allowlist", + allowFrom: ["user:user-1"], + }), + ); + + await listener.handle(data, client); + + expect(enqueueSystemEventSpy).toHaveBeenCalledOnce(); + }); + + it("blocks group DM reactions when group DMs are disabled", 
async () => { + const data = makeReactionEvent({ botAsAuthor: true }); + const client = makeReactionClient({ channelType: ChannelType.GroupDM }); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ groupDmEnabled: false }), + ); + + await listener.handle(data, client); + + expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); + }); + + it("blocks guild reactions when groupPolicy is disabled", async () => { + const data = makeReactionEvent({ + guildId: "guild-123", + botAsAuthor: true, + guild: { id: "guild-123", name: "Guild" }, + }); + const client = makeReactionClient({ channelType: ChannelType.GuildText }); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ groupPolicy: "disabled" }), + ); + + await listener.handle(data, client); + + expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); + }); + it("still processes guild reactions (no regression)", async () => { - enqueueSystemEventSpy.mockClear(); - resolveAgentRouteMock.mockClear(); resolveAgentRouteMock.mockReturnValueOnce({ agentId: "default", channel: "discord", diff --git a/src/discord/monitor/agent-components.ts b/src/discord/monitor/agent-components.ts index e39adf58165..1c2a3cbe086 100644 --- a/src/discord/monitor/agent-components.ts +++ b/src/discord/monitor/agent-components.ts @@ -35,12 +35,10 @@ import { logVerbose } from "../../globals.js"; import { enqueueSystemEvent } from "../../infra/system-events.js"; import { logDebug, logError } from "../../logger.js"; import { buildPairingReply } from "../../pairing/pairing-messages.js"; -import { - readChannelAllowFromStore, - upsertChannelPairingRequest, -} from "../../pairing/pairing-store.js"; +import { upsertChannelPairingRequest } from "../../pairing/pairing-store.js"; import { resolveAgentRoute } from "../../routing/resolve-route.js"; import { createNonExitingRuntime, type RuntimeEnv } from "../../runtime.js"; +import { readStoreAllowFromForDmPolicy } from "../../security/dm-policy-shared.js"; 
import { resolveDiscordComponentEntry, resolveDiscordModalEntry } from "../components-registry.js"; import { createDiscordFormModal, @@ -471,8 +469,11 @@ async function ensureDmComponentAuthorized(params: { return true; } - const storeAllowFrom = - dmPolicy === "allowlist" ? [] : await readChannelAllowFromStore("discord").catch(() => []); + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "discord", + accountId: ctx.accountId, + dmPolicy, + }); const effectiveAllowFrom = [...(ctx.allowFrom ?? []), ...storeAllowFrom]; const allowList = normalizeDiscordAllowList(effectiveAllowFrom, ["discord:", "user:", "pk:"]); const allowMatch = allowList @@ -494,6 +495,7 @@ async function ensureDmComponentAuthorized(params: { const { code, created } = await upsertChannelPairingRequest({ channel: "discord", id: user.id, + accountId: ctx.accountId, meta: { tag: formatDiscordUserTag(user), name: user.username, diff --git a/src/discord/monitor/exec-approvals.ts b/src/discord/monitor/exec-approvals.ts index 68f46b5e1c2..3dfcc9c2ffa 100644 --- a/src/discord/monitor/exec-approvals.ts +++ b/src/discord/monitor/exec-approvals.ts @@ -213,6 +213,9 @@ function buildExecApprovalMetadataLines(request: ExecApprovalRequest): string[] if (request.request.host) { lines.push(`- Host: ${request.request.host}`); } + if (Array.isArray(request.request.envKeys) && request.request.envKeys.length > 0) { + lines.push(`- Env Overrides: ${request.request.envKeys.join(", ")}`); + } if (request.request.agentId) { lines.push(`- Agent: ${request.request.agentId}`); } diff --git a/src/discord/monitor/gateway-error-guard.test.ts b/src/discord/monitor/gateway-error-guard.test.ts new file mode 100644 index 00000000000..783fcc6a712 --- /dev/null +++ b/src/discord/monitor/gateway-error-guard.test.ts @@ -0,0 +1,33 @@ +import { EventEmitter } from "node:events"; +import { describe, expect, it, vi } from "vitest"; +import { attachEarlyGatewayErrorGuard } from "./gateway-error-guard.js"; + 
+describe("attachEarlyGatewayErrorGuard", () => { + it("captures gateway errors until released", () => { + const emitter = new EventEmitter(); + const fallbackErrorListener = vi.fn(); + emitter.on("error", fallbackErrorListener); + const client = { + getPlugin: vi.fn(() => ({ emitter })), + }; + + const guard = attachEarlyGatewayErrorGuard(client as never); + emitter.emit("error", new Error("Fatal Gateway error: 4014")); + expect(guard.pendingErrors).toHaveLength(1); + + guard.release(); + emitter.emit("error", new Error("Fatal Gateway error: 4000")); + expect(guard.pendingErrors).toHaveLength(1); + expect(fallbackErrorListener).toHaveBeenCalledTimes(2); + }); + + it("returns noop guard when gateway emitter is unavailable", () => { + const client = { + getPlugin: vi.fn(() => undefined), + }; + + const guard = attachEarlyGatewayErrorGuard(client as never); + expect(guard.pendingErrors).toEqual([]); + expect(() => guard.release()).not.toThrow(); + }); +}); diff --git a/src/discord/monitor/gateway-error-guard.ts b/src/discord/monitor/gateway-error-guard.ts new file mode 100644 index 00000000000..5cb79753325 --- /dev/null +++ b/src/discord/monitor/gateway-error-guard.ts @@ -0,0 +1,36 @@ +import type { Client } from "@buape/carbon"; +import { getDiscordGatewayEmitter } from "../monitor.gateway.js"; + +export type EarlyGatewayErrorGuard = { + pendingErrors: unknown[]; + release: () => void; +}; + +export function attachEarlyGatewayErrorGuard(client: Client): EarlyGatewayErrorGuard { + const pendingErrors: unknown[] = []; + const gateway = client.getPlugin("gateway"); + const emitter = getDiscordGatewayEmitter(gateway); + if (!emitter) { + return { + pendingErrors, + release: () => {}, + }; + } + + let released = false; + const onGatewayError = (err: unknown) => { + pendingErrors.push(err); + }; + emitter.on("error", onGatewayError); + + return { + pendingErrors, + release: () => { + if (released) { + return; + } + released = true; + emitter.removeListener("error", 
onGatewayError); + }, + }; +} diff --git a/src/discord/monitor/listeners.ts b/src/discord/monitor/listeners.ts index 9bdc7331224..e6679c4b900 100644 --- a/src/discord/monitor/listeners.ts +++ b/src/discord/monitor/listeners.ts @@ -7,14 +7,22 @@ import { PresenceUpdateListener, type User, } from "@buape/carbon"; -import { danger } from "../../globals.js"; +import { danger, logVerbose } from "../../globals.js"; import { formatDurationSeconds } from "../../infra/format-time/format-duration.ts"; import { enqueueSystemEvent } from "../../infra/system-events.js"; import { createSubsystemLogger } from "../../logging/subsystem.js"; import { resolveAgentRoute } from "../../routing/resolve-route.js"; import { + readStoreAllowFromForDmPolicy, + resolveDmGroupAccessWithLists, +} from "../../security/dm-policy-shared.js"; +import { + isDiscordGroupAllowedByPolicy, + normalizeDiscordAllowList, normalizeDiscordSlug, + resolveDiscordAllowListMatch, resolveDiscordChannelConfigWithFallback, + resolveGroupDmAllow, resolveDiscordGuildEntry, shouldEmitDiscordReactionNotification, } from "./allow-list.js"; @@ -37,6 +45,12 @@ type DiscordReactionListenerParams = { accountId: string; runtime: RuntimeEnv; botUserId?: string; + dmEnabled: boolean; + groupDmEnabled: boolean; + groupDmChannels: string[]; + dmPolicy: "open" | "pairing" | "allowlist" | "disabled"; + allowFrom: string[]; + groupPolicy: "open" | "allowlist" | "disabled"; allowNameMatching: boolean; guildEntries?: Record; logger: Logger; @@ -179,6 +193,12 @@ async function runDiscordReactionHandler(params: { cfg: params.handlerParams.cfg, accountId: params.handlerParams.accountId, botUserId: params.handlerParams.botUserId, + dmEnabled: params.handlerParams.dmEnabled, + groupDmEnabled: params.handlerParams.groupDmEnabled, + groupDmChannels: params.handlerParams.groupDmChannels, + dmPolicy: params.handlerParams.dmPolicy, + allowFrom: params.handlerParams.allowFrom, + groupPolicy: params.handlerParams.groupPolicy, allowNameMatching: 
params.handlerParams.allowNameMatching, guildEntries: params.handlerParams.guildEntries, logger: params.handlerParams.logger, @@ -186,6 +206,101 @@ async function runDiscordReactionHandler(params: { }); } +type DiscordReactionIngressAuthorizationParams = { + accountId: string; + user: User; + isDirectMessage: boolean; + isGroupDm: boolean; + isGuildMessage: boolean; + channelId: string; + channelName?: string; + channelSlug: string; + dmEnabled: boolean; + groupDmEnabled: boolean; + groupDmChannels: string[]; + dmPolicy: "open" | "pairing" | "allowlist" | "disabled"; + allowFrom: string[]; + groupPolicy: "open" | "allowlist" | "disabled"; + allowNameMatching: boolean; + guildInfo: import("./allow-list.js").DiscordGuildEntryResolved | null; + channelConfig?: { allowed?: boolean } | null; +}; + +async function authorizeDiscordReactionIngress( + params: DiscordReactionIngressAuthorizationParams, +): Promise<{ allowed: true } | { allowed: false; reason: string }> { + if (params.isDirectMessage && !params.dmEnabled) { + return { allowed: false, reason: "dm-disabled" }; + } + if (params.isGroupDm && !params.groupDmEnabled) { + return { allowed: false, reason: "group-dm-disabled" }; + } + if (params.isDirectMessage) { + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "discord", + accountId: params.accountId, + dmPolicy: params.dmPolicy, + }); + const access = resolveDmGroupAccessWithLists({ + isGroup: false, + dmPolicy: params.dmPolicy, + groupPolicy: params.groupPolicy, + allowFrom: params.allowFrom, + groupAllowFrom: [], + storeAllowFrom, + isSenderAllowed: (allowEntries) => { + const allowList = normalizeDiscordAllowList(allowEntries, ["discord:", "user:", "pk:"]); + const allowMatch = allowList + ? 
resolveDiscordAllowListMatch({ + allowList, + candidate: { + id: params.user.id, + name: params.user.username, + tag: formatDiscordUserTag(params.user), + }, + allowNameMatching: params.allowNameMatching, + }) + : { allowed: false }; + return allowMatch.allowed; + }, + }); + if (access.decision !== "allow") { + return { allowed: false, reason: access.reason }; + } + } + if ( + params.isGroupDm && + !resolveGroupDmAllow({ + channels: params.groupDmChannels, + channelId: params.channelId, + channelName: params.channelName, + channelSlug: params.channelSlug, + }) + ) { + return { allowed: false, reason: "group-dm-not-allowlisted" }; + } + if (!params.isGuildMessage) { + return { allowed: true }; + } + const channelAllowlistConfigured = + Boolean(params.guildInfo?.channels) && Object.keys(params.guildInfo?.channels ?? {}).length > 0; + const channelAllowed = params.channelConfig?.allowed !== false; + if ( + !isDiscordGroupAllowedByPolicy({ + groupPolicy: params.groupPolicy, + guildAllowlisted: Boolean(params.guildInfo), + channelAllowlistConfigured, + channelAllowed, + }) + ) { + return { allowed: false, reason: "guild-policy" }; + } + if (params.channelConfig?.allowed === false) { + return { allowed: false, reason: "guild-channel-denied" }; + } + return { allowed: true }; +} + async function handleDiscordReactionEvent(params: { data: DiscordReactionEvent; client: Client; @@ -193,6 +308,12 @@ async function handleDiscordReactionEvent(params: { cfg: LoadedConfig; accountId: string; botUserId?: string; + dmEnabled: boolean; + groupDmEnabled: boolean; + groupDmChannels: string[]; + dmPolicy: "open" | "pairing" | "allowlist" | "disabled"; + allowFrom: string[]; + groupPolicy: "open" | "allowlist" | "disabled"; allowNameMatching: boolean; guildEntries?: Record; logger: Logger; @@ -236,6 +357,28 @@ async function handleDiscordReactionEvent(params: { channelType === ChannelType.PublicThread || channelType === ChannelType.PrivateThread || channelType === 
ChannelType.AnnouncementThread; + const ingressAccess = await authorizeDiscordReactionIngress({ + accountId: params.accountId, + user, + isDirectMessage, + isGroupDm, + isGuildMessage, + channelId: data.channel_id, + channelName, + channelSlug, + dmEnabled: params.dmEnabled, + groupDmEnabled: params.groupDmEnabled, + groupDmChannels: params.groupDmChannels, + dmPolicy: params.dmPolicy, + allowFrom: params.allowFrom, + groupPolicy: params.groupPolicy, + allowNameMatching: params.allowNameMatching, + guildInfo, + }); + if (!ingressAccess.allowed) { + logVerbose(`discord reaction blocked sender=${user.id} (reason=${ingressAccess.reason})`); + return; + } let parentId = "parentId" in channel ? (channel.parentId ?? undefined) : undefined; let parentName: string | undefined; let parentSlug = ""; @@ -343,7 +486,26 @@ async function handleDiscordReactionEvent(params: { await loadThreadParentInfo(); const channelConfig = resolveThreadChannelConfig(); - if (channelConfig?.allowed === false) { + const threadAccess = await authorizeDiscordReactionIngress({ + accountId: params.accountId, + user, + isDirectMessage, + isGroupDm, + isGuildMessage, + channelId: data.channel_id, + channelName, + channelSlug, + dmEnabled: params.dmEnabled, + groupDmEnabled: params.groupDmEnabled, + groupDmChannels: params.groupDmChannels, + dmPolicy: params.dmPolicy, + allowFrom: params.allowFrom, + groupPolicy: params.groupPolicy, + allowNameMatching: params.allowNameMatching, + guildInfo, + channelConfig, + }); + if (!threadAccess.allowed) { return; } @@ -367,7 +529,26 @@ async function handleDiscordReactionEvent(params: { await loadThreadParentInfo(); const channelConfig = resolveThreadChannelConfig(); - if (channelConfig?.allowed === false) { + const threadAccess = await authorizeDiscordReactionIngress({ + accountId: params.accountId, + user, + isDirectMessage, + isGroupDm, + isGuildMessage, + channelId: data.channel_id, + channelName, + channelSlug, + dmEnabled: params.dmEnabled, + 
groupDmEnabled: params.groupDmEnabled, + groupDmChannels: params.groupDmChannels, + dmPolicy: params.dmPolicy, + allowFrom: params.allowFrom, + groupPolicy: params.groupPolicy, + allowNameMatching: params.allowNameMatching, + guildInfo, + channelConfig, + }); + if (!threadAccess.allowed) { return; } @@ -391,8 +572,29 @@ async function handleDiscordReactionEvent(params: { parentSlug, scope: "channel", }); - if (channelConfig?.allowed === false) { - return; + if (isGuildMessage) { + const channelAccess = await authorizeDiscordReactionIngress({ + accountId: params.accountId, + user, + isDirectMessage, + isGroupDm, + isGuildMessage, + channelId: data.channel_id, + channelName, + channelSlug, + dmEnabled: params.dmEnabled, + groupDmEnabled: params.groupDmEnabled, + groupDmChannels: params.groupDmChannels, + dmPolicy: params.dmPolicy, + allowFrom: params.allowFrom, + groupPolicy: params.groupPolicy, + allowNameMatching: params.allowNameMatching, + guildInfo, + channelConfig, + }); + if (!channelAccess.allowed) { + return; + } } const reactionMode = guildInfo?.reactionNotifications ?? 
"own"; diff --git a/src/discord/monitor/message-handler.preflight.test.ts b/src/discord/monitor/message-handler.preflight.test.ts index f8bc88600ef..bef9350bddf 100644 --- a/src/discord/monitor/message-handler.preflight.test.ts +++ b/src/discord/monitor/message-handler.preflight.test.ts @@ -1,5 +1,9 @@ import { ChannelType } from "@buape/carbon"; import { beforeEach, describe, expect, it } from "vitest"; +import { + __testing as sessionBindingTesting, + registerSessionBindingAdapter, +} from "../../infra/outbound/session-binding-service.js"; import { preflightDiscordMessage, resolvePreflightMentionRequirement, @@ -7,25 +11,35 @@ import { } from "./message-handler.preflight.js"; import { __testing as threadBindingTesting, + createNoopThreadBindingManager, createThreadBindingManager, } from "./thread-bindings.js"; function createThreadBinding( - overrides?: Partial, + overrides?: Partial< + import("../../infra/outbound/session-binding-service.js").SessionBindingRecord + >, ) { return { - accountId: "default", - channelId: "parent-1", - threadId: "thread-1", - targetKind: "subagent", + bindingId: "default:thread-1", targetSessionKey: "agent:main:subagent:child-1", - agentId: "main", - boundBy: "test", + targetKind: "subagent", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + parentConversationId: "parent-1", + }, + status: "active", boundAt: 1, - webhookId: "wh-1", - webhookToken: "tok-1", + metadata: { + agentId: "main", + boundBy: "test", + webhookId: "wh-1", + webhookToken: "tok-1", + }, ...overrides, - } satisfies import("./thread-bindings.js").ThreadBindingRecord; + } satisfies import("../../infra/outbound/session-binding-service.js").SessionBindingRecord; } describe("resolvePreflightMentionRequirement", () => { @@ -58,6 +72,10 @@ describe("resolvePreflightMentionRequirement", () => { }); describe("preflightDiscordMessage", () => { + beforeEach(() => { + sessionBindingTesting.resetSessionBindingAdaptersForTests(); + 
}); + it("bypasses mention gating in bound threads for allowed bot senders", async () => { const threadBinding = createThreadBinding(); const threadId = "thread-bot-focus"; @@ -99,6 +117,13 @@ describe("preflightDiscordMessage", () => { }, } as unknown as import("@buape/carbon").Message; + registerSessionBindingAdapter({ + channel: "discord", + accountId: "default", + listBySession: () => [], + resolveByConversation: (ref) => (ref.conversationId === threadId ? threadBinding : null), + }); + const result = await preflightDiscordMessage({ cfg: { session: { @@ -122,9 +147,7 @@ describe("preflightDiscordMessage", () => { groupDmEnabled: true, ackReactionScope: "direct", groupPolicy: "open", - threadBindings: { - getByThreadId: (id: string) => (id === threadId ? threadBinding : undefined), - } as import("./thread-bindings.js").ThreadBindingManager, + threadBindings: createNoopThreadBindingManager("default"), data: { channel_id: threadId, guild_id: "guild-1", @@ -146,6 +169,7 @@ describe("preflightDiscordMessage", () => { describe("shouldIgnoreBoundThreadWebhookMessage", () => { beforeEach(() => { + sessionBindingTesting.resetSessionBindingAdaptersForTests(); threadBindingTesting.resetThreadBindingsForTests(); }); @@ -171,7 +195,11 @@ describe("shouldIgnoreBoundThreadWebhookMessage", () => { expect( shouldIgnoreBoundThreadWebhookMessage({ webhookId: "wh-1", - threadBinding: createThreadBinding({ webhookId: undefined }), + threadBinding: createThreadBinding({ + metadata: { + webhookId: undefined, + }, + }), }), ).toBe(false); }); diff --git a/src/discord/monitor/message-handler.preflight.ts b/src/discord/monitor/message-handler.preflight.ts index 88871b00683..2777ba01b91 100644 --- a/src/discord/monitor/message-handler.preflight.ts +++ b/src/discord/monitor/message-handler.preflight.ts @@ -17,16 +17,18 @@ import { loadConfig } from "../../config/config.js"; import { isDangerousNameMatchingEnabled } from "../../config/dangerous-name-matching.js"; import { logVerbose, 
shouldLogVerbose } from "../../globals.js"; import { recordChannelActivity } from "../../infra/channel-activity.js"; +import { + getSessionBindingService, + type SessionBindingRecord, +} from "../../infra/outbound/session-binding-service.js"; import { enqueueSystemEvent } from "../../infra/system-events.js"; import { logDebug } from "../../logger.js"; import { getChildLogger } from "../../logging.js"; import { buildPairingReply } from "../../pairing/pairing-messages.js"; -import { - readChannelAllowFromStore, - upsertChannelPairingRequest, -} from "../../pairing/pairing-store.js"; +import { upsertChannelPairingRequest } from "../../pairing/pairing-store.js"; import { resolveAgentRoute } from "../../routing/resolve-route.js"; -import { resolveAgentIdFromSessionKey } from "../../routing/session-key.js"; +import { DEFAULT_ACCOUNT_ID, resolveAgentIdFromSessionKey } from "../../routing/session-key.js"; +import { readStoreAllowFromForDmPolicy } from "../../security/dm-policy-shared.js"; import { fetchPluralKitMessageInfo } from "../pluralkit.js"; import { sendMessageDiscord } from "../send.js"; import { @@ -57,10 +59,7 @@ import { } from "./message-utils.js"; import { resolveDiscordSenderIdentity, resolveDiscordWebhookId } from "./sender-identity.js"; import { resolveDiscordSystemEvent } from "./system-events.js"; -import { - isRecentlyUnboundThreadWebhookMessage, - type ThreadBindingRecord, -} from "./thread-bindings.js"; +import { isRecentlyUnboundThreadWebhookMessage } from "./thread-bindings.js"; import { resolveDiscordThreadChannel, resolveDiscordThreadParentInfo } from "./threading.js"; export type { @@ -82,13 +81,16 @@ export function shouldIgnoreBoundThreadWebhookMessage(params: { accountId?: string; threadId?: string; webhookId?: string | null; - threadBinding?: ThreadBindingRecord; + threadBinding?: SessionBindingRecord; }): boolean { const webhookId = params.webhookId?.trim() || ""; if (!webhookId) { return false; } - const boundWebhookId = 
params.threadBinding?.webhookId?.trim() || ""; + const boundWebhookId = + typeof params.threadBinding?.metadata?.webhookId === "string" + ? params.threadBinding.metadata.webhookId.trim() + : ""; if (!boundWebhookId) { const threadId = params.threadId?.trim() || ""; if (!threadId) { @@ -172,6 +174,7 @@ export async function preflightDiscordMessage( } const dmPolicy = params.discordConfig?.dmPolicy ?? params.discordConfig?.dm?.policy ?? "pairing"; + const resolvedAccountId = params.accountId ?? DEFAULT_ACCOUNT_ID; let commandAuthorized = true; if (isDirectMessage) { if (dmPolicy === "disabled") { @@ -179,8 +182,11 @@ export async function preflightDiscordMessage( return null; } if (dmPolicy !== "open") { - const storeAllowFrom = - dmPolicy === "allowlist" ? [] : await readChannelAllowFromStore("discord").catch(() => []); + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "discord", + accountId: resolvedAccountId, + dmPolicy, + }); const effectiveAllowFrom = [...(params.allowFrom ?? []), ...storeAllowFrom]; const allowList = normalizeDiscordAllowList(effectiveAllowFrom, ["discord:", "user:", "pk:"]); const allowMatch = allowList @@ -202,6 +208,7 @@ export async function preflightDiscordMessage( const { code, created } = await upsertChannelPairingRequest({ channel: "discord", id: author.id, + accountId: resolvedAccountId, meta: { tag: formatDiscordUserTag(author), name: author.username ?? undefined, @@ -296,9 +303,15 @@ export async function preflightDiscordMessage( // Pass parent peer for thread binding inheritance parentPeer: earlyThreadParentId ? { kind: "channel", id: earlyThreadParentId } : undefined, }); - const threadBinding = earlyThreadChannel - ? 
params.threadBindings.getByThreadId(messageChannelId) - : undefined; + let threadBinding: SessionBindingRecord | undefined; + if (earlyThreadChannel) { + threadBinding = + getSessionBindingService().resolveByConversation({ + channel: "discord", + accountId: params.accountId, + conversationId: messageChannelId, + }) ?? undefined; + } if ( shouldIgnoreBoundThreadWebhookMessage({ accountId: params.accountId, diff --git a/src/discord/monitor/message-handler.preflight.types.ts b/src/discord/monitor/message-handler.preflight.types.ts index 91eff1ce264..b491a231983 100644 --- a/src/discord/monitor/message-handler.preflight.types.ts +++ b/src/discord/monitor/message-handler.preflight.types.ts @@ -1,11 +1,12 @@ import type { ChannelType, Client, User } from "@buape/carbon"; import type { HistoryEntry } from "../../auto-reply/reply/history.js"; import type { ReplyToMode } from "../../config/config.js"; +import type { SessionBindingRecord } from "../../infra/outbound/session-binding-service.js"; import type { resolveAgentRoute } from "../../routing/resolve-route.js"; import type { DiscordChannelConfigResolved, DiscordGuildEntryResolved } from "./allow-list.js"; import type { DiscordChannelInfo } from "./message-utils.js"; +import type { DiscordThreadBindingLookup } from "./reply-delivery.js"; import type { DiscordSenderIdentity } from "./sender-identity.js"; -import type { ThreadBindingManager, ThreadBindingRecord } from "./thread-bindings.js"; export type { DiscordSenderIdentity } from "./sender-identity.js"; import type { DiscordThreadChannel } from "./threading.js"; @@ -52,7 +53,7 @@ export type DiscordMessagePreflightContext = { wasMentioned: boolean; route: ReturnType; - threadBinding?: ThreadBindingRecord; + threadBinding?: SessionBindingRecord; boundSessionKey?: string; boundAgentId?: string; @@ -83,7 +84,7 @@ export type DiscordMessagePreflightContext = { canDetectMention: boolean; historyEntry?: HistoryEntry; - threadBindings: ThreadBindingManager; + threadBindings: 
DiscordThreadBindingLookup; discordRestFetch?: typeof fetch; }; @@ -106,7 +107,7 @@ export type DiscordMessagePreflightParams = { guildEntries?: Record; ackReactionScope: DiscordMessagePreflightContext["ackReactionScope"]; groupPolicy: DiscordMessagePreflightContext["groupPolicy"]; - threadBindings: ThreadBindingManager; + threadBindings: DiscordThreadBindingLookup; discordRestFetch?: typeof fetch; data: DiscordMessageEvent; client: Client; diff --git a/src/discord/monitor/message-handler.process.test.ts b/src/discord/monitor/message-handler.process.test.ts index a7333794cbb..bce0325042a 100644 --- a/src/discord/monitor/message-handler.process.test.ts +++ b/src/discord/monitor/message-handler.process.test.ts @@ -53,8 +53,12 @@ const dispatchInboundMessage = vi.fn(async (_params?: DispatchInboundParams) => counts: { final: 0, tool: 0, block: 0 }, })); const recordInboundSession = vi.fn(async () => {}); -const readSessionUpdatedAt = vi.fn(() => undefined); -const resolveStorePath = vi.fn(() => "/tmp/openclaw-discord-process-test-sessions.json"); +const configSessionsMocks = vi.hoisted(() => ({ + readSessionUpdatedAt: vi.fn(() => undefined), + resolveStorePath: vi.fn(() => "/tmp/openclaw-discord-process-test-sessions.json"), +})); +const readSessionUpdatedAt = configSessionsMocks.readSessionUpdatedAt; +const resolveStorePath = configSessionsMocks.resolveStorePath; vi.mock("../send.js", () => ({ reactMessageDiscord: sendMocks.reactMessageDiscord, @@ -105,8 +109,8 @@ vi.mock("../../channels/session.js", () => ({ })); vi.mock("../../config/sessions.js", () => ({ - readSessionUpdatedAt, - resolveStorePath, + readSessionUpdatedAt: configSessionsMocks.readSessionUpdatedAt, + resolveStorePath: configSessionsMocks.resolveStorePath, })); const { processDiscordMessage } = await import("./message-handler.process.js"); diff --git a/src/discord/monitor/message-utils.test.ts b/src/discord/monitor/message-utils.test.ts index de8976ce5d2..28dd142a1e4 100644 --- 
a/src/discord/monitor/message-utils.test.ts +++ b/src/discord/monitor/message-utils.test.ts @@ -323,6 +323,78 @@ describe("resolveDiscordMessageText", () => { expect(text).toBe(" (1 sticker)"); }); + + it("uses embed title when content is empty", () => { + const text = resolveDiscordMessageText( + asMessage({ + content: "", + embeds: [{ title: "Breaking" }], + }), + ); + + expect(text).toBe("Breaking"); + }); + + it("uses embed description when content is empty", () => { + const text = resolveDiscordMessageText( + asMessage({ + content: "", + embeds: [{ description: "Details" }], + }), + ); + + expect(text).toBe("Details"); + }); + + it("joins embed title and description when content is empty", () => { + const text = resolveDiscordMessageText( + asMessage({ + content: "", + embeds: [{ title: "Breaking", description: "Details" }], + }), + ); + + expect(text).toBe("Breaking\nDetails"); + }); + + it("prefers message content over embed fallback text", () => { + const text = resolveDiscordMessageText( + asMessage({ + content: "hello from content", + embeds: [{ title: "Breaking", description: "Details" }], + }), + ); + + expect(text).toBe("hello from content"); + }); + + it("joins forwarded snapshot embed title and description when content is empty", () => { + const text = resolveDiscordMessageText( + asMessage({ + content: "", + rawData: { + message_snapshots: [ + { + message: { + content: "", + embeds: [{ title: "Forwarded title", description: "Forwarded details" }], + attachments: [], + author: { + id: "u2", + username: "Bob", + discriminator: "0", + }, + }, + }, + ], + }, + }), + { includeForwarded: true }, + ); + + expect(text).toContain("[Forwarded message from @Bob]"); + expect(text).toContain("Forwarded title\nForwarded details"); + }); }); describe("resolveDiscordChannelInfo", () => { diff --git a/src/discord/monitor/message-utils.ts b/src/discord/monitor/message-utils.ts index 3c523d277ef..b18e877b1ce 100644 --- a/src/discord/monitor/message-utils.ts +++ 
b/src/discord/monitor/message-utils.ts @@ -403,17 +403,32 @@ function buildDiscordMediaPlaceholder(params: { return attachmentText || stickerText || ""; } +export function resolveDiscordEmbedText( + embed?: { title?: string | null; description?: string | null } | null, +): string { + const title = embed?.title?.trim() || ""; + const description = embed?.description?.trim() || ""; + if (title && description) { + return `${title}\n${description}`; + } + return title || description || ""; +} + export function resolveDiscordMessageText( message: Message, options?: { fallbackText?: string; includeForwarded?: boolean }, ): string { + const embedText = resolveDiscordEmbedText( + (message.embeds?.[0] as { title?: string | null; description?: string | null } | undefined) ?? + null, + ); const baseText = message.content?.trim() || buildDiscordMediaPlaceholder({ attachments: message.attachments ?? undefined, stickers: resolveDiscordMessageStickers(message), }) || - message.embeds?.[0]?.description || + embedText || options?.fallbackText?.trim() || ""; if (!options?.includeForwarded) { @@ -477,8 +492,7 @@ function resolveDiscordSnapshotMessageText(snapshot: DiscordSnapshotMessage): st attachments: snapshot.attachments ?? 
undefined, stickers: resolveDiscordSnapshotStickers(snapshot), }); - const embed = snapshot.embeds?.[0]; - const embedText = embed?.description?.trim() || embed?.title?.trim() || ""; + const embedText = resolveDiscordEmbedText(snapshot.embeds?.[0]); return content || attachmentText || embedText || ""; } diff --git a/src/discord/monitor/native-command.ts b/src/discord/monitor/native-command.ts index 1629f03fba1..feeb89f2dd6 100644 --- a/src/discord/monitor/native-command.ts +++ b/src/discord/monitor/native-command.ts @@ -46,13 +46,11 @@ import { logVerbose } from "../../globals.js"; import { createSubsystemLogger } from "../../logging/subsystem.js"; import { getAgentScopedMediaLocalRoots } from "../../media/local-roots.js"; import { buildPairingReply } from "../../pairing/pairing-messages.js"; -import { - readChannelAllowFromStore, - upsertChannelPairingRequest, -} from "../../pairing/pairing-store.js"; +import { upsertChannelPairingRequest } from "../../pairing/pairing-store.js"; import { resolveAgentRoute } from "../../routing/resolve-route.js"; import { resolveAgentIdFromSessionKey } from "../../routing/session-key.js"; import { buildUntrustedChannelMetadata } from "../../security/channel-metadata.js"; +import { readStoreAllowFromForDmPolicy } from "../../security/dm-policy-shared.js"; import { chunkItems } from "../../utils/chunk-items.js"; import { withTimeout } from "../../utils/with-timeout.js"; import { loadWebMedia } from "../../web/media.js"; @@ -1360,8 +1358,11 @@ async function dispatchDiscordCommandInteraction(params: { return; } if (dmPolicy !== "open") { - const storeAllowFrom = - dmPolicy === "allowlist" ? [] : await readChannelAllowFromStore("discord").catch(() => []); + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "discord", + accountId, + dmPolicy, + }); const effectiveAllowFrom = [ ...(discordConfig?.allowFrom ?? discordConfig?.dm?.allowFrom ?? 
[]), ...storeAllowFrom, @@ -1384,6 +1385,7 @@ async function dispatchDiscordCommandInteraction(params: { const { code, created } = await upsertChannelPairingRequest({ channel: "discord", id: user.id, + accountId, meta: { tag: sender.tag, name: sender.name, diff --git a/src/discord/monitor/provider.lifecycle.test.ts b/src/discord/monitor/provider.lifecycle.test.ts index 9b74a0badfb..f29bd8e8cc1 100644 --- a/src/discord/monitor/provider.lifecycle.test.ts +++ b/src/discord/monitor/provider.lifecycle.test.ts @@ -49,23 +49,38 @@ describe("runDiscordGatewayLifecycle", () => { accountId?: string; start?: () => Promise; stop?: () => Promise; + isDisallowedIntentsError?: (err: unknown) => boolean; + pendingGatewayErrors?: unknown[]; }) => { const start = vi.fn(params?.start ?? (async () => undefined)); const stop = vi.fn(params?.stop ?? (async () => undefined)); const threadStop = vi.fn(); + const runtimeLog = vi.fn(); + const runtimeError = vi.fn(); + const runtimeExit = vi.fn(); + const releaseEarlyGatewayErrorGuard = vi.fn(); + const runtime: RuntimeEnv = { + log: runtimeLog, + error: runtimeError, + exit: runtimeExit, + }; return { start, stop, threadStop, + runtimeError, + releaseEarlyGatewayErrorGuard, lifecycleParams: { accountId: params?.accountId ?? "default", client: { getPlugin: vi.fn(() => undefined) } as unknown as Client, - runtime: {} as RuntimeEnv, - isDisallowedIntentsError: () => false, + runtime, + isDisallowedIntentsError: params?.isDisallowedIntentsError ?? 
(() => false), voiceManager: null, voiceManagerRef: { current: null }, execApprovalsHandler: { start, stop }, threadBindings: { stop: threadStop }, + pendingGatewayErrors: params?.pendingGatewayErrors, + releaseEarlyGatewayErrorGuard, }, }; }; @@ -75,6 +90,7 @@ describe("runDiscordGatewayLifecycle", () => { stop: ReturnType; threadStop: ReturnType; waitCalls: number; + releaseEarlyGatewayErrorGuard: ReturnType; }) { expect(params.start).toHaveBeenCalledTimes(1); expect(params.stop).toHaveBeenCalledTimes(1); @@ -82,39 +98,109 @@ describe("runDiscordGatewayLifecycle", () => { expect(unregisterGatewayMock).toHaveBeenCalledWith("default"); expect(stopGatewayLoggingMock).toHaveBeenCalledTimes(1); expect(params.threadStop).toHaveBeenCalledTimes(1); + expect(params.releaseEarlyGatewayErrorGuard).toHaveBeenCalledTimes(1); } it("cleans up thread bindings when exec approvals startup fails", async () => { const { runDiscordGatewayLifecycle } = await import("./provider.lifecycle.js"); - const { lifecycleParams, start, stop, threadStop } = createLifecycleHarness({ - start: async () => { - throw new Error("startup failed"); - }, - }); + const { lifecycleParams, start, stop, threadStop, releaseEarlyGatewayErrorGuard } = + createLifecycleHarness({ + start: async () => { + throw new Error("startup failed"); + }, + }); await expect(runDiscordGatewayLifecycle(lifecycleParams)).rejects.toThrow("startup failed"); - expectLifecycleCleanup({ start, stop, threadStop, waitCalls: 0 }); + expectLifecycleCleanup({ + start, + stop, + threadStop, + waitCalls: 0, + releaseEarlyGatewayErrorGuard, + }); }); it("cleans up when gateway wait fails after startup", async () => { const { runDiscordGatewayLifecycle } = await import("./provider.lifecycle.js"); waitForDiscordGatewayStopMock.mockRejectedValueOnce(new Error("gateway wait failed")); - const { lifecycleParams, start, stop, threadStop } = createLifecycleHarness(); + const { lifecycleParams, start, stop, threadStop, releaseEarlyGatewayErrorGuard 
} = + createLifecycleHarness(); await expect(runDiscordGatewayLifecycle(lifecycleParams)).rejects.toThrow( "gateway wait failed", ); - expectLifecycleCleanup({ start, stop, threadStop, waitCalls: 1 }); + expectLifecycleCleanup({ + start, + stop, + threadStop, + waitCalls: 1, + releaseEarlyGatewayErrorGuard, + }); }); it("cleans up after successful gateway wait", async () => { const { runDiscordGatewayLifecycle } = await import("./provider.lifecycle.js"); - const { lifecycleParams, start, stop, threadStop } = createLifecycleHarness(); + const { lifecycleParams, start, stop, threadStop, releaseEarlyGatewayErrorGuard } = + createLifecycleHarness(); await expect(runDiscordGatewayLifecycle(lifecycleParams)).resolves.toBeUndefined(); - expectLifecycleCleanup({ start, stop, threadStop, waitCalls: 1 }); + expectLifecycleCleanup({ + start, + stop, + threadStop, + waitCalls: 1, + releaseEarlyGatewayErrorGuard, + }); + }); + + it("handles queued disallowed intents errors without waiting for gateway events", async () => { + const { runDiscordGatewayLifecycle } = await import("./provider.lifecycle.js"); + const { + lifecycleParams, + start, + stop, + threadStop, + runtimeError, + releaseEarlyGatewayErrorGuard, + } = createLifecycleHarness({ + pendingGatewayErrors: [new Error("Fatal Gateway error: 4014")], + isDisallowedIntentsError: (err) => String(err).includes("4014"), + }); + + await expect(runDiscordGatewayLifecycle(lifecycleParams)).resolves.toBeUndefined(); + + expect(runtimeError).toHaveBeenCalledWith( + expect.stringContaining("discord: gateway closed with code 4014"), + ); + expectLifecycleCleanup({ + start, + stop, + threadStop, + waitCalls: 0, + releaseEarlyGatewayErrorGuard, + }); + }); + + it("throws queued non-disallowed fatal gateway errors", async () => { + const { runDiscordGatewayLifecycle } = await import("./provider.lifecycle.js"); + const { lifecycleParams, start, stop, threadStop, releaseEarlyGatewayErrorGuard } = + createLifecycleHarness({ + 
pendingGatewayErrors: [new Error("Fatal Gateway error: 4000")], + }); + + await expect(runDiscordGatewayLifecycle(lifecycleParams)).rejects.toThrow( + "Fatal Gateway error: 4000", + ); + + expectLifecycleCleanup({ + start, + stop, + threadStop, + waitCalls: 0, + releaseEarlyGatewayErrorGuard, + }); }); }); diff --git a/src/discord/monitor/provider.lifecycle.ts b/src/discord/monitor/provider.lifecycle.ts index 8e5177bb945..489657d08bd 100644 --- a/src/discord/monitor/provider.lifecycle.ts +++ b/src/discord/monitor/provider.lifecycle.ts @@ -22,6 +22,8 @@ export async function runDiscordGatewayLifecycle(params: { voiceManagerRef: { current: DiscordVoiceManager | null }; execApprovalsHandler: ExecApprovalsHandler | null; threadBindings: { stop: () => void }; + pendingGatewayErrors?: unknown[]; + releaseEarlyGatewayErrorGuard?: () => void; }) { const gateway = params.client.getPlugin("gateway"); if (gateway) { @@ -74,11 +76,48 @@ export async function runDiscordGatewayLifecycle(params: { gatewayEmitter?.on("debug", onGatewayDebug); let sawDisallowedIntents = false; + const logGatewayError = (err: unknown) => { + if (params.isDisallowedIntentsError(err)) { + sawDisallowedIntents = true; + params.runtime.error?.( + danger( + "discord: gateway closed with code 4014 (missing privileged gateway intents). Enable the required intents in the Discord Developer Portal or disable them in config.", + ), + ); + return; + } + params.runtime.error?.(danger(`discord gateway error: ${String(err)}`)); + }; + const shouldStopOnGatewayError = (err: unknown) => { + const message = String(err); + return ( + message.includes("Max reconnect attempts") || + message.includes("Fatal Gateway error") || + params.isDisallowedIntentsError(err) + ); + }; try { if (params.execApprovalsHandler) { await params.execApprovalsHandler.start(); } + // Drain gateway errors emitted before lifecycle listeners were attached. + const pendingGatewayErrors = params.pendingGatewayErrors ?? 
[]; + if (pendingGatewayErrors.length > 0) { + const queuedErrors = [...pendingGatewayErrors]; + pendingGatewayErrors.length = 0; + for (const err of queuedErrors) { + logGatewayError(err); + if (!shouldStopOnGatewayError(err)) { + continue; + } + if (params.isDisallowedIntentsError(err)) { + return; + } + throw err; + } + } + await waitForDiscordGatewayStop({ gateway: gateway ? { @@ -87,32 +126,15 @@ export async function runDiscordGatewayLifecycle(params: { } : undefined, abortSignal: params.abortSignal, - onGatewayError: (err) => { - if (params.isDisallowedIntentsError(err)) { - sawDisallowedIntents = true; - params.runtime.error?.( - danger( - "discord: gateway closed with code 4014 (missing privileged gateway intents). Enable the required intents in the Discord Developer Portal or disable them in config.", - ), - ); - return; - } - params.runtime.error?.(danger(`discord gateway error: ${String(err)}`)); - }, - shouldStopOnError: (err) => { - const message = String(err); - return ( - message.includes("Max reconnect attempts") || - message.includes("Fatal Gateway error") || - params.isDisallowedIntentsError(err) - ); - }, + onGatewayError: logGatewayError, + shouldStopOnError: shouldStopOnGatewayError, }); } catch (err) { if (!sawDisallowedIntents && !params.isDisallowedIntentsError(err)) { throw err; } } finally { + params.releaseEarlyGatewayErrorGuard?.(); unregisterGateway(params.accountId); stopGatewayLogging(); if (helloTimeoutId) { diff --git a/src/discord/monitor/provider.test.ts b/src/discord/monitor/provider.test.ts index 14b137fd1bd..f7a767c596a 100644 --- a/src/discord/monitor/provider.test.ts +++ b/src/discord/monitor/provider.test.ts @@ -1,11 +1,15 @@ +import { EventEmitter } from "node:events"; import { beforeEach, describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../../config/config.js"; import type { RuntimeEnv } from "../../runtime.js"; const { + clientFetchUserMock, + clientGetPluginMock, 
createDiscordNativeCommandMock, createNoopThreadBindingManagerMock, createThreadBindingManagerMock, + reconcileAcpThreadBindingsOnStartupMock, createdBindingManagers, listNativeCommandSpecsForConfigMock, listSkillCommandsForAgentsMock, @@ -14,9 +18,13 @@ const { resolveDiscordAllowlistConfigMock, resolveNativeCommandsEnabledMock, resolveNativeSkillsEnabledMock, + resolveThreadBindingSessionTtlMsMock, + resolveThreadBindingsEnabledMock, } = vi.hoisted(() => { const createdBindingManagers: Array<{ stop: ReturnType }> = []; return { + clientFetchUserMock: vi.fn(async (_target: string) => ({ id: "bot-1" })), + clientGetPluginMock: vi.fn<(_name: string) => unknown>(() => undefined), createDiscordNativeCommandMock: vi.fn(() => ({ name: "mock-command" })), createNoopThreadBindingManagerMock: vi.fn(() => { const manager = { stop: vi.fn() }; @@ -28,6 +36,11 @@ const { createdBindingManagers.push(manager); return manager; }), + reconcileAcpThreadBindingsOnStartupMock: vi.fn(() => ({ + checked: 0, + removed: 0, + staleSessionKeys: [], + })), createdBindingManagers, listNativeCommandSpecsForConfigMock: vi.fn(() => [{ name: "cmd" }]), listSkillCommandsForAgentsMock: vi.fn(() => []), @@ -50,6 +63,8 @@ const { })), resolveNativeCommandsEnabledMock: vi.fn(() => true), resolveNativeSkillsEnabledMock: vi.fn(() => false), + resolveThreadBindingSessionTtlMsMock: vi.fn(() => undefined), + resolveThreadBindingsEnabledMock: vi.fn(() => true), }; }); @@ -65,11 +80,11 @@ vi.mock("@buape/carbon", () => { async handleDeployRequest() { return undefined; } - async fetchUser(_target: string) { - return { id: "bot-1" }; + async fetchUser(target: string) { + return await clientFetchUserMock(target); } - getPlugin(_name: string) { - return undefined; + getPlugin(name: string) { + return clientGetPluginMock(name); } } return { Client, ReadyListener }; @@ -219,6 +234,9 @@ vi.mock("./rest-fetch.js", () => ({ vi.mock("./thread-bindings.js", () => ({ createNoopThreadBindingManager: 
createNoopThreadBindingManagerMock, createThreadBindingManager: createThreadBindingManagerMock, + reconcileAcpThreadBindingsOnStartup: reconcileAcpThreadBindingsOnStartupMock, + resolveThreadBindingSessionTtlMs: resolveThreadBindingSessionTtlMsMock, + resolveThreadBindingsEnabled: resolveThreadBindingsEnabledMock, })); describe("monitorDiscordProvider", () => { @@ -242,9 +260,16 @@ describe("monitorDiscordProvider", () => { }) as OpenClawConfig; beforeEach(() => { + clientFetchUserMock.mockClear().mockResolvedValue({ id: "bot-1" }); + clientGetPluginMock.mockClear().mockReturnValue(undefined); createDiscordNativeCommandMock.mockClear().mockReturnValue({ name: "mock-command" }); createNoopThreadBindingManagerMock.mockClear(); createThreadBindingManagerMock.mockClear(); + reconcileAcpThreadBindingsOnStartupMock.mockClear().mockReturnValue({ + checked: 0, + removed: 0, + staleSessionKeys: [], + }); createdBindingManagers.length = 0; listNativeCommandSpecsForConfigMock.mockClear().mockReturnValue([{ name: "cmd" }]); listSkillCommandsForAgentsMock.mockClear().mockReturnValue([]); @@ -258,6 +283,8 @@ describe("monitorDiscordProvider", () => { }); resolveNativeCommandsEnabledMock.mockClear().mockReturnValue(true); resolveNativeSkillsEnabledMock.mockClear().mockReturnValue(false); + resolveThreadBindingSessionTtlMsMock.mockClear().mockReturnValue(undefined); + resolveThreadBindingsEnabledMock.mockClear().mockReturnValue(true); }); it("stops thread bindings when startup fails before lifecycle begins", async () => { @@ -289,5 +316,30 @@ describe("monitorDiscordProvider", () => { expect(monitorLifecycleMock).toHaveBeenCalledTimes(1); expect(createdBindingManagers).toHaveLength(1); expect(createdBindingManagers[0]?.stop).toHaveBeenCalledTimes(1); + expect(reconcileAcpThreadBindingsOnStartupMock).toHaveBeenCalledTimes(1); + }); + + it("captures gateway errors emitted before lifecycle wait starts", async () => { + const { monitorDiscordProvider } = await import("./provider.js"); 
+ const emitter = new EventEmitter(); + clientGetPluginMock.mockImplementation((name: string) => + name === "gateway" ? { emitter, disconnect: vi.fn() } : undefined, + ); + clientFetchUserMock.mockImplementationOnce(async () => { + emitter.emit("error", new Error("Fatal Gateway error: 4014")); + return { id: "bot-1" }; + }); + + await monitorDiscordProvider({ + config: baseConfig(), + runtime: baseRuntime(), + }); + + expect(monitorLifecycleMock).toHaveBeenCalledTimes(1); + const lifecycleArgs = monitorLifecycleMock.mock.calls[0]?.[0] as { + pendingGatewayErrors?: unknown[]; + }; + expect(lifecycleArgs.pendingGatewayErrors).toHaveLength(1); + expect(String(lifecycleArgs.pendingGatewayErrors?.[0])).toContain("4014"); }); }); diff --git a/src/discord/monitor/provider.ts b/src/discord/monitor/provider.ts index 15c8e2aa7b4..5949b67ce9d 100644 --- a/src/discord/monitor/provider.ts +++ b/src/discord/monitor/provider.ts @@ -51,6 +51,7 @@ import { } from "./agent-components.js"; import { resolveDiscordSlashCommandConfig } from "./commands.js"; import { createExecApprovalButton, DiscordExecApprovalHandler } from "./exec-approvals.js"; +import { attachEarlyGatewayErrorGuard } from "./gateway-error-guard.js"; import { createDiscordGatewayPlugin } from "./gateway-plugin.js"; import { DiscordMessageListener, @@ -70,7 +71,13 @@ import { resolveDiscordPresenceUpdate } from "./presence.js"; import { resolveDiscordAllowlistConfig } from "./provider.allowlist.js"; import { runDiscordGatewayLifecycle } from "./provider.lifecycle.js"; import { resolveDiscordRestFetch } from "./rest-fetch.js"; -import { createNoopThreadBindingManager, createThreadBindingManager } from "./thread-bindings.js"; +import { + createNoopThreadBindingManager, + createThreadBindingManager, + resolveThreadBindingSessionTtlMs, + resolveThreadBindingsEnabled, + reconcileAcpThreadBindingsOnStartup, +} from "./thread-bindings.js"; import { formatThreadBindingTtlLabel } from "./thread-bindings.messages.js"; export 
type MonitorDiscordOpts = { @@ -103,47 +110,6 @@ function summarizeGuilds(entries?: Record) { return `${sample.join(", ")}${suffix}`; } -const DEFAULT_THREAD_BINDING_TTL_HOURS = 24; - -function normalizeThreadBindingTtlHours(raw: unknown): number | undefined { - if (typeof raw !== "number" || !Number.isFinite(raw)) { - return undefined; - } - if (raw < 0) { - return undefined; - } - return raw; -} - -function resolveThreadBindingSessionTtlMs(params: { - channelTtlHoursRaw: unknown; - sessionTtlHoursRaw: unknown; -}): number { - const ttlHours = - normalizeThreadBindingTtlHours(params.channelTtlHoursRaw) ?? - normalizeThreadBindingTtlHours(params.sessionTtlHoursRaw) ?? - DEFAULT_THREAD_BINDING_TTL_HOURS; - return Math.floor(ttlHours * 60 * 60 * 1000); -} - -function normalizeThreadBindingsEnabled(raw: unknown): boolean | undefined { - if (typeof raw !== "boolean") { - return undefined; - } - return raw; -} - -function resolveThreadBindingsEnabled(params: { - channelEnabledRaw: unknown; - sessionEnabledRaw: unknown; -}): boolean { - return ( - normalizeThreadBindingsEnabled(params.channelEnabledRaw) ?? - normalizeThreadBindingsEnabled(params.sessionEnabledRaw) ?? - true - ); -} - function formatThreadBindingSessionTtlLabel(ttlMs: number): string { const label = formatThreadBindingTtlLabel(ttlMs); return label === "disabled" ? 
"off" : label; @@ -364,7 +330,20 @@ export async function monitorDiscordProvider(opts: MonitorDiscordOpts = {}) { sessionTtlMs: threadBindingSessionTtlMs, }) : createNoopThreadBindingManager(account.accountId); + if (threadBindingsEnabled) { + const reconciliation = reconcileAcpThreadBindingsOnStartup({ + cfg, + accountId: account.accountId, + sendFarewell: false, + }); + if (reconciliation.removed > 0) { + logVerbose( + `discord: removed ${reconciliation.removed}/${reconciliation.checked} stale ACP thread bindings on startup for account ${account.accountId}`, + ); + } + } let lifecycleStarted = false; + let releaseEarlyGatewayErrorGuard = () => {}; try { const commands: BaseCommand[] = commandSpecs.map((spec) => createDiscordNativeCommand({ @@ -496,6 +475,8 @@ export async function monitorDiscordProvider(opts: MonitorDiscordOpts = {}) { }, clientPlugins, ); + const earlyGatewayErrorGuard = attachEarlyGatewayErrorGuard(client); + releaseEarlyGatewayErrorGuard = earlyGatewayErrorGuard.release; await deployDiscordCommands({ client, runtime, enabled: nativeEnabled }); @@ -561,6 +542,12 @@ export async function monitorDiscordProvider(opts: MonitorDiscordOpts = {}) { accountId: account.accountId, runtime, botUserId, + dmEnabled, + groupDmEnabled, + groupDmChannels: groupDmChannels ?? [], + dmPolicy, + allowFrom: allowFrom ?? [], + groupPolicy, allowNameMatching: isDangerousNameMatchingEnabled(discordCfg), guildEntries, logger, @@ -573,6 +560,12 @@ export async function monitorDiscordProvider(opts: MonitorDiscordOpts = {}) { accountId: account.accountId, runtime, botUserId, + dmEnabled, + groupDmEnabled, + groupDmChannels: groupDmChannels ?? [], + dmPolicy, + allowFrom: allowFrom ?? 
[], + groupPolicy, allowNameMatching: isDangerousNameMatchingEnabled(discordCfg), guildEntries, logger, @@ -600,8 +593,11 @@ export async function monitorDiscordProvider(opts: MonitorDiscordOpts = {}) { voiceManagerRef, execApprovalsHandler, threadBindings, + pendingGatewayErrors: earlyGatewayErrorGuard.pendingErrors, + releaseEarlyGatewayErrorGuard, }); } finally { + releaseEarlyGatewayErrorGuard(); if (!lifecycleStarted) { threadBindings.stop(); } diff --git a/src/discord/monitor/reply-delivery.test.ts b/src/discord/monitor/reply-delivery.test.ts index 1eb3200baca..7a585a7d84b 100644 --- a/src/discord/monitor/reply-delivery.test.ts +++ b/src/discord/monitor/reply-delivery.test.ts @@ -165,6 +165,23 @@ describe("deliverDiscordReply", () => { ); }); + it("preserves leading whitespace in delivered text chunks", async () => { + await deliverDiscordReply({ + replies: [{ text: " leading text" }], + target: "channel:789", + token: "token", + runtime, + textLimit: 2000, + }); + + expect(sendMessageDiscordMock).toHaveBeenCalledTimes(1); + expect(sendMessageDiscordMock).toHaveBeenCalledWith( + "channel:789", + " leading text", + expect.objectContaining({ token: "token" }), + ); + }); + it("sends bound-session text replies through webhook delivery", async () => { const threadBindings = await createBoundThreadBindings({ label: "codex-refactor" }); diff --git a/src/discord/monitor/reply-delivery.ts b/src/discord/monitor/reply-delivery.ts index 0ee36b57654..c82d6c77894 100644 --- a/src/discord/monitor/reply-delivery.ts +++ b/src/discord/monitor/reply-delivery.ts @@ -8,7 +8,19 @@ import { convertMarkdownTables } from "../../markdown/tables.js"; import type { RuntimeEnv } from "../../runtime.js"; import { chunkDiscordTextWithMode } from "../chunk.js"; import { sendMessageDiscord, sendVoiceMessageDiscord, sendWebhookMessageDiscord } from "../send.js"; -import type { ThreadBindingManager, ThreadBindingRecord } from "./thread-bindings.js"; + +export type 
DiscordThreadBindingLookupRecord = { + accountId: string; + threadId: string; + agentId: string; + label?: string; + webhookId?: string; + webhookToken?: string; +}; + +export type DiscordThreadBindingLookup = { + listBySessionKey: (targetSessionKey: string) => DiscordThreadBindingLookupRecord[]; +}; function resolveTargetChannelId(target: string): string | undefined { if (!target.startsWith("channel:")) { @@ -19,10 +31,10 @@ function resolveTargetChannelId(target: string): string | undefined { } function resolveBoundThreadBinding(params: { - threadBindings?: ThreadBindingManager; + threadBindings?: DiscordThreadBindingLookup; sessionKey?: string; target: string; -}): ThreadBindingRecord | undefined { +}): DiscordThreadBindingLookupRecord | undefined { const sessionKey = params.sessionKey?.trim(); if (!params.threadBindings || !sessionKey) { return undefined; @@ -38,7 +50,7 @@ function resolveBoundThreadBinding(params: { return bindings.find((entry) => entry.threadId === targetChannelId); } -function resolveBindingPersona(binding: ThreadBindingRecord | undefined): { +function resolveBindingPersona(binding: DiscordThreadBindingLookupRecord | undefined): { username?: string; avatarUrl?: string; } { @@ -67,14 +79,14 @@ async function sendDiscordChunkWithFallback(params: { accountId?: string; rest?: RequestClient; replyTo?: string; - binding?: ThreadBindingRecord; + binding?: DiscordThreadBindingLookupRecord; username?: string; avatarUrl?: string; }) { - const text = params.text.trim(); - if (!text) { + if (!params.text.trim()) { return; } + const text = params.text; const binding = params.binding; if (binding?.webhookId && binding?.webhookToken) { try { @@ -134,7 +146,7 @@ export async function deliverDiscordReply(params: { tableMode?: MarkdownTableMode; chunkMode?: ChunkMode; sessionKey?: string; - threadBindings?: ThreadBindingManager; + threadBindings?: DiscordThreadBindingLookup; }) { const chunkLimit = Math.min(params.textLimit, 2000); const replyTo = 
params.replyToId?.trim() || undefined; diff --git a/src/discord/monitor/thread-bindings.config.ts b/src/discord/monitor/thread-bindings.config.ts new file mode 100644 index 00000000000..dddd42c61ad --- /dev/null +++ b/src/discord/monitor/thread-bindings.config.ts @@ -0,0 +1,21 @@ +import { + resolveThreadBindingSessionTtlMs, + resolveThreadBindingsEnabled, +} from "../../channels/thread-bindings-policy.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import { normalizeAccountId } from "../../routing/session-key.js"; + +export { resolveThreadBindingSessionTtlMs, resolveThreadBindingsEnabled }; + +export function resolveDiscordThreadBindingSessionTtlMs(params: { + cfg: OpenClawConfig; + accountId?: string; +}): number { + const accountId = normalizeAccountId(params.accountId); + const root = params.cfg.channels?.discord?.threadBindings; + const account = params.cfg.channels?.discord?.accounts?.[accountId]?.threadBindings; + return resolveThreadBindingSessionTtlMs({ + channelTtlHoursRaw: account?.ttlHours ?? 
root?.ttlHours, + sessionTtlHoursRaw: params.cfg.session?.threadBindings?.ttlHours, + }); +} diff --git a/src/discord/monitor/thread-bindings.discord-api.ts b/src/discord/monitor/thread-bindings.discord-api.ts index d08f78f27ec..faac1cce4e8 100644 --- a/src/discord/monitor/thread-bindings.discord-api.ts +++ b/src/discord/monitor/thread-bindings.discord-api.ts @@ -3,7 +3,7 @@ import { logVerbose } from "../../globals.js"; import { createDiscordRestClient } from "../client.js"; import { sendMessageDiscord, sendWebhookMessageDiscord } from "../send.js"; import { createThreadDiscord } from "../send.messages.js"; -import { summarizeBindingPersona } from "./thread-bindings.messages.js"; +import { resolveThreadBindingPersonaFromRecord } from "./thread-bindings.persona.js"; import { BINDINGS_BY_THREAD_ID, REUSABLE_WEBHOOKS_BY_ACCOUNT_CHANNEL, @@ -138,7 +138,7 @@ export async function maybeSendBindingMessage(params: { webhookToken: record.webhookToken, accountId: record.accountId, threadId: record.threadId, - username: summarizeBindingPersona(record), + username: resolveThreadBindingPersonaFromRecord(record), }); return; } catch (err) { diff --git a/src/discord/monitor/thread-bindings.lifecycle.ts b/src/discord/monitor/thread-bindings.lifecycle.ts index b741b38483f..282cac42537 100644 --- a/src/discord/monitor/thread-bindings.lifecycle.ts +++ b/src/discord/monitor/thread-bindings.lifecycle.ts @@ -1,3 +1,5 @@ +import { readAcpSessionEntry } from "../../acp/runtime/session-meta.js"; +import type { OpenClawConfig } from "../../config/config.js"; import { normalizeAccountId } from "../../routing/session-key.js"; import { parseDiscordTarget } from "../targets.js"; import { resolveChannelIdForBinding } from "./thread-bindings.discord-api.js"; @@ -22,6 +24,12 @@ import { } from "./thread-bindings.state.js"; import type { ThreadBindingRecord, ThreadBindingTargetKind } from "./thread-bindings.types.js"; +export type AcpThreadBindingReconciliationResult = { + checked: number; + 
removed: number; + staleSessionKeys: string[]; +}; + function resolveBindingIdsForTargetSession(params: { targetSessionKey: string; accountId?: string; @@ -212,3 +220,62 @@ export function setThreadBindingTtlBySessionKey(params: { } return updated; } + +export function reconcileAcpThreadBindingsOnStartup(params: { + cfg: OpenClawConfig; + accountId?: string; + sendFarewell?: boolean; +}): AcpThreadBindingReconciliationResult { + const manager = getThreadBindingManager(params.accountId); + if (!manager) { + return { + checked: 0, + removed: 0, + staleSessionKeys: [], + }; + } + + const acpBindings = manager.listBindings().filter((binding) => binding.targetKind === "acp"); + const staleBindings = acpBindings.filter((binding) => { + const sessionKey = binding.targetSessionKey.trim(); + if (!sessionKey) { + return true; + } + const session = readAcpSessionEntry({ + cfg: params.cfg, + sessionKey, + }); + // Session store read failures are transient; never auto-unbind on uncertain reads. + if (session?.storeReadFailed) { + return false; + } + return !session?.acp; + }); + if (staleBindings.length === 0) { + return { + checked: acpBindings.length, + removed: 0, + staleSessionKeys: [], + }; + } + + const staleSessionKeys: string[] = []; + let removed = 0; + for (const binding of staleBindings) { + staleSessionKeys.push(binding.targetSessionKey); + const unbound = manager.unbindThread({ + threadId: binding.threadId, + reason: "stale-session", + sendFarewell: params.sendFarewell ?? 
false, + }); + if (unbound) { + removed += 1; + } + } + + return { + checked: acpBindings.length, + removed, + staleSessionKeys: [...new Set(staleSessionKeys)], + }; +} diff --git a/src/discord/monitor/thread-bindings.manager.ts b/src/discord/monitor/thread-bindings.manager.ts index a4fd5f63cef..6b50028b8a3 100644 --- a/src/discord/monitor/thread-bindings.manager.ts +++ b/src/discord/monitor/thread-bindings.manager.ts @@ -424,6 +424,9 @@ export function createThreadBindingManager( registerSessionBindingAdapter({ channel: "discord", accountId, + capabilities: { + placements: ["current", "child"], + }, bind: async (input) => { if (input.conversation.channel !== "discord") { return null; @@ -433,6 +436,7 @@ export function createThreadBindingManager( return null; } const conversationId = input.conversation.conversationId.trim(); + const placement = input.placement === "child" ? "child" : "current"; const metadata = input.metadata ?? {}; const label = typeof metadata.label === "string" ? metadata.label.trim() || undefined : undefined; @@ -446,10 +450,27 @@ export function createThreadBindingManager( typeof metadata.boundBy === "string" ? metadata.boundBy.trim() || undefined : undefined; const agentId = typeof metadata.agentId === "string" ? metadata.agentId.trim() || undefined : undefined; + let threadId: string | undefined; + let channelId = input.conversation.parentConversationId?.trim() || undefined; + let createThread = false; + + if (placement === "child") { + createThread = true; + if (!channelId && conversationId) { + channelId = + (await resolveChannelIdForBinding({ + accountId, + token: resolveCurrentToken(), + threadId: conversationId, + })) ?? 
undefined; + } + } else { + threadId = conversationId || undefined; + } const bound = await manager.bindTarget({ - threadId: conversationId || undefined, - channelId: input.conversation.parentConversationId?.trim() || undefined, - createThread: !conversationId, + threadId, + channelId, + createThread, threadName, targetKind: toThreadBindingTargetKind(input.targetKind), targetSessionKey, diff --git a/src/discord/monitor/thread-bindings.messages.ts b/src/discord/monitor/thread-bindings.messages.ts index e6691949c6c..27363cb3215 100644 --- a/src/discord/monitor/thread-bindings.messages.ts +++ b/src/discord/monitor/thread-bindings.messages.ts @@ -1,72 +1,6 @@ -import { DEFAULT_FAREWELL_TEXT, type ThreadBindingRecord } from "./thread-bindings.types.js"; - -function normalizeThreadBindingMessageTtlMs(raw: unknown): number { - if (typeof raw !== "number" || !Number.isFinite(raw)) { - return 0; - } - const ttlMs = Math.floor(raw); - if (ttlMs < 0) { - return 0; - } - return ttlMs; -} - -export function formatThreadBindingTtlLabel(ttlMs: number): string { - if (ttlMs <= 0) { - return "disabled"; - } - if (ttlMs < 60_000) { - return "<1m"; - } - const totalMinutes = Math.floor(ttlMs / 60_000); - if (totalMinutes % 60 === 0) { - return `${Math.floor(totalMinutes / 60)}h`; - } - return `${totalMinutes}m`; -} - -export function resolveThreadBindingThreadName(params: { - agentId?: string; - label?: string; -}): string { - const label = params.label?.trim(); - const base = label || params.agentId?.trim() || "agent"; - const raw = `🤖 ${base}`.replace(/\s+/g, " ").trim(); - return raw.slice(0, 100); -} - -export function resolveThreadBindingIntroText(params: { - agentId?: string; - label?: string; - sessionTtlMs?: number; -}): string { - const label = params.label?.trim(); - const base = label || params.agentId?.trim() || "agent"; - const normalized = base.replace(/\s+/g, " ").trim().slice(0, 100) || "agent"; - const ttlMs = normalizeThreadBindingMessageTtlMs(params.sessionTtlMs); 
- if (ttlMs > 0) { - return `🤖 ${normalized} session active (auto-unfocus in ${formatThreadBindingTtlLabel(ttlMs)}). Messages here go directly to this session.`; - } - return `🤖 ${normalized} session active. Messages here go directly to this session.`; -} - -export function resolveThreadBindingFarewellText(params: { - reason?: string; - farewellText?: string; - sessionTtlMs: number; -}): string { - const custom = params.farewellText?.trim(); - if (custom) { - return custom; - } - if (params.reason === "ttl-expired") { - return `Session ended automatically after ${formatThreadBindingTtlLabel(params.sessionTtlMs)}. Messages here will no longer be routed.`; - } - return DEFAULT_FAREWELL_TEXT; -} - -export function summarizeBindingPersona(record: ThreadBindingRecord): string { - const label = record.label?.trim(); - const base = label || record.agentId; - return (`🤖 ${base}`.trim() || "🤖 agent").slice(0, 80); -} +export { + formatThreadBindingTtlLabel, + resolveThreadBindingFarewellText, + resolveThreadBindingIntroText, + resolveThreadBindingThreadName, +} from "../../channels/thread-bindings-messages.js"; diff --git a/src/discord/monitor/thread-bindings.persona.test.ts b/src/discord/monitor/thread-bindings.persona.test.ts new file mode 100644 index 00000000000..7087cff09a4 --- /dev/null +++ b/src/discord/monitor/thread-bindings.persona.test.ts @@ -0,0 +1,33 @@ +import { describe, expect, it } from "vitest"; +import { + resolveThreadBindingPersona, + resolveThreadBindingPersonaFromRecord, +} from "./thread-bindings.persona.js"; +import type { ThreadBindingRecord } from "./thread-bindings.types.js"; + +describe("thread binding persona", () => { + it("prefers explicit label and prefixes with gear", () => { + expect(resolveThreadBindingPersona({ label: "codex thread", agentId: "codex" })).toBe( + "⚙️ codex thread", + ); + }); + + it("falls back to agent id when label is missing", () => { + expect(resolveThreadBindingPersona({ agentId: "codex" })).toBe("⚙️ codex"); + }); + 
+ it("builds persona from binding record", () => { + const record = { + accountId: "default", + channelId: "parent-1", + threadId: "thread-1", + targetKind: "acp", + targetSessionKey: "agent:codex:acp:session-1", + agentId: "codex", + boundBy: "system", + boundAt: Date.now(), + label: "codex-thread", + } satisfies ThreadBindingRecord; + expect(resolveThreadBindingPersonaFromRecord(record)).toBe("⚙️ codex-thread"); + }); +}); diff --git a/src/discord/monitor/thread-bindings.persona.ts b/src/discord/monitor/thread-bindings.persona.ts new file mode 100644 index 00000000000..bb7485f15d1 --- /dev/null +++ b/src/discord/monitor/thread-bindings.persona.ts @@ -0,0 +1,25 @@ +import { SYSTEM_MARK } from "../../infra/system-message.js"; +import type { ThreadBindingRecord } from "./thread-bindings.types.js"; + +const THREAD_BINDING_PERSONA_MAX_CHARS = 80; + +function normalizePersonaLabel(value: string | undefined): string | undefined { + if (!value) { + return undefined; + } + const normalized = value.replace(/\s+/g, " ").trim(); + return normalized || undefined; +} + +export function resolveThreadBindingPersona(params: { label?: string; agentId?: string }): string { + const base = + normalizePersonaLabel(params.label) || normalizePersonaLabel(params.agentId) || "agent"; + return `${SYSTEM_MARK} ${base}`.slice(0, THREAD_BINDING_PERSONA_MAX_CHARS); +} + +export function resolveThreadBindingPersonaFromRecord(record: ThreadBindingRecord): string { + return resolveThreadBindingPersona({ + label: record.label, + agentId: record.agentId, + }); +} diff --git a/src/discord/monitor/thread-bindings.ts b/src/discord/monitor/thread-bindings.ts index 88802151093..6bde0daff2b 100644 --- a/src/discord/monitor/thread-bindings.ts +++ b/src/discord/monitor/thread-bindings.ts @@ -9,6 +9,16 @@ export { resolveThreadBindingIntroText, resolveThreadBindingThreadName, } from "./thread-bindings.messages.js"; +export { + resolveThreadBindingPersona, + resolveThreadBindingPersonaFromRecord, +} from 
"./thread-bindings.persona.js"; + +export { + resolveDiscordThreadBindingSessionTtlMs, + resolveThreadBindingSessionTtlMs, + resolveThreadBindingsEnabled, +} from "./thread-bindings.config.js"; export { isRecentlyUnboundThreadWebhookMessage } from "./thread-bindings.state.js"; @@ -16,10 +26,13 @@ export { autoBindSpawnedDiscordSubagent, listThreadBindingsBySessionKey, listThreadBindingsForAccount, + reconcileAcpThreadBindingsOnStartup, setThreadBindingTtlBySessionKey, unbindThreadBindingsBySessionKey, } from "./thread-bindings.lifecycle.js"; +export type { AcpThreadBindingReconciliationResult } from "./thread-bindings.lifecycle.js"; + export { __testing, createNoopThreadBindingManager, diff --git a/src/discord/monitor/thread-bindings.ttl.test.ts b/src/discord/monitor/thread-bindings.ttl.test.ts index a452c581327..0c122eedab8 100644 --- a/src/discord/monitor/thread-bindings.ttl.test.ts +++ b/src/discord/monitor/thread-bindings.ttl.test.ts @@ -2,6 +2,7 @@ import fs from "node:fs"; import os from "node:os"; import path from "node:path"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; const hoisted = vi.hoisted(() => { const sendMessageDiscord = vi.fn(async (_to: string, _text: string, _opts?: unknown) => ({})); @@ -22,6 +23,7 @@ const hoisted = vi.hoisted(() => { }, })); const createThreadDiscord = vi.fn(async (..._args: unknown[]) => ({ id: "thread-created" })); + const readAcpSessionEntry = vi.fn(); return { sendMessageDiscord, sendWebhookMessageDiscord, @@ -29,6 +31,7 @@ const hoisted = vi.hoisted(() => { restPost, createDiscordRestClient, createThreadDiscord, + readAcpSessionEntry, }; }); @@ -45,10 +48,15 @@ vi.mock("../send.messages.js", () => ({ createThreadDiscord: hoisted.createThreadDiscord, })); +vi.mock("../../acp/runtime/session-meta.js", () => ({ + readAcpSessionEntry: hoisted.readAcpSessionEntry, +})); + const { __testing, autoBindSpawnedDiscordSubagent, 
createThreadBindingManager, + reconcileAcpThreadBindingsOnStartup, resolveThreadBindingIntroText, setThreadBindingTtlBySessionKey, unbindThreadBindingsBySessionKey, @@ -63,6 +71,7 @@ describe("thread binding ttl", () => { hoisted.restPost.mockClear(); hoisted.createDiscordRestClient.mockClear(); hoisted.createThreadDiscord.mockClear(); + hoisted.readAcpSessionEntry.mockReset().mockReturnValue(null); vi.useRealTimers(); }); @@ -97,6 +106,16 @@ describe("thread binding ttl", () => { expect(intro).toContain("auto-unfocus in 24h"); }); + it("includes cwd near the top of intro text", () => { + const intro = resolveThreadBindingIntroText({ + agentId: "codex", + sessionTtlMs: 24 * 60 * 60 * 1000, + sessionCwd: "/home/bob/clawd", + sessionDetails: ["session ids: pending (available after the first reply)"], + }); + expect(intro).toContain("\ncwd: /home/bob/clawd\nsession ids: pending"); + }); + it("auto-unfocuses expired bindings and sends a ttl-expired message", async () => { vi.useFakeTimers(); try { @@ -479,6 +498,119 @@ describe("thread binding ttl", () => { expect(b.getByThreadId("thread-1")?.targetSessionKey).toBe("agent:main:subagent:b"); }); + it("removes stale ACP bindings during startup reconciliation", async () => { + const manager = createThreadBindingManager({ + accountId: "default", + persist: false, + enableSweeper: false, + sessionTtlMs: 24 * 60 * 60 * 1000, + }); + + await manager.bindTarget({ + threadId: "thread-acp-healthy", + channelId: "parent-1", + targetKind: "acp", + targetSessionKey: "agent:codex:acp:healthy", + agentId: "codex", + webhookId: "wh-1", + webhookToken: "tok-1", + }); + await manager.bindTarget({ + threadId: "thread-acp-stale", + channelId: "parent-1", + targetKind: "acp", + targetSessionKey: "agent:codex:acp:stale", + agentId: "codex", + webhookId: "wh-1", + webhookToken: "tok-1", + }); + await manager.bindTarget({ + threadId: "thread-subagent", + channelId: "parent-1", + targetKind: "subagent", + targetSessionKey: 
"agent:main:subagent:child", + agentId: "main", + webhookId: "wh-1", + webhookToken: "tok-1", + }); + + hoisted.readAcpSessionEntry.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey ?? ""; + if (sessionKey === "agent:codex:acp:healthy") { + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime:healthy", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }; + } + return { + sessionKey, + storeSessionKey: sessionKey, + acp: undefined, + }; + }); + + const result = reconcileAcpThreadBindingsOnStartup({ + cfg: {} as OpenClawConfig, + accountId: "default", + }); + + expect(result.checked).toBe(2); + expect(result.removed).toBe(1); + expect(result.staleSessionKeys).toContain("agent:codex:acp:stale"); + expect(manager.getByThreadId("thread-acp-healthy")).toBeDefined(); + expect(manager.getByThreadId("thread-acp-stale")).toBeUndefined(); + expect(manager.getByThreadId("thread-subagent")).toBeDefined(); + expect(hoisted.sendMessageDiscord).not.toHaveBeenCalled(); + expect(hoisted.sendWebhookMessageDiscord).not.toHaveBeenCalled(); + }); + + it("keeps ACP bindings when session store reads fail during startup reconciliation", async () => { + const manager = createThreadBindingManager({ + accountId: "default", + persist: false, + enableSweeper: false, + sessionTtlMs: 24 * 60 * 60 * 1000, + }); + + await manager.bindTarget({ + threadId: "thread-acp-uncertain", + channelId: "parent-1", + targetKind: "acp", + targetSessionKey: "agent:codex:acp:uncertain", + agentId: "codex", + webhookId: "wh-1", + webhookToken: "tok-1", + }); + + hoisted.readAcpSessionEntry.mockReturnValue({ + sessionKey: "agent:codex:acp:uncertain", + storeSessionKey: "agent:codex:acp:uncertain", + cfg: {} as OpenClawConfig, + storePath: "/tmp/mock-sessions.json", + storeReadFailed: true, + entry: undefined, + acp: undefined, + }); + + 
const result = reconcileAcpThreadBindingsOnStartup({ + cfg: {} as OpenClawConfig, + accountId: "default", + }); + + expect(result.checked).toBe(1); + expect(result.removed).toBe(0); + expect(result.staleSessionKeys).toEqual([]); + expect(manager.getByThreadId("thread-acp-uncertain")).toBeDefined(); + }); + it("persists unbinds even when no manager is active", () => { const previousStateDir = process.env.OPENCLAW_STATE_DIR; const stateDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-thread-bindings-")); diff --git a/src/discord/monitor/threading.starter.test.ts b/src/discord/monitor/threading.starter.test.ts new file mode 100644 index 00000000000..07268d7fae9 --- /dev/null +++ b/src/discord/monitor/threading.starter.test.ts @@ -0,0 +1,55 @@ +import { ChannelType, type Client } from "@buape/carbon"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + __resetDiscordThreadStarterCacheForTest, + resolveDiscordThreadStarter, +} from "./threading.js"; + +describe("resolveDiscordThreadStarter", () => { + beforeEach(() => { + __resetDiscordThreadStarterCacheForTest(); + }); + + it("falls back to joined embed title and description when content is empty", async () => { + const get = vi.fn().mockResolvedValue({ + content: " ", + embeds: [{ title: "Alert", description: "Details" }], + author: { username: "Alice", discriminator: "0" }, + timestamp: "2026-02-24T12:00:00.000Z", + }); + const client = { rest: { get } } as unknown as Client; + + const result = await resolveDiscordThreadStarter({ + channel: { id: "thread-1" }, + client, + parentId: "parent-1", + parentType: ChannelType.GuildText, + resolveTimestampMs: () => 123, + }); + + expect(result).toEqual({ + text: "Alert\nDetails", + author: "Alice", + timestamp: 123, + }); + }); + + it("prefers starter content over embed fallback text", async () => { + const get = vi.fn().mockResolvedValue({ + content: "starter content", + embeds: [{ title: "Alert", description: "Details" }], + author: { username: 
"Alice", discriminator: "0" }, + }); + const client = { rest: { get } } as unknown as Client; + + const result = await resolveDiscordThreadStarter({ + channel: { id: "thread-1" }, + client, + parentId: "parent-1", + parentType: ChannelType.GuildText, + resolveTimestampMs: () => undefined, + }); + + expect(result?.text).toBe("starter content"); + }); +}); diff --git a/src/discord/monitor/threading.ts b/src/discord/monitor/threading.ts index 877329c2995..14377d8e644 100644 --- a/src/discord/monitor/threading.ts +++ b/src/discord/monitor/threading.ts @@ -7,7 +7,11 @@ import { buildAgentSessionKey } from "../../routing/resolve-route.js"; import { truncateUtf16Safe } from "../../utils.js"; import type { DiscordChannelConfigResolved } from "./allow-list.js"; import type { DiscordMessageEvent } from "./listeners.js"; -import { resolveDiscordChannelInfo, resolveDiscordMessageChannelId } from "./message-utils.js"; +import { + resolveDiscordChannelInfo, + resolveDiscordEmbedText, + resolveDiscordMessageChannelId, +} from "./message-utils.js"; export type DiscordThreadChannel = { id: string; @@ -172,7 +176,7 @@ export async function resolveDiscordThreadStarter(params: { Routes.channelMessage(messageChannelId, params.channel.id), )) as { content?: string | null; - embeds?: Array<{ description?: string | null }>; + embeds?: Array<{ title?: string | null; description?: string | null }>; member?: { nick?: string | null; displayName?: string | null }; author?: { id?: string | null; @@ -184,7 +188,9 @@ export async function resolveDiscordThreadStarter(params: { if (!starter) { return null; } - const text = starter.content?.trim() ?? starter.embeds?.[0]?.description?.trim() ?? ""; + const content = starter.content?.trim() ?? 
""; + const embedText = resolveDiscordEmbedText(starter.embeds?.[0]); + const text = content || embedText; if (!text) { return null; } diff --git a/src/docker-setup.test.ts b/src/docker-setup.test.ts index 20f754990e3..8737ff5a793 100644 --- a/src/docker-setup.test.ts +++ b/src/docker-setup.test.ts @@ -168,6 +168,27 @@ describe("docker-setup.sh", () => { expect(identityDirStat.isDirectory()).toBe(true); }); + it("reuses existing config token when OPENCLAW_GATEWAY_TOKEN is unset", async () => { + const activeSandbox = requireSandbox(sandbox); + const configDir = join(activeSandbox.rootDir, "config-token-reuse"); + const workspaceDir = join(activeSandbox.rootDir, "workspace-token-reuse"); + await mkdir(configDir, { recursive: true }); + await writeFile( + join(configDir, "openclaw.json"), + JSON.stringify({ gateway: { auth: { mode: "token", token: "config-token-123" } } }), + ); + + const result = runDockerSetup(activeSandbox, { + OPENCLAW_GATEWAY_TOKEN: undefined, + OPENCLAW_CONFIG_DIR: configDir, + OPENCLAW_WORKSPACE_DIR: workspaceDir, + }); + + expect(result.status).toBe(0); + const envFile = await readFile(join(activeSandbox.rootDir, ".env"), "utf8"); + expect(envFile).toContain("OPENCLAW_GATEWAY_TOKEN=config-token-123"); + }); + it("rejects injected multiline OPENCLAW_EXTRA_MOUNTS values", async () => { const activeSandbox = requireSandbox(sandbox); diff --git a/src/dockerfile.test.ts b/src/dockerfile.test.ts index 4e75caeb420..5cd55d9b53f 100644 --- a/src/dockerfile.test.ts +++ b/src/dockerfile.test.ts @@ -9,7 +9,7 @@ const dockerfilePath = join(repoRoot, "Dockerfile"); describe("Dockerfile", () => { it("installs optional browser dependencies after pnpm install", async () => { const dockerfile = await readFile(dockerfilePath, "utf8"); - const installIndex = dockerfile.indexOf("RUN pnpm install --frozen-lockfile"); + const installIndex = dockerfile.indexOf("pnpm install --frozen-lockfile"); const browserArgIndex = dockerfile.indexOf("ARG 
OPENCLAW_INSTALL_BROWSER"); expect(installIndex).toBeGreaterThan(-1); diff --git a/src/entry.ts b/src/entry.ts index 92bd00640de..6f664edced0 100644 --- a/src/entry.ts +++ b/src/entry.ts @@ -15,6 +15,16 @@ const ENTRY_WRAPPER_PAIRS = [ { wrapperBasename: "openclaw.js", entryBasename: "entry.js" }, ] as const; +function shouldForceReadOnlyAuthStore(argv: string[]): boolean { + const tokens = argv.slice(2).filter((token) => token.length > 0 && !token.startsWith("-")); + for (let index = 0; index < tokens.length - 1; index += 1) { + if (tokens[index] === "secrets" && tokens[index + 1] === "audit") { + return true; + } + } + return false; +} + // Guard: only run entry-point logic when this file is the main module. // The bundler may import entry.js as a shared dependency when dist/index.js // is the actual entry point; without this guard the top-level code below @@ -32,6 +42,10 @@ if ( installProcessWarningFilter(); normalizeEnv(); + if (shouldForceReadOnlyAuthStore(process.argv)) { + process.env.OPENCLAW_AUTH_STORE_READONLY = "1"; + } + if (process.argv.includes("--no-color")) { process.env.NO_COLOR = "1"; process.env.FORCE_COLOR = "0"; diff --git a/src/gateway/client.ts b/src/gateway/client.ts index c95bbbcc36d..b9e7dd24830 100644 --- a/src/gateway/client.ts +++ b/src/gateway/client.ts @@ -21,7 +21,7 @@ import { type GatewayClientMode, type GatewayClientName, } from "../utils/message-channel.js"; -import { buildDeviceAuthPayload } from "./device-auth.js"; +import { buildDeviceAuthPayloadV3 } from "./device-auth.js"; import { isSecureWebSocketUrl } from "./net.js"; import { type ConnectParams, @@ -52,6 +52,7 @@ export type GatewayClientOptions = { clientDisplayName?: string; clientVersion?: string; platform?: string; + deviceFamily?: string; mode?: GatewayClientMode; role?: string; scopes?: string[]; @@ -265,11 +266,12 @@ export class GatewayClient { : undefined; const signedAtMs = Date.now(); const scopes = this.opts.scopes ?? 
["operator.admin"]; + const platform = this.opts.platform ?? process.platform; const device = (() => { if (!this.opts.deviceIdentity) { return undefined; } - const payload = buildDeviceAuthPayload({ + const payload = buildDeviceAuthPayloadV3({ deviceId: this.opts.deviceIdentity.deviceId, clientId: this.opts.clientName ?? GATEWAY_CLIENT_NAMES.GATEWAY_CLIENT, clientMode: this.opts.mode ?? GATEWAY_CLIENT_MODES.BACKEND, @@ -278,6 +280,8 @@ export class GatewayClient { signedAtMs, token: authToken ?? null, nonce, + platform, + deviceFamily: this.opts.deviceFamily, }); const signature = signDevicePayload(this.opts.deviceIdentity.privateKeyPem, payload); return { @@ -295,7 +299,8 @@ export class GatewayClient { id: this.opts.clientName ?? GATEWAY_CLIENT_NAMES.GATEWAY_CLIENT, displayName: this.opts.clientDisplayName, version: this.opts.clientVersion ?? "dev", - platform: this.opts.platform ?? process.platform, + platform, + deviceFamily: this.opts.deviceFamily, mode: this.opts.mode ?? GATEWAY_CLIENT_MODES.BACKEND, instanceId: this.opts.instanceId, }, diff --git a/src/gateway/config-reload.test.ts b/src/gateway/config-reload.test.ts index 08952449031..25137aef031 100644 --- a/src/gateway/config-reload.test.ts +++ b/src/gateway/config-reload.test.ts @@ -153,6 +153,12 @@ describe("buildGatewayReloadPlan", () => { expect(plan.noopPaths).toContain("gateway.remote.url"); }); + it("treats secrets config changes as no-op for gateway restart planning", () => { + const plan = buildGatewayReloadPlan(["secrets.providers.default.path"]); + expect(plan.restartGateway).toBe(false); + expect(plan.noopPaths).toContain("secrets.providers.default.path"); + }); + it("defaults unknown paths to restart", () => { const plan = buildGatewayReloadPlan(["unknownField"]); expect(plan.restartGateway).toBe(true); @@ -279,4 +285,54 @@ describe("startGatewayConfigReloader", () => { await reloader.stop(); }); + + it("contains restart callback failures and retries on subsequent changes", async () => { + 
const readSnapshot = vi + .fn<() => Promise>() + .mockResolvedValueOnce( + makeSnapshot({ + config: { + gateway: { reload: { debounceMs: 0 }, port: 18790 }, + }, + hash: "restart-1", + }), + ) + .mockResolvedValueOnce( + makeSnapshot({ + config: { + gateway: { reload: { debounceMs: 0 }, port: 18791 }, + }, + hash: "restart-2", + }), + ); + const { watcher, onHotReload, onRestart, log, reloader } = createReloaderHarness(readSnapshot); + onRestart.mockRejectedValueOnce(new Error("restart-check failed")); + onRestart.mockResolvedValueOnce(undefined); + + const unhandled: unknown[] = []; + const onUnhandled = (reason: unknown) => { + unhandled.push(reason); + }; + process.on("unhandledRejection", onUnhandled); + try { + watcher.emit("change"); + await vi.runOnlyPendingTimersAsync(); + await Promise.resolve(); + + expect(onHotReload).not.toHaveBeenCalled(); + expect(onRestart).toHaveBeenCalledTimes(1); + expect(log.error).toHaveBeenCalledWith("config restart failed: Error: restart-check failed"); + expect(unhandled).toEqual([]); + + watcher.emit("change"); + await vi.runOnlyPendingTimersAsync(); + await Promise.resolve(); + + expect(onRestart).toHaveBeenCalledTimes(2); + expect(unhandled).toEqual([]); + } finally { + process.off("unhandledRejection", onUnhandled); + await reloader.stop(); + } + }); }); diff --git a/src/gateway/config-reload.ts b/src/gateway/config-reload.ts index 64f04b15e65..3dedff84c49 100644 --- a/src/gateway/config-reload.ts +++ b/src/gateway/config-reload.ts @@ -82,6 +82,7 @@ const BASE_RELOAD_RULES_TAIL: ReloadRule[] = [ { prefix: "session", kind: "none" }, { prefix: "talk", kind: "none" }, { prefix: "skills", kind: "none" }, + { prefix: "secrets", kind: "none" }, { prefix: "plugins", kind: "restart" }, { prefix: "ui", kind: "none" }, { prefix: "gateway", kind: "restart" }, @@ -255,7 +256,7 @@ export function startGatewayConfigReloader(opts: { initialConfig: OpenClawConfig; readSnapshot: () => Promise; onHotReload: (plan: GatewayReloadPlan, 
nextConfig: OpenClawConfig) => Promise; - onRestart: (plan: GatewayReloadPlan, nextConfig: OpenClawConfig) => void; + onRestart: (plan: GatewayReloadPlan, nextConfig: OpenClawConfig) => void | Promise; log: { info: (msg: string) => void; warn: (msg: string) => void; @@ -291,7 +292,16 @@ export function startGatewayConfigReloader(opts: { return; } restartQueued = true; - opts.onRestart(plan, nextConfig); + void (async () => { + try { + await opts.onRestart(plan, nextConfig); + } catch (err) { + // Restart checks can fail (for example unresolved SecretRefs). Keep the + // reloader alive and allow a future change to retry restart scheduling. + restartQueued = false; + opts.log.error(`config restart failed: ${String(err)}`); + } + })(); }; const handleMissingSnapshot = (snapshot: ConfigFileSnapshot): boolean => { diff --git a/src/gateway/control-ui.ts b/src/gateway/control-ui.ts index aa5f3b90ead..ed7b7330e91 100644 --- a/src/gateway/control-ui.ts +++ b/src/gateway/control-ui.ts @@ -2,6 +2,7 @@ import fs from "node:fs"; import type { IncomingMessage, ServerResponse } from "node:http"; import path from "node:path"; import type { OpenClawConfig } from "../config/config.js"; +import { openBoundaryFileSync } from "../infra/boundary-file-read.js"; import { resolveControlUiRootSync } from "../infra/control-ui-assets.js"; import { isWithinDir } from "../infra/path-safety.js"; import { openVerifiedFileSync } from "../infra/safe-open-sync.js"; @@ -210,11 +211,6 @@ function serveResolvedIndexHtml(res: ServerResponse, body: string) { res.end(body); } -function isContainedPath(baseDir: string, targetPath: string): boolean { - const relative = path.relative(baseDir, targetPath); - return relative !== ".." && !relative.startsWith(`..${path.sep}`) && !path.isAbsolute(relative); -} - function isExpectedSafePathError(error: unknown): boolean { const code = typeof error === "object" && error !== null && "code" in error ? 
String(error.code) : ""; @@ -237,25 +233,20 @@ function resolveSafeControlUiFile( rootReal: string, filePath: string, ): { path: string; fd: number } | null { - try { - const fileReal = fs.realpathSync(filePath); - if (!isContainedPath(rootReal, fileReal)) { - return null; + const opened = openBoundaryFileSync({ + absolutePath: filePath, + rootPath: rootReal, + rootRealPath: rootReal, + boundaryLabel: "control ui root", + skipLexicalRootCheck: true, + }); + if (!opened.ok) { + if (opened.reason === "io") { + throw opened.error; } - const opened = openVerifiedFileSync({ filePath: fileReal, resolvedPath: fileReal }); - if (!opened.ok) { - if (opened.reason === "io") { - throw opened.error; - } - return null; - } - return { path: opened.path, fd: opened.fd }; - } catch (error) { - if (isExpectedSafePathError(error)) { - return null; - } - throw error; + return null; } + return { path: opened.path, fd: opened.fd }; } function isSafeRelativePath(relPath: string) { diff --git a/src/gateway/credentials.test.ts b/src/gateway/credentials.test.ts index 83ac99dbc80..1de2ce06541 100644 --- a/src/gateway/credentials.test.ts +++ b/src/gateway/credentials.test.ts @@ -86,6 +86,42 @@ describe("resolveGatewayCredentialsFromConfig", () => { expectEnvGatewayCredentials(resolved); }); + it("falls back to remote credentials in local mode when local auth is missing", () => { + const resolved = resolveGatewayCredentialsFromConfig({ + cfg: cfg({ + gateway: { + mode: "local", + remote: { token: "remote-token", password: "remote-password" }, + auth: {}, + }, + }), + env: {} as NodeJS.ProcessEnv, + includeLegacyEnv: false, + }); + expect(resolved).toEqual({ + token: "remote-token", + password: "remote-password", + }); + }); + + it("keeps local credentials ahead of remote fallback in local mode", () => { + const resolved = resolveGatewayCredentialsFromConfig({ + cfg: cfg({ + gateway: { + mode: "local", + remote: { token: "remote-token", password: "remote-password" }, + auth: { token: 
"local-token", password: "local-password" }, + }, + }), + env: {} as NodeJS.ProcessEnv, + includeLegacyEnv: false, + }); + expect(resolved).toEqual({ + token: "local-token", + password: "local-password", + }); + }); + it("uses remote-mode remote credentials before env and local config", () => { const resolved = resolveRemoteModeWithRemoteCredentials(); expect(resolved).toEqual({ diff --git a/src/gateway/credentials.ts b/src/gateway/credentials.ts index ff974728360..ace7ba4fd27 100644 --- a/src/gateway/credentials.ts +++ b/src/gateway/credentials.ts @@ -116,7 +116,7 @@ export function resolveGatewayCredentialsFromConfig(params: { const mode: GatewayCredentialMode = params.modeOverride ?? (params.cfg.gateway?.mode === "remote" ? "remote" : "local"); - const remote = mode === "remote" ? params.cfg.gateway?.remote : undefined; + const remote = params.cfg.gateway?.remote; const envToken = readGatewayTokenEnv(env, includeLegacyEnv); const envPassword = readGatewayPasswordEnv(env, includeLegacyEnv); @@ -129,9 +129,14 @@ export function resolveGatewayCredentialsFromConfig(params: { const localPasswordPrecedence = params.localPasswordPrecedence ?? "env-first"; if (mode === "local") { + // In local mode, prefer gateway.auth.token, but also accept gateway.remote.token + // as a fallback for cron commands and other local gateway clients. + // This allows users in remote mode to use a single token for all operations. + const fallbackToken = localToken ?? remoteToken; + const fallbackPassword = localPassword ?? 
remotePassword; const localResolved = resolveGatewayCredentialsFromValues({ - configToken: localToken, - configPassword: localPassword, + configToken: fallbackToken, + configPassword: fallbackPassword, env, includeLegacyEnv, tokenPrecedence: localTokenPrecedence, diff --git a/src/gateway/device-auth.test.ts b/src/gateway/device-auth.test.ts new file mode 100644 index 00000000000..9d7ac3fb7b5 --- /dev/null +++ b/src/gateway/device-auth.test.ts @@ -0,0 +1,29 @@ +import { describe, expect, it } from "vitest"; +import { buildDeviceAuthPayloadV3, normalizeDeviceMetadataForAuth } from "./device-auth.js"; + +describe("device-auth payload vectors", () => { + it("builds canonical v3 payload", () => { + const payload = buildDeviceAuthPayloadV3({ + deviceId: "dev-1", + clientId: "openclaw-macos", + clientMode: "ui", + role: "operator", + scopes: ["operator.admin", "operator.read"], + signedAtMs: 1_700_000_000_000, + token: "tok-123", + nonce: "nonce-abc", + platform: " IOS ", + deviceFamily: " iPhone ", + }); + + expect(payload).toBe( + "v3|dev-1|openclaw-macos|ui|operator|operator.admin,operator.read|1700000000000|tok-123|nonce-abc|ios|iphone", + ); + }); + + it("normalizes metadata with ASCII-only lowercase", () => { + expect(normalizeDeviceMetadataForAuth(" İOS ")).toBe("İos"); + expect(normalizeDeviceMetadataForAuth(" MAC ")).toBe("mac"); + expect(normalizeDeviceMetadataForAuth(undefined)).toBe(""); + }); +}); diff --git a/src/gateway/device-auth.ts b/src/gateway/device-auth.ts index 2e5b9e6fa20..e0ef2c4eeec 100644 --- a/src/gateway/device-auth.ts +++ b/src/gateway/device-auth.ts @@ -9,6 +9,28 @@ export type DeviceAuthPayloadParams = { nonce: string; }; +export type DeviceAuthPayloadV3Params = DeviceAuthPayloadParams & { + platform?: string | null; + deviceFamily?: string | null; +}; + +function toLowerAscii(input: string): string { + return input.replace(/[A-Z]/g, (char) => String.fromCharCode(char.charCodeAt(0) + 32)); +} + +export function 
normalizeDeviceMetadataForAuth(value?: string | null): string { + if (typeof value !== "string") { + return ""; + } + const trimmed = value.trim(); + if (!trimmed) { + return ""; + } + // Keep cross-runtime normalization deterministic (TS/Swift/Kotlin) by only + // lowercasing ASCII metadata fields used in auth payloads. + return toLowerAscii(trimmed); +} + export function buildDeviceAuthPayload(params: DeviceAuthPayloadParams): string { const scopes = params.scopes.join(","); const token = params.token ?? ""; @@ -24,3 +46,23 @@ export function buildDeviceAuthPayload(params: DeviceAuthPayloadParams): string params.nonce, ].join("|"); } + +export function buildDeviceAuthPayloadV3(params: DeviceAuthPayloadV3Params): string { + const scopes = params.scopes.join(","); + const token = params.token ?? ""; + const platform = normalizeDeviceMetadataForAuth(params.platform); + const deviceFamily = normalizeDeviceMetadataForAuth(params.deviceFamily); + return [ + "v3", + params.deviceId, + params.clientId, + params.clientMode, + params.role, + scopes, + String(params.signedAtMs), + token, + params.nonce, + platform, + deviceFamily, + ].join("|"); +} diff --git a/src/gateway/exec-approval-manager.ts b/src/gateway/exec-approval-manager.ts index 5e582d42a03..320b4da0b1f 100644 --- a/src/gateway/exec-approval-manager.ts +++ b/src/gateway/exec-approval-manager.ts @@ -1,20 +1,13 @@ import { randomUUID } from "node:crypto"; -import type { ExecApprovalDecision } from "../infra/exec-approvals.js"; +import type { + ExecApprovalDecision, + ExecApprovalRequestPayload as InfraExecApprovalRequestPayload, +} from "../infra/exec-approvals.js"; // Grace period to keep resolved entries for late awaitDecision calls const RESOLVED_ENTRY_GRACE_MS = 15_000; -export type ExecApprovalRequestPayload = { - command: string; - cwd?: string | null; - nodeId?: string | null; - host?: string | null; - security?: string | null; - ask?: string | null; - agentId?: string | null; - resolvedPath?: string | 
null; - sessionKey?: string | null; -}; +export type ExecApprovalRequestPayload = InfraExecApprovalRequestPayload; export type ExecApprovalRecord = { id: string; diff --git a/src/gateway/gateway-misc.test.ts b/src/gateway/gateway-misc.test.ts index a202e4b2915..e6f65ed1b77 100644 --- a/src/gateway/gateway-misc.test.ts +++ b/src/gateway/gateway-misc.test.ts @@ -334,6 +334,19 @@ describe("resolveNodeCommandAllowlist", () => { } }); + it("includes Android notifications.list by default", () => { + const allow = resolveNodeCommandAllowlist( + {}, + { + platform: "android 16", + deviceFamily: "Android", + }, + ); + + expect(allow.has("notifications.list")).toBe(true); + expect(allow.has("system.notify")).toBe(false); + }); + it("can explicitly allow dangerous commands via allowCommands", () => { const allow = resolveNodeCommandAllowlist( { diff --git a/src/gateway/gateway-models.profiles.live.test.ts b/src/gateway/gateway-models.profiles.live.test.ts index 3b2888da49d..09c4226c3ac 100644 --- a/src/gateway/gateway-models.profiles.live.test.ts +++ b/src/gateway/gateway-models.profiles.live.test.ts @@ -40,6 +40,11 @@ const THINKING_LEVEL = "high"; const THINKING_TAG_RE = /<\s*\/?\s*(?:think(?:ing)?|thought|antthinking)\s*>/i; const FINAL_TAG_RE = /<\s*\/?\s*final\s*>/i; const ANTHROPIC_MAGIC_STRING_TRIGGER_REFUSAL = "ANTHROPIC_MAGIC_STRING_TRIGGER_REFUSAL"; +const GATEWAY_LIVE_DEFAULT_TIMEOUT_MS = 20 * 60 * 1000; +const GATEWAY_LIVE_UNBOUNDED_TIMEOUT_MS = 60 * 60 * 1000; +const GATEWAY_LIVE_MAX_TIMEOUT_MS = 2 * 60 * 60 * 1000; +const GATEWAY_LIVE_MAX_MODELS = resolveGatewayLiveMaxModels(); +const GATEWAY_LIVE_SUITE_TIMEOUT_MS = resolveGatewayLiveSuiteTimeoutMs(GATEWAY_LIVE_MAX_MODELS); const describeLive = LIVE || GATEWAY_LIVE ? describe : describe.skip; @@ -64,6 +69,27 @@ function toInt(value: string | undefined, fallback: number): number { return Number.isFinite(parsed) ? 
parsed : fallback; } +function resolveGatewayLiveMaxModels(): number { + const gatewayMax = toInt(process.env.OPENCLAW_LIVE_GATEWAY_MAX_MODELS, -1); + if (gatewayMax >= 0) { + return gatewayMax; + } + // Reuse shared live-model cap when gateway-specific cap is not provided. + return Math.max(0, toInt(process.env.OPENCLAW_LIVE_MAX_MODELS, 0)); +} + +function resolveGatewayLiveSuiteTimeoutMs(maxModels: number): number { + if (maxModels <= 0) { + return GATEWAY_LIVE_UNBOUNDED_TIMEOUT_MS; + } + // Gateway live runs multiple probes per model; scale timeout by model cap. + const estimated = 5 * 60 * 1000 + maxModels * 90 * 1000; + return Math.max( + GATEWAY_LIVE_DEFAULT_TIMEOUT_MS, + Math.min(GATEWAY_LIVE_MAX_TIMEOUT_MS, estimated), + ); +} + function capByProviderSpread( items: T[], maxItems: number, @@ -1144,7 +1170,7 @@ describeLive("gateway live (dev agent, profile keys)", () => { const useModern = !rawModels || rawModels === "modern" || rawModels === "all"; const useExplicit = Boolean(rawModels) && !useModern; const filter = useExplicit ? parseFilter(rawModels) : null; - const maxModels = toInt(process.env.OPENCLAW_LIVE_GATEWAY_MAX_MODELS, 0); + const maxModels = GATEWAY_LIVE_MAX_MODELS; const wanted = filter ? 
all.filter((m) => filter.has(`${m.provider}/${m.id}`)) : all.filter((m) => isModernModelRef({ provider: m.provider, id: m.id })); @@ -1224,7 +1250,7 @@ describeLive("gateway live (dev agent, profile keys)", () => { logProgress("[minimax-anthropic] missing minimax provider config; skipping"); } }, - 20 * 60 * 1000, + GATEWAY_LIVE_SUITE_TIMEOUT_MS, ); it("z.ai fallback handles anthropic tool history", async () => { diff --git a/src/gateway/hooks.test.ts b/src/gateway/hooks.test.ts index fe60d792af0..bc2defccdb5 100644 --- a/src/gateway/hooks.test.ts +++ b/src/gateway/hooks.test.ts @@ -7,6 +7,7 @@ import { createIMessageTestPlugin } from "../test-utils/imessage-test-plugin.js" import { extractHookToken, isHookAgentAllowed, + normalizeHookDispatchSessionKey, resolveHookSessionKey, resolveHookTargetAgentId, normalizeAgentPayload, @@ -280,6 +281,24 @@ describe("gateway hooks helpers", () => { expect(resolvedKey).toEqual({ ok: true, value: "hook:ingress" }); }); + test("normalizeHookDispatchSessionKey strips duplicate target agent prefix", () => { + expect( + normalizeHookDispatchSessionKey({ + sessionKey: "agent:hooks:slack:channel:c123", + targetAgentId: "hooks", + }), + ).toBe("slack:channel:c123"); + }); + + test("normalizeHookDispatchSessionKey preserves non-target agent scoped keys", () => { + expect( + normalizeHookDispatchSessionKey({ + sessionKey: "agent:main:slack:channel:c123", + targetAgentId: "hooks", + }), + ).toBe("agent:main:slack:channel:c123"); + }); + test("resolveHooksConfig validates defaultSessionKey and generated fallback against prefixes", () => { expect(() => resolveHooksConfig({ diff --git a/src/gateway/hooks.ts b/src/gateway/hooks.ts index d4696fd1295..957056babcd 100644 --- a/src/gateway/hooks.ts +++ b/src/gateway/hooks.ts @@ -5,7 +5,7 @@ import { listChannelPlugins } from "../channels/plugins/index.js"; import type { ChannelId } from "../channels/plugins/types.js"; import type { OpenClawConfig } from "../config/config.js"; import { 
readJsonBodyWithLimit, requestBodyErrorToText } from "../infra/http-body.js"; -import { normalizeAgentId } from "../routing/session-key.js"; +import { normalizeAgentId, parseAgentSessionKey } from "../routing/session-key.js"; import { normalizeMessageChannel } from "../utils/message-channel.js"; import { type HookMappingResolved, resolveHookMappings } from "./hooks-mapping.js"; @@ -332,6 +332,25 @@ export function resolveHookSessionKey(params: { return { ok: true, value: generated }; } +export function normalizeHookDispatchSessionKey(params: { + sessionKey: string; + targetAgentId: string | undefined; +}): string { + const trimmed = params.sessionKey.trim(); + if (!trimmed || !params.targetAgentId) { + return trimmed; + } + const parsed = parseAgentSessionKey(trimmed); + if (!parsed) { + return trimmed; + } + const targetAgentId = normalizeAgentId(params.targetAgentId); + if (parsed.agentId !== targetAgentId) { + return `agent:${parsed.agentId}:${parsed.rest}`; + } + return parsed.rest; +} + export function normalizeAgentPayload(payload: Record): | { ok: true; diff --git a/src/gateway/method-scopes.ts b/src/gateway/method-scopes.ts index f52b24de759..8d1dbdb3cb7 100644 --- a/src/gateway/method-scopes.ts +++ b/src/gateway/method-scopes.ts @@ -101,6 +101,7 @@ const METHOD_SCOPE_GROUPS: Record = { "agents.delete", "skills.install", "skills.update", + "secrets.reload", "cron.add", "cron.update", "cron.remove", diff --git a/src/gateway/node-command-policy.ts b/src/gateway/node-command-policy.ts index ec829b0c5f6..e074b6681f7 100644 --- a/src/gateway/node-command-policy.ts +++ b/src/gateway/node-command-policy.ts @@ -1,4 +1,9 @@ import type { OpenClawConfig } from "../config/config.js"; +import { + NODE_BROWSER_PROXY_COMMAND, + NODE_SYSTEM_NOTIFY_COMMAND, + NODE_SYSTEM_RUN_COMMANDS, +} from "../infra/node-commands.js"; import type { NodeSession } from "./node-registry.js"; const CANVAS_COMMANDS = [ @@ -18,6 +23,7 @@ const CAMERA_DANGEROUS_COMMANDS = ["camera.snap", 
"camera.clip"]; const SCREEN_DANGEROUS_COMMANDS = ["screen.record"]; const LOCATION_COMMANDS = ["location.get"]; +const NOTIFICATION_COMMANDS = ["notifications.list"]; const DEVICE_COMMANDS = ["device.info", "device.status"]; @@ -37,9 +43,13 @@ const MOTION_COMMANDS = ["motion.activity", "motion.pedometer"]; const SMS_DANGEROUS_COMMANDS = ["sms.send"]; // iOS nodes don't implement system.run/which, but they do support notifications. -const IOS_SYSTEM_COMMANDS = ["system.notify"]; +const IOS_SYSTEM_COMMANDS = [NODE_SYSTEM_NOTIFY_COMMAND]; -const SYSTEM_COMMANDS = ["system.run", "system.which", "system.notify", "browser.proxy"]; +const SYSTEM_COMMANDS = [ + ...NODE_SYSTEM_RUN_COMMANDS, + NODE_SYSTEM_NOTIFY_COMMAND, + NODE_BROWSER_PROXY_COMMAND, +]; // "High risk" node commands. These can be enabled by explicitly adding them to // `gateway.nodes.allowCommands` (and ensuring they're not blocked by denyCommands). @@ -69,6 +79,7 @@ const PLATFORM_DEFAULTS: Record = { ...CANVAS_COMMANDS, ...CAMERA_COMMANDS, ...LOCATION_COMMANDS, + ...NOTIFICATION_COMMANDS, ...DEVICE_COMMANDS, ...CONTACTS_COMMANDS, ...CALENDAR_COMMANDS, diff --git a/src/gateway/node-invoke-system-run-approval-errors.ts b/src/gateway/node-invoke-system-run-approval-errors.ts new file mode 100644 index 00000000000..9c50a5004b1 --- /dev/null +++ b/src/gateway/node-invoke-system-run-approval-errors.ts @@ -0,0 +1,29 @@ +export type SystemRunApprovalGuardError = { + ok: false; + message: string; + details: Record; +}; + +export function systemRunApprovalGuardError(params: { + code: string; + message: string; + details?: Record; +}): SystemRunApprovalGuardError { + const details = params.details ? 
{ ...params.details } : {}; + return { + ok: false, + message: params.message, + details: { + code: params.code, + ...details, + }, + }; +} + +export function systemRunApprovalRequired(runId: string): SystemRunApprovalGuardError { + return systemRunApprovalGuardError({ + code: "APPROVAL_REQUIRED", + message: "approval required", + details: { runId }, + }); +} diff --git a/src/gateway/node-invoke-system-run-approval-match.test.ts b/src/gateway/node-invoke-system-run-approval-match.test.ts new file mode 100644 index 00000000000..9ba85d5350d --- /dev/null +++ b/src/gateway/node-invoke-system-run-approval-match.test.ts @@ -0,0 +1,167 @@ +import { describe, expect, test } from "vitest"; +import { buildSystemRunApprovalBindingV1 } from "../infra/system-run-approval-binding.js"; +import { evaluateSystemRunApprovalMatch } from "./node-invoke-system-run-approval-match.js"; + +describe("evaluateSystemRunApprovalMatch", () => { + test("rejects approvals that do not carry v1 binding", () => { + const result = evaluateSystemRunApprovalMatch({ + argv: ["echo", "SAFE"], + request: { + host: "node", + command: "echo SAFE", + }, + binding: { + cwd: null, + agentId: null, + sessionKey: null, + }, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.code).toBe("APPROVAL_REQUEST_MISMATCH"); + }); + + test("enforces exact argv binding in v1 object", () => { + const result = evaluateSystemRunApprovalMatch({ + argv: ["echo", "SAFE"], + request: { + host: "node", + command: "echo SAFE", + systemRunBindingV1: buildSystemRunApprovalBindingV1({ + argv: ["echo", "SAFE"], + cwd: null, + agentId: null, + sessionKey: null, + }).binding, + }, + binding: { + cwd: null, + agentId: null, + sessionKey: null, + }, + }); + expect(result).toEqual({ ok: true }); + }); + + test("rejects argv mismatch in v1 object", () => { + const result = evaluateSystemRunApprovalMatch({ + argv: ["echo", "SAFE"], + request: { + host: "node", + command: "echo 
SAFE", + systemRunBindingV1: buildSystemRunApprovalBindingV1({ + argv: ["echo SAFE"], + cwd: null, + agentId: null, + sessionKey: null, + }).binding, + }, + binding: { + cwd: null, + agentId: null, + sessionKey: null, + }, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.code).toBe("APPROVAL_REQUEST_MISMATCH"); + }); + + test("rejects env overrides when v1 binding has no env hash", () => { + const result = evaluateSystemRunApprovalMatch({ + argv: ["git", "diff"], + request: { + host: "node", + command: "git diff", + systemRunBindingV1: buildSystemRunApprovalBindingV1({ + argv: ["git", "diff"], + cwd: null, + agentId: null, + sessionKey: null, + }).binding, + }, + binding: { + cwd: null, + agentId: null, + sessionKey: null, + env: { GIT_EXTERNAL_DIFF: "/tmp/pwn.sh" }, + }, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.code).toBe("APPROVAL_ENV_BINDING_MISSING"); + }); + + test("accepts matching env hash with reordered keys", () => { + const result = evaluateSystemRunApprovalMatch({ + argv: ["git", "diff"], + request: { + host: "node", + command: "git diff", + systemRunBindingV1: buildSystemRunApprovalBindingV1({ + argv: ["git", "diff"], + cwd: null, + agentId: null, + sessionKey: null, + env: { SAFE_A: "1", SAFE_B: "2" }, + }).binding, + }, + binding: { + cwd: null, + agentId: null, + sessionKey: null, + env: { SAFE_B: "2", SAFE_A: "1" }, + }, + }); + expect(result).toEqual({ ok: true }); + }); + + test("rejects non-node host requests", () => { + const result = evaluateSystemRunApprovalMatch({ + argv: ["echo", "SAFE"], + request: { + host: "gateway", + command: "echo SAFE", + }, + binding: { + cwd: null, + agentId: null, + sessionKey: null, + }, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.code).toBe("APPROVAL_REQUEST_MISMATCH"); + }); + + test("uses v1 binding even when 
legacy command text diverges", () => { + const result = evaluateSystemRunApprovalMatch({ + argv: ["echo", "SAFE"], + request: { + host: "node", + command: "echo STALE", + commandArgv: ["echo STALE"], + systemRunBindingV1: buildSystemRunApprovalBindingV1({ + argv: ["echo", "SAFE"], + cwd: null, + agentId: null, + sessionKey: null, + }).binding, + }, + binding: { + cwd: null, + agentId: null, + sessionKey: null, + }, + }); + expect(result).toEqual({ ok: true }); + }); +}); diff --git a/src/gateway/node-invoke-system-run-approval-match.ts b/src/gateway/node-invoke-system-run-approval-match.ts new file mode 100644 index 00000000000..c67231f760c --- /dev/null +++ b/src/gateway/node-invoke-system-run-approval-match.ts @@ -0,0 +1,55 @@ +import type { ExecApprovalRequestPayload } from "../infra/exec-approvals.js"; +import { + buildSystemRunApprovalBindingV1, + missingSystemRunApprovalBindingV1, + matchSystemRunApprovalBindingV1, + type SystemRunApprovalMatchResult, +} from "../infra/system-run-approval-binding.js"; + +export type SystemRunApprovalBinding = { + cwd: string | null; + agentId: string | null; + sessionKey: string | null; + env?: unknown; +}; + +function requestMismatch(): SystemRunApprovalMatchResult { + return { + ok: false, + code: "APPROVAL_REQUEST_MISMATCH", + message: "approval id does not match request", + }; +} + +export { toSystemRunApprovalMismatchError } from "../infra/system-run-approval-binding.js"; +export type { SystemRunApprovalMatchResult } from "../infra/system-run-approval-binding.js"; + +export function evaluateSystemRunApprovalMatch(params: { + argv: string[]; + request: ExecApprovalRequestPayload; + binding: SystemRunApprovalBinding; +}): SystemRunApprovalMatchResult { + if (params.request.host !== "node") { + return requestMismatch(); + } + + const actualBinding = buildSystemRunApprovalBindingV1({ + argv: params.argv, + cwd: params.binding.cwd, + agentId: params.binding.agentId, + sessionKey: params.binding.sessionKey, + env: 
params.binding.env, + }); + + const expectedBinding = params.request.systemRunBindingV1; + if (!expectedBinding) { + return missingSystemRunApprovalBindingV1({ + actualEnvKeys: actualBinding.envKeys, + }); + } + return matchSystemRunApprovalBindingV1({ + expected: expectedBinding, + actual: actualBinding.binding, + actualEnvKeys: actualBinding.envKeys, + }); +} diff --git a/src/gateway/node-invoke-system-run-approval.test.ts b/src/gateway/node-invoke-system-run-approval.test.ts index 196b5947f45..50798323a3b 100644 --- a/src/gateway/node-invoke-system-run-approval.test.ts +++ b/src/gateway/node-invoke-system-run-approval.test.ts @@ -1,4 +1,8 @@ import { describe, expect, test } from "vitest"; +import { + buildSystemRunApprovalBindingV1, + buildSystemRunApprovalEnvBinding, +} from "../infra/system-run-approval-binding.js"; import { ExecApprovalManager, type ExecApprovalRecord } from "./exec-approval-manager.js"; import { sanitizeSystemRunParamsForForwarding } from "./node-invoke-system-run-approval.js"; @@ -13,13 +17,25 @@ describe("sanitizeSystemRunParamsForForwarding", () => { }, }; - function makeRecord(command: string): ExecApprovalRecord { + function makeRecord( + command: string, + commandArgv?: string[], + bindingArgv?: string[], + ): ExecApprovalRecord { + const effectiveBindingArgv = bindingArgv ?? commandArgv ?? 
[command]; return { id: "approval-1", request: { host: "node", nodeId: "node-1", command, + commandArgv, + systemRunBindingV1: buildSystemRunApprovalBindingV1({ + argv: effectiveBindingArgv, + cwd: null, + agentId: null, + sessionKey: null, + }).binding, cwd: null, agentId: null, sessionKey: null, @@ -95,7 +111,16 @@ describe("sanitizeSystemRunParamsForForwarding", () => { }, nodeId: "node-1", client, - execApprovalManager: manager(makeRecord("echo SAFE&&whoami")), + execApprovalManager: manager( + makeRecord("echo SAFE&&whoami", undefined, [ + "cmd.exe", + "/d", + "/s", + "/c", + "echo", + "SAFE&&whoami", + ]), + ), nowMs: now, }); expectAllowOnceForwardingResult(result); @@ -133,12 +158,202 @@ describe("sanitizeSystemRunParamsForForwarding", () => { nodeId: "node-1", client, execApprovalManager: manager( - makeRecord('/usr/bin/env BASH_ENV=/tmp/payload.sh bash -lc "echo SAFE"'), + makeRecord('/usr/bin/env BASH_ENV=/tmp/payload.sh bash -lc "echo SAFE"', undefined, [ + "/usr/bin/env", + "BASH_ENV=/tmp/payload.sh", + "bash", + "-lc", + "echo SAFE", + ]), ), nowMs: now, }); expectAllowOnceForwardingResult(result); }); + + test("rejects trailing-space argv mismatch against legacy command-only approval", () => { + const result = sanitizeSystemRunParamsForForwarding({ + rawParams: { + command: ["runner "], + runId: "approval-1", + approved: true, + approvalDecision: "allow-once", + }, + nodeId: "node-1", + client, + execApprovalManager: manager(makeRecord("runner")), + nowMs: now, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.message).toContain("approval id does not match request"); + expect(result.details?.code).toBe("APPROVAL_REQUEST_MISMATCH"); + }); + + test("enforces commandArgv identity when approval includes argv binding", () => { + const result = sanitizeSystemRunParamsForForwarding({ + rawParams: { + command: ["echo", "SAFE"], + runId: "approval-1", + approved: true, + approvalDecision: 
"allow-once", + }, + nodeId: "node-1", + client, + execApprovalManager: manager(makeRecord("echo SAFE", ["echo SAFE"])), + nowMs: now, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.message).toContain("approval id does not match request"); + expect(result.details?.code).toBe("APPROVAL_REQUEST_MISMATCH"); + }); + + test("accepts matching commandArgv binding for trailing-space argv", () => { + const result = sanitizeSystemRunParamsForForwarding({ + rawParams: { + command: ["runner "], + runId: "approval-1", + approved: true, + approvalDecision: "allow-once", + }, + nodeId: "node-1", + client, + execApprovalManager: manager(makeRecord('"runner "', ["runner "])), + nowMs: now, + }); + expectAllowOnceForwardingResult(result); + }); + + test("uses systemRunPlanV2 for forwarded command context and ignores caller tampering", () => { + const record = makeRecord("echo SAFE", ["echo", "SAFE"]); + record.request.systemRunPlanV2 = { + version: 2, + argv: ["/usr/bin/echo", "SAFE"], + cwd: "/real/cwd", + rawCommand: "/usr/bin/echo SAFE", + agentId: "main", + sessionKey: "agent:main:main", + }; + record.request.systemRunBindingV1 = buildSystemRunApprovalBindingV1({ + argv: ["/usr/bin/echo", "SAFE"], + cwd: "/real/cwd", + agentId: "main", + sessionKey: "agent:main:main", + }).binding; + const result = sanitizeSystemRunParamsForForwarding({ + rawParams: { + command: ["echo", "PWNED"], + rawCommand: "echo PWNED", + cwd: "/tmp/attacker-link/sub", + agentId: "attacker", + sessionKey: "agent:attacker:main", + runId: "approval-1", + approved: true, + approvalDecision: "allow-once", + }, + nodeId: "node-1", + client, + execApprovalManager: manager(record), + nowMs: now, + }); + expectAllowOnceForwardingResult(result); + if (!result.ok) { + throw new Error("unreachable"); + } + const forwarded = result.params as Record; + expect(forwarded.command).toEqual(["/usr/bin/echo", "SAFE"]); + 
expect(forwarded.rawCommand).toBe("/usr/bin/echo SAFE"); + expect(forwarded.cwd).toBe("/real/cwd"); + expect(forwarded.agentId).toBe("main"); + expect(forwarded.sessionKey).toBe("agent:main:main"); + }); + + test("rejects env overrides when approval record lacks env binding", () => { + const result = sanitizeSystemRunParamsForForwarding({ + rawParams: { + command: ["git", "diff"], + rawCommand: "git diff", + env: { GIT_EXTERNAL_DIFF: "/tmp/pwn.sh" }, + runId: "approval-1", + approved: true, + approvalDecision: "allow-once", + }, + nodeId: "node-1", + client, + execApprovalManager: manager(makeRecord("git diff", ["git", "diff"])), + nowMs: now, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.details?.code).toBe("APPROVAL_ENV_BINDING_MISSING"); + }); + + test("rejects env hash mismatch", () => { + const record = makeRecord("git diff", ["git", "diff"]); + record.request.systemRunBindingV1 = { + version: 1, + argv: ["git", "diff"], + cwd: null, + agentId: null, + sessionKey: null, + envHash: buildSystemRunApprovalEnvBinding({ SAFE: "1" }).envHash, + }; + const result = sanitizeSystemRunParamsForForwarding({ + rawParams: { + command: ["git", "diff"], + rawCommand: "git diff", + env: { SAFE: "2" }, + runId: "approval-1", + approved: true, + approvalDecision: "allow-once", + }, + nodeId: "node-1", + client, + execApprovalManager: manager(record), + nowMs: now, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.details?.code).toBe("APPROVAL_ENV_MISMATCH"); + }); + + test("accepts matching env hash with reordered keys", () => { + const record = makeRecord("git diff", ["git", "diff"]); + const binding = buildSystemRunApprovalEnvBinding({ SAFE_A: "1", SAFE_B: "2" }); + record.request.systemRunBindingV1 = { + version: 1, + argv: ["git", "diff"], + cwd: null, + agentId: null, + sessionKey: null, + envHash: binding.envHash, + }; + const result = 
sanitizeSystemRunParamsForForwarding({ + rawParams: { + command: ["git", "diff"], + rawCommand: "git diff", + env: { SAFE_B: "2", SAFE_A: "1" }, + runId: "approval-1", + approved: true, + approvalDecision: "allow-once", + }, + nodeId: "node-1", + client, + execApprovalManager: manager(record), + nowMs: now, + }); + expectAllowOnceForwardingResult(result); + }); + test("consumes allow-once approvals and blocks same runId replay", async () => { const approvalManager = new ExecApprovalManager(); const runId = "approval-replay-1"; @@ -147,6 +362,13 @@ describe("sanitizeSystemRunParamsForForwarding", () => { host: "node", nodeId: "node-1", command: "echo SAFE", + commandArgv: ["echo", "SAFE"], + systemRunBindingV1: buildSystemRunApprovalBindingV1({ + argv: ["echo", "SAFE"], + cwd: null, + agentId: null, + sessionKey: null, + }).binding, cwd: null, agentId: null, sessionKey: null, diff --git a/src/gateway/node-invoke-system-run-approval.ts b/src/gateway/node-invoke-system-run-approval.ts index d5600adf032..efee11572b1 100644 --- a/src/gateway/node-invoke-system-run-approval.ts +++ b/src/gateway/node-invoke-system-run-approval.ts @@ -1,5 +1,14 @@ +import { resolveSystemRunApprovalRuntimeContext } from "../infra/system-run-approval-context.js"; import { resolveSystemRunCommand } from "../infra/system-run-command.js"; import type { ExecApprovalRecord } from "./exec-approval-manager.js"; +import { + systemRunApprovalGuardError, + systemRunApprovalRequired, +} from "./node-invoke-system-run-approval-errors.js"; +import { + evaluateSystemRunApprovalMatch, + toSystemRunApprovalMismatchError, +} from "./node-invoke-system-run-approval-match.js"; type SystemRunParamsLike = { command?: unknown; @@ -53,40 +62,6 @@ function clientHasApprovals(client: ApprovalClient | null): boolean { return scopes.includes("operator.admin") || scopes.includes("operator.approvals"); } -function approvalMatchesRequest( - cmdText: string, - params: SystemRunParamsLike, - record: ExecApprovalRecord, -): 
boolean { - if (record.request.host !== "node") { - return false; - } - - if (!cmdText || record.request.command !== cmdText) { - return false; - } - - const reqCwd = record.request.cwd ?? null; - const runCwd = normalizeString(params.cwd) ?? null; - if (reqCwd !== runCwd) { - return false; - } - - const reqAgentId = record.request.agentId ?? null; - const runAgentId = normalizeString(params.agentId) ?? null; - if (reqAgentId !== runAgentId) { - return false; - } - - const reqSessionKey = record.request.sessionKey ?? null; - const runSessionKey = normalizeString(params.sessionKey) ?? null; - if (reqSessionKey !== runSessionKey) { - return false; - } - - return true; -} - function pickSystemRunParams(raw: Record): Record { // Defensive allowlist: only forward fields that the node-host `system.run` handler understands. // This prevents future internal control fields from being smuggled through the gateway. @@ -129,19 +104,6 @@ export function sanitizeSystemRunParamsForForwarding(opts: { } const p = obj as SystemRunParamsLike; - const cmdTextResolution = resolveSystemRunCommand({ - command: p.command, - rawCommand: p.rawCommand, - }); - if (!cmdTextResolution.ok) { - return { - ok: false, - message: cmdTextResolution.message, - details: cmdTextResolution.details, - }; - } - const cmdText = cmdTextResolution.cmdText; - const approved = p.approved === true; const requestedDecision = normalizeApprovalDecision(p.approvalDecision); const wantsApprovalOverride = approved || requestedDecision !== null; @@ -151,67 +113,76 @@ export function sanitizeSystemRunParamsForForwarding(opts: { const next: Record = pickSystemRunParams(obj); if (!wantsApprovalOverride) { + const cmdTextResolution = resolveSystemRunCommand({ + command: p.command, + rawCommand: p.rawCommand, + }); + if (!cmdTextResolution.ok) { + return { + ok: false, + message: cmdTextResolution.message, + details: cmdTextResolution.details, + }; + } return { ok: true, params: next }; } const runId = 
normalizeString(p.runId); if (!runId) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "MISSING_RUN_ID", message: "approval override requires params.runId", - details: { code: "MISSING_RUN_ID" }, - }; + }); } const manager = opts.execApprovalManager; if (!manager) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "APPROVALS_UNAVAILABLE", message: "exec approvals unavailable", - details: { code: "APPROVALS_UNAVAILABLE" }, - }; + }); } const snapshot = manager.getSnapshot(runId); if (!snapshot) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "UNKNOWN_APPROVAL_ID", message: "unknown or expired approval id", - details: { code: "UNKNOWN_APPROVAL_ID", runId }, - }; + details: { runId }, + }); } const nowMs = typeof opts.nowMs === "number" ? opts.nowMs : Date.now(); if (nowMs > snapshot.expiresAtMs) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "APPROVAL_EXPIRED", message: "approval expired", - details: { code: "APPROVAL_EXPIRED", runId }, - }; + details: { runId }, + }); } const targetNodeId = normalizeString(opts.nodeId); if (!targetNodeId) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "MISSING_NODE_ID", message: "node.invoke requires nodeId", - details: { code: "MISSING_NODE_ID", runId }, - }; + details: { runId }, + }); } const approvalNodeId = normalizeString(snapshot.request.nodeId); if (!approvalNodeId) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "APPROVAL_NODE_BINDING_MISSING", message: "approval id missing node binding", - details: { code: "APPROVAL_NODE_BINDING_MISSING", runId }, - }; + details: { runId }, + }); } if (approvalNodeId !== targetNodeId) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "APPROVAL_NODE_MISMATCH", message: "approval id not valid for this node", - details: { code: "APPROVAL_NODE_MISMATCH", runId }, - }; + details: { runId }, + }); } // Prefer binding by device 
identity (stable across reconnects / per-call clients like callGateway()). @@ -220,39 +191,80 @@ export function sanitizeSystemRunParamsForForwarding(opts: { const clientDeviceId = opts.client?.connect?.device?.id ?? null; if (snapshotDeviceId) { if (snapshotDeviceId !== clientDeviceId) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "APPROVAL_DEVICE_MISMATCH", message: "approval id not valid for this device", - details: { code: "APPROVAL_DEVICE_MISMATCH", runId }, - }; + details: { runId }, + }); } } else if ( snapshot.requestedByConnId && snapshot.requestedByConnId !== (opts.client?.connId ?? null) ) { - return { - ok: false, + return systemRunApprovalGuardError({ + code: "APPROVAL_CLIENT_MISMATCH", message: "approval id not valid for this client", - details: { code: "APPROVAL_CLIENT_MISMATCH", runId }, - }; + details: { runId }, + }); } - if (!approvalMatchesRequest(cmdText, p, snapshot)) { + const runtimeContext = resolveSystemRunApprovalRuntimeContext({ + planV2: snapshot.request.systemRunPlanV2 ?? 
null, + command: p.command, + rawCommand: p.rawCommand, + cwd: p.cwd, + agentId: p.agentId, + sessionKey: p.sessionKey, + }); + if (!runtimeContext.ok) { return { ok: false, - message: "approval id does not match request", - details: { code: "APPROVAL_REQUEST_MISMATCH", runId }, + message: runtimeContext.message, + details: runtimeContext.details, }; } + if (runtimeContext.planV2) { + next.command = [...runtimeContext.planV2.argv]; + if (runtimeContext.rawCommand) { + next.rawCommand = runtimeContext.rawCommand; + } else { + delete next.rawCommand; + } + if (runtimeContext.cwd) { + next.cwd = runtimeContext.cwd; + } else { + delete next.cwd; + } + if (runtimeContext.agentId) { + next.agentId = runtimeContext.agentId; + } else { + delete next.agentId; + } + if (runtimeContext.sessionKey) { + next.sessionKey = runtimeContext.sessionKey; + } else { + delete next.sessionKey; + } + } + + const approvalMatch = evaluateSystemRunApprovalMatch({ + argv: runtimeContext.argv, + request: snapshot.request, + binding: { + cwd: runtimeContext.cwd, + agentId: runtimeContext.agentId, + sessionKey: runtimeContext.sessionKey, + env: p.env, + }, + }); + if (!approvalMatch.ok) { + return toSystemRunApprovalMismatchError({ runId, match: approvalMatch }); + } // Normal path: enforce the decision recorded by the gateway. 
if (snapshot.decision === "allow-once") { if (typeof manager.consumeAllowOnce !== "function" || !manager.consumeAllowOnce(runId)) { - return { - ok: false, - message: "approval required", - details: { code: "APPROVAL_REQUIRED", runId }, - }; + return systemRunApprovalRequired(runId); } next.approved = true; next.approvalDecision = "allow-once"; @@ -282,9 +294,5 @@ export function sanitizeSystemRunParamsForForwarding(opts: { return { ok: true, params: next }; } - return { - ok: false, - message: "approval required", - details: { code: "APPROVAL_REQUIRED", runId }, - }; + return systemRunApprovalRequired(runId); } diff --git a/src/gateway/protocol/schema/agent.ts b/src/gateway/protocol/schema/agent.ts index b8c883f7f53..1508c38f70e 100644 --- a/src/gateway/protocol/schema/agent.ts +++ b/src/gateway/protocol/schema/agent.ts @@ -22,6 +22,8 @@ export const SendParamsSchema = Type.Object( gifPlayback: Type.Optional(Type.Boolean()), channel: Type.Optional(Type.String()), accountId: Type.Optional(Type.String()), + /** Optional agent id for per-agent media root resolution on gateway sends. */ + agentId: Type.Optional(Type.String()), /** Thread id (channel-specific meaning, e.g. Telegram forum topic id). */ threadId: Type.Optional(Type.String()), /** Optional session key for mirroring delivered output back into the transcript. 
*/ diff --git a/src/gateway/protocol/schema/cron.ts b/src/gateway/protocol/schema/cron.ts index dae3b340d7e..7e0ebe54917 100644 --- a/src/gateway/protocol/schema/cron.ts +++ b/src/gateway/protocol/schema/cron.ts @@ -138,6 +138,7 @@ export const CronPayloadPatchSchema = Type.Union([ const CronDeliverySharedProperties = { channel: Type.Optional(Type.Union([Type.Literal("last"), NonEmptyString])), + accountId: Type.Optional(NonEmptyString), bestEffort: Type.Optional(Type.Boolean()), }; diff --git a/src/gateway/protocol/schema/devices.ts b/src/gateway/protocol/schema/devices.ts index 752347a092f..813390775c7 100644 --- a/src/gateway/protocol/schema/devices.ts +++ b/src/gateway/protocol/schema/devices.ts @@ -42,6 +42,7 @@ export const DevicePairRequestedEventSchema = Type.Object( publicKey: NonEmptyString, displayName: Type.Optional(NonEmptyString), platform: Type.Optional(NonEmptyString), + deviceFamily: Type.Optional(NonEmptyString), clientId: Type.Optional(NonEmptyString), clientMode: Type.Optional(NonEmptyString), role: Type.Optional(NonEmptyString), diff --git a/src/gateway/protocol/schema/exec-approvals.ts b/src/gateway/protocol/schema/exec-approvals.ts index a7c5fcf09bb..0358cde48fe 100644 --- a/src/gateway/protocol/schema/exec-approvals.ts +++ b/src/gateway/protocol/schema/exec-approvals.ts @@ -89,6 +89,21 @@ export const ExecApprovalRequestParamsSchema = Type.Object( { id: Type.Optional(NonEmptyString), command: NonEmptyString, + commandArgv: Type.Optional(Type.Array(Type.String())), + systemRunPlanV2: Type.Optional( + Type.Object( + { + version: Type.Literal(2), + argv: Type.Array(Type.String()), + cwd: Type.Union([Type.String(), Type.Null()]), + rawCommand: Type.Union([Type.String(), Type.Null()]), + agentId: Type.Union([Type.String(), Type.Null()]), + sessionKey: Type.Union([Type.String(), Type.Null()]), + }, + { additionalProperties: false }, + ), + ), + env: Type.Optional(Type.Record(NonEmptyString, Type.String())), cwd: 
Type.Optional(Type.Union([Type.String(), Type.Null()])), nodeId: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), host: Type.Optional(Type.Union([Type.String(), Type.Null()])), @@ -97,6 +112,10 @@ export const ExecApprovalRequestParamsSchema = Type.Object( agentId: Type.Optional(Type.Union([Type.String(), Type.Null()])), resolvedPath: Type.Optional(Type.Union([Type.String(), Type.Null()])), sessionKey: Type.Optional(Type.Union([Type.String(), Type.Null()])), + turnSourceChannel: Type.Optional(Type.Union([Type.String(), Type.Null()])), + turnSourceTo: Type.Optional(Type.Union([Type.String(), Type.Null()])), + turnSourceAccountId: Type.Optional(Type.Union([Type.String(), Type.Null()])), + turnSourceThreadId: Type.Optional(Type.Union([Type.String(), Type.Number(), Type.Null()])), timeoutMs: Type.Optional(Type.Integer({ minimum: 1 })), twoPhase: Type.Optional(Type.Boolean()), }, diff --git a/src/gateway/security-path.test.ts b/src/gateway/security-path.test.ts new file mode 100644 index 00000000000..f665efbfb35 --- /dev/null +++ b/src/gateway/security-path.test.ts @@ -0,0 +1,64 @@ +import { describe, expect, it } from "vitest"; +import { + PROTECTED_PLUGIN_ROUTE_PREFIXES, + canonicalizePathForSecurity, + isPathProtectedByPrefixes, + isProtectedPluginRoutePath, +} from "./security-path.js"; + +describe("security-path canonicalization", () => { + it("canonicalizes decoded case/slash variants", () => { + expect(canonicalizePathForSecurity("/API/channels//nostr/default/profile/")).toEqual({ + canonicalPath: "/api/channels/nostr/default/profile", + candidates: ["/api/channels/nostr/default/profile"], + malformedEncoding: false, + rawNormalizedPath: "/api/channels/nostr/default/profile", + }); + const encoded = canonicalizePathForSecurity("/api/%63hannels%2Fnostr%2Fdefault%2Fprofile"); + expect(encoded.canonicalPath).toBe("/api/channels/nostr/default/profile"); + expect(encoded.candidates).toContain("/api/%63hannels%2fnostr%2fdefault%2fprofile"); + 
expect(encoded.candidates).toContain("/api/channels/nostr/default/profile"); + }); + + it("resolves traversal after repeated decoding", () => { + expect( + canonicalizePathForSecurity("/api/foo/..%2fchannels/nostr/default/profile").canonicalPath, + ).toBe("/api/channels/nostr/default/profile"); + expect( + canonicalizePathForSecurity("/api/foo/%252e%252e%252fchannels/nostr/default/profile") + .canonicalPath, + ).toBe("/api/channels/nostr/default/profile"); + }); + + it("marks malformed encoding", () => { + expect(canonicalizePathForSecurity("/api/channels%2").malformedEncoding).toBe(true); + expect(canonicalizePathForSecurity("/api/channels%zz").malformedEncoding).toBe(true); + }); +}); + +describe("security-path protected-prefix matching", () => { + const channelVariants = [ + "/API/channels/nostr/default/profile", + "/api/channels%2Fnostr%2Fdefault%2Fprofile", + "/api/%63hannels/nostr/default/profile", + "/api/foo/..%2fchannels/nostr/default/profile", + "/api/foo/%2e%2e%2fchannels/nostr/default/profile", + "/api/foo/%252e%252e%252fchannels/nostr/default/profile", + "/api/channels%2", + "/api/channels%zz", + ]; + + for (const path of channelVariants) { + it(`protects plugin channel path variant: ${path}`, () => { + expect(isProtectedPluginRoutePath(path)).toBe(true); + expect(isPathProtectedByPrefixes(path, PROTECTED_PLUGIN_ROUTE_PREFIXES)).toBe(true); + }); + } + + it("does not protect unrelated paths", () => { + expect(isProtectedPluginRoutePath("/plugin/public")).toBe(false); + expect(isProtectedPluginRoutePath("/api/channels-public")).toBe(false); + expect(isProtectedPluginRoutePath("/api/foo/..%2fchannels-public")).toBe(false); + expect(isProtectedPluginRoutePath("/api/channel")).toBe(false); + }); +}); diff --git a/src/gateway/security-path.ts b/src/gateway/security-path.ts new file mode 100644 index 00000000000..7b9fa493aac --- /dev/null +++ b/src/gateway/security-path.ts @@ -0,0 +1,127 @@ +export type SecurityPathCanonicalization = { + canonicalPath: 
string; + candidates: string[]; + malformedEncoding: boolean; + rawNormalizedPath: string; +}; + +const MAX_PATH_DECODE_PASSES = 3; + +function normalizePathSeparators(pathname: string): string { + const collapsed = pathname.replace(/\/{2,}/g, "/"); + if (collapsed.length <= 1) { + return collapsed; + } + return collapsed.replace(/\/+$/, ""); +} + +function normalizeProtectedPrefix(prefix: string): string { + return normalizePathSeparators(prefix.toLowerCase()) || "/"; +} + +function resolveDotSegments(pathname: string): string { + try { + return new URL(pathname, "http://localhost").pathname; + } catch { + return pathname; + } +} + +function normalizePathForSecurity(pathname: string): string { + return normalizePathSeparators(resolveDotSegments(pathname).toLowerCase()) || "/"; +} + +function pushNormalizedCandidate(candidates: string[], seen: Set, value: string): void { + const normalized = normalizePathForSecurity(value); + if (seen.has(normalized)) { + return; + } + seen.add(normalized); + candidates.push(normalized); +} + +export function buildCanonicalPathCandidates( + pathname: string, + maxDecodePasses = MAX_PATH_DECODE_PASSES, +): { candidates: string[]; malformedEncoding: boolean } { + const candidates: string[] = []; + const seen = new Set(); + pushNormalizedCandidate(candidates, seen, pathname); + + let decoded = pathname; + let malformedEncoding = false; + for (let pass = 0; pass < maxDecodePasses; pass++) { + let nextDecoded = decoded; + try { + nextDecoded = decodeURIComponent(decoded); + } catch { + malformedEncoding = true; + break; + } + if (nextDecoded === decoded) { + break; + } + decoded = nextDecoded; + pushNormalizedCandidate(candidates, seen, decoded); + } + return { candidates, malformedEncoding }; +} + +export function canonicalizePathVariant(pathname: string): string { + const { candidates } = buildCanonicalPathCandidates(pathname); + return candidates[candidates.length - 1] ?? 
"/"; +} + +function prefixMatch(pathname: string, prefix: string): boolean { + return ( + pathname === prefix || + pathname.startsWith(`${prefix}/`) || + // Fail closed when malformed %-encoding follows the protected prefix. + pathname.startsWith(`${prefix}%`) + ); +} + +export function canonicalizePathForSecurity(pathname: string): SecurityPathCanonicalization { + const { candidates, malformedEncoding } = buildCanonicalPathCandidates(pathname); + + return { + canonicalPath: candidates[candidates.length - 1] ?? "/", + candidates, + malformedEncoding, + rawNormalizedPath: normalizePathSeparators(pathname.toLowerCase()) || "/", + }; +} + +const normalizedPrefixesCache = new WeakMap(); + +function getNormalizedPrefixes(prefixes: readonly string[]): readonly string[] { + const cached = normalizedPrefixesCache.get(prefixes); + if (cached) { + return cached; + } + const normalized = prefixes.map(normalizeProtectedPrefix); + normalizedPrefixesCache.set(prefixes, normalized); + return normalized; +} + +export function isPathProtectedByPrefixes(pathname: string, prefixes: readonly string[]): boolean { + const canonical = canonicalizePathForSecurity(pathname); + const normalizedPrefixes = getNormalizedPrefixes(prefixes); + if ( + canonical.candidates.some((candidate) => + normalizedPrefixes.some((prefix) => prefixMatch(candidate, prefix)), + ) + ) { + return true; + } + if (!canonical.malformedEncoding) { + return false; + } + return normalizedPrefixes.some((prefix) => prefixMatch(canonical.rawNormalizedPath, prefix)); +} + +export const PROTECTED_PLUGIN_ROUTE_PREFIXES = ["/api/channels"] as const; + +export function isProtectedPluginRoutePath(pathname: string): boolean { + return isPathProtectedByPrefixes(pathname, PROTECTED_PLUGIN_ROUTE_PREFIXES); +} diff --git a/src/gateway/server-http.hooks-request-timeout.test.ts b/src/gateway/server-http.hooks-request-timeout.test.ts index 448707eb1c7..577ffe1ab43 100644 --- a/src/gateway/server-http.hooks-request-timeout.test.ts +++ 
b/src/gateway/server-http.hooks-request-timeout.test.ts @@ -41,10 +41,11 @@ function createHooksConfig(): HooksConfigResolved { function createRequest(params?: { authorization?: string; remoteAddress?: string; + url?: string; }): IncomingMessage { return { method: "POST", - url: "/hooks/wake", + url: params?.url ?? "/hooks/wake", headers: { host: "127.0.0.1:18789", authorization: params?.authorization ?? "Bearer hook-secret", @@ -71,10 +72,11 @@ function createResponse(): { function createHandler(params?: { dispatchWakeHook?: HooksHandlerDeps["dispatchWakeHook"]; dispatchAgentHook?: HooksHandlerDeps["dispatchAgentHook"]; + bindHost?: string; }) { return createHooksRequestHandler({ getHooksConfig: () => createHooksConfig(), - bindHost: "127.0.0.1", + bindHost: params?.bindHost ?? "127.0.0.1", port: 18789, logHooks: { warn: vi.fn(), @@ -139,4 +141,18 @@ describe("createHooksRequestHandler timeout status mapping", () => { expect(mappedRes.statusCode).toBe(429); expect(setHeader).toHaveBeenCalledWith("Retry-After", expect.any(String)); }); + + test.each(["0.0.0.0", "::"])( + "does not throw when bindHost=%s while parsing non-hook request URL", + async (bindHost) => { + const handler = createHandler({ bindHost }); + const req = createRequest({ url: "/" }); + const { res, end } = createResponse(); + + const handled = await handler(req, res); + + expect(handled).toBe(false); + expect(end).not.toHaveBeenCalled(); + }, + ); }); diff --git a/src/gateway/server-http.ts b/src/gateway/server-http.ts index 72a81a769ad..0af1120d21f 100644 --- a/src/gateway/server-http.ts +++ b/src/gateway/server-http.ts @@ -49,6 +49,7 @@ import { normalizeHookHeaders, normalizeWakePayload, readJsonBody, + normalizeHookDispatchSessionKey, resolveHookSessionKey, resolveHookTargetAgentId, resolveHookChannel, @@ -59,6 +60,7 @@ import { getBearerToken } from "./http-utils.js"; import { handleOpenAiHttpRequest } from "./openai-http.js"; import { handleOpenResponsesHttpRequest } from 
"./openresponses-http.js"; import { GATEWAY_CLIENT_MODES, normalizeGatewayClientMode } from "./protocol/client-info.js"; +import { isProtectedPluginRoutePath } from "./security-path.js"; import type { GatewayWsClient } from "./server/ws-types.js"; import { handleToolsInvokeHttpRequest } from "./tools-invoke-http.js"; @@ -169,6 +171,34 @@ async function authorizeCanvasRequest(params: { return lastAuthFailure ?? { ok: false, reason: "unauthorized" }; } +async function enforcePluginRouteGatewayAuth(params: { + requestPath: string; + req: IncomingMessage; + res: ServerResponse; + auth: ResolvedGatewayAuth; + trustedProxies: string[]; + allowRealIpFallback: boolean; + rateLimiter?: AuthRateLimiter; +}): Promise { + if (!isProtectedPluginRoutePath(params.requestPath)) { + return true; + } + const token = getBearerToken(params.req); + const authResult = await authorizeHttpGatewayConnect({ + auth: params.auth, + connectAuth: token ? { token, password: token } : null, + req: params.req, + trustedProxies: params.trustedProxies, + allowRealIpFallback: params.allowRealIpFallback, + rateLimiter: params.rateLimiter, + }); + if (!authResult.ok) { + sendGatewayAuthFailure(params.res, authResult); + return false; + } + return true; +} + function writeUpgradeAuthFailure( socket: { write: (chunk: string) => void }, auth: GatewayAuthResult, @@ -208,7 +238,7 @@ export function createHooksRequestHandler( logHooks: SubsystemLogger; } & HookDispatchers, ): HooksRequestHandler { - const { getHooksConfig, bindHost, port, logHooks, dispatchAgentHook, dispatchWakeHook } = opts; + const { getHooksConfig, logHooks, dispatchAgentHook, dispatchWakeHook } = opts; const hookAuthLimiter = createAuthRateLimiter({ maxAttempts: HOOK_AUTH_FAILURE_LIMIT, windowMs: HOOK_AUTH_FAILURE_WINDOW_MS, @@ -227,7 +257,9 @@ export function createHooksRequestHandler( if (!hooksConfig) { return false; } - const url = new URL(req.url ?? 
"/", `http://${bindHost}:${port}`); + // Only pathname/search are used here; keep the base host fixed so bind-host + // representation (e.g. IPv6 wildcards) cannot break request parsing. + const url = new URL(req.url ?? "/", "http://localhost"); const basePath = hooksConfig.basePath; if (url.pathname !== basePath && !url.pathname.startsWith(`${basePath}/`)) { return false; @@ -324,10 +356,14 @@ export function createHooksRequestHandler( sendJson(res, 400, { ok: false, error: sessionKey.error }); return true; } + const targetAgentId = resolveHookTargetAgentId(hooksConfig, normalized.value.agentId); const runId = dispatchAgentHook({ ...normalized.value, - sessionKey: sessionKey.value, - agentId: resolveHookTargetAgentId(hooksConfig, normalized.value.agentId), + sessionKey: normalizeHookDispatchSessionKey({ + sessionKey: sessionKey.value, + targetAgentId, + }), + agentId: targetAgentId, }); sendJson(res, 202, { ok: true, runId }); return true; @@ -377,12 +413,16 @@ export function createHooksRequestHandler( sendJson(res, 400, { ok: false, error: sessionKey.error }); return true; } + const targetAgentId = resolveHookTargetAgentId(hooksConfig, mapped.action.agentId); const runId = dispatchAgentHook({ message: mapped.action.message, name: mapped.action.name ?? "Hook", - agentId: resolveHookTargetAgentId(hooksConfig, mapped.action.agentId), + agentId: targetAgentId, wakeMode: mapped.action.wakeMode, - sessionKey: sessionKey.value, + sessionKey: normalizeHookDispatchSessionKey({ + sessionKey: sessionKey.value, + targetAgentId, + }), deliver: resolveHookDeliver(mapped.action.deliver), channel, to: mapped.action.to, @@ -488,23 +528,20 @@ export function createGatewayHttpServer(opts: { return; } if (handlePluginRequest) { - // Channel HTTP endpoints are gateway-auth protected by default. - // Non-channel plugin routes remain plugin-owned and must enforce + // Protected plugin route prefixes are gateway-auth protected by default. 
+ // Non-protected plugin routes remain plugin-owned and must enforce // their own auth when exposing sensitive functionality. - if (requestPath === "/api/channels" || requestPath.startsWith("/api/channels/")) { - const token = getBearerToken(req); - const authResult = await authorizeHttpGatewayConnect({ - auth: resolvedAuth, - connectAuth: token ? { token, password: token } : null, - req, - trustedProxies, - allowRealIpFallback, - rateLimiter, - }); - if (!authResult.ok) { - sendGatewayAuthFailure(res, authResult); - return; - } + const pluginAuthOk = await enforcePluginRouteGatewayAuth({ + requestPath, + req, + res, + auth: resolvedAuth, + trustedProxies, + allowRealIpFallback, + rateLimiter, + }); + if (!pluginAuthOk) { + return; } if (await handlePluginRequest(req, res)) { return; diff --git a/src/gateway/server-methods-list.ts b/src/gateway/server-methods-list.ts index 4023fdb985e..76f400f36bd 100644 --- a/src/gateway/server-methods-list.ts +++ b/src/gateway/server-methods-list.ts @@ -50,6 +50,7 @@ const BASE_METHODS = [ "update.run", "voicewake.get", "voicewake.set", + "secrets.reload", "sessions.list", "sessions.preview", "sessions.patch", diff --git a/src/gateway/server-methods/agent.test.ts b/src/gateway/server-methods/agent.test.ts index 5d65d262735..75efc1c328f 100644 --- a/src/gateway/server-methods/agent.test.ts +++ b/src/gateway/server-methods/agent.test.ts @@ -43,9 +43,14 @@ vi.mock("../../commands/agent.js", () => ({ agentCommand: mocks.agentCommand, })); -vi.mock("../../config/config.js", () => ({ - loadConfig: () => mocks.loadConfigReturn, -})); +vi.mock("../../config/config.js", async () => { + const actual = + await vi.importActual("../../config/config.js"); + return { + ...actual, + loadConfig: () => mocks.loadConfigReturn, + }; +}); vi.mock("../../agents/agent-scope.js", () => ({ listAgentIds: () => ["main"], @@ -179,6 +184,8 @@ async function invokeAgent( respond?: ReturnType; reqId?: string; context?: GatewayRequestContext; + client?: 
AgentHandlerArgs["client"]; + isWebchatConnect?: AgentHandlerArgs["isWebchatConnect"]; }, ) { const respond = options?.respond ?? vi.fn(); @@ -187,8 +194,8 @@ async function invokeAgent( respond: respond as never, context: options?.context ?? makeContext(), req: { type: "req", id: options?.reqId ?? "agent-test-req", method: "agent" }, - client: null, - isWebchatConnect: () => false, + client: options?.client ?? null, + isWebchatConnect: options?.isWebchatConnect ?? (() => false), }); return respond; } @@ -218,6 +225,46 @@ async function invokeAgentIdentityGet( } describe("gateway agent handler", () => { + it("preserves ACP metadata from the current stored session entry", async () => { + const existingAcpMeta = { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime-1", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }; + + mockMainSessionEntry({ + acp: existingAcpMeta, + }); + + let capturedEntry: Record | undefined; + mocks.updateSessionStore.mockImplementation(async (_path, updater) => { + const store: Record = { + "agent:main:main": { + sessionId: "existing-session-id", + updatedAt: Date.now(), + acp: existingAcpMeta, + }, + }; + const result = await updater(store); + capturedEntry = store["agent:main:main"] as Record; + return result; + }); + + mocks.agentCommand.mockResolvedValue({ + payloads: [{ text: "ok" }], + meta: { durationMs: 100 }, + }); + + await runMainAgent("test", "test-idem-acp-meta"); + + expect(mocks.updateSessionStore).toHaveBeenCalled(); + expect(capturedEntry).toBeDefined(); + expect(capturedEntry?.acp).toEqual(existingAcpMeta); + }); + it("preserves cliSessionIds from existing session entry", async () => { const existingCliSessionIds = { "claude-cli": "abc-123-def" }; const existingClaudeCliSessionId = "abc-123-def"; @@ -301,6 +348,56 @@ describe("gateway agent handler", () => { expect(callArgs.bestEffortDeliver).toBe(false); }); + it("keeps origin messageChannel as webchat while delivery channel uses 
last session channel", async () => { + mockMainSessionEntry({ + sessionId: "existing-session-id", + lastChannel: "telegram", + lastTo: "12345", + }); + mocks.updateSessionStore.mockImplementation(async (_path, updater) => { + const store: Record = { + "agent:main:main": { + sessionId: "existing-session-id", + updatedAt: Date.now(), + lastChannel: "telegram", + lastTo: "12345", + }, + }; + return await updater(store); + }); + mocks.agentCommand.mockResolvedValue({ + payloads: [{ text: "ok" }], + meta: { durationMs: 100 }, + }); + + await invokeAgent( + { + message: "webchat turn", + sessionKey: "agent:main:main", + idempotencyKey: "test-webchat-origin-channel", + }, + { + reqId: "webchat-origin-1", + client: { + connect: { + client: { id: "webchat-ui", mode: "webchat" }, + }, + } as AgentHandlerArgs["client"], + isWebchatConnect: () => true, + }, + ); + + await vi.waitFor(() => expect(mocks.agentCommand).toHaveBeenCalled()); + const callArgs = mocks.agentCommand.mock.calls.at(-1)?.[0] as { + channel?: string; + messageChannel?: string; + runContext?: { messageChannel?: string }; + }; + expect(callArgs.channel).toBe("telegram"); + expect(callArgs.messageChannel).toBe("webchat"); + expect(callArgs.runContext?.messageChannel).toBe("webchat"); + }); + it("handles missing cliSessionIds gracefully", async () => { mockMainSessionEntry({}); diff --git a/src/gateway/server-methods/agent.ts b/src/gateway/server-methods/agent.ts index 387077a8bbd..5aa518a558d 100644 --- a/src/gateway/server-methods/agent.ts +++ b/src/gateway/server-methods/agent.ts @@ -4,6 +4,7 @@ import { BARE_SESSION_RESET_PROMPT } from "../../auto-reply/reply/session-reset- import { agentCommand } from "../../commands/agent.js"; import { loadConfig } from "../../config/config.js"; import { + mergeSessionEntry, resolveAgentIdFromSessionKey, resolveExplicitAgentSessionKey, resolveAgentMainSessionKey, @@ -385,7 +386,7 @@ export const agentHandlers: GatewayRequestHandlers = { resolvedGroupChannel = 
resolvedGroupChannel || inheritedGroup?.groupChannel; resolvedGroupSpace = resolvedGroupSpace || inheritedGroup?.groupSpace; const deliveryFields = normalizeSessionDeliveryFields(entry); - const nextEntry: SessionEntry = { + const nextEntryPatch: SessionEntry = { sessionId, updatedAt: now, thinkingLevel: entry?.thinkingLevel, @@ -410,7 +411,7 @@ export const agentHandlers: GatewayRequestHandlers = { cliSessionIds: entry?.cliSessionIds, claudeCliSessionId: entry?.claudeCliSessionId, }; - sessionEntry = nextEntry; + sessionEntry = mergeSessionEntry(entry, nextEntryPatch); const sendPolicy = resolveSendPolicy({ cfg, entry, @@ -432,7 +433,7 @@ export const agentHandlers: GatewayRequestHandlers = { const agentId = resolveAgentIdFromSessionKey(canonicalSessionKey); const mainSessionKey = resolveAgentMainSessionKey({ cfg, agentId }); if (storePath) { - await updateSessionStore(storePath, (store) => { + const persisted = await updateSessionStore(storePath, (store) => { const target = resolveGatewaySessionStoreTarget({ cfg, key: requestedSessionKey, @@ -443,8 +444,11 @@ export const agentHandlers: GatewayRequestHandlers = { canonicalKey: target.canonicalKey, candidates: target.storeKeys, }); - store[canonicalSessionKey] = nextEntry; + const merged = mergeSessionEntry(store[canonicalSessionKey], nextEntryPatch); + store[canonicalSessionKey] = merged; + return merged; }); + sessionEntry = persisted; } if (canonicalSessionKey === mainSessionKey || canonicalSessionKey === "global") { context.addChatRun(idem, { @@ -559,6 +563,17 @@ export const agentHandlers: GatewayRequestHandlers = { return; } + const normalizedTurnSource = normalizeMessageChannel(turnSourceChannel); + const turnSourceMessageChannel = + normalizedTurnSource && isGatewayMessageChannel(normalizedTurnSource) + ? normalizedTurnSource + : undefined; + const originMessageChannel = + turnSourceMessageChannel ?? + (client?.connect && isWebchatConnect(client.connect) + ? 
INTERNAL_MESSAGE_CHANNEL + : resolvedChannel); + const deliver = request.deliver === true && resolvedChannel !== INTERNAL_MESSAGE_CHANNEL; const accepted = { @@ -590,7 +605,7 @@ export const agentHandlers: GatewayRequestHandlers = { accountId: resolvedAccountId, threadId: resolvedThreadId, runContext: { - messageChannel: resolvedChannel, + messageChannel: originMessageChannel, accountId: resolvedAccountId, groupId: resolvedGroupId, groupChannel: resolvedGroupChannel, @@ -603,7 +618,7 @@ export const agentHandlers: GatewayRequestHandlers = { spawnedBy: spawnedByValue, timeout: request.timeout?.toString(), bestEffortDeliver, - messageChannel: resolvedChannel, + messageChannel: originMessageChannel, runId, lane: request.lane, extraSystemPrompt: request.extraSystemPrompt, diff --git a/src/gateway/server-methods/agents-mutate.test.ts b/src/gateway/server-methods/agents-mutate.test.ts index 54c285203f3..a12db195c3a 100644 --- a/src/gateway/server-methods/agents-mutate.test.ts +++ b/src/gateway/server-methods/agents-mutate.test.ts @@ -1,3 +1,4 @@ +import path from "node:path"; import { describe, expect, it, vi, beforeEach } from "vitest"; /* ------------------------------------------------------------------ */ @@ -26,7 +27,10 @@ const mocks = vi.hoisted(() => ({ fsMkdir: vi.fn(async () => undefined), fsAppendFile: vi.fn(async () => {}), fsReadFile: vi.fn(async () => ""), - fsStat: vi.fn(async () => null), + fsStat: vi.fn(async (..._args: unknown[]) => null as import("node:fs").Stats | null), + fsLstat: vi.fn(async (..._args: unknown[]) => null as import("node:fs").Stats | null), + fsRealpath: vi.fn(async (p: string) => p), + fsOpen: vi.fn(async () => ({}) as unknown), })); vi.mock("../../config/config.js", () => ({ @@ -85,6 +89,9 @@ vi.mock("node:fs/promises", async () => { appendFile: mocks.fsAppendFile, readFile: mocks.fsReadFile, stat: mocks.fsStat, + lstat: mocks.fsLstat, + realpath: mocks.fsRealpath, + open: mocks.fsOpen, }; return { ...patched, default: patched }; 
}); @@ -125,6 +132,35 @@ function createErrnoError(code: string) { return err; } +function makeFileStat(params?: { + size?: number; + mtimeMs?: number; + dev?: number; + ino?: number; + nlink?: number; +}): import("node:fs").Stats { + return { + isFile: () => true, + isSymbolicLink: () => false, + size: params?.size ?? 10, + mtimeMs: params?.mtimeMs ?? 1234, + dev: params?.dev ?? 1, + ino: params?.ino ?? 1, + nlink: params?.nlink ?? 1, + } as unknown as import("node:fs").Stats; +} + +function makeSymlinkStat(params?: { dev?: number; ino?: number }): import("node:fs").Stats { + return { + isFile: () => false, + isSymbolicLink: () => true, + size: 0, + mtimeMs: 0, + dev: params?.dev ?? 1, + ino: params?.ino ?? 2, + } as unknown as import("node:fs").Stats; +} + function mockWorkspaceStateRead(params: { onboardingCompletedAt?: string; errorCode?: string; @@ -172,6 +208,19 @@ beforeEach(() => { mocks.fsStat.mockImplementation(async () => { throw createEnoentError(); }); + mocks.fsLstat.mockImplementation(async () => { + throw createEnoentError(); + }); + mocks.fsRealpath.mockImplementation(async (p: string) => p); + mocks.fsOpen.mockImplementation( + async () => + ({ + stat: async () => makeFileStat(), + readFile: async () => Buffer.from(""), + writeFile: async () => {}, + close: async () => {}, + }) as unknown, + ); }); /* ------------------------------------------------------------------ */ @@ -459,3 +508,209 @@ describe("agents.files.list", () => { expect(names).toContain("BOOTSTRAP.md"); }); }); + +describe("agents.files.get/set symlink safety", () => { + beforeEach(() => { + vi.clearAllMocks(); + mocks.loadConfigReturn = {}; + mocks.fsMkdir.mockResolvedValue(undefined); + }); + + it("rejects agents.files.get when allowlisted file symlink escapes workspace", async () => { + const workspace = "/workspace/test-agent"; + const candidate = path.resolve(workspace, "AGENTS.md"); + mocks.fsRealpath.mockImplementation(async (p: string) => { + if (p === workspace) { + return 
workspace; + } + if (p === candidate) { + return "/outside/secret.txt"; + } + return p; + }); + mocks.fsLstat.mockImplementation(async (...args: unknown[]) => { + const p = typeof args[0] === "string" ? args[0] : ""; + if (p === candidate) { + return makeSymlinkStat(); + } + throw createEnoentError(); + }); + + const { respond, promise } = makeCall("agents.files.get", { + agentId: "main", + name: "AGENTS.md", + }); + await promise; + + expect(respond).toHaveBeenCalledWith( + false, + undefined, + expect.objectContaining({ message: expect.stringContaining("unsafe workspace file") }), + ); + }); + + it("rejects agents.files.set when allowlisted file symlink escapes workspace", async () => { + const workspace = "/workspace/test-agent"; + const candidate = path.resolve(workspace, "AGENTS.md"); + mocks.fsRealpath.mockImplementation(async (p: string) => { + if (p === workspace) { + return workspace; + } + if (p === candidate) { + return "/outside/secret.txt"; + } + return p; + }); + mocks.fsLstat.mockImplementation(async (...args: unknown[]) => { + const p = typeof args[0] === "string" ? 
args[0] : ""; + if (p === candidate) { + return makeSymlinkStat(); + } + throw createEnoentError(); + }); + + const { respond, promise } = makeCall("agents.files.set", { + agentId: "main", + name: "AGENTS.md", + content: "x", + }); + await promise; + + expect(respond).toHaveBeenCalledWith( + false, + undefined, + expect.objectContaining({ message: expect.stringContaining("unsafe workspace file") }), + ); + expect(mocks.fsOpen).not.toHaveBeenCalled(); + }); + + it("allows in-workspace symlink targets for get/set", async () => { + const workspace = "/workspace/test-agent"; + const candidate = path.resolve(workspace, "AGENTS.md"); + const target = path.resolve(workspace, "policies", "AGENTS.md"); + const targetStat = makeFileStat({ size: 7, mtimeMs: 1700, dev: 9, ino: 42 }); + + mocks.fsRealpath.mockImplementation(async (p: string) => { + if (p === workspace) { + return workspace; + } + if (p === candidate) { + return target; + } + return p; + }); + mocks.fsLstat.mockImplementation(async (...args: unknown[]) => { + const p = typeof args[0] === "string" ? args[0] : ""; + if (p === candidate) { + return makeSymlinkStat({ dev: 9, ino: 41 }); + } + if (p === target) { + return targetStat; + } + throw createEnoentError(); + }); + mocks.fsStat.mockImplementation(async (...args: unknown[]) => { + const p = typeof args[0] === "string" ? 
args[0] : ""; + if (p === target) { + return targetStat; + } + throw createEnoentError(); + }); + mocks.fsOpen.mockImplementation( + async () => + ({ + stat: async () => targetStat, + readFile: async () => Buffer.from("inside\n"), + writeFile: async () => {}, + close: async () => {}, + }) as unknown, + ); + + const getCall = makeCall("agents.files.get", { agentId: "main", name: "AGENTS.md" }); + await getCall.promise; + expect(getCall.respond).toHaveBeenCalledWith( + true, + expect.objectContaining({ + file: expect.objectContaining({ missing: false, content: "inside\n" }), + }), + undefined, + ); + + const setCall = makeCall("agents.files.set", { + agentId: "main", + name: "AGENTS.md", + content: "updated\n", + }); + await setCall.promise; + expect(setCall.respond).toHaveBeenCalledWith( + true, + expect.objectContaining({ + ok: true, + file: expect.objectContaining({ missing: false, content: "updated\n" }), + }), + undefined, + ); + }); + + it("rejects agents.files.get when allowlisted file is a hardlinked alias", async () => { + const workspace = "/workspace/test-agent"; + const candidate = path.resolve(workspace, "AGENTS.md"); + mocks.fsRealpath.mockImplementation(async (p: string) => { + if (p === workspace) { + return workspace; + } + return p; + }); + mocks.fsLstat.mockImplementation(async (...args: unknown[]) => { + const p = typeof args[0] === "string" ? 
args[0] : ""; + if (p === candidate) { + return makeFileStat({ nlink: 2 }); + } + throw createEnoentError(); + }); + + const { respond, promise } = makeCall("agents.files.get", { + agentId: "main", + name: "AGENTS.md", + }); + await promise; + + expect(respond).toHaveBeenCalledWith( + false, + undefined, + expect.objectContaining({ message: expect.stringContaining("unsafe workspace file") }), + ); + }); + + it("rejects agents.files.set when allowlisted file is a hardlinked alias", async () => { + const workspace = "/workspace/test-agent"; + const candidate = path.resolve(workspace, "AGENTS.md"); + mocks.fsRealpath.mockImplementation(async (p: string) => { + if (p === workspace) { + return workspace; + } + return p; + }); + mocks.fsLstat.mockImplementation(async (...args: unknown[]) => { + const p = typeof args[0] === "string" ? args[0] : ""; + if (p === candidate) { + return makeFileStat({ nlink: 2 }); + } + throw createEnoentError(); + }); + + const { respond, promise } = makeCall("agents.files.set", { + agentId: "main", + name: "AGENTS.md", + content: "x", + }); + await promise; + + expect(respond).toHaveBeenCalledWith( + false, + undefined, + expect.objectContaining({ message: expect.stringContaining("unsafe workspace file") }), + ); + expect(mocks.fsOpen).not.toHaveBeenCalled(); + }); +}); diff --git a/src/gateway/server-methods/agents.ts b/src/gateway/server-methods/agents.ts index 04a716e077e..a59b689a27d 100644 --- a/src/gateway/server-methods/agents.ts +++ b/src/gateway/server-methods/agents.ts @@ -27,6 +27,10 @@ import { } from "../../commands/agents.config.js"; import { loadConfig, writeConfigFile } from "../../config/config.js"; import { resolveSessionTranscriptsDirForAgent } from "../../config/sessions/paths.js"; +import { sameFileIdentity } from "../../infra/file-identity.js"; +import { SafeOpenError, readLocalFileSafely, writeFileWithinRoot } from "../../infra/fs-safe.js"; +import { assertNoPathAliasEscape } from "../../infra/path-alias-guards.js"; 
+import { isNotFoundPathError } from "../../infra/path-guards.js"; import { DEFAULT_AGENT_ID, normalizeAgentId } from "../../routing/session-key.js"; import { resolveUserPath } from "../../utils.js"; import { @@ -97,10 +101,124 @@ type FileMeta = { updatedAtMs: number; }; -async function statFile(filePath: string): Promise { +type ResolvedAgentWorkspaceFilePath = + | { + kind: "ready"; + requestPath: string; + ioPath: string; + workspaceReal: string; + } + | { + kind: "missing"; + requestPath: string; + ioPath: string; + workspaceReal: string; + } + | { + kind: "invalid"; + requestPath: string; + reason: string; + }; + +async function resolveWorkspaceRealPath(workspaceDir: string): Promise { try { - const stat = await fs.stat(filePath); - if (!stat.isFile()) { + return await fs.realpath(workspaceDir); + } catch { + return path.resolve(workspaceDir); + } +} + +async function resolveAgentWorkspaceFilePath(params: { + workspaceDir: string; + name: string; + allowMissing: boolean; +}): Promise { + const requestPath = path.join(params.workspaceDir, params.name); + const workspaceReal = await resolveWorkspaceRealPath(params.workspaceDir); + const candidatePath = path.resolve(workspaceReal, params.name); + + try { + await assertNoPathAliasEscape({ + absolutePath: candidatePath, + rootPath: workspaceReal, + boundaryLabel: "workspace root", + }); + } catch (error) { + return { + kind: "invalid", + requestPath, + reason: error instanceof Error ? 
error.message : "path escapes workspace root", + }; + } + + let candidateLstat: Awaited>; + try { + candidateLstat = await fs.lstat(candidatePath); + } catch (err) { + if (isNotFoundPathError(err)) { + if (params.allowMissing) { + return { kind: "missing", requestPath, ioPath: candidatePath, workspaceReal }; + } + return { kind: "invalid", requestPath, reason: "file not found" }; + } + throw err; + } + + if (candidateLstat.isSymbolicLink()) { + let targetReal: string; + try { + targetReal = await fs.realpath(candidatePath); + } catch (err) { + if (isNotFoundPathError(err)) { + if (params.allowMissing) { + return { kind: "missing", requestPath, ioPath: candidatePath, workspaceReal }; + } + return { kind: "invalid", requestPath, reason: "file not found" }; + } + throw err; + } + let targetStat: Awaited>; + try { + targetStat = await fs.stat(targetReal); + } catch (err) { + if (isNotFoundPathError(err)) { + if (params.allowMissing) { + return { kind: "missing", requestPath, ioPath: targetReal, workspaceReal }; + } + return { kind: "invalid", requestPath, reason: "file not found" }; + } + throw err; + } + if (!targetStat.isFile()) { + return { kind: "invalid", requestPath, reason: "path is not a regular file" }; + } + if (targetStat.nlink > 1) { + return { kind: "invalid", requestPath, reason: "hardlinked file path not allowed" }; + } + return { kind: "ready", requestPath, ioPath: targetReal, workspaceReal }; + } + + if (!candidateLstat.isFile()) { + return { kind: "invalid", requestPath, reason: "path is not a regular file" }; + } + if (candidateLstat.nlink > 1) { + return { kind: "invalid", requestPath, reason: "hardlinked file path not allowed" }; + } + + const targetReal = await fs.realpath(candidatePath).catch(() => candidatePath); + return { kind: "ready", requestPath, ioPath: targetReal, workspaceReal }; +} + +async function statFileSafely(filePath: string): Promise { + try { + const [stat, lstat] = await Promise.all([fs.stat(filePath), fs.lstat(filePath)]); + 
if (lstat.isSymbolicLink() || !stat.isFile()) { + return null; + } + if (stat.nlink > 1) { + return null; + } + if (!sameFileIdentity(stat, lstat)) { return null; } return { @@ -125,8 +243,18 @@ async function listAgentFiles(workspaceDir: string, options?: { hideBootstrap?: ? BOOTSTRAP_FILE_NAMES_POST_ONBOARDING : BOOTSTRAP_FILE_NAMES; for (const name of bootstrapFileNames) { - const filePath = path.join(workspaceDir, name); - const meta = await statFile(filePath); + const resolved = await resolveAgentWorkspaceFilePath({ + workspaceDir, + name, + allowMissing: true, + }); + const filePath = resolved.requestPath; + const meta = + resolved.kind === "ready" + ? await statFileSafely(resolved.ioPath) + : resolved.kind === "missing" + ? null + : null; if (meta) { files.push({ name, @@ -140,29 +268,43 @@ async function listAgentFiles(workspaceDir: string, options?: { hideBootstrap?: } } - const primaryMemoryPath = path.join(workspaceDir, DEFAULT_MEMORY_FILENAME); - const primaryMeta = await statFile(primaryMemoryPath); + const primaryResolved = await resolveAgentWorkspaceFilePath({ + workspaceDir, + name: DEFAULT_MEMORY_FILENAME, + allowMissing: true, + }); + const primaryMeta = + primaryResolved.kind === "ready" ? await statFileSafely(primaryResolved.ioPath) : null; if (primaryMeta) { files.push({ name: DEFAULT_MEMORY_FILENAME, - path: primaryMemoryPath, + path: primaryResolved.requestPath, missing: false, size: primaryMeta.size, updatedAtMs: primaryMeta.updatedAtMs, }); } else { - const altMemoryPath = path.join(workspaceDir, DEFAULT_MEMORY_ALT_FILENAME); - const altMeta = await statFile(altMemoryPath); + const altMemoryResolved = await resolveAgentWorkspaceFilePath({ + workspaceDir, + name: DEFAULT_MEMORY_ALT_FILENAME, + allowMissing: true, + }); + const altMeta = + altMemoryResolved.kind === "ready" ? 
await statFileSafely(altMemoryResolved.ioPath) : null; if (altMeta) { files.push({ name: DEFAULT_MEMORY_ALT_FILENAME, - path: altMemoryPath, + path: altMemoryResolved.requestPath, missing: false, size: altMeta.size, updatedAtMs: altMeta.updatedAtMs, }); } else { - files.push({ name: DEFAULT_MEMORY_FILENAME, path: primaryMemoryPath, missing: true }); + files.push({ + name: DEFAULT_MEMORY_FILENAME, + path: primaryResolved.requestPath, + missing: true, + }); } } @@ -453,8 +595,23 @@ export const agentsHandlers: GatewayRequestHandlers = { } const { agentId, workspaceDir, name } = resolved; const filePath = path.join(workspaceDir, name); - const meta = await statFile(filePath); - if (!meta) { + const resolvedPath = await resolveAgentWorkspaceFilePath({ + workspaceDir, + name, + allowMissing: true, + }); + if (resolvedPath.kind === "invalid") { + respond( + false, + undefined, + errorShape( + ErrorCodes.INVALID_REQUEST, + `unsafe workspace file "${name}" (${resolvedPath.reason})`, + ), + ); + return; + } + if (resolvedPath.kind === "missing") { respond( true, { @@ -466,7 +623,29 @@ export const agentsHandlers: GatewayRequestHandlers = { ); return; } - const content = await fs.readFile(filePath, "utf-8"); + let safeRead: Awaited>; + try { + safeRead = await readLocalFileSafely({ filePath: resolvedPath.ioPath }); + } catch (err) { + if (err instanceof SafeOpenError && err.code === "not-found") { + respond( + true, + { + agentId, + workspace: workspaceDir, + file: { name, path: filePath, missing: true }, + }, + undefined, + ); + return; + } + respond( + false, + undefined, + errorShape(ErrorCodes.INVALID_REQUEST, `unsafe workspace file "${name}"`), + ); + return; + } respond( true, { @@ -476,9 +655,9 @@ export const agentsHandlers: GatewayRequestHandlers = { name, path: filePath, missing: false, - size: meta.size, - updatedAtMs: meta.updatedAtMs, - content, + size: safeRead.stat.size, + updatedAtMs: Math.floor(safeRead.stat.mtimeMs), + content: 
safeRead.buffer.toString("utf-8"), }, }, undefined, @@ -505,9 +684,39 @@ export const agentsHandlers: GatewayRequestHandlers = { const { agentId, workspaceDir, name } = resolved; await fs.mkdir(workspaceDir, { recursive: true }); const filePath = path.join(workspaceDir, name); + const resolvedPath = await resolveAgentWorkspaceFilePath({ + workspaceDir, + name, + allowMissing: true, + }); + if (resolvedPath.kind === "invalid") { + respond( + false, + undefined, + errorShape( + ErrorCodes.INVALID_REQUEST, + `unsafe workspace file "${name}" (${resolvedPath.reason})`, + ), + ); + return; + } const content = String(params.content ?? ""); - await fs.writeFile(filePath, content, "utf-8"); - const meta = await statFile(filePath); + try { + await writeFileWithinRoot({ + rootDir: workspaceDir, + relativePath: name, + data: content, + encoding: "utf8", + }); + } catch { + respond( + false, + undefined, + errorShape(ErrorCodes.INVALID_REQUEST, `unsafe workspace file "${name}"`), + ); + return; + } + const meta = await statFileSafely(resolvedPath.ioPath); respond( true, { diff --git a/src/gateway/server-methods/exec-approval.ts b/src/gateway/server-methods/exec-approval.ts index d1cfc9ec0d9..2d362efa214 100644 --- a/src/gateway/server-methods/exec-approval.ts +++ b/src/gateway/server-methods/exec-approval.ts @@ -3,6 +3,8 @@ import { DEFAULT_EXEC_APPROVAL_TIMEOUT_MS, type ExecApprovalDecision, } from "../../infra/exec-approvals.js"; +import { buildSystemRunApprovalBindingV1 } from "../../infra/system-run-approval-binding.js"; +import { resolveSystemRunApprovalRequestContext } from "../../infra/system-run-approval-context.js"; import type { ExecApprovalManager } from "../exec-approval-manager.js"; import { ErrorCodes, @@ -43,7 +45,10 @@ export function createExecApprovalHandlers( const p = params as { id?: string; command: string; + commandArgv?: string[]; + env?: Record; cwd?: string; + systemRunPlanV2?: unknown; nodeId?: string; host?: string; security?: string; @@ -51,6 +56,10 
@@ export function createExecApprovalHandlers( agentId?: string; resolvedPath?: string; sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; timeoutMs?: number; twoPhase?: boolean; }; @@ -60,6 +69,20 @@ export function createExecApprovalHandlers( const explicitId = typeof p.id === "string" && p.id.trim().length > 0 ? p.id.trim() : null; const host = typeof p.host === "string" ? p.host.trim() : ""; const nodeId = typeof p.nodeId === "string" ? p.nodeId.trim() : ""; + const approvalContext = resolveSystemRunApprovalRequestContext({ + host, + command: p.command, + commandArgv: p.commandArgv, + systemRunPlanV2: p.systemRunPlanV2, + cwd: p.cwd, + agentId: p.agentId, + sessionKey: p.sessionKey, + }); + const effectiveCommandArgv = approvalContext.commandArgv; + const effectiveCwd = approvalContext.cwd; + const effectiveAgentId = approvalContext.agentId; + const effectiveSessionKey = approvalContext.sessionKey; + const effectiveCommandText = approvalContext.commandText; if (host === "node" && !nodeId) { respond( false, @@ -68,6 +91,27 @@ export function createExecApprovalHandlers( ); return; } + if ( + host === "node" && + (!Array.isArray(effectiveCommandArgv) || effectiveCommandArgv.length === 0) + ) { + respond( + false, + undefined, + errorShape(ErrorCodes.INVALID_REQUEST, "commandArgv is required for host=node"), + ); + return; + } + const systemRunBindingV1 = + host === "node" + ? buildSystemRunApprovalBindingV1({ + argv: effectiveCommandArgv, + cwd: effectiveCwd, + agentId: effectiveAgentId, + sessionKey: effectiveSessionKey, + env: p.env, + }) + : null; if (explicitId && manager.getSnapshot(explicitId)) { respond( false, @@ -77,15 +121,25 @@ export function createExecApprovalHandlers( return; } const request = { - command: p.command, - cwd: p.cwd ?? null, + command: effectiveCommandText, + commandArgv: effectiveCommandArgv, + envKeys: systemRunBindingV1?.envKeys?.length ? 
systemRunBindingV1.envKeys : undefined, + systemRunBindingV1: systemRunBindingV1?.binding ?? null, + systemRunPlanV2: approvalContext.planV2, + cwd: effectiveCwd ?? null, nodeId: host === "node" ? nodeId : null, host: host || null, security: p.security ?? null, ask: p.ask ?? null, - agentId: p.agentId ?? null, + agentId: effectiveAgentId ?? null, resolvedPath: p.resolvedPath ?? null, - sessionKey: p.sessionKey ?? null, + sessionKey: effectiveSessionKey ?? null, + turnSourceChannel: + typeof p.turnSourceChannel === "string" ? p.turnSourceChannel.trim() || null : null, + turnSourceTo: typeof p.turnSourceTo === "string" ? p.turnSourceTo.trim() || null : null, + turnSourceAccountId: + typeof p.turnSourceAccountId === "string" ? p.turnSourceAccountId.trim() || null : null, + turnSourceThreadId: p.turnSourceThreadId ?? null, }; const record = manager.create(request, timeoutMs, explicitId); record.requestedByConnId = client?.connId ?? null; diff --git a/src/gateway/server-methods/secrets.test.ts b/src/gateway/server-methods/secrets.test.ts new file mode 100644 index 00000000000..202e1df8ae0 --- /dev/null +++ b/src/gateway/server-methods/secrets.test.ts @@ -0,0 +1,43 @@ +import { describe, expect, it, vi } from "vitest"; +import { createSecretsHandlers } from "./secrets.js"; + +describe("secrets handlers", () => { + it("responds with warning count on successful reload", async () => { + const handlers = createSecretsHandlers({ + reloadSecrets: vi.fn().mockResolvedValue({ warningCount: 2 }), + }); + const respond = vi.fn(); + await handlers["secrets.reload"]({ + req: { type: "req", id: "1", method: "secrets.reload" }, + params: {}, + client: null, + isWebchatConnect: () => false, + respond, + context: {} as never, + }); + expect(respond).toHaveBeenCalledWith(true, { ok: true, warningCount: 2 }); + }); + + it("returns unavailable when reload fails", async () => { + const handlers = createSecretsHandlers({ + reloadSecrets: vi.fn().mockRejectedValue(new Error("reload failed")), 
+ }); + const respond = vi.fn(); + await handlers["secrets.reload"]({ + req: { type: "req", id: "1", method: "secrets.reload" }, + params: {}, + client: null, + isWebchatConnect: () => false, + respond, + context: {} as never, + }); + expect(respond).toHaveBeenCalledWith( + false, + undefined, + expect.objectContaining({ + code: "UNAVAILABLE", + message: "Error: reload failed", + }), + ); + }); +}); diff --git a/src/gateway/server-methods/secrets.ts b/src/gateway/server-methods/secrets.ts new file mode 100644 index 00000000000..995fb384a80 --- /dev/null +++ b/src/gateway/server-methods/secrets.ts @@ -0,0 +1,17 @@ +import { ErrorCodes, errorShape } from "../protocol/index.js"; +import type { GatewayRequestHandlers } from "./types.js"; + +export function createSecretsHandlers(params: { + reloadSecrets: () => Promise<{ warningCount: number }>; +}): GatewayRequestHandlers { + return { + "secrets.reload": async ({ respond }) => { + try { + const result = await params.reloadSecrets(); + respond(true, { ok: true, warningCount: result.warningCount }); + } catch (err) { + respond(false, undefined, errorShape(ErrorCodes.UNAVAILABLE, String(err))); + } + }, + }; +} diff --git a/src/gateway/server-methods/send.test.ts b/src/gateway/server-methods/send.test.ts index 7209d3e6176..e3c3c168c31 100644 --- a/src/gateway/server-methods/send.test.ts +++ b/src/gateway/server-methods/send.test.ts @@ -1,5 +1,7 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; import { resolveOutboundTarget } from "../../infra/outbound/targets.js"; +import { setActivePluginRegistry } from "../../plugins/runtime.js"; +import { createTestRegistry } from "../../test-utils/channel-plugins.js"; import { sendHandlers } from "./send.js"; import type { GatewayRequestContext } from "./types.js"; @@ -10,6 +12,8 @@ const mocks = vi.hoisted(() => ({ resolveOutboundTarget: vi.fn(() => ({ ok: true, to: "resolved" })), resolveMessageChannelSelection: vi.fn(), sendPoll: vi.fn(async () => ({ messageId: 
"poll-1" })), + getChannelPlugin: vi.fn(), + loadOpenClawPlugins: vi.fn(), })); vi.mock("../../config/config.js", async () => { @@ -22,10 +26,38 @@ vi.mock("../../config/config.js", async () => { }); vi.mock("../../channels/plugins/index.js", () => ({ - getChannelPlugin: () => ({ outbound: { sendPoll: mocks.sendPoll } }), + getChannelPlugin: mocks.getChannelPlugin, normalizeChannelId: (value: string) => (value === "webchat" ? null : value), })); +vi.mock("../../agents/agent-scope.js", () => ({ + resolveSessionAgentId: ({ + sessionKey, + }: { + sessionKey?: string; + config?: unknown; + agentId?: string; + }) => { + if (typeof sessionKey === "string") { + const match = sessionKey.match(/^agent:([^:]+)/i); + if (match?.[1]) { + return match[1]; + } + } + return "main"; + }, + resolveDefaultAgentId: () => "main", + resolveAgentWorkspaceDir: () => "/tmp/openclaw-test-workspace", +})); + +vi.mock("../../config/plugin-auto-enable.js", () => ({ + applyPluginAutoEnable: ({ config }: { config: unknown }) => ({ config, changes: [] }), +})); + +vi.mock("../../plugins/loader.js", () => ({ + loadOpenClawPlugins: mocks.loadOpenClawPlugins, +})); + vi.mock("../../infra/outbound/targets.js", () => ({ resolveOutboundTarget: mocks.resolveOutboundTarget, })); @@ -85,14 +117,19 @@ function mockDeliverySuccess(messageId: string) { } describe("gateway send mirroring", () => { + let registrySeq = 0; + beforeEach(() => { vi.clearAllMocks(); + registrySeq += 1; + setActivePluginRegistry(createTestRegistry([]), `send-test-${registrySeq}`); mocks.resolveOutboundTarget.mockReturnValue({ ok: true, to: "resolved" }); mocks.resolveMessageChannelSelection.mockResolvedValue({ channel: "slack", configured: ["slack"], }); mocks.sendPoll.mockResolvedValue({ messageId: "poll-1" }); + mocks.getChannelPlugin.mockReturnValue({ outbound: { sendPoll: mocks.sendPoll } }); }); it("accepts media-only sends without message", async () => { @@ -342,6 +379,108 @@ describe("gateway send mirroring", () => { ); }); 
+ it("uses explicit agentId for delivery when sessionKey is not provided", async () => { + mockDeliverySuccess("m-agent"); + + await runSend({ + to: "channel:C1", + message: "hello", + channel: "slack", + agentId: "work", + idempotencyKey: "idem-agent-explicit", + }); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + session: expect.objectContaining({ + agentId: "work", + key: "agent:work:slack:channel:resolved", + }), + mirror: expect.objectContaining({ + sessionKey: "agent:work:slack:channel:resolved", + agentId: "work", + }), + }), + ); + }); + + it("uses sessionKey agentId when explicit agentId is omitted", async () => { + mockDeliverySuccess("m-session-agent"); + + await runSend({ + to: "channel:C1", + message: "hello", + channel: "slack", + sessionKey: "agent:work:slack:channel:c1", + idempotencyKey: "idem-session-agent", + }); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + session: expect.objectContaining({ + agentId: "work", + key: "agent:work:slack:channel:c1", + }), + mirror: expect.objectContaining({ + sessionKey: "agent:work:slack:channel:c1", + agentId: "work", + }), + }), + ); + }); + + it("prefers explicit agentId over sessionKey agent for delivery and mirror", async () => { + mockDeliverySuccess("m-agent-precedence"); + + await runSend({ + to: "channel:C1", + message: "hello", + channel: "slack", + agentId: "work", + sessionKey: "agent:main:slack:channel:c1", + idempotencyKey: "idem-agent-precedence", + }); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + session: expect.objectContaining({ + agentId: "work", + key: "agent:main:slack:channel:c1", + }), + mirror: expect.objectContaining({ + sessionKey: "agent:main:slack:channel:c1", + agentId: "work", + }), + }), + ); + }); + + it("ignores blank explicit agentId and falls back to sessionKey agent", async () => { + mockDeliverySuccess("m-agent-blank"); + + await 
runSend({ + to: "channel:C1", + message: "hello", + channel: "slack", + agentId: " ", + sessionKey: "agent:work:slack:channel:c1", + idempotencyKey: "idem-agent-blank", + }); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + session: expect.objectContaining({ + agentId: "work", + key: "agent:work:slack:channel:c1", + }), + mirror: expect.objectContaining({ + sessionKey: "agent:work:slack:channel:c1", + agentId: "work", + }), + }), + ); + }); + it("forwards threadId to outbound delivery when provided", async () => { mockDeliverySuccess("m-thread"); @@ -385,4 +524,39 @@ describe("gateway send mirroring", () => { }), ); }); + + it("recovers cold plugin resolution for telegram threaded sends", async () => { + mocks.resolveOutboundTarget.mockReturnValue({ ok: true, to: "123" }); + mocks.deliverOutboundPayloads.mockResolvedValue([ + { messageId: "m-telegram", channel: "telegram" }, + ]); + const telegramPlugin = { outbound: { sendPoll: mocks.sendPoll } }; + mocks.getChannelPlugin + .mockReturnValueOnce(undefined) + .mockReturnValueOnce(telegramPlugin) + .mockReturnValue(telegramPlugin); + + const { respond } = await runSend({ + to: "123", + message: "forum completion", + channel: "telegram", + threadId: "42", + idempotencyKey: "idem-cold-telegram-thread", + }); + + expect(mocks.loadOpenClawPlugins).toHaveBeenCalledTimes(1); + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "telegram", + to: "123", + threadId: "42", + }), + ); + expect(respond).toHaveBeenCalledWith( + true, + expect.objectContaining({ messageId: "m-telegram" }), + undefined, + expect.objectContaining({ channel: "telegram" }), + ); + }); }); diff --git a/src/gateway/server-methods/send.ts b/src/gateway/server-methods/send.ts index c404a47032a..8585f1c84aa 100644 --- a/src/gateway/server-methods/send.ts +++ b/src/gateway/server-methods/send.ts @@ -1,7 +1,8 @@ import { resolveSessionAgentId } from 
"../../agents/agent-scope.js"; -import { getChannelPlugin, normalizeChannelId } from "../../channels/plugins/index.js"; +import { normalizeChannelId } from "../../channels/plugins/index.js"; import { createOutboundSendDeps } from "../../cli/deps.js"; import { loadConfig } from "../../config/config.js"; +import { resolveOutboundChannelPlugin } from "../../infra/outbound/channel-resolution.js"; import { resolveMessageChannelSelection } from "../../infra/outbound/channel-selection.js"; import { deliverOutboundPayloads } from "../../infra/outbound/deliver.js"; import { @@ -9,6 +10,7 @@ import { resolveOutboundSessionRoute, } from "../../infra/outbound/outbound-session.js"; import { normalizeReplyPayloadsForDelivery } from "../../infra/outbound/payloads.js"; +import { buildOutboundSessionContext } from "../../infra/outbound/session-context.js"; import { resolveOutboundTarget } from "../../infra/outbound/targets.js"; import { normalizePollInput } from "../../polls.js"; import { @@ -106,6 +108,7 @@ export const sendHandlers: GatewayRequestHandlers = { gifPlayback?: boolean; channel?: string; accountId?: string; + agentId?: string; threadId?: string; sessionKey?: string; idempotencyKey: string; @@ -165,7 +168,7 @@ export const sendHandlers: GatewayRequestHandlers = { ? request.threadId.trim() : undefined; const outboundChannel = channel; - const plugin = getChannelPlugin(channel); + const plugin = resolveOutboundChannelPlugin({ channel, cfg }); if (!plugin) { respond( false, @@ -206,13 +209,21 @@ export const sendHandlers: GatewayRequestHandlers = { typeof request.sessionKey === "string" && request.sessionKey.trim() ? request.sessionKey.trim().toLowerCase() : undefined; - const derivedAgentId = resolveSessionAgentId({ config: cfg }); + const explicitAgentId = + typeof request.agentId === "string" && request.agentId.trim() + ? request.agentId.trim() + : undefined; + const sessionAgentId = providedSessionKey + ? 
resolveSessionAgentId({ sessionKey: providedSessionKey, config: cfg }) + : undefined; + const defaultAgentId = resolveSessionAgentId({ config: cfg }); + const effectiveAgentId = explicitAgentId ?? sessionAgentId ?? defaultAgentId; // If callers omit sessionKey, derive a target session key from the outbound route. const derivedRoute = !providedSessionKey ? await resolveOutboundSessionRoute({ cfg, channel, - agentId: derivedAgentId, + agentId: effectiveAgentId, accountId, target: resolved.to, threadId, @@ -221,35 +232,38 @@ export const sendHandlers: GatewayRequestHandlers = { if (derivedRoute) { await ensureOutboundSessionEntry({ cfg, - agentId: derivedAgentId, + agentId: effectiveAgentId, channel, accountId, route: derivedRoute, }); } + const outboundSession = buildOutboundSessionContext({ + cfg, + agentId: effectiveAgentId, + sessionKey: providedSessionKey ?? derivedRoute?.sessionKey, + }); const results = await deliverOutboundPayloads({ cfg, channel: outboundChannel, to: resolved.to, accountId, payloads: [{ text: message, mediaUrl, mediaUrls }], - agentId: providedSessionKey - ? resolveSessionAgentId({ sessionKey: providedSessionKey, config: cfg }) - : derivedAgentId, + session: outboundSession, gifPlayback: request.gifPlayback, threadId: threadId ?? null, deps: outboundDeps, mirror: providedSessionKey ? { sessionKey: providedSessionKey, - agentId: resolveSessionAgentId({ sessionKey: providedSessionKey, config: cfg }), + agentId: effectiveAgentId, text: mirrorText || message, mediaUrls: mirrorMediaUrls.length > 0 ? mirrorMediaUrls : undefined, } : derivedRoute ? { sessionKey: derivedRoute.sessionKey, - agentId: derivedAgentId, + agentId: effectiveAgentId, text: mirrorText || message, mediaUrls: mirrorMediaUrls.length > 0 ? mirrorMediaUrls : undefined, } @@ -386,7 +400,7 @@ export const sendHandlers: GatewayRequestHandlers = { ? 
request.accountId.trim() : undefined; try { - const plugin = getChannelPlugin(channel); + const plugin = resolveOutboundChannelPlugin({ channel, cfg }); const outbound = plugin?.outbound; if (!outbound?.sendPoll) { respond( diff --git a/src/gateway/server-methods/server-methods.test.ts b/src/gateway/server-methods/server-methods.test.ts index b19a6d8c608..c6db927093a 100644 --- a/src/gateway/server-methods/server-methods.test.ts +++ b/src/gateway/server-methods/server-methods.test.ts @@ -6,6 +6,7 @@ import { fileURLToPath } from "node:url"; import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; import { emitAgentEvent } from "../../infra/agent-events.js"; import { formatZonedTimestamp } from "../../infra/format-time/format-datetime.js"; +import { buildSystemRunApprovalBindingV1 } from "../../infra/system-run-approval-binding.js"; import { resetLogger, setLoggerOverride } from "../../logging.js"; import { ExecApprovalManager } from "../exec-approval-manager.js"; import { validateExecApprovalRequestParams } from "../protocol/index.js"; @@ -247,6 +248,7 @@ describe("exec approval handlers", () => { const defaultExecApprovalRequestParams = { command: "echo ok", + commandArgv: ["echo", "ok"], cwd: "/tmp", nodeId: "node-1", host: "node", @@ -383,6 +385,25 @@ describe("exec approval handlers", () => { ); }); + it("rejects host=node approval requests without commandArgv", async () => { + const { handlers, respond, context } = createExecApprovalFixture(); + await requestExecApproval({ + handlers, + respond, + context, + params: { + commandArgv: undefined, + }, + }); + expect(respond).toHaveBeenCalledWith( + false, + undefined, + expect.objectContaining({ + message: "commandArgv is required for host=node", + }), + ); + }); + it("broadcasts request + resolve", async () => { const { handlers, broadcasts, respond, context } = createExecApprovalFixture(); @@ -423,6 +444,71 @@ describe("exec approval handlers", () => { expect(broadcasts.some((entry) 
=> entry.event === "exec.approval.resolved")).toBe(true); }); + it("stores versioned system.run binding and sorted env keys on approval request", async () => { + const { handlers, broadcasts, respond, context } = createExecApprovalFixture(); + await requestExecApproval({ + handlers, + respond, + context, + params: { + commandArgv: ["echo", "ok"], + env: { + Z_VAR: "z", + A_VAR: "a", + }, + }, + }); + const requested = broadcasts.find((entry) => entry.event === "exec.approval.requested"); + expect(requested).toBeTruthy(); + const request = (requested?.payload as { request?: Record })?.request ?? {}; + expect(request["envKeys"]).toEqual(["A_VAR", "Z_VAR"]); + expect(request["systemRunBindingV1"]).toEqual( + buildSystemRunApprovalBindingV1({ + argv: ["echo", "ok"], + cwd: "/tmp", + env: { A_VAR: "a", Z_VAR: "z" }, + }).binding, + ); + }); + + it("prefers systemRunPlanV2 canonical command/cwd when present", async () => { + const { handlers, broadcasts, respond, context } = createExecApprovalFixture(); + await requestExecApproval({ + handlers, + respond, + context, + params: { + command: "echo stale", + commandArgv: ["echo", "stale"], + cwd: "/tmp/link/sub", + systemRunPlanV2: { + version: 2, + argv: ["/usr/bin/echo", "ok"], + cwd: "/real/cwd", + rawCommand: "/usr/bin/echo ok", + agentId: "main", + sessionKey: "agent:main:main", + }, + }, + }); + const requested = broadcasts.find((entry) => entry.event === "exec.approval.requested"); + expect(requested).toBeTruthy(); + const request = (requested?.payload as { request?: Record })?.request ?? 
{}; + expect(request["command"]).toBe("/usr/bin/echo ok"); + expect(request["commandArgv"]).toEqual(["/usr/bin/echo", "ok"]); + expect(request["cwd"]).toBe("/real/cwd"); + expect(request["agentId"]).toBe("main"); + expect(request["sessionKey"]).toBe("agent:main:main"); + expect(request["systemRunPlanV2"]).toEqual({ + version: 2, + argv: ["/usr/bin/echo", "ok"], + cwd: "/real/cwd", + rawCommand: "/usr/bin/echo ok", + agentId: "main", + sessionKey: "agent:main:main", + }); + }); + it("accepts resolve during broadcast", async () => { const manager = new ExecApprovalManager(); const handlers = createExecApprovalHandlers(manager); @@ -493,6 +579,56 @@ describe("exec approval handlers", () => { expect(resolveRespond).toHaveBeenCalledWith(true, { ok: true }, undefined); }); + it("forwards turn-source metadata to exec approval forwarding", async () => { + vi.useFakeTimers(); + try { + const manager = new ExecApprovalManager(); + const forwarder = { + handleRequested: vi.fn(async () => false), + handleResolved: vi.fn(async () => {}), + stop: vi.fn(), + }; + const handlers = createExecApprovalHandlers(manager, { forwarder }); + const respond = vi.fn(); + const context = { + broadcast: (_event: string, _payload: unknown) => {}, + hasExecApprovalClients: () => false, + }; + + const requestPromise = requestExecApproval({ + handlers, + respond, + context, + params: { + timeoutMs: 60_000, + turnSourceChannel: "whatsapp", + turnSourceTo: "+15555550123", + turnSourceAccountId: "work", + turnSourceThreadId: "1739201675.123", + }, + }); + for (let idx = 0; idx < 20; idx += 1) { + await Promise.resolve(); + } + expect(forwarder.handleRequested).toHaveBeenCalledTimes(1); + expect(forwarder.handleRequested).toHaveBeenCalledWith( + expect.objectContaining({ + request: expect.objectContaining({ + turnSourceChannel: "whatsapp", + turnSourceTo: "+15555550123", + turnSourceAccountId: "work", + turnSourceThreadId: "1739201675.123", + }), + }), + ); + + await vi.runOnlyPendingTimersAsync(); + 
await requestPromise; + } finally { + vi.useRealTimers(); + } + }); + it("expires immediately when no approver clients and no forwarding targets", async () => { vi.useFakeTimers(); try { diff --git a/src/gateway/server-methods/sessions.ts b/src/gateway/server-methods/sessions.ts index 357d1f4e563..c426a4aee27 100644 --- a/src/gateway/server-methods/sessions.ts +++ b/src/gateway/server-methods/sessions.ts @@ -1,5 +1,6 @@ import { randomUUID } from "node:crypto"; import fs from "node:fs"; +import { getAcpSessionManager } from "../../acp/control-plane/manager.js"; import { resolveDefaultAgentId } from "../../agents/agent-scope.js"; import { clearBootstrapSnapshot } from "../../agents/bootstrap-cache.js"; import { abortEmbeddedPiRun, waitForEmbeddedPiRunEnd } from "../../agents/pi-embedded.js"; @@ -14,6 +15,7 @@ import { updateSessionStore, } from "../../config/sessions.js"; import { unbindThreadBindingsBySessionKey } from "../../discord/monitor/thread-bindings.js"; +import { logVerbose } from "../../globals.js"; import { createInternalHookEvent, triggerInternalHook } from "../../hooks/internal-hooks.js"; import { getGlobalHookRunner } from "../../plugins/hook-runner-global.js"; import { @@ -202,6 +204,82 @@ async function ensureSessionRuntimeCleanup(params: { ); } +const ACP_RUNTIME_CLEANUP_TIMEOUT_MS = 15_000; + +async function runAcpCleanupStep(params: { + op: () => Promise; +}): Promise<{ status: "ok" } | { status: "timeout" } | { status: "error"; error: unknown }> { + let timer: NodeJS.Timeout | undefined; + const timeoutPromise = new Promise<{ status: "timeout" }>((resolve) => { + timer = setTimeout(() => resolve({ status: "timeout" }), ACP_RUNTIME_CLEANUP_TIMEOUT_MS); + }); + const opPromise = params + .op() + .then(() => ({ status: "ok" as const })) + .catch((error: unknown) => ({ status: "error" as const, error })); + const outcome = await Promise.race([opPromise, timeoutPromise]); + if (timer) { + clearTimeout(timer); + } + return outcome; +} + +async 
function closeAcpRuntimeForSession(params: { + cfg: ReturnType<typeof loadConfig>; + sessionKey: string; + entry?: SessionEntry; + reason: "session-reset" | "session-delete"; +}) { + if (!params.entry?.acp) { + return undefined; + } + const acpManager = getAcpSessionManager(); + const cancelOutcome = await runAcpCleanupStep({ + op: async () => { + await acpManager.cancelSession({ + cfg: params.cfg, + sessionKey: params.sessionKey, + reason: params.reason, + }); + }, + }); + if (cancelOutcome.status === "timeout") { + return errorShape( + ErrorCodes.UNAVAILABLE, + `Session ${params.sessionKey} is still active; try again in a moment.`, + ); + } + if (cancelOutcome.status === "error") { + logVerbose( + `sessions.${params.reason}: ACP cancel failed for ${params.sessionKey}: ${String(cancelOutcome.error)}`, + ); + } + + const closeOutcome = await runAcpCleanupStep({ + op: async () => { + await acpManager.closeSession({ + cfg: params.cfg, + sessionKey: params.sessionKey, + reason: params.reason, + requireAcpSession: false, + allowBackendUnavailable: true, + }); + }, + }); + if (closeOutcome.status === "timeout") { + return errorShape( + ErrorCodes.UNAVAILABLE, + `Session ${params.sessionKey} is still active; try again in a moment.`, + ); + } + if (closeOutcome.status === "error") { + logVerbose( + `sessions.${params.reason}: ACP runtime close failed for ${params.sessionKey}: ${String(closeOutcome.error)}`, + ); + } + return undefined; +} + export const sessionsHandlers: GatewayRequestHandlers = { "sessions.list": ({ params, respond }) => { if (!assertValidParams(params, validateSessionsListParams, "sessions.list", respond)) { @@ -348,7 +426,7 @@ export const sessionsHandlers: GatewayRequestHandlers = { } const { cfg, target, storePath } = resolveGatewaySessionTargetFromKey(key); - const { entry } = loadSessionEntry(key); + const { entry, legacyKey, canonicalKey } = loadSessionEntry(key); const hadExistingEntry = Boolean(entry); const commandReason = p.reason === "new" ? 
"new" : "reset"; const hookEvent = createInternalHookEvent( @@ -369,6 +447,16 @@ export const sessionsHandlers: GatewayRequestHandlers = { respond(false, undefined, cleanupError); return; } + const acpCleanupError = await closeAcpRuntimeForSession({ + cfg, + sessionKey: legacyKey ?? canonicalKey ?? target.canonicalKey ?? key, + entry, + reason: "session-reset", + }); + if (acpCleanupError) { + respond(false, undefined, acpCleanupError); + return; + } let oldSessionId: string | undefined; let oldSessionFile: string | undefined; const next = await updateSessionStore(storePath, (store) => { @@ -446,13 +534,23 @@ export const sessionsHandlers: GatewayRequestHandlers = { const deleteTranscript = typeof p.deleteTranscript === "boolean" ? p.deleteTranscript : true; - const { entry } = loadSessionEntry(key); + const { entry, legacyKey, canonicalKey } = loadSessionEntry(key); const sessionId = entry?.sessionId; const cleanupError = await ensureSessionRuntimeCleanup({ cfg, key, target, sessionId }); if (cleanupError) { respond(false, undefined, cleanupError); return; } + const acpCleanupError = await closeAcpRuntimeForSession({ + cfg, + sessionKey: legacyKey ?? canonicalKey ?? target.canonicalKey ?? 
key, + entry, + reason: "session-delete", + }); + if (acpCleanupError) { + respond(false, undefined, acpCleanupError); + return; + } const deleted = await updateSessionStore(storePath, (store) => { const { primaryKey } = migrateAndPruneSessionStoreKey({ cfg, key, store }); const hadEntry = Boolean(store[primaryKey]); diff --git a/src/gateway/server-node-events.ts b/src/gateway/server-node-events.ts index ce1d699797f..c191a836066 100644 --- a/src/gateway/server-node-events.ts +++ b/src/gateway/server-node-events.ts @@ -1,5 +1,4 @@ import { randomUUID } from "node:crypto"; -import { resolveSessionAgentId } from "../agents/agent-scope.js"; import { normalizeChannelId } from "../channels/plugins/index.js"; import { createOutboundSendDeps } from "../cli/outbound-send-deps.js"; import { agentCommand } from "../commands/agent.js"; @@ -7,6 +6,7 @@ import { loadConfig } from "../config/config.js"; import { updateSessionStore } from "../config/sessions.js"; import { requestHeartbeatNow } from "../infra/heartbeat-wake.js"; import { deliverOutboundPayloads } from "../infra/outbound/deliver.js"; +import { buildOutboundSessionContext } from "../infra/outbound/session-context.js"; import { resolveOutboundTarget } from "../infra/outbound/targets.js"; import { registerApnsToken } from "../infra/push-apns.js"; import { enqueueSystemEvent } from "../infra/system-events.js"; @@ -232,13 +232,16 @@ async function sendReceiptAck(params: { if (!resolved.ok) { throw new Error(String(resolved.error)); } - const agentId = resolveSessionAgentId({ sessionKey: params.sessionKey, config: params.cfg }); + const session = buildOutboundSessionContext({ + cfg: params.cfg, + sessionKey: params.sessionKey, + }); await deliverOutboundPayloads({ cfg: params.cfg, channel: params.channel, to: resolved.to, payloads: [{ text: params.text }], - agentId, + session, bestEffort: true, deps: createOutboundSendDeps(params.deps), }); diff --git a/src/gateway/server-restart-sentinel.test.ts 
b/src/gateway/server-restart-sentinel.test.ts new file mode 100644 index 00000000000..187698b06ed --- /dev/null +++ b/src/gateway/server-restart-sentinel.test.ts @@ -0,0 +1,94 @@ +import { describe, expect, it, vi } from "vitest"; + +const mocks = vi.hoisted(() => ({ + resolveSessionAgentId: vi.fn(() => "agent-from-key"), + consumeRestartSentinel: vi.fn(async () => ({ + payload: { + sessionKey: "agent:main:main", + deliveryContext: { + channel: "whatsapp", + to: "+15550002", + accountId: "acct-2", + }, + }, + })), + formatRestartSentinelMessage: vi.fn(() => "restart message"), + summarizeRestartSentinel: vi.fn(() => "restart summary"), + resolveMainSessionKeyFromConfig: vi.fn(() => "agent:main:main"), + parseSessionThreadInfo: vi.fn(() => ({ baseSessionKey: null, threadId: undefined })), + loadSessionEntry: vi.fn(() => ({ cfg: {}, entry: {} })), + resolveAnnounceTargetFromKey: vi.fn(() => null), + deliveryContextFromSession: vi.fn(() => undefined), + mergeDeliveryContext: vi.fn((a?: Record<string, unknown>, b?: Record<string, unknown>) => ({ + ...b, + ...a, + })), + normalizeChannelId: vi.fn((channel: string) => channel), + resolveOutboundTarget: vi.fn(() => ({ ok: true as const, to: "+15550002" })), + deliverOutboundPayloads: vi.fn(async () => []), + enqueueSystemEvent: vi.fn(), +})); + +vi.mock("../agents/agent-scope.js", () => ({ + resolveSessionAgentId: mocks.resolveSessionAgentId, +})); + +vi.mock("../infra/restart-sentinel.js", () => ({ + consumeRestartSentinel: mocks.consumeRestartSentinel, + formatRestartSentinelMessage: mocks.formatRestartSentinelMessage, + summarizeRestartSentinel: mocks.summarizeRestartSentinel, +})); + +vi.mock("../config/sessions.js", () => ({ + resolveMainSessionKeyFromConfig: mocks.resolveMainSessionKeyFromConfig, +})); + +vi.mock("../config/sessions/delivery-info.js", () => ({ + parseSessionThreadInfo: mocks.parseSessionThreadInfo, +})); + +vi.mock("./session-utils.js", () => ({ + loadSessionEntry: mocks.loadSessionEntry, +})); + 
+vi.mock("../agents/tools/sessions-send-helpers.js", () => ({ + resolveAnnounceTargetFromKey: mocks.resolveAnnounceTargetFromKey, +})); + +vi.mock("../utils/delivery-context.js", () => ({ + deliveryContextFromSession: mocks.deliveryContextFromSession, + mergeDeliveryContext: mocks.mergeDeliveryContext, +})); + +vi.mock("../channels/plugins/index.js", () => ({ + normalizeChannelId: mocks.normalizeChannelId, +})); + +vi.mock("../infra/outbound/targets.js", () => ({ + resolveOutboundTarget: mocks.resolveOutboundTarget, +})); + +vi.mock("../infra/outbound/deliver.js", () => ({ + deliverOutboundPayloads: mocks.deliverOutboundPayloads, +})); + +vi.mock("../infra/system-events.js", () => ({ + enqueueSystemEvent: mocks.enqueueSystemEvent, +})); + +const { scheduleRestartSentinelWake } = await import("./server-restart-sentinel.js"); + +describe("scheduleRestartSentinelWake", () => { + it("forwards session context to outbound delivery", async () => { + await scheduleRestartSentinelWake({ deps: {} as never }); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "whatsapp", + to: "+15550002", + session: { key: "agent:main:main", agentId: "agent-from-key" }, + }), + ); + expect(mocks.enqueueSystemEvent).not.toHaveBeenCalled(); + }); +}); diff --git a/src/gateway/server-restart-sentinel.ts b/src/gateway/server-restart-sentinel.ts index 454657d188d..e6191942dba 100644 --- a/src/gateway/server-restart-sentinel.ts +++ b/src/gateway/server-restart-sentinel.ts @@ -1,10 +1,10 @@ -import { resolveSessionAgentId } from "../agents/agent-scope.js"; import { resolveAnnounceTargetFromKey } from "../agents/tools/sessions-send-helpers.js"; import { normalizeChannelId } from "../channels/plugins/index.js"; import type { CliDeps } from "../cli/deps.js"; import { resolveMainSessionKeyFromConfig } from "../config/sessions.js"; import { parseSessionThreadInfo } from "../config/sessions/delivery-info.js"; import { deliverOutboundPayloads } from 
"../infra/outbound/deliver.js"; +import { buildOutboundSessionContext } from "../infra/outbound/session-context.js"; import { resolveOutboundTarget } from "../infra/outbound/targets.js"; import { consumeRestartSentinel, @@ -83,6 +83,10 @@ export async function scheduleRestartSentinelWake(_params: { deps: CliDeps }) { const isSlack = channel === "slack"; const replyToId = isSlack && threadId != null && threadId !== "" ? String(threadId) : undefined; const resolvedThreadId = isSlack ? undefined : threadId; + const outboundSession = buildOutboundSessionContext({ + cfg, + sessionKey, + }); try { await deliverOutboundPayloads({ @@ -93,7 +97,7 @@ export async function scheduleRestartSentinelWake(_params: { deps: CliDeps }) { replyToId, threadId: resolvedThreadId, payloads: [{ text: message }], - agentId: resolveSessionAgentId({ sessionKey, config: cfg }), + session: outboundSession, bestEffort: true, }); } catch (err) { diff --git a/src/gateway/server-runtime-state.ts b/src/gateway/server-runtime-state.ts index af42df0fc42..c9fa334222e 100644 --- a/src/gateway/server-runtime-state.ts +++ b/src/gateway/server-runtime-state.ts @@ -11,7 +11,7 @@ import type { ResolvedGatewayAuth } from "./auth.js"; import type { ChatAbortControllerEntry } from "./chat-abort.js"; import type { ControlUiRootState } from "./control-ui.js"; import type { HooksConfigResolved } from "./hooks.js"; -import { resolveGatewayListenHosts } from "./net.js"; +import { isLoopbackHost, resolveGatewayListenHosts } from "./net.js"; import { createGatewayBroadcaster, type GatewayBroadcastFn, @@ -117,6 +117,12 @@ export async function createGatewayRuntimeState(params: { }); const bindHosts = await resolveGatewayListenHosts(params.bindHost); + if (!isLoopbackHost(params.bindHost)) { + params.log.warn( + "⚠️ Gateway is binding to a non-loopback address. 
" + + "Ensure authentication is configured before exposing to public networks.", + ); + } const httpServers: HttpServer[] = []; const httpBindHosts: string[] = []; for (const host of bindHosts) { diff --git a/src/gateway/server-startup.ts b/src/gateway/server-startup.ts index 15bf67f4c00..01ec6266df6 100644 --- a/src/gateway/server-startup.ts +++ b/src/gateway/server-startup.ts @@ -1,3 +1,5 @@ +import { getAcpSessionManager } from "../acp/control-plane/manager.js"; +import { ACP_SESSION_IDENTITY_RENDERER_VERSION } from "../acp/runtime/session-identifiers.js"; import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agents/defaults.js"; import { loadModelCatalog } from "../agents/model-catalog.js"; import { @@ -159,6 +161,22 @@ export async function startGatewaySidecars(params: { params.log.warn(`plugin services failed to start: ${String(err)}`); } + if (params.cfg.acp?.enabled) { + void getAcpSessionManager() + .reconcilePendingSessionIdentities({ cfg: params.cfg }) + .then((result) => { + if (result.checked === 0) { + return; + } + params.log.warn( + `acp startup identity reconcile (renderer=${ACP_SESSION_IDENTITY_RENDERER_VERSION}): checked=${result.checked} resolved=${result.resolved} failed=${result.failed}`, + ); + }) + .catch((err) => { + params.log.warn(`acp startup identity reconcile failed: ${String(err)}`); + }); + } + void startGatewayMemoryBackend({ cfg: params.cfg, log: params.log }).catch((err) => { params.log.warn(`qmd memory startup initialization failed: ${String(err)}`); }); diff --git a/src/gateway/server-ws-runtime.ts b/src/gateway/server-ws-runtime.ts index 9c14794a58e..f03235daddf 100644 --- a/src/gateway/server-ws-runtime.ts +++ b/src/gateway/server-ws-runtime.ts @@ -16,6 +16,8 @@ export function attachGatewayWsHandlers(params: { resolvedAuth: ResolvedGatewayAuth; /** Optional rate limiter for auth brute-force protection. */ rateLimiter?: AuthRateLimiter; + /** Browser-origin fallback limiter (loopback is never exempt). 
*/ + browserRateLimiter?: AuthRateLimiter; gatewayMethods: string[]; events: string[]; logGateway: ReturnType; @@ -41,6 +43,7 @@ export function attachGatewayWsHandlers(params: { canvasHostServerPort: params.canvasHostServerPort, resolvedAuth: params.resolvedAuth, rateLimiter: params.rateLimiter, + browserRateLimiter: params.browserRateLimiter, gatewayMethods: params.gatewayMethods, events: params.events, logGateway: params.logGateway, diff --git a/src/gateway/server.auth.browser-hardening.test.ts b/src/gateway/server.auth.browser-hardening.test.ts new file mode 100644 index 00000000000..070addbdc53 --- /dev/null +++ b/src/gateway/server.auth.browser-hardening.test.ts @@ -0,0 +1,155 @@ +import { randomUUID } from "node:crypto"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, test } from "vitest"; +import { WebSocket } from "ws"; +import { + loadOrCreateDeviceIdentity, + publicKeyRawBase64UrlFromPem, + signDevicePayload, +} from "../infra/device-identity.js"; +import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js"; +import { buildDeviceAuthPayload } from "./device-auth.js"; +import { + connectReq, + installGatewayTestHooks, + readConnectChallengeNonce, + testState, + trackConnectChallengeNonce, + withGatewayServer, +} from "./test-helpers.js"; + +installGatewayTestHooks({ scope: "suite" }); + +const TEST_OPERATOR_CLIENT = { + id: GATEWAY_CLIENT_NAMES.TEST, + version: "1.0.0", + platform: "test", + mode: GATEWAY_CLIENT_MODES.TEST, +}; + +const originForPort = (port: number) => `http://127.0.0.1:${port}`; + +const openWs = async (port: number, headers?: Record<string, string>) => { + const ws = new WebSocket(`ws://127.0.0.1:${port}`, headers ? 
{ headers } : undefined); + trackConnectChallengeNonce(ws); + await new Promise((resolve) => ws.once("open", resolve)); + return ws; +}; + +async function createSignedDevice(params: { + token: string; + scopes: string[]; + clientId: string; + clientMode: string; + identityPath?: string; + nonce: string; + signedAtMs?: number; +}) { + const identity = params.identityPath + ? loadOrCreateDeviceIdentity(params.identityPath) + : loadOrCreateDeviceIdentity(); + const signedAtMs = params.signedAtMs ?? Date.now(); + const payload = buildDeviceAuthPayload({ + deviceId: identity.deviceId, + clientId: params.clientId, + clientMode: params.clientMode, + role: "operator", + scopes: params.scopes, + signedAtMs, + token: params.token, + nonce: params.nonce, + }); + return { + identity, + device: { + id: identity.deviceId, + publicKey: publicKeyRawBase64UrlFromPem(identity.publicKeyPem), + signature: signDevicePayload(identity.privateKeyPem, payload), + signedAt: signedAtMs, + nonce: params.nonce, + }, + }; +} + +describe("gateway auth browser hardening", () => { + test("rejects non-local browser origins for non-control-ui clients", async () => { + testState.gatewayAuth = { mode: "token", token: "secret" }; + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, { origin: "https://attacker.example" }); + try { + const res = await connectReq(ws, { + token: "secret", + client: TEST_OPERATOR_CLIENT, + }); + expect(res.ok).toBe(false); + expect(res.error?.message ?? 
"").toContain("origin not allowed"); + } finally { + ws.close(); + } + }); + }); + + test("rate-limits browser-origin auth failures on loopback even when loopback exemption is enabled", async () => { + testState.gatewayAuth = { + mode: "token", + token: "secret", + rateLimit: { maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000, exemptLoopback: true }, + }; + await withGatewayServer(async ({ port }) => { + const firstWs = await openWs(port, { origin: originForPort(port) }); + try { + const first = await connectReq(firstWs, { token: "wrong" }); + expect(first.ok).toBe(false); + expect(first.error?.message ?? "").not.toContain("retry later"); + } finally { + firstWs.close(); + } + + const secondWs = await openWs(port, { origin: originForPort(port) }); + try { + const second = await connectReq(secondWs, { token: "wrong" }); + expect(second.ok).toBe(false); + expect(second.error?.message ?? "").toContain("retry later"); + } finally { + secondWs.close(); + } + }); + }); + + test("does not silently auto-pair non-control-ui browser clients on loopback", async () => { + const { listDevicePairing } = await import("../infra/device-pairing.js"); + testState.gatewayAuth = { mode: "token", token: "secret" }; + + await withGatewayServer(async ({ port }) => { + const browserWs = await openWs(port, { origin: originForPort(port) }); + try { + const nonce = await readConnectChallengeNonce(browserWs); + expect(typeof nonce).toBe("string"); + const { identity, device } = await createSignedDevice({ + token: "secret", + scopes: ["operator.admin"], + clientId: TEST_OPERATOR_CLIENT.id, + clientMode: TEST_OPERATOR_CLIENT.mode, + identityPath: path.join(os.tmpdir(), `openclaw-browser-device-${randomUUID()}.json`), + nonce: String(nonce ?? ""), + }); + const res = await connectReq(browserWs, { + token: "secret", + scopes: ["operator.admin"], + client: TEST_OPERATOR_CLIENT, + device, + }); + expect(res.ok).toBe(false); + expect(res.error?.message ?? 
"").toContain("pairing required"); + + const pairing = await listDevicePairing(); + const pending = pairing.pending.find((entry) => entry.deviceId === identity.deviceId); + expect(pending).toBeTruthy(); + expect(pending?.silent).toBe(false); + } finally { + browserWs.close(); + } + }); + }); +}); diff --git a/src/gateway/server.auth.test.ts b/src/gateway/server.auth.test.ts index 8da0e18ef31..c5a82390cea 100644 --- a/src/gateway/server.auth.test.ts +++ b/src/gateway/server.auth.test.ts @@ -1,3 +1,5 @@ +import os from "node:os"; +import path from "node:path"; import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, test, vi } from "vitest"; import { WebSocket } from "ws"; import { withEnvAsync } from "../test-utils/env.js"; @@ -105,6 +107,13 @@ const CONTROL_UI_CLIENT = { mode: GATEWAY_CLIENT_MODES.WEBCHAT, }; +const TRUSTED_PROXY_CONTROL_UI_HEADERS = { + origin: "https://localhost", + "x-forwarded-for": "203.0.113.10", + "x-forwarded-proto": "https", + "x-forwarded-user": "peter@example.com", +} as const; + const NODE_CLIENT = { id: GATEWAY_CLIENT_NAMES.NODE_HOST, version: "1.0.0", @@ -131,10 +140,11 @@ async function expectHelloOkServerVersion(port: number, expectedVersion: string) } async function createSignedDevice(params: { - token: string; + token?: string | null; scopes: string[]; clientId: string; clientMode: string; + role?: "operator" | "node"; identityPath?: string; nonce: string; signedAtMs?: number; @@ -149,10 +159,10 @@ async function createSignedDevice(params: { deviceId: identity.deviceId, clientId: params.clientId, clientMode: params.clientMode, - role: "operator", + role: params.role ?? "operator", scopes: params.scopes, signedAtMs, - token: params.token, + token: params.token ?? 
null, nonce: params.nonce, }); return { @@ -187,6 +197,23 @@ async function approvePendingPairingIfNeeded() { } } +async function configureTrustedProxyControlUiAuth() { + testState.gatewayAuth = { + mode: "trusted-proxy", + trustedProxy: { + userHeader: "x-forwarded-user", + requiredHeaders: ["x-forwarded-proto"], + }, + }; + const { writeConfigFile } = await import("../config/config.js"); + await writeConfigFile({ + gateway: { + trustedProxies: ["127.0.0.1"], + }, + // oxlint-disable-next-line typescript/no-explicit-any + } as any); +} + function isConnectResMessage(id: string) { return (o: unknown) => { if (!o || typeof o !== "object" || Array.isArray(o)) { @@ -242,19 +269,24 @@ async function startRateLimitedTokenServerWithPairedDeviceToken() { } as any; const { server, ws, port, prevToken } = await startServerWithClient(); + const deviceIdentityPath = path.join( + os.tmpdir(), + `openclaw-auth-rate-limit-${Date.now()}-${Math.random().toString(36).slice(2)}.json`, + ); try { - const initial = await connectReq(ws, { token: "secret" }); + const initial = await connectReq(ws, { token: "secret", deviceIdentityPath }); if (!initial.ok) { await approvePendingPairingIfNeeded(); } - const identity = loadOrCreateDeviceIdentity(); + const identity = loadOrCreateDeviceIdentity(deviceIdentityPath); const paired = await getPairedDevice(identity.deviceId); const deviceToken = paired?.tokens?.operator?.token; + expect(paired?.deviceId).toBe(identity.deviceId); expect(deviceToken).toBeDefined(); ws.close(); - return { server, port, prevToken, deviceToken: String(deviceToken ?? "") }; + return { server, port, prevToken, deviceToken: String(deviceToken ?? 
""), deviceIdentityPath }; } catch (err) { ws.close(); await server.close(); @@ -266,20 +298,31 @@ async function startRateLimitedTokenServerWithPairedDeviceToken() { async function ensurePairedDeviceTokenForCurrentIdentity(ws: WebSocket): Promise<{ identity: { deviceId: string }; deviceToken: string; + deviceIdentityPath: string; }> { const { loadOrCreateDeviceIdentity } = await import("../infra/device-identity.js"); const { getPairedDevice } = await import("../infra/device-pairing.js"); - const res = await connectReq(ws, { token: "secret" }); + const deviceIdentityPath = path.join( + os.tmpdir(), + `openclaw-auth-device-${Date.now()}-${Math.random().toString(36).slice(2)}.json`, + ); + + const res = await connectReq(ws, { token: "secret", deviceIdentityPath }); if (!res.ok) { await approvePendingPairingIfNeeded(); } - const identity = loadOrCreateDeviceIdentity(); + const identity = loadOrCreateDeviceIdentity(deviceIdentityPath); const paired = await getPairedDevice(identity.deviceId); const deviceToken = paired?.tokens?.operator?.token; + expect(paired?.deviceId).toBe(identity.deviceId); expect(deviceToken).toBeDefined(); - return { identity: { deviceId: identity.deviceId }, deviceToken: String(deviceToken ?? "") }; + return { + identity: { deviceId: identity.deviceId }, + deviceToken: String(deviceToken ?? 
""), + deviceIdentityPath, + }; } describe("gateway server auth/connect", () => { @@ -303,7 +346,7 @@ describe("gateway server auth/connect", () => { try { const ws = await openWs(port); const handshakeTimeoutMs = getHandshakeTimeoutMs(); - const closed = await waitForWsClose(ws, handshakeTimeoutMs + 60); + const closed = await waitForWsClose(ws, handshakeTimeoutMs + 500); expect(closed).toBe(true); } finally { if (prevHandshakeTimeout === undefined) { @@ -373,13 +416,14 @@ describe("gateway server auth/connect", () => { opts: Parameters[1]; expectConnectOk: boolean; expectConnectError?: string; + expectStatusOk?: boolean; expectStatusError?: string; }> = [ { - name: "operator + valid shared token => connected with zero scopes", + name: "operator + valid shared token => connected with preserved scopes", opts: { role: "operator", token, device: null }, expectConnectOk: true, - expectStatusError: "missing scope", + expectStatusOk: true, }, { name: "node + valid shared token => rejected without device", @@ -406,12 +450,14 @@ describe("gateway server auth/connect", () => { ); continue; } - if (scenario.expectStatusError) { + if (scenario.expectStatusOk !== undefined) { const status = await rpcReq(ws, "status"); - expect(status.ok, scenario.name).toBe(false); - expect(status.error?.message ?? "", scenario.name).toContain( - scenario.expectStatusError, - ); + expect(status.ok, scenario.name).toBe(scenario.expectStatusOk); + if (!scenario.expectStatusOk && scenario.expectStatusError) { + expect(status.error?.message ?? 
"", scenario.name).toContain( + scenario.expectStatusError, + ); + } } } finally { ws.close(); @@ -768,14 +814,99 @@ describe("gateway server auth/connect", () => { const res = await connectReq(ws, { token: "secret", device: null }); expect(res.ok).toBe(true); const status = await rpcReq(ws, "status"); - expect(status.ok).toBe(false); - expect(status.error?.message).toContain("missing scope"); + expect(status.ok).toBe(true); const health = await rpcReq(ws, "health"); expect(health.ok).toBe(true); ws.close(); }); }); + const trustedProxyControlUiCases: Array<{ + name: string; + role: "operator" | "node"; + withUnpairedNodeDevice: boolean; + expectedOk: boolean; + expectedErrorSubstring?: string; + expectedErrorCode?: string; + expectStatusChecks: boolean; + }> = [ + { + name: "allows trusted-proxy control ui operator without device identity", + role: "operator", + withUnpairedNodeDevice: false, + expectedOk: true, + expectStatusChecks: true, + }, + { + name: "rejects trusted-proxy control ui node role without device identity", + role: "node", + withUnpairedNodeDevice: false, + expectedOk: false, + expectedErrorSubstring: "control ui requires device identity", + expectedErrorCode: ConnectErrorDetailCodes.CONTROL_UI_DEVICE_IDENTITY_REQUIRED, + expectStatusChecks: false, + }, + { + name: "requires pairing for trusted-proxy control ui node role with unpaired device", + role: "node", + withUnpairedNodeDevice: true, + expectedOk: false, + expectedErrorSubstring: "pairing required", + expectedErrorCode: ConnectErrorDetailCodes.PAIRING_REQUIRED, + expectStatusChecks: false, + }, + ]; + + for (const tc of trustedProxyControlUiCases) { + test(tc.name, async () => { + await configureTrustedProxyControlUiAuth(); + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, TRUSTED_PROXY_CONTROL_UI_HEADERS); + const scopes = tc.withUnpairedNodeDevice ? 
[] : undefined; + let device: Awaited<ReturnType<typeof createSignedDevice>>["device"] | null = null; + if (tc.withUnpairedNodeDevice) { + const challengeNonce = await readConnectChallengeNonce(ws); + expect(challengeNonce).toBeTruthy(); + ({ device } = await createSignedDevice({ + token: null, + role: "node", + scopes: [], + clientId: GATEWAY_CLIENT_NAMES.CONTROL_UI, + clientMode: GATEWAY_CLIENT_MODES.WEBCHAT, + nonce: String(challengeNonce), + })); + } + const res = await connectReq(ws, { + skipDefaultAuth: true, + role: tc.role, + scopes, + device, + client: { ...CONTROL_UI_CLIENT }, + }); + expect(res.ok).toBe(tc.expectedOk); + if (!tc.expectedOk) { + if (tc.expectedErrorSubstring) { + expect(res.error?.message ?? "").toContain(tc.expectedErrorSubstring); + } + if (tc.expectedErrorCode) { + expect((res.error?.details as { code?: string } | undefined)?.code).toBe( + tc.expectedErrorCode, + ); + } + ws.close(); + return; + } + if (tc.expectStatusChecks) { + const status = await rpcReq(ws, "status"); + expect(status.ok).toBe(true); + const health = await rpcReq(ws, "health"); + expect(health.ok).toBe(true); + } + ws.close(); + }); + }); + } + test("allows localhost control ui without device identity when insecure auth is enabled", async () => { testState.gatewayControlUi = { allowInsecureAuth: true }; const { server, ws, prevToken } = await startServerWithClient("secret", { @@ -793,8 +924,7 @@ describe("gateway server auth/connect", () => { }); expect(res.ok).toBe(true); const status = await rpcReq(ws, "status"); - expect(status.ok).toBe(false); - expect(status.error?.message ?? "").toContain("missing scope"); + expect(status.ok).toBe(true); const health = await rpcReq(ws, "health"); expect(health.ok).toBe(true); ws.close(); @@ -816,8 +946,7 @@ describe("gateway server auth/connect", () => { }); expect(res.ok).toBe(true); const status = await rpcReq(ws, "status"); - expect(status.ok).toBe(false); - expect(status.error?.message ?? 
"").toContain("missing scope"); + expect(status.ok).toBe(true); const health = await rpcReq(ws, "health"); expect(health.ok).toBe(true); ws.close(); @@ -930,7 +1059,7 @@ describe("gateway server auth/connect", () => { test("device token auth matrix", async () => { const { server, ws, port, prevToken } = await startServerWithClient("secret"); - const { deviceToken } = await ensurePairedDeviceTokenForCurrentIdentity(ws); + const { deviceToken, deviceIdentityPath } = await ensurePairedDeviceTokenForCurrentIdentity(ws); ws.close(); const scenarios: Array<{ @@ -997,7 +1126,10 @@ describe("gateway server auth/connect", () => { for (const scenario of scenarios) { const ws2 = await openWs(port); try { - const res = await connectReq(ws2, scenario.opts); + const res = await connectReq(ws2, { + ...scenario.opts, + deviceIdentityPath, + }); scenario.assert(res); } finally { ws2.close(); @@ -1010,7 +1142,7 @@ describe("gateway server auth/connect", () => { }); test("keeps shared-secret lockout separate from device-token auth", async () => { - const { server, port, prevToken, deviceToken } = + const { server, port, prevToken, deviceToken, deviceIdentityPath } = await startRateLimitedTokenServerWithPairedDeviceToken(); try { const wsBadShared = await openWs(port); @@ -1025,7 +1157,7 @@ describe("gateway server auth/connect", () => { wsSharedLocked.close(); const wsDevice = await openWs(port); - const deviceOk = await connectReq(wsDevice, { token: deviceToken }); + const deviceOk = await connectReq(wsDevice, { token: deviceToken, deviceIdentityPath }); expect(deviceOk.ok).toBe(true); wsDevice.close(); } finally { @@ -1035,16 +1167,16 @@ describe("gateway server auth/connect", () => { }); test("keeps device-token lockout separate from shared-secret auth", async () => { - const { server, port, prevToken, deviceToken } = + const { server, port, prevToken, deviceToken, deviceIdentityPath } = await startRateLimitedTokenServerWithPairedDeviceToken(); try { const wsBadDevice = await 
openWs(port); - const badDevice = await connectReq(wsBadDevice, { token: "wrong" }); + const badDevice = await connectReq(wsBadDevice, { token: "wrong", deviceIdentityPath }); expect(badDevice.ok).toBe(false); wsBadDevice.close(); const wsDeviceLocked = await openWs(port); - const deviceLocked = await connectReq(wsDeviceLocked, { token: "wrong" }); + const deviceLocked = await connectReq(wsDeviceLocked, { token: "wrong", deviceIdentityPath }); expect(deviceLocked.ok).toBe(false); expect(deviceLocked.error?.message ?? "").toContain("retry later"); wsDeviceLocked.close(); @@ -1055,7 +1187,10 @@ describe("gateway server auth/connect", () => { wsShared.close(); const wsDeviceReal = await openWs(port); - const deviceStillLocked = await connectReq(wsDeviceReal, { token: deviceToken }); + const deviceStillLocked = await connectReq(wsDeviceReal, { + token: deviceToken, + deviceIdentityPath, + }); expect(deviceStillLocked.ok).toBe(false); expect(deviceStillLocked.error?.message ?? "").toContain("retry later"); wsDeviceReal.close(); @@ -1065,7 +1200,7 @@ describe("gateway server auth/connect", () => { } }); - test("skips pairing for operator scope upgrades when shared token auth is valid", async () => { + test("requires pairing for remote operator device identity with shared token auth", async () => { const { mkdtemp } = await import("node:fs/promises"); const { tmpdir } = await import("node:os"); const { join } = await import("node:path"); @@ -1102,21 +1237,29 @@ describe("gateway server auth/connect", () => { nonce, }; }; - const initialNonce = await readConnectChallengeNonce(ws); - const initial = await connectReq(ws, { + ws.close(); + + const wsRemoteRead = await openWs(port, { host: "gateway.example" }); + const initialNonce = await readConnectChallengeNonce(wsRemoteRead); + const initial = await connectReq(wsRemoteRead, { token: "secret", scopes: ["operator.read"], client, device: buildDevice(["operator.read"], initialNonce), }); - expect(initial.ok).toBe(true); + 
expect(initial.ok).toBe(false); + expect(initial.error?.message ?? "").toContain("pairing required"); let pairing = await listDevicePairing(); - expect(pairing.pending.filter((entry) => entry.deviceId === identity.deviceId)).toEqual([]); + const pendingAfterRead = pairing.pending.filter( + (entry) => entry.deviceId === identity.deviceId, + ); + expect(pendingAfterRead).toHaveLength(1); + expect(pendingAfterRead[0]?.role).toBe("operator"); + expect(pendingAfterRead[0]?.scopes ?? []).toContain("operator.read"); expect(await getPairedDevice(identity.deviceId)).toBeNull(); + wsRemoteRead.close(); - ws.close(); - - const ws2 = await openWs(port); + const ws2 = await openWs(port, { host: "gateway.example" }); const nonce2 = await readConnectChallengeNonce(ws2); const res = await connectReq(ws2, { token: "secret", @@ -1124,9 +1267,16 @@ describe("gateway server auth/connect", () => { client, device: buildDevice(["operator.admin"], nonce2), }); - expect(res.ok).toBe(true); + expect(res.ok).toBe(false); + expect(res.error?.message ?? "").toContain("pairing required"); pairing = await listDevicePairing(); - expect(pairing.pending.filter((entry) => entry.deviceId === identity.deviceId)).toEqual([]); + const pendingAfterAdmin = pairing.pending.filter( + (entry) => entry.deviceId === identity.deviceId, + ); + expect(pendingAfterAdmin).toHaveLength(1); + expect(pendingAfterAdmin[0]?.scopes ?? 
[]).toEqual( + expect.arrayContaining(["operator.read", "operator.admin"]), + ); expect(await getPairedDevice(identity.deviceId)).toBeNull(); ws2.close(); await server.close(); @@ -1199,7 +1349,7 @@ describe("gateway server auth/connect", () => { restoreGatewayToken(prevToken); }); - test("still requires node pairing while operator shared auth succeeds for the same device", async () => { + test("merges remote node/operator pairing requests for the same unpaired device", async () => { const { mkdtemp } = await import("node:fs/promises"); const { tmpdir } = await import("node:os"); const { join } = await import("node:path"); @@ -1266,23 +1416,25 @@ describe("gateway server auth/connect", () => { expect(nodeConnect.error?.message ?? "").toContain("pairing required"); const operatorConnect = await connectWithNonce("operator", ["operator.read", "operator.write"]); - expect(operatorConnect.ok).toBe(true); + expect(operatorConnect.ok).toBe(false); + expect(operatorConnect.error?.message ?? "").toContain("pairing required"); const pending = await listDevicePairing(); const pendingForTestDevice = pending.pending.filter( (entry) => entry.deviceId === identity.deviceId, ); expect(pendingForTestDevice).toHaveLength(1); - expect(pendingForTestDevice[0]?.roles).toEqual(expect.arrayContaining(["node"])); - expect(pendingForTestDevice[0]?.roles ?? []).not.toContain("operator"); + expect(pendingForTestDevice[0]?.roles).toEqual(expect.arrayContaining(["node", "operator"])); + expect(pendingForTestDevice[0]?.scopes ?? []).toEqual( + expect.arrayContaining(["operator.read", "operator.write"]), + ); if (!pendingForTestDevice[0]) { throw new Error("expected pending pairing request"); } await approveDevicePairing(pendingForTestDevice[0].requestId); const paired = await getPairedDevice(identity.deviceId); - expect(paired?.roles).toEqual(expect.arrayContaining(["node"])); - expect(paired?.roles ?? 
[]).not.toContain("operator"); + expect(paired?.roles).toEqual(expect.arrayContaining(["node", "operator"])); const approvedOperatorConnect = await connectWithNonce("operator", ["operator.read"]); expect(approvedOperatorConnect.ok).toBe(true); @@ -1438,8 +1590,8 @@ describe("gateway server auth/connect", () => { expect(reconnect.ok).toBe(true); const repaired = await getPairedDevice(deviceId); - expect(repaired?.roles).toBeUndefined(); - expect(repaired?.scopes).toBeUndefined(); + expect(repaired?.roles ?? []).toContain("operator"); + expect(repaired?.scopes ?? []).toContain("operator.read"); const list = await listDevicePairing(); expect(list.pending.filter((entry) => entry.deviceId === deviceId)).toEqual([]); } finally { @@ -1450,7 +1602,7 @@ describe("gateway server auth/connect", () => { } }); - test("allows shared-auth scope escalation even when paired metadata is legacy-shaped", async () => { + test("auto-approves local scope upgrades even when paired metadata is legacy-shaped", async () => { const { mkdtemp } = await import("node:fs/promises"); const { tmpdir } = await import("node:os"); const { join } = await import("node:path"); @@ -1539,9 +1691,13 @@ describe("gateway server auth/connect", () => { expect(pendingUpgrade).toBeUndefined(); const repaired = await getPairedDevice(identity.deviceId); expect(repaired?.role).toBe("operator"); - expect(repaired?.roles).toBeUndefined(); - expect(repaired?.scopes).toBeUndefined(); - expect(repaired?.approvedScopes).not.toContain("operator.admin"); + expect(repaired?.roles ?? []).toContain("operator"); + expect(repaired?.scopes ?? []).toEqual( + expect.arrayContaining(["operator.read", "operator.admin"]), + ); + expect(repaired?.approvedScopes ?? 
[]).toEqual( + expect.arrayContaining(["operator.read", "operator.admin"]), + ); } finally { ws.close(); ws2?.close(); @@ -1553,14 +1709,15 @@ describe("gateway server auth/connect", () => { test("rejects revoked device token", async () => { const { revokeDeviceToken } = await import("../infra/device-pairing.js"); const { server, ws, port, prevToken } = await startServerWithClient("secret"); - const { identity, deviceToken } = await ensurePairedDeviceTokenForCurrentIdentity(ws); + const { identity, deviceToken, deviceIdentityPath } = + await ensurePairedDeviceTokenForCurrentIdentity(ws); await revokeDeviceToken({ deviceId: identity.deviceId, role: "operator" }); ws.close(); const ws2 = await openWs(port); - const res2 = await connectReq(ws2, { token: deviceToken }); + const res2 = await connectReq(ws2, { token: deviceToken, deviceIdentityPath }); expect(res2.ok).toBe(false); ws2.close(); diff --git a/src/gateway/server.hooks.test.ts b/src/gateway/server.hooks.test.ts index eaa22b876d9..473b4e855aa 100644 --- a/src/gateway/server.hooks.test.ts +++ b/src/gateway/server.hooks.test.ts @@ -299,6 +299,48 @@ describe("gateway server hooks", () => { }); }); + test("normalizes duplicate target-agent prefixes before isolated dispatch", async () => { + testState.hooksConfig = { + enabled: true, + token: "hook-secret", + allowRequestSessionKey: true, + allowedSessionKeyPrefixes: ["hook:", "agent:"], + }; + testState.agentsConfig = { + list: [{ id: "main", default: true }, { id: "hooks" }], + }; + await withGatewayServer(async ({ port }) => { + cronIsolatedRun.mockClear(); + cronIsolatedRun.mockResolvedValueOnce({ + status: "ok", + summary: "done", + }); + + const resAgent = await fetch(`http://127.0.0.1:${port}/hooks/agent`, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: "Bearer hook-secret", + }, + body: JSON.stringify({ + message: "Do it", + name: "Email", + agentId: "hooks", + sessionKey: "agent:hooks:slack:channel:c123", + }), + }); 
+ expect(resAgent.status).toBe(202); + await waitForSystemEvent(); + + const routedCall = (cronIsolatedRun.mock.calls[0] as unknown[] | undefined)?.[0] as + | { sessionKey?: string; job?: { agentId?: string } } + | undefined; + expect(routedCall?.job?.agentId).toBe("hooks"); + expect(routedCall?.sessionKey).toBe("slack:channel:c123"); + drainSystemEvents(resolveMainKey()); + }); + }); + test("enforces hooks.allowedAgentIds for explicit agent routing", async () => { testState.hooksConfig = { enabled: true, diff --git a/src/gateway/server.impl.ts b/src/gateway/server.impl.ts index fdca08c2677..b3e6a9b3c15 100644 --- a/src/gateway/server.impl.ts +++ b/src/gateway/server.impl.ts @@ -11,6 +11,7 @@ import { createDefaultDeps } from "../cli/deps.js"; import { isRestartEnabled } from "../config/commands.js"; import { CONFIG_PATH, + type OpenClawConfig, isNixMode, loadConfig, migrateLegacyConfig, @@ -18,6 +19,7 @@ import { writeConfigFile, } from "../config/config.js"; import { applyPluginAutoEnable } from "../config/plugin-auto-enable.js"; +import { resolveMainSessionKey } from "../config/sessions.js"; import { clearAgentRunContext, onAgentEvent } from "../infra/agent-events.js"; import { ensureControlUiAssetsBuilt, @@ -37,6 +39,7 @@ import { refreshRemoteBinsForConnectedNodes, setSkillsRemoteRegistry, } from "../infra/skills-remote.js"; +import { enqueueSystemEvent } from "../infra/system-events.js"; import { scheduleGatewayUpdateCheck } from "../infra/update-startup.js"; import { startDiagnosticHeartbeat, stopDiagnosticHeartbeat } from "../logging/diagnostic.js"; import { createSubsystemLogger, runtimeForLogger } from "../logging/subsystem.js"; @@ -45,6 +48,12 @@ import { createEmptyPluginRegistry } from "../plugins/registry.js"; import type { PluginServicesHandle } from "../plugins/services.js"; import { getTotalQueueSize } from "../process/command-queue.js"; import type { RuntimeEnv } from "../runtime.js"; +import { + activateSecretsRuntimeSnapshot, + 
clearSecretsRuntimeSnapshot, + getActiveSecretsRuntimeSnapshot, + prepareSecretsRuntimeSnapshot, +} from "../secrets/runtime.js"; import { runOnboardingWizard } from "../wizard/onboarding.js"; import { createAuthRateLimiter, type AuthRateLimiter } from "./auth-rate-limit.js"; import { startChannelHealthMonitor } from "./channel-health-monitor.js"; @@ -68,6 +77,7 @@ import { GATEWAY_EVENTS, listGatewayMethods } from "./server-methods-list.js"; import { coreGatewayHandlers } from "./server-methods.js"; import { createExecApprovalHandlers } from "./server-methods/exec-approval.js"; import { safeParseJson } from "./server-methods/nodes.helpers.js"; +import { createSecretsHandlers } from "./server-methods/secrets.js"; import { hasConnectedMobileNode } from "./server-mobile-nodes.js"; import { loadGatewayModelCatalog } from "./server-model-catalog.js"; import { createNodeSubscriptionManager } from "./server-node-subscriptions.js"; @@ -107,9 +117,25 @@ const logReload = log.child("reload"); const logHooks = log.child("hooks"); const logPlugins = log.child("plugins"); const logWsControl = log.child("ws"); +const logSecrets = log.child("secrets"); const gatewayRuntime = runtimeForLogger(log); const canvasRuntime = runtimeForLogger(logCanvas); +type AuthRateLimitConfig = Parameters[0]; + +function createGatewayAuthRateLimiters(rateLimitConfig: AuthRateLimitConfig | undefined): { + rateLimiter?: AuthRateLimiter; + browserRateLimiter: AuthRateLimiter; +} { + const rateLimiter = rateLimitConfig ? createAuthRateLimiter(rateLimitConfig) : undefined; + // Browser-origin WS auth attempts always use loopback-non-exempt throttling. 
+ const browserRateLimiter = createAuthRateLimiter({ + ...rateLimitConfig, + exemptLoopback: false, + }); + return { rateLimiter, browserRateLimiter }; +} + export type GatewayServer = { close: (opts?: { reason?: string; restartExpectedMs?: number | null }) => Promise; }; @@ -233,7 +259,91 @@ export async function startGatewayServer( } } - let cfgAtStart = loadConfig(); + let secretsDegraded = false; + const emitSecretsStateEvent = ( + code: "SECRETS_RELOADER_DEGRADED" | "SECRETS_RELOADER_RECOVERED", + message: string, + cfg: OpenClawConfig, + ) => { + enqueueSystemEvent(`[${code}] ${message}`, { + sessionKey: resolveMainSessionKey(cfg), + contextKey: code, + }); + }; + let secretsActivationTail: Promise = Promise.resolve(); + const runWithSecretsActivationLock = async (operation: () => Promise): Promise => { + const run = secretsActivationTail.then(operation, operation); + secretsActivationTail = run.then( + () => undefined, + () => undefined, + ); + return await run; + }; + const activateRuntimeSecrets = async ( + config: OpenClawConfig, + params: { reason: "startup" | "reload" | "restart-check"; activate: boolean }, + ) => + await runWithSecretsActivationLock(async () => { + try { + const prepared = await prepareSecretsRuntimeSnapshot({ config }); + if (params.activate) { + activateSecretsRuntimeSnapshot(prepared); + } + for (const warning of prepared.warnings) { + logSecrets.warn(`[${warning.code}] ${warning.message}`); + } + if (secretsDegraded) { + const recoveredMessage = + "Secret resolution recovered; runtime remained on last-known-good during the outage."; + logSecrets.info(`[SECRETS_RELOADER_RECOVERED] ${recoveredMessage}`); + emitSecretsStateEvent("SECRETS_RELOADER_RECOVERED", recoveredMessage, prepared.config); + } + secretsDegraded = false; + return prepared; + } catch (err) { + const details = String(err); + if (!secretsDegraded) { + logSecrets.error(`[SECRETS_RELOADER_DEGRADED] ${details}`); + if (params.reason !== "startup") { + 
emitSecretsStateEvent( + "SECRETS_RELOADER_DEGRADED", + `Secret resolution failed; runtime remains on last-known-good snapshot. ${details}`, + config, + ); + } + } else { + logSecrets.warn(`[SECRETS_RELOADER_DEGRADED] ${details}`); + } + secretsDegraded = true; + if (params.reason === "startup") { + throw new Error(`Startup failed: required secrets are unavailable. ${details}`, { + cause: err, + }); + } + throw err; + } + }); + + // Fail fast before startup if required refs are unresolved. + let cfgAtStart: OpenClawConfig; + { + const freshSnapshot = await readConfigFileSnapshot(); + if (!freshSnapshot.valid) { + const issues = + freshSnapshot.issues.length > 0 + ? freshSnapshot.issues + .map((issue) => `${issue.path || ""}: ${issue.message}`) + .join("\n") + : "Unknown validation issue."; + throw new Error(`Invalid config at ${freshSnapshot.path}.\n${issues}`); + } + await activateRuntimeSecrets(freshSnapshot.config, { + reason: "startup", + activate: false, + }); + } + + cfgAtStart = loadConfig(); const authBootstrap = await ensureGatewayStartupAuth({ cfg: cfgAtStart, env: process.env, @@ -253,6 +363,12 @@ export async function startGatewayServer( ); } } + cfgAtStart = ( + await activateRuntimeSecrets(cfgAtStart, { + reason: "startup", + activate: true, + }) + ).config; const diagnosticsEnabled = isDiagnosticsEnabled(cfgAtStart); if (diagnosticsEnabled) { startDiagnosticHeartbeat(); @@ -311,11 +427,10 @@ export async function startGatewayServer( let hooksConfig = runtimeConfig.hooksConfig; const canvasHostEnabled = runtimeConfig.canvasHostEnabled; - // Create auth rate limiter only when explicitly configured. + // Create auth rate limiters used by connect/auth flows. const rateLimitConfig = cfgAtStart.gateway?.auth?.rateLimit; - const authRateLimiter: AuthRateLimiter | undefined = rateLimitConfig - ? 
createAuthRateLimiter(rateLimitConfig) - : undefined; + const { rateLimiter: authRateLimiter, browserRateLimiter: browserAuthRateLimiter } = + createGatewayAuthRateLimiters(rateLimitConfig); let controlUiRootState: ControlUiRootState | undefined; if (controlUiRootOverride) { @@ -562,6 +677,19 @@ export async function startGatewayServer( const execApprovalHandlers = createExecApprovalHandlers(execApprovalManager, { forwarder: execApprovalForwarder, }); + const secretsHandlers = createSecretsHandlers({ + reloadSecrets: async () => { + const active = getActiveSecretsRuntimeSnapshot(); + if (!active) { + throw new Error("Secrets runtime snapshot is not active."); + } + const prepared = await activateRuntimeSecrets(active.sourceConfig, { + reason: "reload", + activate: true, + }); + return { warningCount: prepared.warnings.length }; + }, + }); const canvasHostServerPort = (canvasHostServer as CanvasHostServer | null)?.port; @@ -574,6 +702,7 @@ export async function startGatewayServer( canvasHostServerPort, resolvedAuth, rateLimiter: authRateLimiter, + browserRateLimiter: browserAuthRateLimiter, gatewayMethods, events: GATEWAY_EVENTS, logGateway: log, @@ -582,6 +711,7 @@ export async function startGatewayServer( extraHandlers: { ...pluginRegistry.gatewayHandlers, ...execApprovalHandlers, + ...secretsHandlers, }, broadcast, context: { @@ -723,8 +853,27 @@ export async function startGatewayServer( return startGatewayConfigReloader({ initialConfig: cfgAtStart, readSnapshot: readConfigFileSnapshot, - onHotReload: applyHotReload, - onRestart: requestGatewayRestart, + onHotReload: async (plan, nextConfig) => { + const previousSnapshot = getActiveSecretsRuntimeSnapshot(); + const prepared = await activateRuntimeSecrets(nextConfig, { + reason: "reload", + activate: true, + }); + try { + await applyHotReload(plan, prepared.config); + } catch (err) { + if (previousSnapshot) { + activateSecretsRuntimeSnapshot(previousSnapshot); + } else { + clearSecretsRuntimeSnapshot(); + } + 
throw err; + } + }, + onRestart: async (plan, nextConfig) => { + await activateRuntimeSecrets(nextConfig, { reason: "restart-check", activate: false }); + requestGatewayRestart(plan, nextConfig); + }, log: { info: (msg) => logReload.info(msg), warn: (msg) => logReload.warn(msg), @@ -777,7 +926,9 @@ export async function startGatewayServer( } skillsChangeUnsub(); authRateLimiter?.dispose(); + browserAuthRateLimiter.dispose(); channelHealthMonitor?.stop(); + clearSecretsRuntimeSnapshot(); await close(opts); }, }; diff --git a/src/gateway/server.node-invoke-approval-bypass.test.ts b/src/gateway/server.node-invoke-approval-bypass.test.ts index 7cc84b5b8d8..0e01a9619b9 100644 --- a/src/gateway/server.node-invoke-approval-bypass.test.ts +++ b/src/gateway/server.node-invoke-approval-bypass.test.ts @@ -75,9 +75,11 @@ async function requestAllowOnceApproval( nodeId: string, ): Promise { const approvalId = crypto.randomUUID(); + const commandArgv = command.split(/\s+/).filter((part) => part.length > 0); const requestP = rpcReq(ws, "exec.approval.request", { id: approvalId, command, + commandArgv, nodeId, cwd: null, host: "node", @@ -202,6 +204,7 @@ describe("node.invoke approval bypass", () => { readyResolve = resolve; }); + const resolvedDeviceIdentity = deviceIdentity ?? 
createDeviceIdentity(); const client = new GatewayClient({ url: `ws://127.0.0.1:${port}`, // Keep challenge timeout realistic in tests; 0 maps to a 250ms timeout and can @@ -215,7 +218,7 @@ describe("node.invoke approval bypass", () => { mode: GATEWAY_CLIENT_MODES.NODE, scopes: [], commands: ["system.run"], - deviceIdentity, + deviceIdentity: resolvedDeviceIdentity, onHelloOk: () => readyResolve?.(), onEvent: (evt) => { if (evt.event !== "node.invoke.request") { diff --git a/src/gateway/server.plugin-http-auth.test.ts b/src/gateway/server.plugin-http-auth.test.ts index 25568d4803e..b6a75ea008b 100644 --- a/src/gateway/server.plugin-http-auth.test.ts +++ b/src/gateway/server.plugin-http-auth.test.ts @@ -1,7 +1,10 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import { describe, expect, test, vi } from "vitest"; +import type { createSubsystemLogger } from "../logging/subsystem.js"; import type { ResolvedGatewayAuth } from "./auth.js"; -import { createGatewayHttpServer } from "./server-http.js"; +import type { HooksConfigResolved } from "./hooks.js"; +import { canonicalizePathVariant } from "./security-path.js"; +import { createGatewayHttpServer, createHooksRequestHandler } from "./server-http.js"; import { withTempConfig } from "./test-temp-config.js"; function createRequest(params: { @@ -65,6 +68,118 @@ async function dispatchRequest( await new Promise((resolve) => setImmediate(resolve)); } +function createHooksConfig(): HooksConfigResolved { + return { + basePath: "/hooks", + token: "hook-secret", + maxBodyBytes: 1024, + mappings: [], + agentPolicy: { + defaultAgentId: "main", + knownAgentIds: new Set(["main"]), + allowedAgentIds: undefined, + }, + sessionPolicy: { + allowRequestSessionKey: false, + defaultSessionKey: undefined, + allowedSessionKeyPrefixes: undefined, + }, + }; +} + +function canonicalizePluginPath(pathname: string): string { + return canonicalizePathVariant(pathname); +} + +type RouteVariant = { + label: string; + path: 
string; +}; + +const CANONICAL_UNAUTH_VARIANTS: RouteVariant[] = [ + { label: "case-variant", path: "/API/channels/nostr/default/profile" }, + { label: "encoded-slash", path: "/api/channels%2Fnostr%2Fdefault%2Fprofile" }, + { label: "encoded-segment", path: "/api/%63hannels/nostr/default/profile" }, + { label: "dot-traversal-encoded-slash", path: "/api/foo/..%2fchannels/nostr/default/profile" }, + { + label: "dot-traversal-encoded-dotdot-slash", + path: "/api/foo/%2e%2e%2fchannels/nostr/default/profile", + }, + { + label: "dot-traversal-double-encoded", + path: "/api/foo/%252e%252e%252fchannels/nostr/default/profile", + }, + { label: "duplicate-slashes", path: "/api/channels//nostr/default/profile" }, + { label: "trailing-slash", path: "/api/channels/nostr/default/profile/" }, + { label: "malformed-short-percent", path: "/api/channels%2" }, + { label: "malformed-double-slash-short-percent", path: "/api//channels%2" }, +]; + +const CANONICAL_AUTH_VARIANTS: RouteVariant[] = [ + { label: "auth-case-variant", path: "/API/channels/nostr/default/profile" }, + { label: "auth-encoded-segment", path: "/api/%63hannels/nostr/default/profile" }, + { label: "auth-duplicate-trailing-slash", path: "/api/channels//nostr/default/profile/" }, + { + label: "auth-dot-traversal-encoded-slash", + path: "/api/foo/..%2fchannels/nostr/default/profile", + }, + { + label: "auth-dot-traversal-double-encoded", + path: "/api/foo/%252e%252e%252fchannels/nostr/default/profile", + }, +]; + +function buildChannelPathFuzzCorpus(): RouteVariant[] { + const variants = [ + "/api/channels/nostr/default/profile", + "/API/channels/nostr/default/profile", + "/api/foo/..%2fchannels/nostr/default/profile", + "/api/foo/%2e%2e%2fchannels/nostr/default/profile", + "/api/foo/%252e%252e%252fchannels/nostr/default/profile", + "/api/channels//nostr/default/profile/", + "/api/channels%2Fnostr%2Fdefault%2Fprofile", + "/api/channels%252Fnostr%252Fdefault%252Fprofile", + "/api//channels/nostr/default/profile", + 
"/api/channels%2", + "/api/channels%zz", + "/api//channels%2", + "/api//channels%zz", + ]; + return variants.map((path) => ({ label: `fuzz:${path}`, path })); +} + +async function expectUnauthorizedVariants(params: { + server: ReturnType; + variants: RouteVariant[]; +}) { + for (const variant of params.variants) { + const response = createResponse(); + await dispatchRequest(params.server, createRequest({ path: variant.path }), response.res); + expect(response.res.statusCode, variant.label).toBe(401); + expect(response.getBody(), variant.label).toContain("Unauthorized"); + } +} + +async function expectAuthorizedVariants(params: { + server: ReturnType; + variants: RouteVariant[]; + authorization: string; +}) { + for (const variant of params.variants) { + const response = createResponse(); + await dispatchRequest( + params.server, + createRequest({ + path: variant.path, + authorization: params.authorization, + }), + response.res, + ); + expect(response.res.statusCode, variant.label).toBe(200); + expect(response.getBody(), variant.label).toContain('"route":"channel-canonicalized"'); + } +} + describe("gateway plugin HTTP auth boundary", () => { test("applies default security headers and optional strict transport security", async () => { const resolvedAuth: ResolvedGatewayAuth = { @@ -220,4 +335,198 @@ describe("gateway plugin HTTP auth boundary", () => { }, }); }); + + test("requires gateway auth for canonicalized /api/channels variants", async () => { + const resolvedAuth: ResolvedGatewayAuth = { + mode: "token", + token: "test-token", + password: undefined, + allowTailscale: false, + }; + + await withTempConfig({ + cfg: { gateway: { trustedProxies: [] } }, + prefix: "openclaw-plugin-http-auth-canonicalized-test-", + run: async () => { + const handlePluginRequest = vi.fn(async (req: IncomingMessage, res: ServerResponse) => { + const pathname = new URL(req.url ?? 
"/", "http://localhost").pathname; + const canonicalPath = canonicalizePluginPath(pathname); + if (canonicalPath === "/api/channels/nostr/default/profile") { + res.statusCode = 200; + res.setHeader("Content-Type", "application/json; charset=utf-8"); + res.end(JSON.stringify({ ok: true, route: "channel-canonicalized" })); + return true; + } + return false; + }); + + const server = createGatewayHttpServer({ + canvasHost: null, + clients: new Set(), + controlUiEnabled: false, + controlUiBasePath: "/__control__", + openAiChatCompletionsEnabled: false, + openResponsesEnabled: false, + handleHooksRequest: async () => false, + handlePluginRequest, + resolvedAuth, + }); + + await expectUnauthorizedVariants({ server, variants: CANONICAL_UNAUTH_VARIANTS }); + expect(handlePluginRequest).not.toHaveBeenCalled(); + + await expectAuthorizedVariants({ + server, + variants: CANONICAL_AUTH_VARIANTS, + authorization: "Bearer test-token", + }); + expect(handlePluginRequest).toHaveBeenCalledTimes(CANONICAL_AUTH_VARIANTS.length); + }, + }); + }); + + test("rejects unauthenticated plugin-channel fuzz corpus variants", async () => { + const resolvedAuth: ResolvedGatewayAuth = { + mode: "token", + token: "test-token", + password: undefined, + allowTailscale: false, + }; + + await withTempConfig({ + cfg: { gateway: { trustedProxies: [] } }, + prefix: "openclaw-plugin-http-auth-fuzz-corpus-test-", + run: async () => { + const handlePluginRequest = vi.fn(async (req: IncomingMessage, res: ServerResponse) => { + const pathname = new URL(req.url ?? 
"/", "http://localhost").pathname; + const canonicalPath = canonicalizePluginPath(pathname); + if (canonicalPath === "/api/channels/nostr/default/profile") { + res.statusCode = 200; + res.setHeader("Content-Type", "application/json; charset=utf-8"); + res.end(JSON.stringify({ ok: true, route: "channel-canonicalized" })); + return true; + } + return false; + }); + + const server = createGatewayHttpServer({ + canvasHost: null, + clients: new Set(), + controlUiEnabled: false, + controlUiBasePath: "/__control__", + openAiChatCompletionsEnabled: false, + openResponsesEnabled: false, + handleHooksRequest: async () => false, + handlePluginRequest, + resolvedAuth, + }); + + for (const variant of buildChannelPathFuzzCorpus()) { + const response = createResponse(); + await dispatchRequest(server, createRequest({ path: variant.path }), response.res); + expect(response.res.statusCode, variant.label).not.toBe(200); + expect(response.getBody(), variant.label).not.toContain( + '"route":"channel-canonicalized"', + ); + } + }, + }); + }); + + test.each(["0.0.0.0", "::"])( + "returns 404 (not 500) for non-hook routes with hooks enabled and bindHost=%s", + async (bindHost) => { + const resolvedAuth: ResolvedGatewayAuth = { + mode: "none", + token: undefined, + password: undefined, + allowTailscale: false, + }; + + await withTempConfig({ + cfg: { gateway: { trustedProxies: [] } }, + prefix: "openclaw-plugin-http-hooks-bindhost-", + run: async () => { + const handleHooksRequest = createHooksRequestHandler({ + getHooksConfig: () => createHooksConfig(), + bindHost, + port: 18789, + logHooks: { + warn: vi.fn(), + debug: vi.fn(), + info: vi.fn(), + error: vi.fn(), + } as unknown as ReturnType, + dispatchWakeHook: () => {}, + dispatchAgentHook: () => "run-1", + }); + const server = createGatewayHttpServer({ + canvasHost: null, + clients: new Set(), + controlUiEnabled: false, + controlUiBasePath: "/__control__", + openAiChatCompletionsEnabled: false, + openResponsesEnabled: false, + 
handleHooksRequest, + resolvedAuth, + }); + + const response = createResponse(); + await dispatchRequest(server, createRequest({ path: "/" }), response.res); + + expect(response.res.statusCode).toBe(404); + expect(response.getBody()).toBe("Not Found"); + }, + }); + }, + ); + + test("rejects query-token hooks requests with bindHost=::", async () => { + const resolvedAuth: ResolvedGatewayAuth = { + mode: "none", + token: undefined, + password: undefined, + allowTailscale: false, + }; + + await withTempConfig({ + cfg: { gateway: { trustedProxies: [] } }, + prefix: "openclaw-plugin-http-hooks-query-token-", + run: async () => { + const handleHooksRequest = createHooksRequestHandler({ + getHooksConfig: () => createHooksConfig(), + bindHost: "::", + port: 18789, + logHooks: { + warn: vi.fn(), + debug: vi.fn(), + info: vi.fn(), + error: vi.fn(), + } as unknown as ReturnType, + dispatchWakeHook: () => {}, + dispatchAgentHook: () => "run-1", + }); + const server = createGatewayHttpServer({ + canvasHost: null, + clients: new Set(), + controlUiEnabled: false, + controlUiBasePath: "/__control__", + openAiChatCompletionsEnabled: false, + openResponsesEnabled: false, + handleHooksRequest, + resolvedAuth, + }); + + const response = createResponse(); + await dispatchRequest( + server, + createRequest({ path: "/hooks/wake?token=bad" }), + response.res, + ); + + expect(response.res.statusCode).toBe(400); + expect(response.getBody()).toContain("Hook token must be provided"); + }, + }); + }); }); diff --git a/src/gateway/server.reload.test.ts b/src/gateway/server.reload.test.ts index f3ddec1d113..c44ed0ea71e 100644 --- a/src/gateway/server.reload.test.ts +++ b/src/gateway/server.reload.test.ts @@ -1,4 +1,8 @@ +import fs from "node:fs/promises"; +import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { resolveMainSessionKeyFromConfig } from "../config/sessions.js"; +import { drainSystemEvents } from "../infra/system-events.js"; 
import { connectOk, installGatewayTestHooks, @@ -170,11 +174,13 @@ describe("gateway hot reload", () => { let prevSkipChannels: string | undefined; let prevSkipGmail: string | undefined; let prevSkipProviders: string | undefined; + let prevOpenAiApiKey: string | undefined; beforeEach(() => { prevSkipChannels = process.env.OPENCLAW_SKIP_CHANNELS; prevSkipGmail = process.env.OPENCLAW_SKIP_GMAIL_WATCHER; prevSkipProviders = process.env.OPENCLAW_SKIP_PROVIDERS; + prevOpenAiApiKey = process.env.OPENAI_API_KEY; process.env.OPENCLAW_SKIP_CHANNELS = "0"; delete process.env.OPENCLAW_SKIP_GMAIL_WATCHER; delete process.env.OPENCLAW_SKIP_PROVIDERS; @@ -196,8 +202,78 @@ describe("gateway hot reload", () => { } else { process.env.OPENCLAW_SKIP_PROVIDERS = prevSkipProviders; } + if (prevOpenAiApiKey === undefined) { + delete process.env.OPENAI_API_KEY; + } else { + process.env.OPENAI_API_KEY = prevOpenAiApiKey; + } }); + async function writeEnvRefConfig() { + const configPath = process.env.OPENCLAW_CONFIG_PATH; + if (!configPath) { + throw new Error("OPENCLAW_CONFIG_PATH is not set"); + } + await fs.writeFile( + configPath, + `${JSON.stringify( + { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + models: [], + }, + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + } + + async function writeAuthProfileEnvRefStore() { + const stateDir = process.env.OPENCLAW_STATE_DIR; + if (!stateDir) { + throw new Error("OPENCLAW_STATE_DIR is not set"); + } + const authStorePath = path.join(stateDir, "agents", "main", "agent", "auth-profiles.json"); + await fs.mkdir(path.dirname(authStorePath), { recursive: true }); + await fs.writeFile( + authStorePath, + `${JSON.stringify( + { + version: 1, + profiles: { + missing: { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "MISSING_OPENCLAW_AUTH_REF" }, + }, + }, + selectedProfileId: "missing", + 
lastUsedProfileByModel: {}, + usageStats: {}, + }, + null, + 2, + )}\n`, + "utf8", + ); + } + + async function removeMainAuthProfileStore() { + const stateDir = process.env.OPENCLAW_STATE_DIR; + if (!stateDir) { + return; + } + const authStorePath = path.join(stateDir, "agents", "main", "agent", "auth-profiles.json"); + await fs.rm(authStorePath, { force: true }); + } + it("applies hot reload actions and emits restart signal", async () => { await withGatewayServer(async () => { const onHotReload = hoisted.getOnHotReload(); @@ -281,7 +357,7 @@ describe("gateway hot reload", () => { const signalSpy = vi.fn(); process.once("SIGUSR1", signalSpy); - onRestart?.( + const restartResult = onRestart?.( { changedPaths: ["gateway.port"], restartGateway: true, @@ -297,10 +373,105 @@ describe("gateway hot reload", () => { }, {}, ); + await Promise.resolve(restartResult); expect(signalSpy).toHaveBeenCalledTimes(1); }); }); + + it("fails startup when required secret refs are unresolved", async () => { + await writeEnvRefConfig(); + delete process.env.OPENAI_API_KEY; + await expect(withGatewayServer(async () => {})).rejects.toThrow( + "Startup failed: required secrets are unavailable", + ); + }); + + it("fails startup when auth-profile secret refs are unresolved", async () => { + await writeAuthProfileEnvRefStore(); + delete process.env.MISSING_OPENCLAW_AUTH_REF; + try { + await expect(withGatewayServer(async () => {})).rejects.toThrow( + 'Environment variable "MISSING_OPENCLAW_AUTH_REF" is missing or empty.', + ); + } finally { + await removeMainAuthProfileStore(); + } + }); + + it("emits one-shot degraded and recovered system events during secret reload transitions", async () => { + await writeEnvRefConfig(); + process.env.OPENAI_API_KEY = "sk-startup"; + + await withGatewayServer(async () => { + const onHotReload = hoisted.getOnHotReload(); + expect(onHotReload).toBeTypeOf("function"); + const sessionKey = resolveMainSessionKeyFromConfig(); + const plan = { + changedPaths: 
["models.providers.openai.apiKey"], + restartGateway: false, + restartReasons: [], + hotReasons: ["models.providers.openai.apiKey"], + reloadHooks: false, + restartGmailWatcher: false, + restartBrowserControl: false, + restartCron: false, + restartHeartbeat: false, + restartChannels: new Set(), + noopPaths: [], + }; + const nextConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + models: [], + }, + }, + }, + }; + + delete process.env.OPENAI_API_KEY; + await expect(onHotReload?.(plan, nextConfig)).rejects.toThrow( + 'Environment variable "OPENAI_API_KEY" is missing or empty.', + ); + const degradedEvents = drainSystemEvents(sessionKey); + expect(degradedEvents.some((event) => event.includes("[SECRETS_RELOADER_DEGRADED]"))).toBe( + true, + ); + + await expect(onHotReload?.(plan, nextConfig)).rejects.toThrow( + 'Environment variable "OPENAI_API_KEY" is missing or empty.', + ); + expect(drainSystemEvents(sessionKey)).toEqual([]); + + process.env.OPENAI_API_KEY = "sk-recovered"; + await expect(onHotReload?.(plan, nextConfig)).resolves.toBeUndefined(); + const recoveredEvents = drainSystemEvents(sessionKey); + expect(recoveredEvents.some((event) => event.includes("[SECRETS_RELOADER_RECOVERED]"))).toBe( + true, + ); + }); + }); + + it("serves secrets.reload immediately after startup without race failures", async () => { + await writeEnvRefConfig(); + process.env.OPENAI_API_KEY = "sk-startup"; + const { server, ws } = await startServerWithClient(); + try { + await connectOk(ws); + const [first, second] = await Promise.all([ + rpcReq<{ warningCount: number }>(ws, "secrets.reload", {}), + rpcReq<{ warningCount: number }>(ws, "secrets.reload", {}), + ]); + expect(first.ok).toBe(true); + expect(second.ok).toBe(true); + } finally { + ws.close(); + await server.close(); + } + }); }); describe("gateway agents", () => { diff --git 
a/src/gateway/server.roles-allowlist-update.test.ts b/src/gateway/server.roles-allowlist-update.test.ts index fceb71a0b38..8b78ced9b47 100644 --- a/src/gateway/server.roles-allowlist-update.test.ts +++ b/src/gateway/server.roles-allowlist-update.test.ts @@ -4,6 +4,7 @@ import path from "node:path"; import { describe, expect, test, vi } from "vitest"; import { WebSocket } from "ws"; import { CONFIG_PATH } from "../config/config.js"; +import type { DeviceIdentity } from "../infra/device-identity.js"; import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js"; import type { GatewayClient } from "./client.js"; @@ -36,6 +37,9 @@ installConnectedControlUiServerSuite((started) => { const connectNodeClient = async (params: { port: number; commands: string[]; + platform?: string; + deviceFamily?: string; + deviceIdentity?: DeviceIdentity; instanceId?: string; displayName?: string; onEvent?: (evt: { event?: string; payload?: unknown }) => void; @@ -51,11 +55,13 @@ const connectNodeClient = async (params: { clientName: GATEWAY_CLIENT_NAMES.NODE_HOST, clientVersion: "1.0.0", clientDisplayName: params.displayName, - platform: "ios", + platform: params.platform ?? 
"ios", + deviceFamily: params.deviceFamily, mode: GATEWAY_CLIENT_MODES.NODE, instanceId: params.instanceId, scopes: [], commands: params.commands, + deviceIdentity: params.deviceIdentity, onEvent: params.onEvent, timeoutMessage: "timeout waiting for node to connect", }); @@ -313,4 +319,51 @@ describe("gateway node command allowlist", () => { allowedClient?.stop(); } }); + + test("rejects reconnect metadata spoof for paired node devices", async () => { + const { loadOrCreateDeviceIdentity } = await import("../infra/device-identity.js"); + const deviceIdentityPath = path.join( + os.tmpdir(), + `openclaw-spoof-test-device-${Date.now()}-${Math.random().toString(36).slice(2)}.json`, + ); + const deviceIdentity = loadOrCreateDeviceIdentity(deviceIdentityPath); + + let iosClient: GatewayClient | undefined; + try { + iosClient = await connectNodeClientWithPairing({ + port, + commands: ["canvas.snapshot"], + platform: "ios", + deviceFamily: "iPhone", + instanceId: "node-platform-pin", + displayName: "node-platform-pin", + deviceIdentity, + }); + iosClient.stop(); + await expect + .poll(async () => { + const listRes = await rpcReq<{ nodes?: Array<{ connected?: boolean }> }>( + ws, + "node.list", + {}, + ); + return (listRes.payload?.nodes ?? 
[]).filter((node) => node.connected).length; + }, FAST_WAIT_OPTS) + .toBe(0); + + await expect( + connectNodeClient({ + port, + commands: ["system.run"], + platform: "linux", + deviceFamily: "linux", + instanceId: "node-platform-pin", + displayName: "node-platform-pin", + deviceIdentity, + }), + ).rejects.toThrow(/pairing required/i); + } finally { + iosClient?.stop(); + } + }); }); diff --git a/src/gateway/server.sessions.gateway-server-sessions-a.test.ts b/src/gateway/server.sessions.gateway-server-sessions-a.test.ts index b05cf2220ed..0d8996cbf47 100644 --- a/src/gateway/server.sessions.gateway-server-sessions-a.test.ts +++ b/src/gateway/server.sessions.gateway-server-sessions-a.test.ts @@ -38,6 +38,12 @@ const subagentLifecycleHookState = vi.hoisted(() => ({ const threadBindingMocks = vi.hoisted(() => ({ unbindThreadBindingsBySessionKey: vi.fn((_params?: unknown) => []), })); +const acpRuntimeMocks = vi.hoisted(() => ({ + cancel: vi.fn(async () => {}), + close: vi.fn(async () => {}), + getAcpRuntimeBackend: vi.fn(), + requireAcpRuntimeBackend: vi.fn(), +})); vi.mock("../auto-reply/reply/queue.js", async () => { const actual = await vi.importActual( @@ -90,6 +96,21 @@ vi.mock("../discord/monitor/thread-bindings.js", async (importOriginal) => { }; }); +vi.mock("../acp/runtime/registry.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + getAcpRuntimeBackend: acpRuntimeMocks.getAcpRuntimeBackend, + requireAcpRuntimeBackend: (backendId?: string) => { + const backend = acpRuntimeMocks.requireAcpRuntimeBackend(backendId); + if (!backend) { + throw new Error("missing mocked ACP backend"); + } + return backend; + }, + }; +}); + installGatewayTestHooks({ scope: "suite" }); let harness: GatewayServerHarness; @@ -176,6 +197,14 @@ describe("gateway server sessions", () => { subagentLifecycleHookMocks.runSubagentEnded.mockClear(); subagentLifecycleHookState.hasSubagentEndedHook = true; 
threadBindingMocks.unbindThreadBindingsBySessionKey.mockClear(); + acpRuntimeMocks.cancel.mockClear(); + acpRuntimeMocks.close.mockClear(); + acpRuntimeMocks.getAcpRuntimeBackend.mockReset(); + acpRuntimeMocks.getAcpRuntimeBackend.mockReturnValue(null); + acpRuntimeMocks.requireAcpRuntimeBackend.mockReset(); + acpRuntimeMocks.requireAcpRuntimeBackend.mockImplementation((backendId?: string) => + acpRuntimeMocks.getAcpRuntimeBackend(backendId), + ); }); test("lists and patches session store via sessions.* RPC", async () => { @@ -669,6 +698,68 @@ describe("gateway server sessions", () => { ws.close(); }); + test("sessions.delete closes ACP runtime handles before removing ACP sessions", async () => { + const { dir } = await createSessionStoreDir(); + await writeSingleLineSession(dir, "sess-main", "hello"); + await writeSingleLineSession(dir, "sess-acp", "acp"); + + await writeSessionStore({ + entries: { + main: { sessionId: "sess-main", updatedAt: Date.now() }, + "discord:group:dev": { + sessionId: "sess-acp", + updatedAt: Date.now(), + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime:delete", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }, + }, + }); + acpRuntimeMocks.getAcpRuntimeBackend.mockReturnValue({ + id: "acpx", + runtime: { + ensureSession: vi.fn(async () => ({ + sessionKey: "agent:main:discord:group:dev", + backend: "acpx", + runtimeSessionName: "runtime:delete", + })), + runTurn: vi.fn(async function* () {}), + cancel: acpRuntimeMocks.cancel, + close: acpRuntimeMocks.close, + }, + }); + + const { ws } = await openClient(); + const deleted = await rpcReq<{ ok: true; deleted: boolean }>(ws, "sessions.delete", { + key: "discord:group:dev", + }); + expect(deleted.ok).toBe(true); + expect(deleted.payload?.deleted).toBe(true); + expect(acpRuntimeMocks.close).toHaveBeenCalledWith({ + handle: { + sessionKey: "agent:main:discord:group:dev", + backend: "acpx", + runtimeSessionName: "runtime:delete", + }, + 
reason: "session-delete", + }); + expect(acpRuntimeMocks.cancel).toHaveBeenCalledWith({ + handle: { + sessionKey: "agent:main:discord:group:dev", + backend: "acpx", + runtimeSessionName: "runtime:delete", + }, + reason: "session-delete", + }); + + ws.close(); + }); + test("sessions.delete does not emit lifecycle events when nothing was deleted", async () => { const { dir } = await createSessionStoreDir(); await writeSingleLineSession(dir, "sess-main", "hello"); @@ -838,6 +929,57 @@ describe("gateway server sessions", () => { ws.close(); }); + test("sessions.reset closes ACP runtime handles for ACP sessions", async () => { + const { dir } = await createSessionStoreDir(); + await writeSingleLineSession(dir, "sess-main", "hello"); + + await writeSessionStore({ + entries: { + main: { + sessionId: "sess-main", + updatedAt: Date.now(), + acp: { + backend: "acpx", + agent: "codex", + runtimeSessionName: "runtime:reset", + mode: "persistent", + state: "idle", + lastActivityAt: Date.now(), + }, + }, + }, + }); + acpRuntimeMocks.getAcpRuntimeBackend.mockReturnValue({ + id: "acpx", + runtime: { + ensureSession: vi.fn(async () => ({ + sessionKey: "agent:main:main", + backend: "acpx", + runtimeSessionName: "runtime:reset", + })), + runTurn: vi.fn(async function* () {}), + cancel: vi.fn(async () => {}), + close: acpRuntimeMocks.close, + }, + }); + + const { ws } = await openClient(); + const reset = await rpcReq<{ ok: true; key: string }>(ws, "sessions.reset", { + key: "main", + }); + expect(reset.ok).toBe(true); + expect(acpRuntimeMocks.close).toHaveBeenCalledWith({ + handle: { + sessionKey: "agent:main:main", + backend: "acpx", + runtimeSessionName: "runtime:reset", + }, + reason: "session-reset", + }); + + ws.close(); + }); + test("sessions.reset does not emit lifecycle events when key does not exist", async () => { const { dir } = await createSessionStoreDir(); await writeSingleLineSession(dir, "sess-main", "hello"); diff --git a/src/gateway/server/hooks.ts 
b/src/gateway/server/hooks.ts index 4b816aea7db..3b294be8fb9 100644 --- a/src/gateway/server/hooks.ts +++ b/src/gateway/server/hooks.ts @@ -7,7 +7,11 @@ import type { CronJob } from "../../cron/types.js"; import { requestHeartbeatNow } from "../../infra/heartbeat-wake.js"; import { enqueueSystemEvent } from "../../infra/system-events.js"; import type { createSubsystemLogger } from "../../logging/subsystem.js"; -import type { HookAgentDispatchPayload, HooksConfigResolved } from "../hooks.js"; +import { + normalizeHookDispatchSessionKey, + type HookAgentDispatchPayload, + type HooksConfigResolved, +} from "../hooks.js"; import { createHooksRequestHandler } from "../server-http.js"; type SubsystemLogger = ReturnType; @@ -30,7 +34,10 @@ export function createGatewayHooksRequestHandler(params: { }; const dispatchAgentHook = (value: HookAgentDispatchPayload) => { - const sessionKey = value.sessionKey.trim(); + const sessionKey = normalizeHookDispatchSessionKey({ + sessionKey: value.sessionKey, + targetAgentId: value.agentId, + }); const mainSessionKey = resolveMainSessionKeyFromConfig(); const jobId = randomUUID(); const now = Date.now(); diff --git a/src/gateway/server/ws-connection.ts b/src/gateway/server/ws-connection.ts index e7c9d458f8f..3abc8d6e1b9 100644 --- a/src/gateway/server/ws-connection.ts +++ b/src/gateway/server/ws-connection.ts @@ -65,6 +65,8 @@ export function attachGatewayWsConnectionHandler(params: { resolvedAuth: ResolvedGatewayAuth; /** Optional rate limiter for auth brute-force protection. */ rateLimiter?: AuthRateLimiter; + /** Browser-origin fallback limiter (loopback is never exempt). 
*/ + browserRateLimiter?: AuthRateLimiter; gatewayMethods: string[]; events: string[]; logGateway: SubsystemLogger; @@ -90,6 +92,7 @@ export function attachGatewayWsConnectionHandler(params: { canvasHostServerPort, resolvedAuth, rateLimiter, + browserRateLimiter, gatewayMethods, events, logGateway, @@ -278,6 +281,7 @@ export function attachGatewayWsConnectionHandler(params: { connectNonce, resolvedAuth, rateLimiter, + browserRateLimiter, gatewayMethods, events, extraHandlers, diff --git a/src/gateway/server/ws-connection/connect-policy.test.ts b/src/gateway/server/ws-connection/connect-policy.test.ts index 320f90537ce..88813663a85 100644 --- a/src/gateway/server/ws-connection/connect-policy.test.ts +++ b/src/gateway/server/ws-connection/connect-policy.test.ts @@ -1,6 +1,7 @@ import { describe, expect, test } from "vitest"; import { evaluateMissingDeviceIdentity, + isTrustedProxyControlUiOperatorAuth, resolveControlUiAuthPolicy, shouldSkipControlUiPairing, } from "./connect-policy.js"; @@ -186,4 +187,55 @@ describe("ws connect policy", () => { expect(shouldSkipControlUiPairing(strict, true, false)).toBe(false); expect(shouldSkipControlUiPairing(strict, false, true)).toBe(true); }); + + test("trusted-proxy control-ui bypass only applies to operator + trusted-proxy auth", () => { + const cases: Array<{ + role: "operator" | "node"; + authMode: string; + authOk: boolean; + authMethod: string | undefined; + expected: boolean; + }> = [ + { + role: "operator", + authMode: "trusted-proxy", + authOk: true, + authMethod: "trusted-proxy", + expected: true, + }, + { + role: "node", + authMode: "trusted-proxy", + authOk: true, + authMethod: "trusted-proxy", + expected: false, + }, + { + role: "operator", + authMode: "token", + authOk: true, + authMethod: "token", + expected: false, + }, + { + role: "operator", + authMode: "trusted-proxy", + authOk: false, + authMethod: "trusted-proxy", + expected: false, + }, + ]; + + for (const tc of cases) { + expect( + 
isTrustedProxyControlUiOperatorAuth({ + isControlUi: true, + role: tc.role, + authMode: tc.authMode, + authOk: tc.authOk, + authMethod: tc.authMethod, + }), + ).toBe(tc.expected); + } + }); }); diff --git a/src/gateway/server/ws-connection/connect-policy.ts b/src/gateway/server/ws-connection/connect-policy.ts index 70dbea07505..f2467aedc98 100644 --- a/src/gateway/server/ws-connection/connect-policy.ts +++ b/src/gateway/server/ws-connection/connect-policy.ts @@ -43,6 +43,22 @@ export function shouldSkipControlUiPairing( return policy.allowBypass && sharedAuthOk; } +export function isTrustedProxyControlUiOperatorAuth(params: { + isControlUi: boolean; + role: GatewayRole; + authMode: string; + authOk: boolean; + authMethod: string | undefined; +}): boolean { + return ( + params.isControlUi && + params.role === "operator" && + params.authMode === "trusted-proxy" && + params.authOk && + params.authMethod === "trusted-proxy" + ); +} + export type MissingDeviceIdentityDecision = | { kind: "allow" } | { kind: "reject-control-ui-insecure-auth" } diff --git a/src/gateway/server/ws-connection/message-handler.ts b/src/gateway/server/ws-connection/message-handler.ts index 191278275ee..7c1b449ff4d 100644 --- a/src/gateway/server/ws-connection/message-handler.ts +++ b/src/gateway/server/ws-connection/message-handler.ts @@ -32,7 +32,11 @@ import { CANVAS_CAPABILITY_TTL_MS, mintCanvasCapabilityToken, } from "../../canvas-capability.js"; -import { buildDeviceAuthPayload } from "../../device-auth.js"; +import { + buildDeviceAuthPayload, + buildDeviceAuthPayloadV3, + normalizeDeviceMetadataForAuth, +} from "../../device-auth.js"; import { isLocalishHost, isLoopbackAddress, @@ -75,6 +79,7 @@ import { resolveConnectAuthDecision, resolveConnectAuthState } from "./auth-cont import { formatGatewayAuthFailureMessage, type AuthProvidedKind } from "./auth-messages.js"; import { evaluateMissingDeviceIdentity, + isTrustedProxyControlUiOperatorAuth, resolveControlUiAuthPolicy, 
shouldSkipControlUiPairing, } from "./connect-policy.js"; @@ -83,6 +88,123 @@ import { isUnauthorizedRoleError, UnauthorizedFloodGuard } from "./unauthorized- type SubsystemLogger = ReturnType; const DEVICE_SIGNATURE_SKEW_MS = 2 * 60 * 1000; +const BROWSER_ORIGIN_LOOPBACK_RATE_LIMIT_IP = "198.18.0.1"; + +type HandshakeBrowserSecurityContext = { + hasBrowserOriginHeader: boolean; + enforceOriginCheckForAnyClient: boolean; + rateLimitClientIp: string | undefined; + authRateLimiter?: AuthRateLimiter; +}; + +function resolveHandshakeBrowserSecurityContext(params: { + requestOrigin?: string; + hasProxyHeaders: boolean; + clientIp: string | undefined; + rateLimiter?: AuthRateLimiter; + browserRateLimiter?: AuthRateLimiter; +}): HandshakeBrowserSecurityContext { + const hasBrowserOriginHeader = Boolean( + params.requestOrigin && params.requestOrigin.trim() !== "", + ); + return { + hasBrowserOriginHeader, + enforceOriginCheckForAnyClient: hasBrowserOriginHeader && !params.hasProxyHeaders, + rateLimitClientIp: + hasBrowserOriginHeader && isLoopbackAddress(params.clientIp) + ? BROWSER_ORIGIN_LOOPBACK_RATE_LIMIT_IP + : params.clientIp, + authRateLimiter: + hasBrowserOriginHeader && params.browserRateLimiter + ? 
params.browserRateLimiter + : params.rateLimiter, + }; +} + +function shouldAllowSilentLocalPairing(params: { + isLocalClient: boolean; + hasBrowserOriginHeader: boolean; + isControlUi: boolean; + isWebchat: boolean; + reason: "not-paired" | "role-upgrade" | "scope-upgrade" | "metadata-upgrade"; +}): boolean { + return ( + params.isLocalClient && + (!params.hasBrowserOriginHeader || params.isControlUi || params.isWebchat) && + (params.reason === "not-paired" || params.reason === "scope-upgrade") + ); +} + +function resolveDeviceSignaturePayloadVersion(params: { + device: { + id: string; + signature: string; + publicKey: string; + }; + connectParams: ConnectParams; + role: string; + scopes: string[]; + signedAtMs: number; + nonce: string; +}): "v3" | "v2" | null { + const payloadV3 = buildDeviceAuthPayloadV3({ + deviceId: params.device.id, + clientId: params.connectParams.client.id, + clientMode: params.connectParams.client.mode, + role: params.role, + scopes: params.scopes, + signedAtMs: params.signedAtMs, + token: params.connectParams.auth?.token ?? params.connectParams.auth?.deviceToken ?? null, + nonce: params.nonce, + platform: params.connectParams.client.platform, + deviceFamily: params.connectParams.client.deviceFamily, + }); + if (verifyDeviceSignature(params.device.publicKey, payloadV3, params.device.signature)) { + return "v3"; + } + + const payloadV2 = buildDeviceAuthPayload({ + deviceId: params.device.id, + clientId: params.connectParams.client.id, + clientMode: params.connectParams.client.mode, + role: params.role, + scopes: params.scopes, + signedAtMs: params.signedAtMs, + token: params.connectParams.auth?.token ?? params.connectParams.auth?.deviceToken ?? 
null, + nonce: params.nonce, + }); + if (verifyDeviceSignature(params.device.publicKey, payloadV2, params.device.signature)) { + return "v2"; + } + return null; +} + +function resolvePinnedClientMetadata(params: { + claimedPlatform?: string; + claimedDeviceFamily?: string; + pairedPlatform?: string; + pairedDeviceFamily?: string; +}): { + platformMismatch: boolean; + deviceFamilyMismatch: boolean; + pinnedPlatform?: string; + pinnedDeviceFamily?: string; +} { + const claimedPlatform = normalizeDeviceMetadataForAuth(params.claimedPlatform); + const claimedDeviceFamily = normalizeDeviceMetadataForAuth(params.claimedDeviceFamily); + const pairedPlatform = normalizeDeviceMetadataForAuth(params.pairedPlatform); + const pairedDeviceFamily = normalizeDeviceMetadataForAuth(params.pairedDeviceFamily); + const hasPinnedPlatform = pairedPlatform !== ""; + const hasPinnedDeviceFamily = pairedDeviceFamily !== ""; + const platformMismatch = hasPinnedPlatform && claimedPlatform !== pairedPlatform; + const deviceFamilyMismatch = hasPinnedDeviceFamily && claimedDeviceFamily !== pairedDeviceFamily; + return { + platformMismatch, + deviceFamilyMismatch, + pinnedPlatform: hasPinnedPlatform ? params.pairedPlatform : undefined, + pinnedDeviceFamily: hasPinnedDeviceFamily ? params.pairedDeviceFamily : undefined, + }; +} export function attachGatewayWsMessageHandler(params: { socket: WebSocket; @@ -99,6 +221,8 @@ export function attachGatewayWsMessageHandler(params: { resolvedAuth: ResolvedGatewayAuth; /** Optional rate limiter for auth brute-force protection. */ rateLimiter?: AuthRateLimiter; + /** Browser-origin fallback limiter (loopback is never exempt). 
*/ + browserRateLimiter?: AuthRateLimiter; gatewayMethods: string[]; events: string[]; extraHandlers: GatewayRequestHandlers; @@ -130,6 +254,7 @@ export function attachGatewayWsMessageHandler(params: { connectNonce, resolvedAuth, rateLimiter, + browserRateLimiter, gatewayMethods, events, extraHandlers, @@ -192,6 +317,19 @@ export function attachGatewayWsMessageHandler(params: { const isWebchatConnect = (p: ConnectParams | null | undefined) => isWebchatClient(p?.client); const unauthorizedFloodGuard = new UnauthorizedFloodGuard(); + const browserSecurity = resolveHandshakeBrowserSecurityContext({ + requestOrigin, + hasProxyHeaders, + clientIp, + rateLimiter, + browserRateLimiter, + }); + const { + hasBrowserOriginHeader, + enforceOriginCheckForAnyClient, + rateLimitClientIp: browserRateLimitClientIp, + authRateLimiter, + } = browserSecurity; socket.on("message", async (data) => { if (isClosed()) { @@ -329,7 +467,7 @@ export function attachGatewayWsMessageHandler(params: { const isControlUi = connectParams.client.id === GATEWAY_CLIENT_IDS.CONTROL_UI; const isWebchat = isWebchatConnect(connectParams); - if (isControlUi || isWebchat) { + if (enforceOriginCheckForAnyClient || isControlUi || isWebchat) { const originCheck = checkBrowserOrigin({ requestHost, origin: requestOrigin, @@ -353,6 +491,7 @@ export function attachGatewayWsMessageHandler(params: { const deviceRaw = connectParams.device; let devicePublicKey: string | null = null; + let deviceAuthPayloadVersion: "v2" | "v3" | null = null; const hasTokenAuth = Boolean(connectParams.auth?.token); const hasPasswordAuth = Boolean(connectParams.auth?.password); const hasSharedAuth = hasTokenAuth || hasPasswordAuth; @@ -377,8 +516,8 @@ export function attachGatewayWsMessageHandler(params: { req: upgradeReq, trustedProxies, allowRealIpFallback, - rateLimiter, - clientIp, + rateLimiter: authRateLimiter, + clientIp: browserRateLimitClientIp, }); const rejectUnauthorized = (failedAuth: GatewayAuthResult) => { 
markHandshakeFailure("unauthorized", { @@ -418,7 +557,7 @@ export function attachGatewayWsMessageHandler(params: { close(1008, truncateCloseReason(authMessage)); }; const clearUnboundScopes = () => { - if (scopes.length > 0 && !controlUiAuthPolicy.allowBypass) { + if (scopes.length > 0 && !controlUiAuthPolicy.allowBypass && !sharedAuthOk) { scopes = []; connectParams.scopes = scopes; } @@ -427,11 +566,13 @@ export function attachGatewayWsMessageHandler(params: { if (!device) { clearUnboundScopes(); } - const trustedProxyAuthOk = - isControlUi && - resolvedAuth.mode === "trusted-proxy" && - authOk && - authMethod === "trusted-proxy"; + const trustedProxyAuthOk = isTrustedProxyControlUiOperatorAuth({ + isControlUi, + role, + authMode: resolvedAuth.mode, + authOk, + authMethod, + }); const decision = evaluateMissingDeviceIdentity({ hasDeviceIdentity: Boolean(device), role, @@ -518,23 +659,21 @@ export function attachGatewayWsMessageHandler(params: { rejectDeviceAuthInvalid("device-nonce-mismatch", "device nonce mismatch"); return; } - const payload = buildDeviceAuthPayload({ - deviceId: device.id, - clientId: connectParams.client.id, - clientMode: connectParams.client.mode, + const rejectDeviceSignatureInvalid = () => + rejectDeviceAuthInvalid("device-signature", "device signature invalid"); + const payloadVersion = resolveDeviceSignaturePayloadVersion({ + device, + connectParams, role, scopes, signedAtMs: signedAt, - token: connectParams.auth?.token ?? connectParams.auth?.deviceToken ?? 
null, nonce: providedNonce, }); - const rejectDeviceSignatureInvalid = () => - rejectDeviceAuthInvalid("device-signature", "device signature invalid"); - const signatureOk = verifyDeviceSignature(device.publicKey, payload, device.signature); - if (!signatureOk) { + if (!payloadVersion) { rejectDeviceSignatureInvalid(); return; } + deviceAuthPayloadVersion = payloadVersion; devicePublicKey = normalizeDevicePublicKeyBase64Url(device.publicKey); if (!devicePublicKey) { rejectDeviceAuthInvalid("device-public-key", "device public key invalid"); @@ -556,8 +695,8 @@ export function attachGatewayWsMessageHandler(params: { deviceId: device?.id, role, scopes, - rateLimiter, - clientIp, + rateLimiter: authRateLimiter, + clientIp: browserRateLimitClientIp, verifyDeviceToken, })); if (!authOk) { @@ -565,18 +704,18 @@ export function attachGatewayWsMessageHandler(params: { return; } - // Shared token/password auth is already gateway-level trust for operator clients. - // In that case, don't force device pairing on first connect. 
- const skipPairingForOperatorSharedAuth = - role === "operator" && sharedAuthOk && !isControlUi && !isWebchat; - const trustedProxyAuthOk = - isControlUi && - resolvedAuth.mode === "trusted-proxy" && - authOk && - authMethod === "trusted-proxy"; - const skipPairing = - shouldSkipControlUiPairing(controlUiAuthPolicy, sharedAuthOk, trustedProxyAuthOk) || - skipPairingForOperatorSharedAuth; + const trustedProxyAuthOk = isTrustedProxyControlUiOperatorAuth({ + isControlUi, + role, + authMode: resolvedAuth.mode, + authOk, + authMethod, + }); + const skipPairing = shouldSkipControlUiPairing( + controlUiAuthPolicy, + sharedAuthOk, + trustedProxyAuthOk, + ); if (device && devicePublicKey && !skipPairing) { const formatAuditList = (items: string[] | undefined): string => { if (!items || items.length === 0) { @@ -603,9 +742,18 @@ export function attachGatewayWsMessageHandler(params: { `security audit: device access upgrade requested reason=${reason} device=${device.id} ip=${reportedClientIp ?? "unknown-ip"} auth=${authMethod} roleFrom=${formatAuditList(currentRoles)} roleTo=${role} scopesFrom=${formatAuditList(currentScopes)} scopesTo=${formatAuditList(scopes)} client=${connectParams.client.id} conn=${connId}`, ); }; - const clientAccessMetadata = { + const clientPairingMetadata = { displayName: connectParams.client.displayName, platform: connectParams.client.platform, + deviceFamily: connectParams.client.deviceFamily, + clientId: connectParams.client.id, + clientMode: connectParams.client.mode, + role, + scopes, + remoteIp: reportedClientIp, + }; + const clientAccessMetadata = { + displayName: connectParams.client.displayName, clientId: connectParams.client.id, clientMode: connectParams.client.mode, role, @@ -613,13 +761,20 @@ export function attachGatewayWsMessageHandler(params: { remoteIp: reportedClientIp, }; const requirePairing = async ( - reason: "not-paired" | "role-upgrade" | "scope-upgrade", + reason: "not-paired" | "role-upgrade" | "scope-upgrade" | 
"metadata-upgrade", ) => { + const allowSilentLocalPairing = shouldAllowSilentLocalPairing({ + isLocalClient, + hasBrowserOriginHeader, + isControlUi, + isWebchat, + reason, + }); const pairing = await requestDevicePairing({ deviceId: device.id, publicKey: devicePublicKey, - ...clientAccessMetadata, - silent: isLocalClient && (reason === "not-paired" || reason === "scope-upgrade"), + ...clientPairingMetadata, + silent: allowSilentLocalPairing, }); const context = buildRequestContext(); if (pairing.request.silent === true) { @@ -675,6 +830,33 @@ export function attachGatewayWsMessageHandler(params: { return; } } else { + const claimedPlatform = connectParams.client.platform; + const pairedPlatform = paired.platform; + const claimedDeviceFamily = connectParams.client.deviceFamily; + const pairedDeviceFamily = paired.deviceFamily; + const metadataPinning = resolvePinnedClientMetadata({ + claimedPlatform, + claimedDeviceFamily, + pairedPlatform, + pairedDeviceFamily, + }); + const { platformMismatch, deviceFamilyMismatch } = metadataPinning; + if (platformMismatch || deviceFamilyMismatch) { + logGateway.warn( + `security audit: device metadata upgrade requested reason=metadata-upgrade device=${device.id} ip=${reportedClientIp ?? "unknown-ip"} auth=${authMethod} payload=${deviceAuthPayloadVersion ?? "unknown"} claimedPlatform=${claimedPlatform ?? ""} pinnedPlatform=${pairedPlatform ?? ""} claimedDeviceFamily=${claimedDeviceFamily ?? ""} pinnedDeviceFamily=${pairedDeviceFamily ?? ""} client=${connectParams.client.id} conn=${connId}`, + ); + const ok = await requirePairing("metadata-upgrade"); + if (!ok) { + return; + } + } else { + if (metadataPinning.pinnedPlatform) { + connectParams.client.platform = metadataPinning.pinnedPlatform; + } + if (metadataPinning.pinnedDeviceFamily) { + connectParams.client.deviceFamily = metadataPinning.pinnedDeviceFamily; + } + } const pairedRoles = Array.isArray(paired.roles) ? 
paired.roles : paired.role @@ -723,6 +905,8 @@ export function attachGatewayWsMessageHandler(params: { } } + // Metadata pinning is approval-bound. Reconnects can update access metadata, + // but platform/device family must stay on the approved pairing record. await updatePairedDeviceMetadata(device.id, clientAccessMetadata); } } diff --git a/src/gateway/session-utils.ts b/src/gateway/session-utils.ts index 14165ab2875..fa4c514388b 100644 --- a/src/gateway/session-utils.ts +++ b/src/gateway/session-utils.ts @@ -22,7 +22,7 @@ import { type SessionEntry, type SessionScope, } from "../config/sessions.js"; -import { openVerifiedFileSync } from "../infra/safe-open-sync.js"; +import { openBoundaryFileSync } from "../infra/boundary-file-read.js"; import { normalizeAgentId, normalizeMainKey, @@ -102,14 +102,13 @@ function resolveIdentityAvatarUrl( return undefined; } try { - const resolvedReal = fs.realpathSync(resolvedCandidate); - if (!isPathWithinRoot(workspaceRoot, resolvedReal)) { - return undefined; - } - const opened = openVerifiedFileSync({ - filePath: resolvedReal, - resolvedPath: resolvedReal, + const opened = openBoundaryFileSync({ + absolutePath: resolvedCandidate, + rootPath: workspaceRoot, + rootRealPath: workspaceRoot, + boundaryLabel: "workspace root", maxBytes: AVATAR_MAX_BYTES, + skipLexicalRootCheck: true, }); if (!opened.ok) { return undefined; diff --git a/src/gateway/system-run-approval-binding.contract.test.ts b/src/gateway/system-run-approval-binding.contract.test.ts new file mode 100644 index 00000000000..48976c3bdc5 --- /dev/null +++ b/src/gateway/system-run-approval-binding.contract.test.ts @@ -0,0 +1,90 @@ +import fs from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; +import { describe, expect, test } from "vitest"; +import type { ExecApprovalRequestPayload } from "../infra/exec-approvals.js"; +import { buildSystemRunApprovalBindingV1 } from "../infra/system-run-approval-binding.js"; +import { 
evaluateSystemRunApprovalMatch } from "./node-invoke-system-run-approval-match.js"; + +type FixtureCase = { + name: string; + request: { + host: string; + command: string; + commandArgv?: string[]; + cwd?: string | null; + agentId?: string | null; + sessionKey?: string | null; + bindingV1?: { + argv: string[]; + cwd?: string | null; + agentId?: string | null; + sessionKey?: string | null; + env?: Record; + }; + }; + invoke: { + argv: string[]; + binding: { + cwd: string | null; + agentId: string | null; + sessionKey: string | null; + env?: Record; + }; + }; + expected: { + ok: boolean; + code?: "APPROVAL_REQUEST_MISMATCH" | "APPROVAL_ENV_BINDING_MISSING" | "APPROVAL_ENV_MISMATCH"; + }; +}; + +type Fixture = { + cases: FixtureCase[]; +}; + +const fixturePath = path.resolve( + path.dirname(fileURLToPath(import.meta.url)), + "../../test/fixtures/system-run-approval-binding-contract.json", +); +const fixture = JSON.parse(fs.readFileSync(fixturePath, "utf8")) as Fixture; + +function buildRequestPayload(entry: FixtureCase): ExecApprovalRequestPayload { + const payload: ExecApprovalRequestPayload = { + host: entry.request.host, + command: entry.request.command, + commandArgv: entry.request.commandArgv, + cwd: entry.request.cwd ?? null, + agentId: entry.request.agentId ?? null, + sessionKey: entry.request.sessionKey ?? 
null, + }; + if (entry.request.bindingV1) { + payload.systemRunBindingV1 = buildSystemRunApprovalBindingV1({ + argv: entry.request.bindingV1.argv, + cwd: entry.request.bindingV1.cwd, + agentId: entry.request.bindingV1.agentId, + sessionKey: entry.request.bindingV1.sessionKey, + env: entry.request.bindingV1.env, + }).binding; + } + return payload; +} + +describe("system-run approval binding contract fixtures", () => { + for (const entry of fixture.cases) { + test(entry.name, () => { + const result = evaluateSystemRunApprovalMatch({ + argv: entry.invoke.argv, + request: buildRequestPayload(entry), + binding: entry.invoke.binding, + }); + + expect(result.ok).toBe(entry.expected.ok); + if (!entry.expected.ok) { + if (result.ok) { + throw new Error("expected approval mismatch"); + } + expect(result.code).toBe(entry.expected.code); + } + }); + } +}); diff --git a/src/gateway/system-run-approval-binding.test.ts b/src/gateway/system-run-approval-binding.test.ts new file mode 100644 index 00000000000..383b2895ffd --- /dev/null +++ b/src/gateway/system-run-approval-binding.test.ts @@ -0,0 +1,131 @@ +import { describe, expect, test } from "vitest"; +import { + buildSystemRunApprovalBindingV1, + buildSystemRunApprovalEnvBinding, + matchSystemRunApprovalBindingV1, + matchSystemRunApprovalEnvHash, + toSystemRunApprovalMismatchError, +} from "../infra/system-run-approval-binding.js"; + +describe("buildSystemRunApprovalEnvBinding", () => { + test("normalizes keys and produces stable hash regardless of input order", () => { + const a = buildSystemRunApprovalEnvBinding({ + Z_VAR: "z", + A_VAR: "a", + " BAD KEY": "ignored", + }); + const b = buildSystemRunApprovalEnvBinding({ + A_VAR: "a", + Z_VAR: "z", + }); + expect(a.envKeys).toEqual(["A_VAR", "Z_VAR"]); + expect(a.envHash).toBe(b.envHash); + }); +}); + +describe("matchSystemRunApprovalEnvHash", () => { + test("accepts empty env hash on both sides", () => { + expect( + matchSystemRunApprovalEnvHash({ + expectedEnvHash: null, + 
actualEnvHash: null, + actualEnvKeys: [], + }), + ).toEqual({ ok: true }); + }); + + test("rejects non-empty actual env hash when expected is empty", () => { + const result = matchSystemRunApprovalEnvHash({ + expectedEnvHash: null, + actualEnvHash: "hash", + actualEnvKeys: ["GIT_EXTERNAL_DIFF"], + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.code).toBe("APPROVAL_ENV_BINDING_MISSING"); + }); +}); + +describe("matchSystemRunApprovalBindingV1", () => { + test("accepts matching binding with reordered env keys", () => { + const expected = buildSystemRunApprovalBindingV1({ + argv: ["git", "diff"], + cwd: null, + agentId: null, + sessionKey: null, + env: { SAFE_A: "1", SAFE_B: "2" }, + }); + const actual = buildSystemRunApprovalBindingV1({ + argv: ["git", "diff"], + cwd: null, + agentId: null, + sessionKey: null, + env: { SAFE_B: "2", SAFE_A: "1" }, + }); + expect( + matchSystemRunApprovalBindingV1({ + expected: expected.binding, + actual: actual.binding, + actualEnvKeys: actual.envKeys, + }), + ).toEqual({ ok: true }); + }); + + test("rejects env mismatch", () => { + const expected = buildSystemRunApprovalBindingV1({ + argv: ["git", "diff"], + cwd: null, + agentId: null, + sessionKey: null, + env: { SAFE: "1" }, + }); + const actual = buildSystemRunApprovalBindingV1({ + argv: ["git", "diff"], + cwd: null, + agentId: null, + sessionKey: null, + env: { SAFE: "2" }, + }); + const result = matchSystemRunApprovalBindingV1({ + expected: expected.binding, + actual: actual.binding, + actualEnvKeys: actual.envKeys, + }); + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("unreachable"); + } + expect(result.code).toBe("APPROVAL_ENV_MISMATCH"); + }); +}); + +describe("toSystemRunApprovalMismatchError", () => { + test("includes runId/code and preserves mismatch details", () => { + const result = toSystemRunApprovalMismatchError({ + runId: "approval-123", + match: { + ok: false, + code: 
"APPROVAL_ENV_MISMATCH", + message: "approval id env binding mismatch", + details: { + envKeys: ["SAFE_A"], + expectedEnvHash: "expected-hash", + actualEnvHash: "actual-hash", + }, + }, + }); + expect(result).toEqual({ + ok: false, + message: "approval id env binding mismatch", + details: { + code: "APPROVAL_ENV_MISMATCH", + runId: "approval-123", + envKeys: ["SAFE_A"], + expectedEnvHash: "expected-hash", + actualEnvHash: "actual-hash", + }, + }); + }); +}); diff --git a/src/gateway/test-helpers.e2e.ts b/src/gateway/test-helpers.e2e.ts index e267921c0ea..34afd6614a8 100644 --- a/src/gateway/test-helpers.e2e.ts +++ b/src/gateway/test-helpers.e2e.ts @@ -1,4 +1,6 @@ import { writeFile } from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; import { WebSocket } from "ws"; import { type DeviceIdentity, @@ -15,7 +17,7 @@ import { type GatewayClientName, } from "../utils/message-channel.js"; import { GatewayClient } from "./client.js"; -import { buildDeviceAuthPayload } from "./device-auth.js"; +import { buildDeviceAuthPayloadV3 } from "./device-auth.js"; import { PROTOCOL_VERSION } from "./protocol/index.js"; import { startGatewayServer } from "./server.js"; @@ -31,6 +33,7 @@ export async function connectGatewayClient(params: { clientVersion?: string; mode?: GatewayClientMode; platform?: string; + deviceFamily?: string; role?: "operator" | "node"; scopes?: string[]; caps?: string[]; @@ -42,6 +45,20 @@ export async function connectGatewayClient(params: { timeoutMs?: number; timeoutMessage?: string; }) { + const role = params.role ?? "operator"; + const platform = params.platform ?? process.platform; + const identityRoot = process.env.OPENCLAW_STATE_DIR ?? process.env.HOME ?? os.tmpdir(); + const deviceIdentity = + params.deviceIdentity ?? + loadOrCreateDeviceIdentity( + (() => { + const safe = + `${params.clientName ?? GATEWAY_CLIENT_NAMES.TEST}-${params.mode ?? GATEWAY_CLIENT_MODES.TEST}-${platform}-${params.deviceFamily ?? 
"none"}-${role}` + .replace(/[^a-zA-Z0-9._-]+/g, "_") + .toLowerCase(); + return path.join(identityRoot, "test-device-identities", `${safe}.json`); + })(), + ); return await new Promise>((resolve, reject) => { let settled = false; const stop = (err?: Error, client?: InstanceType) => { @@ -63,14 +80,15 @@ export async function connectGatewayClient(params: { clientName: params.clientName ?? GATEWAY_CLIENT_NAMES.TEST, clientDisplayName: params.clientDisplayName ?? "vitest", clientVersion: params.clientVersion ?? "dev", - platform: params.platform, + platform, + deviceFamily: params.deviceFamily, mode: params.mode ?? GATEWAY_CLIENT_MODES.TEST, - role: params.role, + role, scopes: params.scopes, caps: params.caps, commands: params.commands, instanceId: params.instanceId, - deviceIdentity: params.deviceIdentity, + deviceIdentity, onEvent: params.onEvent, onHelloOk: () => stop(undefined, client), onConnectError: (err) => stop(err), @@ -127,7 +145,8 @@ export async function connectDeviceAuthReq(params: { url: string; token?: string const connectNonce = await connectNoncePromise; const identity = loadOrCreateDeviceIdentity(); const signedAtMs = Date.now(); - const payload = buildDeviceAuthPayload({ + const platform = process.platform; + const payload = buildDeviceAuthPayloadV3({ deviceId: identity.deviceId, clientId: GATEWAY_CLIENT_NAMES.TEST, clientMode: GATEWAY_CLIENT_MODES.TEST, @@ -136,6 +155,7 @@ export async function connectDeviceAuthReq(params: { url: string; token?: string signedAtMs, token: params.token ?? 
null, nonce: connectNonce, + platform, }); const device = { id: identity.deviceId, @@ -156,7 +176,7 @@ export async function connectDeviceAuthReq(params: { url: string; token?: string id: GATEWAY_CLIENT_NAMES.TEST, displayName: "vitest", version: "dev", - platform: process.platform, + platform, mode: GATEWAY_CLIENT_MODES.TEST, }, caps: [], diff --git a/src/gateway/test-helpers.server.ts b/src/gateway/test-helpers.server.ts index e923a3bbb3e..d6afcc82d58 100644 --- a/src/gateway/test-helpers.server.ts +++ b/src/gateway/test-helpers.server.ts @@ -18,7 +18,7 @@ import { DEFAULT_AGENT_ID, toAgentStoreSessionKey } from "../routing/session-key import { captureEnv } from "../test-utils/env.js"; import { getDeterministicFreePortBlock } from "../test-utils/ports.js"; import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js"; -import { buildDeviceAuthPayload } from "./device-auth.js"; +import { buildDeviceAuthPayloadV3 } from "./device-auth.js"; import { PROTOCOL_VERSION } from "./protocol/index.js"; import type { GatewayServerOptions } from "./server.js"; import { @@ -421,6 +421,21 @@ type ConnectResponse = { error?: { message?: string; code?: string; details?: unknown }; }; +function resolveDefaultTestDeviceIdentityPath(params: { + clientId: string; + clientMode: string; + platform: string; + deviceFamily?: string; + role: string; +}) { + const safe = + `${params.clientId}-${params.clientMode}-${params.platform}-${params.deviceFamily ?? "none"}-${params.role}` + .replace(/[^a-zA-Z0-9._-]+/g, "_") + .toLowerCase(); + const suiteRoot = process.env.OPENCLAW_STATE_DIR ?? process.env.HOME ?? 
os.tmpdir(); + return path.join(suiteRoot, "test-device-identities", `${safe}.json`); +} + export async function readConnectChallengeNonce( ws: WebSocket, timeoutMs = 2_000, @@ -478,6 +493,7 @@ export async function connectReq( signedAt: number; nonce?: string; } | null; + deviceIdentityPath?: string; skipConnectChallengeNonce?: boolean; timeoutMs?: number; }, @@ -527,9 +543,18 @@ export async function connectReq( if (!connectChallengeNonce) { throw new Error("missing connect.challenge nonce"); } - const identity = loadOrCreateDeviceIdentity(); + const identityPath = + opts?.deviceIdentityPath ?? + resolveDefaultTestDeviceIdentityPath({ + clientId: client.id, + clientMode: client.mode, + platform: client.platform, + deviceFamily: client.deviceFamily, + role, + }); + const identity = loadOrCreateDeviceIdentity(identityPath); const signedAtMs = Date.now(); - const payload = buildDeviceAuthPayload({ + const payload = buildDeviceAuthPayloadV3({ deviceId: identity.deviceId, clientId: client.id, clientMode: client.mode, @@ -538,6 +563,8 @@ export async function connectReq( signedAtMs, token: authTokenForSignature ?? 
null, nonce: connectChallengeNonce, + platform: client.platform, + deviceFamily: client.deviceFamily, }); return { id: identity.deviceId, diff --git a/src/hooks/bundled/bootstrap-extra-files/handler.ts b/src/hooks/bundled/bootstrap-extra-files/handler.ts index 2f015b280fc..bc708b62d07 100644 --- a/src/hooks/bundled/bootstrap-extra-files/handler.ts +++ b/src/hooks/bundled/bootstrap-extra-files/handler.ts @@ -1,6 +1,6 @@ import { filterBootstrapFilesForSession, - loadExtraBootstrapFiles, + loadExtraBootstrapFilesWithDiagnostics, } from "../../../agents/workspace.js"; import { createSubsystemLogger } from "../../../logging/subsystem.js"; import { resolveHookConfig } from "../../config.js"; @@ -45,7 +45,19 @@ const bootstrapExtraFilesHook: HookHandler = async (event) => { } try { - const extras = await loadExtraBootstrapFiles(context.workspaceDir, patterns); + const { files: extras, diagnostics } = await loadExtraBootstrapFilesWithDiagnostics( + context.workspaceDir, + patterns, + ); + if (diagnostics.length > 0) { + log.debug("skipped extra bootstrap candidates", { + skipped: diagnostics.length, + reasons: diagnostics.reduce>((counts, item) => { + counts[item.reason] = (counts[item.reason] ?? 
0) + 1; + return counts; + }, {}), + }); + } if (extras.length === 0) { return; } diff --git a/src/hooks/loader.test.ts b/src/hooks/loader.test.ts index 66ccf04b8cf..d9107d2e390 100644 --- a/src/hooks/loader.test.ts +++ b/src/hooks/loader.test.ts @@ -281,5 +281,71 @@ describe("loader", () => { expect(count).toBe(0); expect(getRegisteredEventKeys()).not.toContain("command:new"); }); + + it("rejects directory hook handlers that escape hook dir via hardlink", async () => { + if (process.platform === "win32") { + return; + } + const outsideHandlerPath = path.join(fixtureRoot, `outside-handler-hardlink-${caseId}.js`); + await fs.writeFile(outsideHandlerPath, "export default async function() {}", "utf-8"); + + const hookDir = path.join(tmpDir, "hooks", "hardlink-hook"); + await fs.mkdir(hookDir, { recursive: true }); + await fs.writeFile( + path.join(hookDir, "HOOK.md"), + [ + "---", + "name: hardlink-hook", + "description: hardlink test", + 'metadata: {"openclaw":{"events":["command:new"]}}', + "---", + "", + "# Hardlink Hook", + ].join("\n"), + "utf-8", + ); + try { + await fs.link(outsideHandlerPath, path.join(hookDir, "handler.js")); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const cfg = createEnabledHooksConfig(); + const count = await loadInternalHooks(cfg, tmpDir); + expect(count).toBe(0); + expect(getRegisteredEventKeys()).not.toContain("command:new"); + }); + + it("rejects legacy handler modules that escape workspace via hardlink", async () => { + if (process.platform === "win32") { + return; + } + const outsideHandlerPath = path.join(fixtureRoot, `outside-legacy-hardlink-${caseId}.js`); + await fs.writeFile(outsideHandlerPath, "export default async function() {}", "utf-8"); + + const linkedHandlerPath = path.join(tmpDir, "legacy-handler.js"); + try { + await fs.link(outsideHandlerPath, linkedHandlerPath); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + 
throw err; + } + + const cfg = createEnabledHooksConfig([ + { + event: "command:new", + module: "legacy-handler.js", + }, + ]); + + const count = await loadInternalHooks(cfg, tmpDir); + expect(count).toBe(0); + expect(getRegisteredEventKeys()).not.toContain("command:new"); + }); }); }); diff --git a/src/hooks/loader.ts b/src/hooks/loader.ts index 30f37e4db25..4a1fb964617 100644 --- a/src/hooks/loader.ts +++ b/src/hooks/loader.ts @@ -5,10 +5,11 @@ * and from directory-based discovery (bundled, managed, workspace) */ +import fs from "node:fs"; import path from "node:path"; import type { OpenClawConfig } from "../config/config.js"; +import { openBoundaryFile } from "../infra/boundary-file-read.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; -import { isPathInsideWithRealpath } from "../security/scan-paths.js"; import { resolveHookConfig } from "./config.js"; import { shouldIncludeHook } from "./config.js"; import { buildImportUrl } from "./import-url.js"; @@ -73,18 +74,23 @@ export async function loadInternalHooks( } try { - if ( - !isPathInsideWithRealpath(entry.hook.baseDir, entry.hook.handlerPath, { - requireRealpath: true, - }) - ) { + const hookBaseDir = safeRealpathOrResolve(entry.hook.baseDir); + const opened = await openBoundaryFile({ + absolutePath: entry.hook.handlerPath, + rootPath: hookBaseDir, + boundaryLabel: "hook directory", + }); + if (!opened.ok) { log.error( - `Hook '${entry.hook.name}' handler path resolves outside hook directory: ${entry.hook.handlerPath}`, + `Hook '${entry.hook.name}' handler path fails boundary checks: ${entry.hook.handlerPath}`, ); continue; } + const safeHandlerPath = opened.path; + fs.closeSync(opened.fd); + // Import handler module — only cache-bust mutable (workspace/managed) hooks - const importUrl = buildImportUrl(entry.hook.handlerPath, entry.hook.source); + const importUrl = buildImportUrl(safeHandlerPath, entry.hook.source); const mod = (await import(importUrl)) as Record; // Get handler function 
(default or named export) @@ -144,24 +150,27 @@ export async function loadInternalHooks( } const baseDir = path.resolve(workspaceDir); const modulePath = path.resolve(baseDir, rawModule); + const baseDirReal = safeRealpathOrResolve(baseDir); + const modulePathSafe = safeRealpathOrResolve(modulePath); const rel = path.relative(baseDir, modulePath); if (!rel || rel.startsWith("..") || path.isAbsolute(rel)) { log.error(`Handler module path must stay within workspaceDir: ${rawModule}`); continue; } - if ( - !isPathInsideWithRealpath(baseDir, modulePath, { - requireRealpath: true, - }) - ) { - log.error( - `Handler module path resolves outside workspaceDir after symlink resolution: ${rawModule}`, - ); + const opened = await openBoundaryFile({ + absolutePath: modulePathSafe, + rootPath: baseDirReal, + boundaryLabel: "workspace directory", + }); + if (!opened.ok) { + log.error(`Handler module path fails boundary checks under workspaceDir: ${rawModule}`); continue; } + const safeModulePath = opened.path; + fs.closeSync(opened.fd); // Legacy handlers are always workspace-relative, so use mtime-based cache busting - const importUrl = buildImportUrl(modulePath, "openclaw-workspace"); + const importUrl = buildImportUrl(safeModulePath, "openclaw-workspace"); const mod = (await import(importUrl)) as Record; // Get the handler function @@ -190,3 +199,11 @@ export async function loadInternalHooks( return loadedCount; } + +function safeRealpathOrResolve(value: string): string { + try { + return fs.realpathSync(value); + } catch { + return path.resolve(value); + } +} diff --git a/src/hooks/workspace.test.ts b/src/hooks/workspace.test.ts index cc35ffdd698..dc3de2acd9f 100644 --- a/src/hooks/workspace.test.ts +++ b/src/hooks/workspace.test.ts @@ -102,4 +102,67 @@ describe("hooks workspace", () => { const entries = loadHookEntriesFromDir({ dir: hooksRoot, source: "openclaw-workspace" }); expect(entries.some((e) => e.hook.name === "outside")).toBe(false); }); + + it("ignores hooks with 
hardlinked HOOK.md aliases", () => { + if (process.platform === "win32") { + return; + } + + const root = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-hooks-workspace-hardlink-")); + const hooksRoot = path.join(root, "hooks"); + fs.mkdirSync(hooksRoot, { recursive: true }); + + const hookDir = path.join(hooksRoot, "hardlink-hook"); + const outsideDir = path.join(root, "outside"); + fs.mkdirSync(hookDir, { recursive: true }); + fs.mkdirSync(outsideDir, { recursive: true }); + fs.writeFileSync(path.join(hookDir, "handler.js"), "export default async () => {};\n"); + const outsideHookMd = path.join(outsideDir, "HOOK.md"); + const linkedHookMd = path.join(hookDir, "HOOK.md"); + fs.writeFileSync(linkedHookMd, "---\nname: hardlink-hook\n---\n"); + fs.rmSync(linkedHookMd); + fs.writeFileSync(outsideHookMd, "---\nname: outside\n---\n"); + try { + fs.linkSync(outsideHookMd, linkedHookMd); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const entries = loadHookEntriesFromDir({ dir: hooksRoot, source: "openclaw-workspace" }); + expect(entries.some((e) => e.hook.name === "hardlink-hook")).toBe(false); + expect(entries.some((e) => e.hook.name === "outside")).toBe(false); + }); + + it("ignores hooks with hardlinked handler aliases", () => { + if (process.platform === "win32") { + return; + } + + const root = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-hooks-workspace-hardlink-")); + const hooksRoot = path.join(root, "hooks"); + fs.mkdirSync(hooksRoot, { recursive: true }); + + const hookDir = path.join(hooksRoot, "hardlink-handler-hook"); + const outsideDir = path.join(root, "outside"); + fs.mkdirSync(hookDir, { recursive: true }); + fs.mkdirSync(outsideDir, { recursive: true }); + fs.writeFileSync(path.join(hookDir, "HOOK.md"), "---\nname: hardlink-handler-hook\n---\n"); + const outsideHandler = path.join(outsideDir, "handler.js"); + const linkedHandler = path.join(hookDir, "handler.js"); + 
fs.writeFileSync(outsideHandler, "export default async () => {};\n"); + try { + fs.linkSync(outsideHandler, linkedHandler); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const entries = loadHookEntriesFromDir({ dir: hooksRoot, source: "openclaw-workspace" }); + expect(entries.some((e) => e.hook.name === "hardlink-handler-hook")).toBe(false); + }); }); diff --git a/src/hooks/workspace.ts b/src/hooks/workspace.ts index 426569a215f..ab6375cd8ea 100644 --- a/src/hooks/workspace.ts +++ b/src/hooks/workspace.ts @@ -2,6 +2,7 @@ import fs from "node:fs"; import path from "node:path"; import { MANIFEST_KEY } from "../compat/legacy-names.js"; import type { OpenClawConfig } from "../config/config.js"; +import { openBoundaryFileSync } from "../infra/boundary-file-read.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; import { isPathInsideWithRealpath } from "../security/scan-paths.js"; import { CONFIG_DIR, resolveUserPath } from "../utils.js"; @@ -36,11 +37,15 @@ function filterHookEntries( function readHookPackageManifest(dir: string): HookPackageManifest | null { const manifestPath = path.join(dir, "package.json"); - if (!fs.existsSync(manifestPath)) { + const raw = readBoundaryFileUtf8({ + absolutePath: manifestPath, + rootPath: dir, + boundaryLabel: "hook package directory", + }); + if (raw === null) { return null; } try { - const raw = fs.readFileSync(manifestPath, "utf-8"); return JSON.parse(raw) as HookPackageManifest; } catch { return null; @@ -75,12 +80,15 @@ function loadHookFromDir(params: { nameHint?: string; }): Hook | null { const hookMdPath = path.join(params.hookDir, "HOOK.md"); - if (!fs.existsSync(hookMdPath)) { + const content = readBoundaryFileUtf8({ + absolutePath: hookMdPath, + rootPath: params.hookDir, + boundaryLabel: "hook directory", + }); + if (content === null) { return null; } - try { - const content = fs.readFileSync(hookMdPath, "utf-8"); const frontmatter = 
parseFrontmatter(content); const name = frontmatter.name || params.nameHint || path.basename(params.hookDir); @@ -90,8 +98,13 @@ function loadHookFromDir(params: { let handlerPath: string | undefined; for (const candidate of handlerCandidates) { const candidatePath = path.join(params.hookDir, candidate); - if (fs.existsSync(candidatePath)) { - handlerPath = candidatePath; + const safeCandidatePath = resolveBoundaryFilePath({ + absolutePath: candidatePath, + rootPath: params.hookDir, + boundaryLabel: "hook directory", + }); + if (safeCandidatePath) { + handlerPath = safeCandidatePath; break; } } @@ -192,11 +205,13 @@ export function loadHookEntriesFromDir(params: { }); return hooks.map((hook) => { let frontmatter: ParsedHookFrontmatter = {}; - try { - const raw = fs.readFileSync(hook.filePath, "utf-8"); + const raw = readBoundaryFileUtf8({ + absolutePath: hook.filePath, + rootPath: hook.baseDir, + boundaryLabel: "hook directory", + }); + if (raw !== null) { frontmatter = parseFrontmatter(raw); - } catch { - // ignore malformed hooks } const entry: HookEntry = { hook: { @@ -267,11 +282,13 @@ function loadHookEntries( return Array.from(merged.values()).map((hook) => { let frontmatter: ParsedHookFrontmatter = {}; - try { - const raw = fs.readFileSync(hook.filePath, "utf-8"); + const raw = readBoundaryFileUtf8({ + absolutePath: hook.filePath, + rootPath: hook.baseDir, + boundaryLabel: "hook directory", + }); + if (raw !== null) { frontmatter = parseFrontmatter(raw); - } catch { - // ignore malformed hooks } return { hook, @@ -316,3 +333,43 @@ export function loadWorkspaceHookEntries( ): HookEntry[] { return loadHookEntries(workspaceDir, opts); } + +function readBoundaryFileUtf8(params: { + absolutePath: string; + rootPath: string; + boundaryLabel: string; +}): string | null { + const opened = openBoundaryFileSync({ + absolutePath: params.absolutePath, + rootPath: params.rootPath, + boundaryLabel: params.boundaryLabel, + }); + if (!opened.ok) { + return null; + } + try { 
+ return fs.readFileSync(opened.fd, "utf-8"); + } catch { + return null; + } finally { + fs.closeSync(opened.fd); + } +} + +function resolveBoundaryFilePath(params: { + absolutePath: string; + rootPath: string; + boundaryLabel: string; +}): string | null { + const opened = openBoundaryFileSync({ + absolutePath: params.absolutePath, + rootPath: params.rootPath, + boundaryLabel: params.boundaryLabel, + }); + if (!opened.ok) { + return null; + } + const safePath = opened.path; + fs.closeSync(opened.fd); + return safePath; +} diff --git a/src/imessage/monitor/inbound-processing.test.ts b/src/imessage/monitor/inbound-processing.test.ts index d63c4163318..5eb13e097b9 100644 --- a/src/imessage/monitor/inbound-processing.test.ts +++ b/src/imessage/monitor/inbound-processing.test.ts @@ -58,3 +58,71 @@ describe("describeIMessageEchoDropLog", () => { ).toContain("id=abc-123"); }); }); + +describe("resolveIMessageInboundDecision command auth", () => { + const cfg = {} as OpenClawConfig; + + it("does not auto-authorize DM commands in open mode without allowlists", () => { + const decision = resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 100, + sender: "+15555550123", + text: "/status", + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: "/status", + bodyText: "/status", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + logVerbose: undefined, + }); + + expect(decision.kind).toBe("dispatch"); + if (decision.kind !== "dispatch") { + return; + } + expect(decision.commandAuthorized).toBe(false); + }); + + it("authorizes DM commands for senders in pairing-store allowlist", () => { + const decision = resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 101, + sender: "+15555550123", + text: "/status", + is_from_me: false, + is_group: false, + }, + opts: undefined, + 
messageText: "/status", + bodyText: "/status", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: ["+15555550123"], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + logVerbose: undefined, + }); + + expect(decision.kind).toBe("dispatch"); + if (decision.kind !== "dispatch") { + return; + } + expect(decision.commandAuthorized).toBe(true); + }); +}); diff --git a/src/imessage/monitor/inbound-processing.ts b/src/imessage/monitor/inbound-processing.ts index cf51e958b31..8a4979df965 100644 --- a/src/imessage/monitor/inbound-processing.ts +++ b/src/imessage/monitor/inbound-processing.ts @@ -20,6 +20,10 @@ import { resolveChannelGroupRequireMention, } from "../../config/group-policy.js"; import { resolveAgentRoute } from "../../routing/resolve-route.js"; +import { + DM_GROUP_ACCESS_REASON, + resolveDmGroupAccessWithLists, +} from "../../security/dm-policy-shared.js"; import { truncateUtf16Safe } from "../../utils.js"; import { formatIMessageChatTarget, @@ -138,72 +142,60 @@ export function resolveIMessageInboundDecision(params: { } const groupId = isGroup ? groupIdCandidate : undefined; - const storeAllowFrom = params.dmPolicy === "allowlist" ? [] : params.storeAllowFrom; - const effectiveDmAllowFrom = Array.from(new Set([...params.allowFrom, ...storeAllowFrom])) - .map((v) => String(v).trim()) - .filter(Boolean); - // Keep DM pairing-store authorization scoped to DMs; group access must come from explicit group allowlist config. 
- const effectiveGroupAllowFrom = Array.from(new Set(params.groupAllowFrom)) - .map((v) => String(v).trim()) - .filter(Boolean); + const accessDecision = resolveDmGroupAccessWithLists({ + isGroup, + dmPolicy: params.dmPolicy, + groupPolicy: params.groupPolicy, + allowFrom: params.allowFrom, + groupAllowFrom: params.groupAllowFrom, + storeAllowFrom: params.storeAllowFrom, + groupAllowFromFallbackToAllowFrom: false, + isSenderAllowed: (allowFrom) => + isAllowedIMessageSender({ + allowFrom, + sender, + chatId, + chatGuid, + chatIdentifier, + }), + }); + const effectiveDmAllowFrom = accessDecision.effectiveAllowFrom; + const effectiveGroupAllowFrom = accessDecision.effectiveGroupAllowFrom; - if (isGroup) { - if (params.groupPolicy === "disabled") { - params.logVerbose?.("Blocked iMessage group message (groupPolicy: disabled)"); - return { kind: "drop", reason: "groupPolicy disabled" }; - } - if (params.groupPolicy === "allowlist") { - if (effectiveGroupAllowFrom.length === 0) { + if (accessDecision.decision !== "allow") { + if (isGroup) { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_DISABLED) { + params.logVerbose?.("Blocked iMessage group message (groupPolicy: disabled)"); + return { kind: "drop", reason: "groupPolicy disabled" }; + } + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_EMPTY_ALLOWLIST) { params.logVerbose?.( "Blocked iMessage group message (groupPolicy: allowlist, no groupAllowFrom)", ); return { kind: "drop", reason: "groupPolicy allowlist (empty groupAllowFrom)" }; } - const allowed = isAllowedIMessageSender({ - allowFrom: effectiveGroupAllowFrom, - sender, - chatId, - chatGuid, - chatIdentifier, - }); - if (!allowed) { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_NOT_ALLOWLISTED) { params.logVerbose?.(`Blocked iMessage sender ${sender} (not in groupAllowFrom)`); return { kind: "drop", reason: "not in groupAllowFrom" }; } + params.logVerbose?.(`Blocked iMessage group message 
(${accessDecision.reason})`); + return { kind: "drop", reason: accessDecision.reason }; } - if (groupListPolicy.allowlistEnabled && !groupListPolicy.allowed) { - params.logVerbose?.( - `imessage: skipping group message (${groupId ?? "unknown"}) not in allowlist`, - ); - return { kind: "drop", reason: "group id not in allowlist" }; - } - } - - const dmHasWildcard = effectiveDmAllowFrom.includes("*"); - const dmAuthorized = - params.dmPolicy === "open" - ? true - : dmHasWildcard || - (effectiveDmAllowFrom.length > 0 && - isAllowedIMessageSender({ - allowFrom: effectiveDmAllowFrom, - sender, - chatId, - chatGuid, - chatIdentifier, - })); - - if (!isGroup) { - if (params.dmPolicy === "disabled") { + if (accessDecision.reasonCode === DM_GROUP_ACCESS_REASON.DM_POLICY_DISABLED) { return { kind: "drop", reason: "dmPolicy disabled" }; } - if (!dmAuthorized) { - if (params.dmPolicy === "pairing") { - return { kind: "pairing", senderId: senderNormalized }; - } - params.logVerbose?.(`Blocked iMessage sender ${sender} (dmPolicy=${params.dmPolicy})`); - return { kind: "drop", reason: "dmPolicy blocked" }; + if (accessDecision.decision === "pairing") { + return { kind: "pairing", senderId: senderNormalized }; } + params.logVerbose?.(`Blocked iMessage sender ${sender} (dmPolicy=${params.dmPolicy})`); + return { kind: "drop", reason: "dmPolicy blocked" }; + } + + if (isGroup && groupListPolicy.allowlistEnabled && !groupListPolicy.allowed) { + params.logVerbose?.( + `imessage: skipping group message (${groupId ?? "unknown"}) not in allowlist`, + ); + return { kind: "drop", reason: "group id not in allowlist" }; } const route = resolveAgentRoute({ @@ -263,10 +255,11 @@ export function resolveIMessageInboundDecision(params: { const canDetectMention = mentionRegexes.length > 0; const useAccessGroups = params.cfg.commands?.useAccessGroups !== false; + const commandDmAllowFrom = isGroup ? 
params.allowFrom : effectiveDmAllowFrom; const ownerAllowedForCommands = - effectiveDmAllowFrom.length > 0 + commandDmAllowFrom.length > 0 ? isAllowedIMessageSender({ - allowFrom: effectiveDmAllowFrom, + allowFrom: commandDmAllowFrom, sender, chatId, chatGuid, @@ -287,13 +280,13 @@ export function resolveIMessageInboundDecision(params: { const commandGate = resolveControlCommandGate({ useAccessGroups, authorizers: [ - { configured: effectiveDmAllowFrom.length > 0, allowed: ownerAllowedForCommands }, + { configured: commandDmAllowFrom.length > 0, allowed: ownerAllowedForCommands }, { configured: effectiveGroupAllowFrom.length > 0, allowed: groupAllowedForCommands }, ], allowTextCommands: true, hasControlCommand: hasControlCommandInMessage, }); - const commandAuthorized = isGroup ? commandGate.commandAuthorized : dmAuthorized; + const commandAuthorized = commandGate.commandAuthorized; if (isGroup && commandGate.shouldBlock) { if (params.logVerbose) { logInboundDrop({ diff --git a/src/imessage/monitor/monitor-provider.ts b/src/imessage/monitor/monitor-provider.ts index 3bfdc691163..838e840f558 100644 --- a/src/imessage/monitor/monitor-provider.ts +++ b/src/imessage/monitor/monitor-provider.ts @@ -230,7 +230,11 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P : ""; const bodyText = messageText || placeholder; - const storeAllowFrom = await readChannelAllowFromStore("imessage").catch(() => []); + const storeAllowFrom = await readChannelAllowFromStore( + "imessage", + process.env, + accountInfo.accountId, + ).catch(() => []); const decision = resolveIMessageInboundDecision({ cfg, accountId: accountInfo.accountId, @@ -262,6 +266,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P const { code, created } = await upsertChannelPairingRequest({ channel: "imessage", id: decision.senderId, + accountId: accountInfo.accountId, meta: { sender: decision.senderId, chatId: chatId ? 
String(chatId) : undefined, diff --git a/src/infra/abort-signal.test.ts b/src/infra/abort-signal.test.ts new file mode 100644 index 00000000000..be32e0d881a --- /dev/null +++ b/src/infra/abort-signal.test.ts @@ -0,0 +1,29 @@ +import { describe, expect, it } from "vitest"; +import { waitForAbortSignal } from "./abort-signal.js"; + +describe("waitForAbortSignal", () => { + it("resolves immediately when signal is missing", async () => { + await expect(waitForAbortSignal(undefined)).resolves.toBeUndefined(); + }); + + it("resolves immediately when signal is already aborted", async () => { + const abort = new AbortController(); + abort.abort(); + await expect(waitForAbortSignal(abort.signal)).resolves.toBeUndefined(); + }); + + it("waits until abort fires", async () => { + const abort = new AbortController(); + let resolved = false; + + const task = waitForAbortSignal(abort.signal).then(() => { + resolved = true; + }); + await Promise.resolve(); + expect(resolved).toBe(false); + + abort.abort(); + await task; + expect(resolved).toBe(true); + }); +}); diff --git a/src/infra/abort-signal.ts b/src/infra/abort-signal.ts new file mode 100644 index 00000000000..77922784eda --- /dev/null +++ b/src/infra/abort-signal.ts @@ -0,0 +1,12 @@ +export async function waitForAbortSignal(signal?: AbortSignal): Promise { + if (!signal || signal.aborted) { + return; + } + await new Promise((resolve) => { + const onAbort = () => { + signal.removeEventListener("abort", onAbort); + resolve(); + }; + signal.addEventListener("abort", onAbort, { once: true }); + }); +} diff --git a/src/infra/boundary-file-read.ts b/src/infra/boundary-file-read.ts new file mode 100644 index 00000000000..9b0c5e9a510 --- /dev/null +++ b/src/infra/boundary-file-read.ts @@ -0,0 +1,107 @@ +import fs from "node:fs"; +import path from "node:path"; +import { resolveBoundaryPath, resolveBoundaryPathSync } from "./boundary-path.js"; +import type { PathAliasPolicy } from "./path-alias-guards.js"; +import { 
openVerifiedFileSync, type SafeOpenSyncFailureReason } from "./safe-open-sync.js"; + +type BoundaryReadFs = Pick< + typeof fs, + | "closeSync" + | "constants" + | "fstatSync" + | "lstatSync" + | "openSync" + | "readFileSync" + | "realpathSync" +>; + +export type BoundaryFileOpenFailureReason = SafeOpenSyncFailureReason | "validation"; + +export type BoundaryFileOpenResult = + | { ok: true; path: string; fd: number; stat: fs.Stats; rootRealPath: string } + | { ok: false; reason: BoundaryFileOpenFailureReason; error?: unknown }; + +export type OpenBoundaryFileSyncParams = { + absolutePath: string; + rootPath: string; + boundaryLabel: string; + rootRealPath?: string; + maxBytes?: number; + rejectHardlinks?: boolean; + skipLexicalRootCheck?: boolean; + ioFs?: BoundaryReadFs; +}; + +export type OpenBoundaryFileParams = OpenBoundaryFileSyncParams & { + aliasPolicy?: PathAliasPolicy; +}; + +export function canUseBoundaryFileOpen(ioFs: typeof fs): boolean { + return ( + typeof ioFs.openSync === "function" && + typeof ioFs.closeSync === "function" && + typeof ioFs.fstatSync === "function" && + typeof ioFs.lstatSync === "function" && + typeof ioFs.realpathSync === "function" && + typeof ioFs.readFileSync === "function" && + typeof ioFs.constants === "object" && + ioFs.constants !== null + ); +} + +export function openBoundaryFileSync(params: OpenBoundaryFileSyncParams): BoundaryFileOpenResult { + const ioFs = params.ioFs ?? 
fs; + const absolutePath = path.resolve(params.absolutePath); + + let resolvedPath: string; + let rootRealPath: string; + try { + const resolved = resolveBoundaryPathSync({ + absolutePath, + rootPath: params.rootPath, + rootCanonicalPath: params.rootRealPath, + boundaryLabel: params.boundaryLabel, + skipLexicalRootCheck: params.skipLexicalRootCheck, + }); + resolvedPath = resolved.canonicalPath; + rootRealPath = resolved.rootCanonicalPath; + } catch (error) { + return { ok: false, reason: "validation", error }; + } + + const opened = openVerifiedFileSync({ + filePath: absolutePath, + resolvedPath, + rejectHardlinks: params.rejectHardlinks ?? true, + maxBytes: params.maxBytes, + ioFs, + }); + if (!opened.ok) { + return opened; + } + return { + ok: true, + path: opened.path, + fd: opened.fd, + stat: opened.stat, + rootRealPath, + }; +} + +export async function openBoundaryFile( + params: OpenBoundaryFileParams, +): Promise { + try { + await resolveBoundaryPath({ + absolutePath: params.absolutePath, + rootPath: params.rootPath, + rootCanonicalPath: params.rootRealPath, + boundaryLabel: params.boundaryLabel, + policy: params.aliasPolicy, + skipLexicalRootCheck: params.skipLexicalRootCheck, + }); + } catch (error) { + return { ok: false, reason: "validation", error }; + } + return openBoundaryFileSync(params); +} diff --git a/src/infra/boundary-path.test.ts b/src/infra/boundary-path.test.ts new file mode 100644 index 00000000000..a2aefc73c28 --- /dev/null +++ b/src/infra/boundary-path.test.ts @@ -0,0 +1,198 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { resolveBoundaryPath, resolveBoundaryPathSync } from "./boundary-path.js"; +import { isPathInside } from "./path-guards.js"; + +async function withTempRoot(prefix: string, run: (root: string) => Promise): Promise { + const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + try { + return await run(root); + } 
finally { + await fs.rm(root, { recursive: true, force: true }); + } +} + +function createSeededRandom(seed: number): () => number { + let state = seed >>> 0; + return () => { + state = (state * 1664525 + 1013904223) >>> 0; + return state / 0x100000000; + }; +} + +describe("resolveBoundaryPath", () => { + it("resolves symlink parents with non-existent leafs inside root", async () => { + if (process.platform === "win32") { + return; + } + + await withTempRoot("openclaw-boundary-path-", async (base) => { + const root = path.join(base, "workspace"); + const targetDir = path.join(root, "target-dir"); + const linkPath = path.join(root, "alias"); + await fs.mkdir(targetDir, { recursive: true }); + await fs.symlink(targetDir, linkPath); + + const unresolved = path.join(linkPath, "missing.txt"); + const result = await resolveBoundaryPath({ + absolutePath: unresolved, + rootPath: root, + boundaryLabel: "sandbox root", + }); + + const targetReal = await fs.realpath(targetDir); + expect(result.exists).toBe(false); + expect(result.kind).toBe("missing"); + expect(result.canonicalPath).toBe(path.join(targetReal, "missing.txt")); + expect(isPathInside(result.rootCanonicalPath, result.canonicalPath)).toBe(true); + }); + }); + + it("blocks dangling symlink leaf escapes outside root", async () => { + if (process.platform === "win32") { + return; + } + + await withTempRoot("openclaw-boundary-path-", async (base) => { + const root = path.join(base, "workspace"); + const outside = path.join(base, "outside"); + const linkPath = path.join(root, "alias-out"); + await fs.mkdir(root, { recursive: true }); + await fs.mkdir(outside, { recursive: true }); + await fs.symlink(outside, linkPath); + const dangling = path.join(linkPath, "missing.txt"); + + await expect( + resolveBoundaryPath({ + absolutePath: dangling, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).rejects.toThrow(/Symlink escapes sandbox root/i); + expect(() => + resolveBoundaryPathSync({ + absolutePath: dangling, + 
rootPath: root, + boundaryLabel: "sandbox root", + }), + ).toThrow(/Symlink escapes sandbox root/i); + }); + }); + + it("allows final symlink only when unlink policy opts in", async () => { + if (process.platform === "win32") { + return; + } + + await withTempRoot("openclaw-boundary-path-", async (base) => { + const root = path.join(base, "workspace"); + const outside = path.join(base, "outside"); + const outsideFile = path.join(outside, "target.txt"); + const linkPath = path.join(root, "link.txt"); + await fs.mkdir(root, { recursive: true }); + await fs.mkdir(outside, { recursive: true }); + await fs.writeFile(outsideFile, "x", "utf8"); + await fs.symlink(outsideFile, linkPath); + + await expect( + resolveBoundaryPath({ + absolutePath: linkPath, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).rejects.toThrow(/Symlink escapes sandbox root/i); + + const allowed = await resolveBoundaryPath({ + absolutePath: linkPath, + rootPath: root, + boundaryLabel: "sandbox root", + policy: { allowFinalSymlinkForUnlink: true }, + }); + const rootReal = await fs.realpath(root); + expect(allowed.exists).toBe(true); + expect(allowed.kind).toBe("symlink"); + expect(allowed.canonicalPath).toBe(path.join(rootReal, "link.txt")); + }); + }); + + it("allows canonical aliases that still resolve inside root", async () => { + if (process.platform === "win32") { + return; + } + + await withTempRoot("openclaw-boundary-path-", async (base) => { + const root = path.join(base, "workspace"); + const aliasRoot = path.join(base, "workspace-alias"); + const fileName = "plugin.js"; + await fs.mkdir(root, { recursive: true }); + await fs.writeFile(path.join(root, fileName), "export default {}", "utf8"); + await fs.symlink(root, aliasRoot); + + const resolved = await resolveBoundaryPath({ + absolutePath: path.join(aliasRoot, fileName), + rootPath: await fs.realpath(root), + boundaryLabel: "plugin root", + }); + expect(resolved.exists).toBe(true); + 
expect(isPathInside(resolved.rootCanonicalPath, resolved.canonicalPath)).toBe(true); + + const resolvedSync = resolveBoundaryPathSync({ + absolutePath: path.join(aliasRoot, fileName), + rootPath: await fs.realpath(root), + boundaryLabel: "plugin root", + }); + expect(resolvedSync.exists).toBe(true); + expect(isPathInside(resolvedSync.rootCanonicalPath, resolvedSync.canonicalPath)).toBe(true); + }); + }); + + it("maintains containment invariant across randomized alias cases", async () => { + if (process.platform === "win32") { + return; + } + + await withTempRoot("openclaw-boundary-path-fuzz-", async (base) => { + const root = path.join(base, "workspace"); + const outside = path.join(base, "outside"); + const safeTarget = path.join(root, "safe-target"); + await fs.mkdir(root, { recursive: true }); + await fs.mkdir(outside, { recursive: true }); + await fs.mkdir(safeTarget, { recursive: true }); + + const rand = createSeededRandom(0x5eed1234); + for (let idx = 0; idx < 64; idx += 1) { + const token = Math.floor(rand() * 1_000_000) + .toString(16) + .padStart(5, "0"); + const safeName = `safe-${idx}-${token}`; + const useLink = rand() > 0.5; + const safeBase = useLink ? 
path.join(root, `safe-link-${idx}`) : path.join(root, safeName); + if (useLink) { + await fs.symlink(safeTarget, safeBase); + } else { + await fs.mkdir(safeBase, { recursive: true }); + } + const safeCandidate = path.join(safeBase, `new-${token}.txt`); + const safeResolved = await resolveBoundaryPath({ + absolutePath: safeCandidate, + rootPath: root, + boundaryLabel: "sandbox root", + }); + expect(isPathInside(safeResolved.rootCanonicalPath, safeResolved.canonicalPath)).toBe(true); + + const escapeLink = path.join(root, `escape-${idx}`); + await fs.symlink(outside, escapeLink); + const unsafeCandidate = path.join(escapeLink, `new-${token}.txt`); + await expect( + resolveBoundaryPath({ + absolutePath: unsafeCandidate, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).rejects.toThrow(/Symlink escapes sandbox root/i); + } + }); + }); +}); diff --git a/src/infra/boundary-path.ts b/src/infra/boundary-path.ts new file mode 100644 index 00000000000..e0f6673dd05 --- /dev/null +++ b/src/infra/boundary-path.ts @@ -0,0 +1,568 @@ +import fs from "node:fs"; +import fsp from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { isNotFoundPathError, isPathInside } from "./path-guards.js"; + +export type BoundaryPathIntent = "read" | "write" | "create" | "delete" | "stat"; + +export type BoundaryPathAliasPolicy = { + allowFinalSymlinkForUnlink?: boolean; + allowFinalHardlinkForUnlink?: boolean; +}; + +export const BOUNDARY_PATH_ALIAS_POLICIES = { + strict: Object.freeze({ + allowFinalSymlinkForUnlink: false, + allowFinalHardlinkForUnlink: false, + }), + unlinkTarget: Object.freeze({ + allowFinalSymlinkForUnlink: true, + allowFinalHardlinkForUnlink: true, + }), +} as const; + +export type ResolveBoundaryPathParams = { + absolutePath: string; + rootPath: string; + boundaryLabel: string; + intent?: BoundaryPathIntent; + policy?: BoundaryPathAliasPolicy; + skipLexicalRootCheck?: boolean; + rootCanonicalPath?: string; +}; + +export type 
ResolvedBoundaryPathKind = "missing" | "file" | "directory" | "symlink" | "other"; + +export type ResolvedBoundaryPath = { + absolutePath: string; + canonicalPath: string; + rootPath: string; + rootCanonicalPath: string; + relativePath: string; + exists: boolean; + kind: ResolvedBoundaryPathKind; +}; + +export async function resolveBoundaryPath( + params: ResolveBoundaryPathParams, +): Promise { + const rootPath = path.resolve(params.rootPath); + const absolutePath = path.resolve(params.absolutePath); + const rootCanonicalPath = params.rootCanonicalPath + ? path.resolve(params.rootCanonicalPath) + : await resolvePathViaExistingAncestor(rootPath); + const lexicalInside = isPathInside(rootPath, absolutePath); + const outsideLexicalCanonicalPath = lexicalInside + ? undefined + : await resolvePathViaExistingAncestor(absolutePath); + const canonicalOutsideLexicalPath = resolveCanonicalOutsideLexicalPath({ + absolutePath, + outsideLexicalCanonicalPath, + }); + assertLexicalBoundaryOrCanonicalAlias({ + skipLexicalRootCheck: params.skipLexicalRootCheck, + lexicalInside, + canonicalOutsideLexicalPath, + rootCanonicalPath, + boundaryLabel: params.boundaryLabel, + rootPath, + absolutePath, + }); + + if (!lexicalInside) { + const canonicalPath = canonicalOutsideLexicalPath; + assertInsideBoundary({ + boundaryLabel: params.boundaryLabel, + rootCanonicalPath, + candidatePath: canonicalPath, + absolutePath, + }); + const kind = await getPathKind(absolutePath, false); + return buildResolvedBoundaryPath({ + absolutePath, + canonicalPath, + rootPath, + rootCanonicalPath, + kind, + }); + } + + return resolveBoundaryPathLexicalAsync({ + params, + absolutePath, + rootPath, + rootCanonicalPath, + }); +} + +export function resolveBoundaryPathSync(params: ResolveBoundaryPathParams): ResolvedBoundaryPath { + const rootPath = path.resolve(params.rootPath); + const absolutePath = path.resolve(params.absolutePath); + const rootCanonicalPath = params.rootCanonicalPath + ? 
path.resolve(params.rootCanonicalPath) + : resolvePathViaExistingAncestorSync(rootPath); + const lexicalInside = isPathInside(rootPath, absolutePath); + const outsideLexicalCanonicalPath = lexicalInside + ? undefined + : resolvePathViaExistingAncestorSync(absolutePath); + const canonicalOutsideLexicalPath = resolveCanonicalOutsideLexicalPath({ + absolutePath, + outsideLexicalCanonicalPath, + }); + assertLexicalBoundaryOrCanonicalAlias({ + skipLexicalRootCheck: params.skipLexicalRootCheck, + lexicalInside, + canonicalOutsideLexicalPath, + rootCanonicalPath, + boundaryLabel: params.boundaryLabel, + rootPath, + absolutePath, + }); + + if (!lexicalInside) { + const canonicalPath = canonicalOutsideLexicalPath; + assertInsideBoundary({ + boundaryLabel: params.boundaryLabel, + rootCanonicalPath, + candidatePath: canonicalPath, + absolutePath, + }); + const kind = getPathKindSync(absolutePath, false); + return buildResolvedBoundaryPath({ + absolutePath, + canonicalPath, + rootPath, + rootCanonicalPath, + kind, + }); + } + + return resolveBoundaryPathLexicalSync({ + params, + absolutePath, + rootPath, + rootCanonicalPath, + }); +} + +async function resolveBoundaryPathLexicalAsync(params: { + params: ResolveBoundaryPathParams; + absolutePath: string; + rootPath: string; + rootCanonicalPath: string; +}): Promise { + const relative = path.relative(params.rootPath, params.absolutePath); + const segments = relative.split(path.sep).filter(Boolean); + const allowFinalSymlink = params.params.policy?.allowFinalSymlinkForUnlink === true; + let canonicalCursor = params.rootCanonicalPath; + let lexicalCursor = params.rootPath; + let preserveFinalSymlink = false; + + for (let idx = 0; idx < segments.length; idx += 1) { + const segment = segments[idx] ?? 
""; + const isLast = idx === segments.length - 1; + lexicalCursor = path.join(lexicalCursor, segment); + + let stat: Awaited>; + try { + stat = await fsp.lstat(lexicalCursor); + } catch (error) { + if (isNotFoundPathError(error)) { + const missingSuffix = segments.slice(idx); + canonicalCursor = path.resolve(canonicalCursor, ...missingSuffix); + assertInsideBoundary({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + candidatePath: canonicalCursor, + absolutePath: params.absolutePath, + }); + break; + } + throw error; + } + + if (!stat.isSymbolicLink()) { + canonicalCursor = path.resolve(canonicalCursor, segment); + assertInsideBoundary({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + candidatePath: canonicalCursor, + absolutePath: params.absolutePath, + }); + continue; + } + + if (allowFinalSymlink && isLast) { + preserveFinalSymlink = true; + canonicalCursor = path.resolve(canonicalCursor, segment); + assertInsideBoundary({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + candidatePath: canonicalCursor, + absolutePath: params.absolutePath, + }); + break; + } + + const linkCanonical = await resolveSymlinkHopPath(lexicalCursor); + if (!isPathInside(params.rootCanonicalPath, linkCanonical)) { + throw symlinkEscapeError({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + symlinkPath: lexicalCursor, + }); + } + canonicalCursor = linkCanonical; + lexicalCursor = linkCanonical; + } + + assertInsideBoundary({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + candidatePath: canonicalCursor, + absolutePath: params.absolutePath, + }); + const kind = await getPathKind(params.absolutePath, preserveFinalSymlink); + return buildResolvedBoundaryPath({ + absolutePath: params.absolutePath, + canonicalPath: canonicalCursor, + rootPath: params.rootPath, + 
rootCanonicalPath: params.rootCanonicalPath, + kind, + }); +} + +function resolveBoundaryPathLexicalSync(params: { + params: ResolveBoundaryPathParams; + absolutePath: string; + rootPath: string; + rootCanonicalPath: string; +}): ResolvedBoundaryPath { + const relative = path.relative(params.rootPath, params.absolutePath); + const segments = relative.split(path.sep).filter(Boolean); + const allowFinalSymlink = params.params.policy?.allowFinalSymlinkForUnlink === true; + let canonicalCursor = params.rootCanonicalPath; + let lexicalCursor = params.rootPath; + let preserveFinalSymlink = false; + + for (let idx = 0; idx < segments.length; idx += 1) { + const segment = segments[idx] ?? ""; + const isLast = idx === segments.length - 1; + lexicalCursor = path.join(lexicalCursor, segment); + + let stat: fs.Stats; + try { + stat = fs.lstatSync(lexicalCursor); + } catch (error) { + if (isNotFoundPathError(error)) { + const missingSuffix = segments.slice(idx); + canonicalCursor = path.resolve(canonicalCursor, ...missingSuffix); + assertInsideBoundary({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + candidatePath: canonicalCursor, + absolutePath: params.absolutePath, + }); + break; + } + throw error; + } + + if (!stat.isSymbolicLink()) { + canonicalCursor = path.resolve(canonicalCursor, segment); + assertInsideBoundary({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + candidatePath: canonicalCursor, + absolutePath: params.absolutePath, + }); + continue; + } + + if (allowFinalSymlink && isLast) { + preserveFinalSymlink = true; + canonicalCursor = path.resolve(canonicalCursor, segment); + assertInsideBoundary({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + candidatePath: canonicalCursor, + absolutePath: params.absolutePath, + }); + break; + } + + const linkCanonical = resolveSymlinkHopPathSync(lexicalCursor); + if 
(!isPathInside(params.rootCanonicalPath, linkCanonical)) { + throw symlinkEscapeError({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + symlinkPath: lexicalCursor, + }); + } + canonicalCursor = linkCanonical; + lexicalCursor = linkCanonical; + } + + assertInsideBoundary({ + boundaryLabel: params.params.boundaryLabel, + rootCanonicalPath: params.rootCanonicalPath, + candidatePath: canonicalCursor, + absolutePath: params.absolutePath, + }); + const kind = getPathKindSync(params.absolutePath, preserveFinalSymlink); + return buildResolvedBoundaryPath({ + absolutePath: params.absolutePath, + canonicalPath: canonicalCursor, + rootPath: params.rootPath, + rootCanonicalPath: params.rootCanonicalPath, + kind, + }); +} + +function resolveCanonicalOutsideLexicalPath(params: { + absolutePath: string; + outsideLexicalCanonicalPath?: string; +}): string { + return params.outsideLexicalCanonicalPath ?? params.absolutePath; +} + +function assertLexicalBoundaryOrCanonicalAlias(params: { + skipLexicalRootCheck?: boolean; + lexicalInside: boolean; + canonicalOutsideLexicalPath: string; + rootCanonicalPath: string; + boundaryLabel: string; + rootPath: string; + absolutePath: string; +}): void { + if (params.skipLexicalRootCheck || params.lexicalInside) { + return; + } + if (isPathInside(params.rootCanonicalPath, params.canonicalOutsideLexicalPath)) { + return; + } + throw pathEscapeError({ + boundaryLabel: params.boundaryLabel, + rootPath: params.rootPath, + absolutePath: params.absolutePath, + }); +} + +function buildResolvedBoundaryPath(params: { + absolutePath: string; + canonicalPath: string; + rootPath: string; + rootCanonicalPath: string; + kind: { exists: boolean; kind: ResolvedBoundaryPathKind }; +}): ResolvedBoundaryPath { + return { + absolutePath: params.absolutePath, + canonicalPath: params.canonicalPath, + rootPath: params.rootPath, + rootCanonicalPath: params.rootCanonicalPath, + relativePath: 
relativeInsideRoot(params.rootCanonicalPath, params.canonicalPath), + exists: params.kind.exists, + kind: params.kind.kind, + }; +} + +export async function resolvePathViaExistingAncestor(targetPath: string): Promise { + const normalized = path.resolve(targetPath); + let cursor = normalized; + const missingSuffix: string[] = []; + + while (!isFilesystemRoot(cursor) && !(await pathExists(cursor))) { + missingSuffix.unshift(path.basename(cursor)); + const parent = path.dirname(cursor); + if (parent === cursor) { + break; + } + cursor = parent; + } + + if (!(await pathExists(cursor))) { + return normalized; + } + + try { + const resolvedAncestor = path.resolve(await fsp.realpath(cursor)); + if (missingSuffix.length === 0) { + return resolvedAncestor; + } + return path.resolve(resolvedAncestor, ...missingSuffix); + } catch { + return normalized; + } +} + +export function resolvePathViaExistingAncestorSync(targetPath: string): string { + const normalized = path.resolve(targetPath); + let cursor = normalized; + const missingSuffix: string[] = []; + + while (!isFilesystemRoot(cursor) && !fs.existsSync(cursor)) { + missingSuffix.unshift(path.basename(cursor)); + const parent = path.dirname(cursor); + if (parent === cursor) { + break; + } + cursor = parent; + } + + if (!fs.existsSync(cursor)) { + return normalized; + } + + try { + // Keep sync behavior aligned with async (`fsp.realpath`) to avoid + // platform-specific canonical alias drift (notably on Windows). + const resolvedAncestor = path.resolve(fs.realpathSync(cursor)); + if (missingSuffix.length === 0) { + return resolvedAncestor; + } + return path.resolve(resolvedAncestor, ...missingSuffix); + } catch { + return normalized; + } +} + +async function getPathKind( + absolutePath: string, + preserveFinalSymlink: boolean, +): Promise<{ exists: boolean; kind: ResolvedBoundaryPathKind }> { + try { + const stat = preserveFinalSymlink + ? 
await fsp.lstat(absolutePath) + : await fsp.stat(absolutePath); + return { exists: true, kind: toResolvedKind(stat) }; + } catch (error) { + if (isNotFoundPathError(error)) { + return { exists: false, kind: "missing" }; + } + throw error; + } +} + +function getPathKindSync( + absolutePath: string, + preserveFinalSymlink: boolean, +): { exists: boolean; kind: ResolvedBoundaryPathKind } { + try { + const stat = preserveFinalSymlink ? fs.lstatSync(absolutePath) : fs.statSync(absolutePath); + return { exists: true, kind: toResolvedKind(stat) }; + } catch (error) { + if (isNotFoundPathError(error)) { + return { exists: false, kind: "missing" }; + } + throw error; + } +} + +function toResolvedKind(stat: fs.Stats): ResolvedBoundaryPathKind { + if (stat.isFile()) { + return "file"; + } + if (stat.isDirectory()) { + return "directory"; + } + if (stat.isSymbolicLink()) { + return "symlink"; + } + return "other"; +} + +function relativeInsideRoot(rootPath: string, targetPath: string): string { + const relative = path.relative(path.resolve(rootPath), path.resolve(targetPath)); + if (!relative || relative === ".") { + return ""; + } + if (relative.startsWith("..") || path.isAbsolute(relative)) { + return ""; + } + return relative; +} + +function assertInsideBoundary(params: { + boundaryLabel: string; + rootCanonicalPath: string; + candidatePath: string; + absolutePath: string; +}): void { + if (isPathInside(params.rootCanonicalPath, params.candidatePath)) { + return; + } + throw new Error( + `Path resolves outside ${params.boundaryLabel} (${shortPath(params.rootCanonicalPath)}): ${shortPath(params.absolutePath)}`, + ); +} + +function pathEscapeError(params: { + boundaryLabel: string; + rootPath: string; + absolutePath: string; +}): Error { + return new Error( + `Path escapes ${params.boundaryLabel} (${shortPath(params.rootPath)}): ${shortPath(params.absolutePath)}`, + ); +} + +function symlinkEscapeError(params: { + boundaryLabel: string; + rootCanonicalPath: string; + 
symlinkPath: string; +}): Error { + return new Error( + `Symlink escapes ${params.boundaryLabel} (${shortPath(params.rootCanonicalPath)}): ${shortPath(params.symlinkPath)}`, + ); +} + +function shortPath(value: string): string { + const home = os.homedir(); + if (value.startsWith(home)) { + return `~${value.slice(home.length)}`; + } + return value; +} + +function isFilesystemRoot(candidate: string): boolean { + return path.parse(candidate).root === candidate; +} + +async function pathExists(targetPath: string): Promise { + try { + await fsp.lstat(targetPath); + return true; + } catch (error) { + if (isNotFoundPathError(error)) { + return false; + } + throw error; + } +} + +async function resolveSymlinkHopPath(symlinkPath: string): Promise { + try { + return path.resolve(await fsp.realpath(symlinkPath)); + } catch (error) { + if (!isNotFoundPathError(error)) { + throw error; + } + const linkTarget = await fsp.readlink(symlinkPath); + const linkAbsolute = path.resolve(path.dirname(symlinkPath), linkTarget); + return resolvePathViaExistingAncestor(linkAbsolute); + } +} + +function resolveSymlinkHopPathSync(symlinkPath: string): string { + try { + return path.resolve(fs.realpathSync(symlinkPath)); + } catch (error) { + if (!isNotFoundPathError(error)) { + throw error; + } + const linkTarget = fs.readlinkSync(symlinkPath); + const linkAbsolute = path.resolve(path.dirname(symlinkPath), linkTarget); + return resolvePathViaExistingAncestorSync(linkAbsolute); + } +} diff --git a/src/infra/device-pairing.ts b/src/infra/device-pairing.ts index 1d18efed1b3..591a9d70888 100644 --- a/src/infra/device-pairing.ts +++ b/src/infra/device-pairing.ts @@ -17,6 +17,7 @@ export type DevicePairingPendingRequest = { publicKey: string; displayName?: string; platform?: string; + deviceFamily?: string; clientId?: string; clientMode?: string; role?: string; @@ -52,6 +53,7 @@ export type PairedDevice = { publicKey: string; displayName?: string; platform?: string; + deviceFamily?: string; 
clientId?: string; clientMode?: string; role?: string; @@ -165,6 +167,7 @@ function mergePendingDevicePairingRequest( ...existing, displayName: incoming.displayName ?? existing.displayName, platform: incoming.platform ?? existing.platform, + deviceFamily: incoming.deviceFamily ?? existing.deviceFamily, clientId: incoming.clientId ?? existing.clientId, clientMode: incoming.clientMode ?? existing.clientMode, role: existingRole ?? incomingRole ?? undefined, @@ -297,6 +300,7 @@ export async function requestDevicePairing( publicKey: req.publicKey, displayName: req.displayName, platform: req.platform, + deviceFamily: req.deviceFamily, clientId: req.clientId, clientMode: req.clientMode, role: req.role, @@ -360,6 +364,7 @@ export async function approveDevicePairing( publicKey: pending.publicKey, displayName: pending.displayName, platform: pending.platform, + deviceFamily: pending.deviceFamily, clientId: pending.clientId, clientMode: pending.clientMode, role: pending.role, diff --git a/src/infra/exec-approval-forwarder.test.ts b/src/infra/exec-approval-forwarder.test.ts index 298efa4789c..8d81cc69661 100644 --- a/src/infra/exec-approval-forwarder.test.ts +++ b/src/infra/exec-approval-forwarder.test.ts @@ -1,3 +1,6 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; import { afterEach, describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; import { createExecApprovalForwarder } from "./exec-approval-forwarder.js"; @@ -40,14 +43,17 @@ function createForwarder(params: { resolveSessionTarget?: () => { channel: string; to: string } | null; }) { const deliver = params.deliver ?? vi.fn().mockResolvedValue([]); - const forwarder = createExecApprovalForwarder({ + const deps: NonNullable[0]> = { getConfig: () => params.cfg, deliver: deliver as unknown as NonNullable< NonNullable[0]>["deliver"] >, nowMs: () => 1000, - resolveSessionTarget: params.resolveSessionTarget ?? 
(() => null), - }); + }; + if (params.resolveSessionTarget !== undefined) { + deps.resolveSessionTarget = params.resolveSessionTarget; + } + const forwarder = createExecApprovalForwarder(deps); return { deliver, forwarder }; } @@ -212,6 +218,58 @@ describe("exec approval forwarder", () => { }); }); + it("prefers turn-source routing over stale session last route", async () => { + vi.useFakeTimers(); + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-exec-approval-forwarder-test-")); + try { + const storePath = path.join(tmpDir, "sessions.json"); + fs.writeFileSync( + storePath, + JSON.stringify({ + "agent:main:main": { + updatedAt: 1, + channel: "slack", + to: "U1", + lastChannel: "slack", + lastTo: "U1", + }, + }), + "utf-8", + ); + + const cfg = { + session: { store: storePath }, + approvals: { exec: { enabled: true, mode: "session" } }, + } as OpenClawConfig; + + const { deliver, forwarder } = createForwarder({ cfg }); + await expect( + forwarder.handleRequested({ + ...baseRequest, + request: { + ...baseRequest.request, + turnSourceChannel: "whatsapp", + turnSourceTo: "+15555550123", + turnSourceAccountId: "work", + turnSourceThreadId: "1739201675.123", + }, + }), + ).resolves.toBe(true); + + expect(deliver).toHaveBeenCalledTimes(1); + expect(deliver).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "whatsapp", + to: "+15555550123", + accountId: "work", + threadId: "1739201675.123", + }), + ); + } finally { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + it("can forward resolved notices without pending cache when request payload is present", async () => { vi.useFakeTimers(); const cfg = { diff --git a/src/infra/exec-approval-forwarder.ts b/src/infra/exec-approval-forwarder.ts index 7af7489baf2..d024f91bc3a 100644 --- a/src/infra/exec-approval-forwarder.ts +++ b/src/infra/exec-approval-forwarder.ts @@ -8,7 +8,11 @@ import type { import { createSubsystemLogger } from "../logging/subsystem.js"; import { 
normalizeAccountId, parseAgentSessionKey } from "../routing/session-key.js"; import { compileSafeRegex } from "../security/safe-regex.js"; -import { isDeliverableMessageChannel, normalizeMessageChannel } from "../utils/message-channel.js"; +import { + isDeliverableMessageChannel, + normalizeMessageChannel, + type DeliverableMessageChannel, +} from "../utils/message-channel.js"; import type { ExecApprovalDecision, ExecApprovalRequest, @@ -171,6 +175,9 @@ function buildRequestMessage(request: ExecApprovalRequest, nowMs: number) { if (request.request.nodeId) { lines.push(`Node: ${request.request.nodeId}`); } + if (Array.isArray(request.request.envKeys) && request.request.envKeys.length > 0) { + lines.push(`Env overrides: ${request.request.envKeys.join(", ")}`); + } if (request.request.host) { lines.push(`Host: ${request.request.host}`); } @@ -209,6 +216,11 @@ function buildExpiredMessage(request: ExecApprovalRequest) { return `⏱️ Exec approval expired. ID: ${request.id}`; } +function normalizeTurnSourceChannel(value?: string | null): DeliverableMessageChannel | undefined { + const normalized = value ? normalizeMessageChannel(value) : undefined; + return normalized && isDeliverableMessageChannel(normalized) ? normalized : undefined; +} + function defaultResolveSessionTarget(params: { cfg: OpenClawConfig; request: ExecApprovalRequest; @@ -225,7 +237,14 @@ function defaultResolveSessionTarget(params: { if (!entry) { return null; } - const target = resolveSessionDeliveryTarget({ entry, requestedChannel: "last" }); + const target = resolveSessionDeliveryTarget({ + entry, + requestedChannel: "last", + turnSourceChannel: normalizeTurnSourceChannel(params.request.request.turnSourceChannel), + turnSourceTo: params.request.request.turnSourceTo?.trim() || undefined, + turnSourceAccountId: params.request.request.turnSourceAccountId?.trim() || undefined, + turnSourceThreadId: params.request.request.turnSourceThreadId ?? 
undefined, + }); if (!target.channel || !target.to) { return null; } diff --git a/src/infra/exec-approvals.ts b/src/infra/exec-approvals.ts index be4264e22ec..b48a65e02ca 100644 --- a/src/infra/exec-approvals.ts +++ b/src/infra/exec-approvals.ts @@ -11,19 +11,48 @@ export type ExecHost = "sandbox" | "gateway" | "node"; export type ExecSecurity = "deny" | "allowlist" | "full"; export type ExecAsk = "off" | "on-miss" | "always"; +export type SystemRunApprovalBindingV1 = { + version: 1; + argv: string[]; + cwd: string | null; + agentId: string | null; + sessionKey: string | null; + envHash: string | null; +}; + +export type SystemRunApprovalPlanV2 = { + version: 2; + argv: string[]; + cwd: string | null; + rawCommand: string | null; + agentId: string | null; + sessionKey: string | null; +}; + +export type ExecApprovalRequestPayload = { + command: string; + commandArgv?: string[]; + // Optional UI-safe env key preview for approval prompts. + envKeys?: string[]; + systemRunBindingV1?: SystemRunApprovalBindingV1 | null; + systemRunPlanV2?: SystemRunApprovalPlanV2 | null; + cwd?: string | null; + nodeId?: string | null; + host?: string | null; + security?: string | null; + ask?: string | null; + agentId?: string | null; + resolvedPath?: string | null; + sessionKey?: string | null; + turnSourceChannel?: string | null; + turnSourceTo?: string | null; + turnSourceAccountId?: string | null; + turnSourceThreadId?: string | number | null; +}; + export type ExecApprovalRequest = { id: string; - request: { - command: string; - cwd?: string | null; - nodeId?: string | null; - host?: string | null; - security?: string | null; - ask?: string | null; - agentId?: string | null; - resolvedPath?: string | null; - sessionKey?: string | null; - }; + request: ExecApprovalRequestPayload; createdAtMs: number; expiresAtMs: number; }; diff --git a/src/infra/fs-safe.test.ts b/src/infra/fs-safe.test.ts index 02059149532..cb2399c616b 100644 --- a/src/infra/fs-safe.test.ts +++ 
b/src/infra/fs-safe.test.ts @@ -2,7 +2,12 @@ import fs from "node:fs/promises"; import path from "node:path"; import { afterEach, describe, expect, it } from "vitest"; import { createTrackedTempDirs } from "../test-utils/tracked-temp-dirs.js"; -import { SafeOpenError, openFileWithinRoot, readLocalFileSafely } from "./fs-safe.js"; +import { + SafeOpenError, + openFileWithinRoot, + readLocalFileSafely, + writeFileWithinRoot, +} from "./fs-safe.js"; const tempDirs = createTrackedTempDirs(); @@ -81,6 +86,83 @@ describe("fs-safe", () => { ).rejects.toMatchObject({ code: "invalid-path" }); }); + it.runIf(process.platform !== "win32")("blocks hardlink aliases under root", async () => { + const root = await tempDirs.make("openclaw-fs-safe-root-"); + const outside = await tempDirs.make("openclaw-fs-safe-outside-"); + const outsideFile = path.join(outside, "outside.txt"); + const hardlinkPath = path.join(root, "link.txt"); + await fs.writeFile(outsideFile, "outside"); + try { + try { + await fs.link(outsideFile, hardlinkPath); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + await expect( + openFileWithinRoot({ + rootDir: root, + relativePath: "link.txt", + }), + ).rejects.toMatchObject({ code: "invalid-path" }); + } finally { + await fs.rm(hardlinkPath, { force: true }); + await fs.rm(outsideFile, { force: true }); + } + }); + + it("writes a file within root safely", async () => { + const root = await tempDirs.make("openclaw-fs-safe-root-"); + await writeFileWithinRoot({ + rootDir: root, + relativePath: "nested/out.txt", + data: "hello", + }); + await expect(fs.readFile(path.join(root, "nested", "out.txt"), "utf8")).resolves.toBe("hello"); + }); + + it("rejects write traversal outside root", async () => { + const root = await tempDirs.make("openclaw-fs-safe-root-"); + await expect( + writeFileWithinRoot({ + rootDir: root, + relativePath: "../escape.txt", + data: "x", + }), + ).rejects.toMatchObject({ code: 
"invalid-path" }); + }); + + it.runIf(process.platform !== "win32")("rejects writing through hardlink aliases", async () => { + const root = await tempDirs.make("openclaw-fs-safe-root-"); + const outside = await tempDirs.make("openclaw-fs-safe-outside-"); + const outsideFile = path.join(outside, "outside.txt"); + const hardlinkPath = path.join(root, "alias.txt"); + await fs.writeFile(outsideFile, "outside"); + try { + try { + await fs.link(outsideFile, hardlinkPath); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + await expect( + writeFileWithinRoot({ + rootDir: root, + relativePath: "alias.txt", + data: "pwned", + }), + ).rejects.toMatchObject({ code: "invalid-path" }); + await expect(fs.readFile(outsideFile, "utf8")).resolves.toBe("outside"); + } finally { + await fs.rm(hardlinkPath, { force: true }); + await fs.rm(outsideFile, { force: true }); + } + }); + it("returns not-found for missing files", async () => { const dir = await tempDirs.make("openclaw-fs-safe-"); const missing = path.join(dir, "missing.txt"); diff --git a/src/infra/fs-safe.ts b/src/infra/fs-safe.ts index b42a109df98..4ac06f937fd 100644 --- a/src/infra/fs-safe.ts +++ b/src/infra/fs-safe.ts @@ -4,6 +4,7 @@ import type { FileHandle } from "node:fs/promises"; import fs from "node:fs/promises"; import path from "node:path"; import { sameFileIdentity } from "./file-identity.js"; +import { assertNoPathAliasEscape } from "./path-alias-guards.js"; import { isNotFoundPathError, isPathInside, isSymlinkOpenError } from "./path-guards.js"; export type SafeOpenErrorCode = @@ -38,10 +39,20 @@ export type SafeLocalReadResult = { const SUPPORTS_NOFOLLOW = process.platform !== "win32" && "O_NOFOLLOW" in fsConstants; const OPEN_READ_FLAGS = fsConstants.O_RDONLY | (SUPPORTS_NOFOLLOW ? fsConstants.O_NOFOLLOW : 0); +const OPEN_WRITE_FLAGS = + fsConstants.O_WRONLY | + fsConstants.O_CREAT | + fsConstants.O_TRUNC | + (SUPPORTS_NOFOLLOW ? 
fsConstants.O_NOFOLLOW : 0); const ensureTrailingSep = (value: string) => (value.endsWith(path.sep) ? value : value + path.sep); -async function openVerifiedLocalFile(filePath: string): Promise { +async function openVerifiedLocalFile( + filePath: string, + options?: { + rejectHardlinks?: boolean; + }, +): Promise { let handle: FileHandle; try { handle = await fs.open(filePath, OPEN_READ_FLAGS); @@ -63,12 +74,18 @@ async function openVerifiedLocalFile(filePath: string): Promise if (!stat.isFile()) { throw new SafeOpenError("not-file", "not a file"); } + if (options?.rejectHardlinks && stat.nlink > 1) { + throw new SafeOpenError("invalid-path", "hardlinked path not allowed"); + } if (!sameFileIdentity(stat, lstat)) { throw new SafeOpenError("path-mismatch", "path changed during read"); } const realPath = await fs.realpath(filePath); const realStat = await fs.stat(realPath); + if (options?.rejectHardlinks && realStat.nlink > 1) { + throw new SafeOpenError("invalid-path", "hardlinked path not allowed"); + } if (!sameFileIdentity(stat, realStat)) { throw new SafeOpenError("path-mismatch", "path mismatch"); } @@ -89,6 +106,7 @@ async function openVerifiedLocalFile(filePath: string): Promise export async function openFileWithinRoot(params: { rootDir: string; relativePath: string; + rejectHardlinks?: boolean; }): Promise { let rootReal: string; try { @@ -120,6 +138,11 @@ export async function openFileWithinRoot(params: { throw err; } + if (params.rejectHardlinks !== false && opened.stat.nlink > 1) { + await opened.handle.close().catch(() => {}); + throw new SafeOpenError("invalid-path", "hardlinked path not allowed"); + } + if (!isPathInside(rootWithSep, opened.realPath)) { await opened.handle.close().catch(() => {}); throw new SafeOpenError("invalid-path", "path escapes root"); @@ -146,3 +169,100 @@ export async function readLocalFileSafely(params: { await opened.handle.close().catch(() => {}); } } + +export async function writeFileWithinRoot(params: { + rootDir: string; 
+ relativePath: string; + data: string | Buffer; + encoding?: BufferEncoding; + mkdir?: boolean; +}): Promise { + let rootReal: string; + try { + rootReal = await fs.realpath(params.rootDir); + } catch (err) { + if (isNotFoundPathError(err)) { + throw new SafeOpenError("not-found", "root dir not found"); + } + throw err; + } + const rootWithSep = ensureTrailingSep(rootReal); + const resolved = path.resolve(rootWithSep, params.relativePath); + if (!isPathInside(rootWithSep, resolved)) { + throw new SafeOpenError("invalid-path", "path escapes root"); + } + try { + await assertNoPathAliasEscape({ + absolutePath: resolved, + rootPath: rootReal, + boundaryLabel: "root", + }); + } catch (err) { + throw new SafeOpenError("invalid-path", "path alias escape blocked", { cause: err }); + } + if (params.mkdir !== false) { + await fs.mkdir(path.dirname(resolved), { recursive: true }); + } + + let ioPath = resolved; + try { + const resolvedRealPath = await fs.realpath(resolved); + if (!isPathInside(rootWithSep, resolvedRealPath)) { + throw new SafeOpenError("invalid-path", "path escapes root"); + } + ioPath = resolvedRealPath; + } catch (err) { + if (err instanceof SafeOpenError) { + throw err; + } + if (!isNotFoundPathError(err)) { + throw err; + } + } + + let handle: FileHandle; + try { + handle = await fs.open(ioPath, OPEN_WRITE_FLAGS, 0o600); + } catch (err) { + if (isNotFoundPathError(err)) { + throw new SafeOpenError("not-found", "file not found"); + } + if (isSymlinkOpenError(err)) { + throw new SafeOpenError("invalid-path", "symlink open blocked", { cause: err }); + } + throw err; + } + + try { + const [stat, lstat] = await Promise.all([handle.stat(), fs.lstat(ioPath)]); + if (lstat.isSymbolicLink() || !stat.isFile()) { + throw new SafeOpenError("invalid-path", "path is not a regular file under root"); + } + if (stat.nlink > 1) { + throw new SafeOpenError("invalid-path", "hardlinked path not allowed"); + } + if (!sameFileIdentity(stat, lstat)) { + throw new 
SafeOpenError("path-mismatch", "path changed during write"); + } + + const realPath = await fs.realpath(ioPath); + const realStat = await fs.stat(realPath); + if (!sameFileIdentity(stat, realStat)) { + throw new SafeOpenError("path-mismatch", "path mismatch"); + } + if (realStat.nlink > 1) { + throw new SafeOpenError("invalid-path", "hardlinked path not allowed"); + } + if (!isPathInside(rootWithSep, realPath)) { + throw new SafeOpenError("invalid-path", "path escapes root"); + } + + if (typeof params.data === "string") { + await handle.writeFile(params.data, params.encoding ?? "utf8"); + } else { + await handle.writeFile(params.data); + } + } finally { + await handle.close().catch(() => {}); + } +} diff --git a/src/infra/hardlink-guards.ts b/src/infra/hardlink-guards.ts new file mode 100644 index 00000000000..ad99729b463 --- /dev/null +++ b/src/infra/hardlink-guards.ts @@ -0,0 +1,38 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import { isNotFoundPathError } from "./path-guards.js"; + +export async function assertNoHardlinkedFinalPath(params: { + filePath: string; + root: string; + boundaryLabel: string; + allowFinalHardlinkForUnlink?: boolean; +}): Promise { + if (params.allowFinalHardlinkForUnlink) { + return; + } + let stat: Awaited>; + try { + stat = await fs.stat(params.filePath); + } catch (err) { + if (isNotFoundPathError(err)) { + return; + } + throw err; + } + if (!stat.isFile()) { + return; + } + if (stat.nlink > 1) { + throw new Error( + `Hardlinked path is not allowed under ${params.boundaryLabel} (${shortPath(params.root)}): ${shortPath(params.filePath)}`, + ); + } +} + +function shortPath(value: string) { + if (value.startsWith(os.homedir())) { + return `~${value.slice(os.homedir().length)}`; + } + return value; +} diff --git a/src/infra/heartbeat-runner.returns-default-unset.test.ts b/src/infra/heartbeat-runner.returns-default-unset.test.ts index 0ec2afcafdd..c4f45b5e039 100644 --- 
a/src/infra/heartbeat-runner.returns-default-unset.test.ts +++ b/src/infra/heartbeat-runner.returns-default-unset.test.ts @@ -325,6 +325,30 @@ describe("resolveHeartbeatDeliveryTarget", () => { lastAccountId: undefined, }, }, + { + name: "allow direct target by default", + cfg: { agents: { defaults: { heartbeat: { target: "last" } } } }, + entry: { ...baseEntry, lastChannel: "telegram", lastTo: "5232990709" }, + expected: { + channel: "telegram", + to: "5232990709", + accountId: undefined, + lastChannel: "telegram", + lastAccountId: undefined, + }, + }, + { + name: "block direct target when directPolicy is block", + cfg: { agents: { defaults: { heartbeat: { target: "last", directPolicy: "block" } } } }, + entry: { ...baseEntry, lastChannel: "telegram", lastTo: "5232990709" }, + expected: { + channel: "none", + reason: "dm-blocked", + accountId: undefined, + lastChannel: "telegram", + lastAccountId: undefined, + }, + }, ]; for (const testCase of cases) { expect( diff --git a/src/infra/heartbeat-runner.ts b/src/infra/heartbeat-runner.ts index 73c2fafb1ae..056142c4056 100644 --- a/src/infra/heartbeat-runner.ts +++ b/src/infra/heartbeat-runner.ts @@ -60,6 +60,7 @@ import { } from "./heartbeat-wake.js"; import type { OutboundSendDeps } from "./outbound/deliver.js"; import { deliverOutboundPayloads } from "./outbound/deliver.js"; +import { buildOutboundSessionContext } from "./outbound/session-context.js"; import { resolveHeartbeatDeliveryTarget, resolveHeartbeatSenderContext, @@ -696,6 +697,11 @@ export async function runHeartbeatOnce(opts: { } const heartbeatOkText = responsePrefix ? 
`${responsePrefix} ${HEARTBEAT_TOKEN}` : HEARTBEAT_TOKEN; + const outboundSession = buildOutboundSessionContext({ + cfg, + agentId, + sessionKey, + }); const canAttemptHeartbeatOk = Boolean( visibility.showOk && delivery.channel !== "none" && delivery.to, ); @@ -721,7 +727,7 @@ export async function runHeartbeatOnce(opts: { accountId: delivery.accountId, threadId: delivery.threadId, payloads: [{ text: heartbeatOkText }], - agentId, + session: outboundSession, deps: opts.deps, }); return true; @@ -914,7 +920,7 @@ export async function runHeartbeatOnce(opts: { channel: delivery.channel, to: delivery.to, accountId: deliveryAccountId, - agentId, + session: outboundSession, threadId: delivery.threadId, payloads: [ ...reasoningPayloads, diff --git a/src/infra/host-env-security-policy.json b/src/infra/host-env-security-policy.json index 8b3ec80d5b4..4335bc43183 100644 --- a/src/infra/host-env-security-policy.json +++ b/src/infra/host-env-security-policy.json @@ -10,6 +10,7 @@ "RUBYOPT", "BASH_ENV", "ENV", + "GIT_EXTERNAL_DIFF", "SHELL", "SHELLOPTS", "PS4", diff --git a/src/infra/host-env-security.policy-parity.test.ts b/src/infra/host-env-security.policy-parity.test.ts index 4ee46265447..49b631d25a4 100644 --- a/src/infra/host-env-security.policy-parity.test.ts +++ b/src/infra/host-env-security.policy-parity.test.ts @@ -19,26 +19,44 @@ function parseSwiftStringArray(source: string, marker: string): string[] { } describe("host env security policy parity", () => { - it("keeps macOS HostEnvSanitizer lists in sync with shared JSON policy", () => { + it("keeps generated macOS host env policy in sync with shared JSON policy", () => { const repoRoot = process.cwd(); const policyPath = path.join(repoRoot, "src/infra/host-env-security-policy.json"); - const swiftPath = path.join(repoRoot, "apps/macos/Sources/OpenClaw/HostEnvSanitizer.swift"); + const generatedSwiftPath = path.join( + repoRoot, + "apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift", + ); + const 
sanitizerSwiftPath = path.join( + repoRoot, + "apps/macos/Sources/OpenClaw/HostEnvSanitizer.swift", + ); const policy = JSON.parse(fs.readFileSync(policyPath, "utf8")) as HostEnvSecurityPolicy; - const swiftSource = fs.readFileSync(swiftPath, "utf8"); + const generatedSource = fs.readFileSync(generatedSwiftPath, "utf8"); + const sanitizerSource = fs.readFileSync(sanitizerSwiftPath, "utf8"); - const swiftBlockedKeys = parseSwiftStringArray(swiftSource, "private static let blockedKeys"); + const swiftBlockedKeys = parseSwiftStringArray(generatedSource, "static let blockedKeys"); const swiftBlockedOverrideKeys = parseSwiftStringArray( - swiftSource, - "private static let blockedOverrideKeys", + generatedSource, + "static let blockedOverrideKeys", ); const swiftBlockedPrefixes = parseSwiftStringArray( - swiftSource, - "private static let blockedPrefixes", + generatedSource, + "static let blockedPrefixes", ); expect(swiftBlockedKeys).toEqual(policy.blockedKeys); expect(swiftBlockedOverrideKeys).toEqual(policy.blockedOverrideKeys ?? 
[]); expect(swiftBlockedPrefixes).toEqual(policy.blockedPrefixes); + + expect(sanitizerSource).toContain( + "private static let blockedKeys = HostEnvSecurityPolicy.blockedKeys", + ); + expect(sanitizerSource).toContain( + "private static let blockedOverrideKeys = HostEnvSecurityPolicy.blockedOverrideKeys", + ); + expect(sanitizerSource).toContain( + "private static let blockedPrefixes = HostEnvSecurityPolicy.blockedPrefixes", + ); }); }); diff --git a/src/infra/host-env-security.test.ts b/src/infra/host-env-security.test.ts index 47ef53a6b9a..e0156077ae2 100644 --- a/src/infra/host-env-security.test.ts +++ b/src/infra/host-env-security.test.ts @@ -16,6 +16,7 @@ describe("isDangerousHostEnvVarName", () => { expect(isDangerousHostEnvVarName("BASH_ENV")).toBe(true); expect(isDangerousHostEnvVarName("bash_env")).toBe(true); expect(isDangerousHostEnvVarName("SHELL")).toBe(true); + expect(isDangerousHostEnvVarName("GIT_EXTERNAL_DIFF")).toBe(true); expect(isDangerousHostEnvVarName("SHELLOPTS")).toBe(true); expect(isDangerousHostEnvVarName("ps4")).toBe(true); expect(isDangerousHostEnvVarName("DYLD_INSERT_LIBRARIES")).toBe(true); @@ -32,6 +33,7 @@ describe("sanitizeHostExecEnv", () => { baseEnv: { PATH: "/usr/bin:/bin", BASH_ENV: "/tmp/pwn.sh", + GIT_EXTERNAL_DIFF: "/tmp/pwn.sh", LD_PRELOAD: "/tmp/pwn.so", OK: "1", }, diff --git a/src/infra/install-source-utils.test.ts b/src/infra/install-source-utils.test.ts index b1bcc8ffacc..64cb804210f 100644 --- a/src/infra/install-source-utils.test.ts +++ b/src/infra/install-source-utils.test.ts @@ -123,6 +123,8 @@ describe("resolveArchiveSourcePath", () => { describe("packNpmSpecToArchive", () => { it("packs spec and returns archive path using JSON output metadata", async () => { const cwd = await createFixtureDir(); + const archivePath = path.join(cwd, "openclaw-plugin-1.2.3.tgz"); + await fs.writeFile(archivePath, "", "utf-8"); mockPackCommandResult({ stdout: JSON.stringify([ { @@ -140,7 +142,7 @@ describe("packNpmSpecToArchive", 
() => { expect(result).toEqual({ ok: true, - archivePath: path.join(cwd, "openclaw-plugin-1.2.3.tgz"), + archivePath, metadata: { name: "openclaw-plugin", version: "1.2.3", @@ -160,6 +162,8 @@ describe("packNpmSpecToArchive", () => { it("falls back to parsing final stdout line when npm json output is unavailable", async () => { const cwd = await createFixtureDir(); + const expectedArchivePath = path.join(cwd, "openclaw-plugin-1.2.3.tgz"); + await fs.writeFile(expectedArchivePath, "", "utf-8"); mockPackCommandResult({ stdout: "npm notice created package\nopenclaw-plugin-1.2.3.tgz\n", }); @@ -168,7 +172,7 @@ describe("packNpmSpecToArchive", () => { expect(result).toEqual({ ok: true, - archivePath: path.join(cwd, "openclaw-plugin-1.2.3.tgz"), + archivePath: expectedArchivePath, metadata: {}, }); }); @@ -190,6 +194,74 @@ describe("packNpmSpecToArchive", () => { } }); + it("falls back to archive detected in cwd when npm pack stdout is empty", async () => { + const cwd = await createTempDir("openclaw-install-source-utils-"); + const archivePath = path.join(cwd, "openclaw-plugin-1.2.3.tgz"); + await fs.writeFile(archivePath, "", "utf-8"); + runCommandWithTimeoutMock.mockResolvedValue({ + stdout: " \n\n", + stderr: "", + code: 0, + signal: null, + killed: false, + }); + + const result = await packNpmSpecToArchive({ + spec: "openclaw-plugin@1.2.3", + timeoutMs: 5000, + cwd, + }); + + expect(result).toEqual({ + ok: true, + archivePath, + metadata: {}, + }); + }); + + it("falls back to archive detected in cwd when stdout does not contain a tgz", async () => { + const cwd = await createTempDir("openclaw-install-source-utils-"); + const archivePath = path.join(cwd, "openclaw-plugin-1.2.3.tgz"); + await fs.writeFile(archivePath, "", "utf-8"); + runCommandWithTimeoutMock.mockResolvedValue({ + stdout: "npm pack completed successfully\n", + stderr: "", + code: 0, + signal: null, + killed: false, + }); + + const result = await packNpmSpecToArchive({ + spec: "openclaw-plugin@1.2.3", 
+ timeoutMs: 5000, + cwd, + }); + + expect(result).toEqual({ + ok: true, + archivePath, + metadata: {}, + }); + }); + + it("returns friendly error for 404 (package not on npm)", async () => { + const cwd = await createFixtureDir(); + mockPackCommandResult({ + stdout: "", + stderr: "npm error code E404\nnpm error 404 '@openclaw/whatsapp@*' is not in this registry.", + code: 1, + }); + + const result = await runPack("@openclaw/whatsapp", cwd); + + expect(result.ok).toBe(false); + if (!result.ok) { + expect(result.error).toContain("Package not found on npm"); + expect(result.error).toContain("@openclaw/whatsapp"); + expect(result.error).toContain("docs.openclaw.ai/tools/plugin"); + } + }); + it("returns explicit error when npm pack produces no archive name", async () => { const cwd = await createFixtureDir(); mockPackCommandResult({ @@ -206,6 +278,7 @@ describe("packNpmSpecToArchive", () => { it("parses scoped metadata from id-only json output even with npm notice prefix", async () => { const cwd = await createFixtureDir(); + await fs.writeFile(path.join(cwd, "openclaw-plugin-demo-2.0.0.tgz"), "", "utf-8"); mockPackCommandResult({ stdout: "npm notice creating package\n" + diff --git a/src/infra/install-source-utils.ts b/src/infra/install-source-utils.ts index d4a2ac025d7..fce33b61979 100644 --- a/src/infra/install-source-utils.ts +++ b/src/infra/install-source-utils.ts @@ -144,6 +144,42 @@ function parseNpmPackJsonOutput( return null; } +function parsePackedArchiveFromStdout(stdout: string): string | undefined { + const lines = stdout + .split(/\r?\n/) + .map((line) => line.trim()) + .filter(Boolean); + + for (let index = lines.length - 1; index >= 0; index -= 1) { + const line = lines[index]; + const match = line?.match(/([^\s"']+\.tgz)/); + if (match?.[1]) { + return match[1]; + } + } + return undefined; +} + +async function findPackedArchiveInDir(cwd: string): Promise { + const entries = await fs.readdir(cwd, { withFileTypes: true }).catch(() => []); + const 
archives = entries.filter((entry) => entry.isFile() && entry.name.endsWith(".tgz")); + if (archives.length === 0) { + return undefined; + } + if (archives.length === 1) { + return archives[0]?.name; + } + + const sortedByMtime = await Promise.all( + archives.map(async (entry) => ({ + name: entry.name, + mtimeMs: (await fs.stat(path.join(cwd, entry.name))).mtimeMs, + })), + ); + sortedByMtime.sort((a, b) => b.mtimeMs - a.mtimeMs); + return sortedByMtime[0]?.name; +} + export async function packNpmSpecToArchive(params: { spec: string; timeoutMs: number; @@ -171,25 +207,38 @@ export async function packNpmSpecToArchive(params: { }, ); if (res.code !== 0) { - return { ok: false, error: `npm pack failed: ${res.stderr.trim() || res.stdout.trim()}` }; + const raw = res.stderr.trim() || res.stdout.trim(); + if (/E404|is not in this registry/i.test(raw)) { + return { + ok: false, + error: `Package not found on npm: ${params.spec}. See https://docs.openclaw.ai/tools/plugin for installable plugins.`, + }; + } + return { ok: false, error: `npm pack failed: ${raw}` }; } const parsedJson = parseNpmPackJsonOutput(res.stdout || ""); - const packed = - parsedJson?.filename ?? - (res.stdout || "") - .split("\n") - .map((line) => line.trim()) - .filter(Boolean) - .pop(); + let packed = parsedJson?.filename ?? parsePackedArchiveFromStdout(res.stdout || ""); + if (!packed) { + packed = await findPackedArchiveInDir(params.cwd); + } if (!packed) { return { ok: false, error: "npm pack produced no archive" }; } + let archivePath = path.isAbsolute(packed) ? packed : path.join(params.cwd, packed); + if (!(await fileExists(archivePath))) { + const fallbackPacked = await findPackedArchiveInDir(params.cwd); + if (!fallbackPacked) { + return { ok: false, error: "npm pack produced no archive" }; + } + archivePath = path.join(params.cwd, fallbackPacked); + } + return { ok: true, - archivePath: path.join(params.cwd, packed), + archivePath, metadata: parsedJson?.metadata ?? 
{}, }; } diff --git a/src/infra/net/fetch-guard.ssrf.test.ts b/src/infra/net/fetch-guard.ssrf.test.ts index a03afba325f..223695c1a53 100644 --- a/src/infra/net/fetch-guard.ssrf.test.ts +++ b/src/infra/net/fetch-guard.ssrf.test.ts @@ -18,6 +18,7 @@ describe("fetchWithSsrFGuard hardening", () => { it("blocks private and legacy loopback literals before fetch", async () => { const blockedUrls = [ "http://127.0.0.1:8080/internal", + "http://[ff02::1]/internal", "http://0177.0.0.1:8080/internal", "http://0x7f000001/internal", ]; diff --git a/src/infra/net/fetch-guard.ts b/src/infra/net/fetch-guard.ts index c3e2b7864b1..77260f474f5 100644 --- a/src/infra/net/fetch-guard.ts +++ b/src/infra/net/fetch-guard.ts @@ -1,4 +1,4 @@ -import type { Dispatcher } from "undici"; +import { EnvHttpProxyAgent, type Dispatcher } from "undici"; import { logWarn } from "../../logger.js"; import { bindAbortRelay } from "../../utils/fetch-timeout.js"; import { @@ -22,6 +22,7 @@ export type GuardedFetchOptions = { policy?: SsrFPolicy; lookupFn?: LookupFn; pinDns?: boolean; + proxy?: "env"; auditContext?: string; }; @@ -32,6 +33,14 @@ export type GuardedFetchResult = { }; const DEFAULT_MAX_REDIRECTS = 3; +const ENV_PROXY_KEYS = [ + "HTTP_PROXY", + "HTTPS_PROXY", + "ALL_PROXY", + "http_proxy", + "https_proxy", + "all_proxy", +] as const; const CROSS_ORIGIN_REDIRECT_SENSITIVE_HEADERS = [ "authorization", "proxy-authorization", @@ -39,6 +48,16 @@ const CROSS_ORIGIN_REDIRECT_SENSITIVE_HEADERS = [ "cookie2", ]; +function hasEnvProxyConfigured(): boolean { + for (const key of ENV_PROXY_KEYS) { + const value = process.env[key]; + if (typeof value === "string" && value.trim()) { + return true; + } + } + return false; +} + function isRedirectStatus(status: number): boolean { return status === 301 || status === 302 || status === 303 || status === 307 || status === 308; } @@ -138,7 +157,9 @@ export async function fetchWithSsrFGuard(params: GuardedFetchOptions): Promise ({ import { createPinnedDispatcher, 
type PinnedHostname } from "./ssrf.js"; describe("createPinnedDispatcher", () => { - it("enables network family auto-selection for pinned lookups", () => { + it("uses pinned lookup without overriding global family policy", () => { const lookup = vi.fn() as unknown as PinnedHostname["lookup"]; const pinned: PinnedHostname = { hostname: "api.telegram.org", @@ -27,9 +27,11 @@ describe("createPinnedDispatcher", () => { expect(agentCtor).toHaveBeenCalledWith({ connect: { lookup, - autoSelectFamily: true, - autoSelectFamilyAttemptTimeout: 300, }, }); + const firstCallArg = agentCtor.mock.calls[0]?.[0] as + | { connect?: Record } + | undefined; + expect(firstCallArg?.connect?.autoSelectFamily).toBeUndefined(); }); }); diff --git a/src/infra/net/ssrf.test.ts b/src/infra/net/ssrf.test.ts index 5826669196d..2698bf3db9e 100644 --- a/src/infra/net/ssrf.test.ts +++ b/src/infra/net/ssrf.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it } from "vitest"; +import { blockedIpv6MulticastLiterals } from "../../shared/net/ip-test-fixtures.js"; import { normalizeFingerprint } from "../tls/fingerprint.js"; import { isBlockedHostnameOrIp, isPrivateIpAddress } from "./ssrf.js"; @@ -38,6 +39,7 @@ const privateIpCases = [ "fe80::1%lo0", "fd00::1", "fec0::1", + ...blockedIpv6MulticastLiterals, "2001:db8:1234::5efe:127.0.0.1", "2001:db8:1234:1:200:5efe:7f00:1", ]; diff --git a/src/infra/net/ssrf.ts b/src/infra/net/ssrf.ts index b84469390c0..7798e5990a4 100644 --- a/src/infra/net/ssrf.ts +++ b/src/infra/net/ssrf.ts @@ -4,11 +4,11 @@ import { Agent, type Dispatcher } from "undici"; import { extractEmbeddedIpv4FromIpv6, isBlockedSpecialUseIpv4Address, + isBlockedSpecialUseIpv6Address, isCanonicalDottedDecimalIPv4, type Ipv4SpecialUseBlockOptions, isIpv4Address, isLegacyIpv4Literal, - isPrivateOrLoopbackIpAddress, parseCanonicalIpAddress, parseLooseIpAddress, } from "../../shared/net/ip.js"; @@ -120,7 +120,7 @@ export function isPrivateIpAddress(address: string, policy?: SsrFPolicy): boolea if 
(isIpv4Address(strictIp)) { return isBlockedSpecialUseIpv4Address(strictIp, blockOptions); } - if (isPrivateOrLoopbackIpAddress(strictIp.toString())) { + if (isBlockedSpecialUseIpv6Address(strictIp)) { return true; } const embeddedIpv4 = extractEmbeddedIpv4FromIpv6(strictIp); @@ -333,8 +333,6 @@ export function createPinnedDispatcher(pinned: PinnedHostname): Dispatcher { return new Agent({ connect: { lookup: pinned.lookup, - autoSelectFamily: true, - autoSelectFamilyAttemptTimeout: 300, }, }); } diff --git a/src/infra/node-commands.ts b/src/infra/node-commands.ts new file mode 100644 index 00000000000..3aa35051d2d --- /dev/null +++ b/src/infra/node-commands.ts @@ -0,0 +1,13 @@ +export const NODE_SYSTEM_RUN_COMMANDS = [ + "system.run.prepare", + "system.run", + "system.which", +] as const; + +export const NODE_SYSTEM_NOTIFY_COMMAND = "system.notify"; +export const NODE_BROWSER_PROXY_COMMAND = "browser.proxy"; + +export const NODE_EXEC_APPROVALS_COMMANDS = [ + "system.execApprovals.get", + "system.execApprovals.set", +] as const; diff --git a/src/infra/outbound/channel-resolution.ts b/src/infra/outbound/channel-resolution.ts new file mode 100644 index 00000000000..8d17294d024 --- /dev/null +++ b/src/infra/outbound/channel-resolution.ts @@ -0,0 +1,78 @@ +import { resolveAgentWorkspaceDir, resolveDefaultAgentId } from "../../agents/agent-scope.js"; +import { getChannelPlugin } from "../../channels/plugins/index.js"; +import type { ChannelPlugin } from "../../channels/plugins/types.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import { applyPluginAutoEnable } from "../../config/plugin-auto-enable.js"; +import { loadOpenClawPlugins } from "../../plugins/loader.js"; +import { getActivePluginRegistry, getActivePluginRegistryKey } from "../../plugins/runtime.js"; +import { + isDeliverableMessageChannel, + normalizeMessageChannel, + type DeliverableMessageChannel, +} from "../../utils/message-channel.js"; + +const bootstrapAttempts = new Set(); + 
+export function normalizeDeliverableOutboundChannel( + raw?: string | null, +): DeliverableMessageChannel | undefined { + const normalized = normalizeMessageChannel(raw); + if (!normalized || !isDeliverableMessageChannel(normalized)) { + return undefined; + } + return normalized; +} + +function maybeBootstrapChannelPlugin(params: { + channel: DeliverableMessageChannel; + cfg?: OpenClawConfig; +}): void { + const cfg = params.cfg; + if (!cfg) { + return; + } + + const activeRegistry = getActivePluginRegistry(); + if ((activeRegistry?.channels?.length ?? 0) > 0) { + return; + } + + const registryKey = getActivePluginRegistryKey() ?? ""; + const attemptKey = `${registryKey}:${params.channel}`; + if (bootstrapAttempts.has(attemptKey)) { + return; + } + bootstrapAttempts.add(attemptKey); + + const autoEnabled = applyPluginAutoEnable({ config: cfg }).config; + const defaultAgentId = resolveDefaultAgentId(autoEnabled); + const workspaceDir = resolveAgentWorkspaceDir(autoEnabled, defaultAgentId); + try { + loadOpenClawPlugins({ + config: autoEnabled, + workspaceDir, + }); + } catch { + // Allow a follow-up resolution attempt if bootstrap failed transiently. 
+ bootstrapAttempts.delete(attemptKey); + } +} + +export function resolveOutboundChannelPlugin(params: { + channel: string; + cfg?: OpenClawConfig; +}): ChannelPlugin | undefined { + const normalized = normalizeDeliverableOutboundChannel(params.channel); + if (!normalized) { + return undefined; + } + + const resolve = () => getChannelPlugin(normalized); + const current = resolve(); + if (current) { + return current; + } + + maybeBootstrapChannelPlugin({ channel: normalized, cfg: params.cfg }); + return resolve(); +} diff --git a/src/infra/outbound/conversation-id.test.ts b/src/infra/outbound/conversation-id.test.ts new file mode 100644 index 00000000000..b35c8e2e4a1 --- /dev/null +++ b/src/infra/outbound/conversation-id.test.ts @@ -0,0 +1,40 @@ +import { describe, expect, it } from "vitest"; +import { resolveConversationIdFromTargets } from "./conversation-id.js"; + +describe("resolveConversationIdFromTargets", () => { + it("prefers explicit thread id when present", () => { + const resolved = resolveConversationIdFromTargets({ + threadId: "123456789", + targets: ["channel:987654321"], + }); + expect(resolved).toBe("123456789"); + }); + + it("extracts channel ids from channel: targets", () => { + const resolved = resolveConversationIdFromTargets({ + targets: ["channel:987654321"], + }); + expect(resolved).toBe("987654321"); + }); + + it("extracts ids from Discord channel mentions", () => { + const resolved = resolveConversationIdFromTargets({ + targets: ["<#1475250310120214812>"], + }); + expect(resolved).toBe("1475250310120214812"); + }); + + it("accepts raw numeric ids", () => { + const resolved = resolveConversationIdFromTargets({ + targets: ["1475250310120214812"], + }); + expect(resolved).toBe("1475250310120214812"); + }); + + it("returns undefined for non-channel targets", () => { + const resolved = resolveConversationIdFromTargets({ + targets: ["user:alice", "general"], + }); + expect(resolved).toBeUndefined(); + }); +}); diff --git 
a/src/infra/outbound/conversation-id.ts b/src/infra/outbound/conversation-id.ts new file mode 100644 index 00000000000..a6f8ed1fd6b --- /dev/null +++ b/src/infra/outbound/conversation-id.ts @@ -0,0 +1,41 @@ +function normalizeConversationId(value: unknown): string | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed || undefined; +} + +export function resolveConversationIdFromTargets(params: { + threadId?: string | number; + targets: Array; +}): string | undefined { + const threadId = + params.threadId != null ? normalizeConversationId(String(params.threadId)) : undefined; + if (threadId) { + return threadId; + } + + for (const rawTarget of params.targets) { + const target = normalizeConversationId(rawTarget); + if (!target) { + continue; + } + if (target.startsWith("channel:")) { + const channelId = normalizeConversationId(target.slice("channel:".length)); + if (channelId) { + return channelId; + } + continue; + } + const mentionMatch = target.match(/^<#(\d+)>$/); + if (mentionMatch?.[1]) { + return mentionMatch[1]; + } + if (/^\d{6,}$/.test(target)) { + return target; + } + } + + return undefined; +} diff --git a/src/infra/outbound/deliver.test.ts b/src/infra/outbound/deliver.test.ts index 94b5bee9891..b9c59f0e391 100644 --- a/src/infra/outbound/deliver.test.ts +++ b/src/infra/outbound/deliver.test.ts @@ -31,6 +31,9 @@ const queueMocks = vi.hoisted(() => ({ ackDelivery: vi.fn(async () => {}), failDelivery: vi.fn(async () => {}), })); +const logMocks = vi.hoisted(() => ({ + warn: vi.fn(), +})); vi.mock("../../config/sessions.js", async () => { const actual = await vi.importActual( @@ -53,6 +56,18 @@ vi.mock("./delivery-queue.js", () => ({ ackDelivery: queueMocks.ackDelivery, failDelivery: queueMocks.failDelivery, })); +vi.mock("../../logging/subsystem.js", () => ({ + createSubsystemLogger: () => { + const makeLogger = () => ({ + warn: logMocks.warn, + info: vi.fn(), + error: vi.fn(), + debug: 
vi.fn(), + child: vi.fn(() => makeLogger()), + }); + return makeLogger(); + }, +})); const { deliverOutboundPayloads, normalizeOutboundPayloads } = await import("./deliver.js"); @@ -117,6 +132,7 @@ describe("deliverOutboundPayloads", () => { queueMocks.ackDelivery.mockResolvedValue(undefined); queueMocks.failDelivery.mockClear(); queueMocks.failDelivery.mockResolvedValue(undefined); + logMocks.warn.mockClear(); }); afterEach(() => { @@ -188,7 +204,7 @@ describe("deliverOutboundPayloads", () => { cfg: telegramChunkConfig, channel: "telegram", to: "123", - agentId: "work", + session: { agentId: "work" }, payloads: [{ text: "hi", mediaUrl: "file:///tmp/f.png" }], deps: { sendTelegram }, }); @@ -583,7 +599,7 @@ describe("deliverOutboundPayloads", () => { to: "+1555", payloads: [{ text: "hello" }], deps: { sendWhatsApp }, - sessionKey: "agent:main:main", + session: { key: "agent:main:main" }, }); expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledTimes(1); @@ -603,6 +619,25 @@ describe("deliverOutboundPayloads", () => { expect(internalHookMocks.triggerInternalHook).toHaveBeenCalledTimes(1); }); + it("warns when session.agentId is set without a session key", async () => { + const sendWhatsApp = vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); + hookMocks.runner.hasHooks.mockReturnValue(true); + + await deliverOutboundPayloads({ + cfg: whatsappChunkConfig, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "hello" }], + deps: { sendWhatsApp }, + session: { agentId: "agent-main" }, + }); + + expect(logMocks.warn).toHaveBeenCalledWith( + "deliverOutboundPayloads: session.agentId present without session key; internal message:sent hook will be skipped", + expect.objectContaining({ channel: "whatsapp", to: "+1555", agentId: "agent-main" }), + ); + }); + it("calls failDelivery instead of ackDelivery on bestEffort partial failure", async () => { const sendWhatsApp = vi .fn() diff --git a/src/infra/outbound/deliver.ts 
b/src/infra/outbound/deliver.ts index f071a25d048..76ea0e78736 100644 --- a/src/infra/outbound/deliver.ts +++ b/src/infra/outbound/deliver.ts @@ -20,6 +20,7 @@ import { import type { sendMessageDiscord } from "../../discord/send.js"; import { createInternalHookEvent, triggerInternalHook } from "../../hooks/internal-hooks.js"; import type { sendMessageIMessage } from "../../imessage/send.js"; +import { createSubsystemLogger } from "../../logging/subsystem.js"; import { getAgentScopedMediaLocalRoots } from "../../media/local-roots.js"; import { getGlobalHookRunner } from "../../plugins/hook-runner-global.js"; import { markdownToSignalTextChunks, type SignalTextStyleRange } from "../../signal/format.js"; @@ -32,11 +33,14 @@ import { ackDelivery, enqueueDelivery, failDelivery } from "./delivery-queue.js" import type { OutboundIdentity } from "./identity.js"; import type { NormalizedOutboundPayload } from "./payloads.js"; import { normalizeReplyPayloadsForDelivery } from "./payloads.js"; +import type { OutboundSessionContext } from "./session-context.js"; import type { OutboundChannel } from "./targets.js"; export type { NormalizedOutboundPayload } from "./payloads.js"; export { normalizeOutboundPayloads } from "./payloads.js"; +const log = createSubsystemLogger("outbound/deliver"); + type SendMatrixMessage = ( to: string, text: string, @@ -207,8 +211,8 @@ type DeliverOutboundPayloadsCoreParams = { bestEffort?: boolean; onError?: (err: unknown, payload: NormalizedOutboundPayload) => void; onPayload?: (payload: NormalizedOutboundPayload) => void; - /** Active agent id for media local-root scoping. */ - agentId?: string; + /** Session/agent context used for hooks and media local-root scoping. */ + session?: OutboundSessionContext; mirror?: { sessionKey: string; agentId?: string; @@ -216,8 +220,6 @@ type DeliverOutboundPayloadsCoreParams = { mediaUrls?: string[]; }; silent?: boolean; - /** Session key for internal hook dispatch (when `mirror` is not needed). 
*/ - sessionKey?: string; }; type DeliverOutboundPayloadsParams = DeliverOutboundPayloadsCoreParams & { @@ -296,7 +298,7 @@ async function deliverOutboundPayloadsCore( const sendSignal = params.deps?.sendSignal ?? sendMessageSignal; const mediaLocalRoots = getAgentScopedMediaLocalRoots( cfg, - params.agentId ?? params.mirror?.agentId, + params.session?.agentId ?? params.mirror?.agentId, ); const results: OutboundDeliveryResult[] = []; const handler = await createChannelHandler({ @@ -446,7 +448,21 @@ async function deliverOutboundPayloadsCore( return normalized ? [normalized] : []; }); const hookRunner = getGlobalHookRunner(); - const sessionKeyForInternalHooks = params.mirror?.sessionKey ?? params.sessionKey; + const sessionKeyForInternalHooks = params.mirror?.sessionKey ?? params.session?.key; + if ( + hookRunner?.hasHooks("message_sent") && + params.session?.agentId && + !sessionKeyForInternalHooks + ) { + log.warn( + "deliverOutboundPayloads: session.agentId present without session key; internal message:sent hook will be skipped", + { + channel, + to, + agentId: params.session.agentId, + }, + ); + } for (const payload of normalizedPayloads) { const payloadSummary: NormalizedOutboundPayload = { text: payload.text ?? "", diff --git a/src/infra/outbound/delivery-queue.ts b/src/infra/outbound/delivery-queue.ts index 699ba6f7403..1e954ea8e39 100644 --- a/src/infra/outbound/delivery-queue.ts +++ b/src/infra/outbound/delivery-queue.ts @@ -47,9 +47,17 @@ export interface QueuedDelivery extends QueuedDeliveryPayload { id: string; enqueuedAt: number; retryCount: number; + lastAttemptAt?: number; lastError?: string; } +export type RecoverySummary = { + recovered: number; + failed: number; + skippedMaxRetries: number; + deferredBackoff: number; +}; + function resolveQueueDir(stateDir?: string): string { const base = stateDir ?? 
resolveStateDir(); return path.join(base, QUEUE_DIRNAME); @@ -122,6 +130,7 @@ export async function failDelivery(id: string, error: string, stateDir?: string) const raw = await fs.promises.readFile(filePath, "utf-8"); const entry: QueuedDelivery = JSON.parse(raw); entry.retryCount += 1; + entry.lastAttemptAt = Date.now(); entry.lastError = error; const tmp = `${filePath}.${process.pid}.tmp`; await fs.promises.writeFile(tmp, JSON.stringify(entry, null, 2), { @@ -159,7 +168,17 @@ export async function loadPendingDeliveries(stateDir?: string): Promise 0; + const baseAttemptAt = hasAttemptTimestamp + ? (entry.lastAttemptAt ?? entry.enqueuedAt) + : entry.enqueuedAt; + const nextEligibleAt = baseAttemptAt + backoff; + if (now >= nextEligibleAt) { + return { eligible: true }; + } + return { eligible: false, remainingBackoffMs: nextEligibleAt - now }; +} + +function normalizeLegacyQueuedDeliveryEntry(entry: QueuedDelivery): { + entry: QueuedDelivery; + migrated: boolean; +} { + const hasAttemptTimestamp = + typeof entry.lastAttemptAt === "number" && + Number.isFinite(entry.lastAttemptAt) && + entry.lastAttemptAt > 0; + if (hasAttemptTimestamp || entry.retryCount <= 0) { + return { entry, migrated: false }; + } + const hasEnqueuedTimestamp = + typeof entry.enqueuedAt === "number" && + Number.isFinite(entry.enqueuedAt) && + entry.enqueuedAt > 0; + if (!hasEnqueuedTimestamp) { + return { entry, migrated: false }; + } + return { + entry: { + ...entry, + lastAttemptAt: entry.enqueuedAt, + }, + migrated: true, + }; +} + export type DeliverFn = ( params: { cfg: OpenClawConfig; @@ -208,14 +280,12 @@ export async function recoverPendingDeliveries(opts: { log: RecoveryLogger; cfg: OpenClawConfig; stateDir?: string; - /** Override for testing — resolves instead of using real setTimeout. */ - delay?: (ms: number) => Promise; /** Maximum wall-clock time for recovery in ms. Remaining entries are deferred to next restart. Default: 60 000. 
*/ maxRecoveryMs?: number; -}): Promise<{ recovered: number; failed: number; skipped: number }> { +}): Promise { const pending = await loadPendingDeliveries(opts.stateDir); if (pending.length === 0) { - return { recovered: 0, failed: 0, skipped: 0 }; + return { recovered: 0, failed: 0, skippedMaxRetries: 0, deferredBackoff: 0 }; } // Process oldest first. @@ -223,17 +293,17 @@ export async function recoverPendingDeliveries(opts: { opts.log.info(`Found ${pending.length} pending delivery entries — starting recovery`); - const delayFn = opts.delay ?? ((ms: number) => new Promise((r) => setTimeout(r, ms))); const deadline = Date.now() + (opts.maxRecoveryMs ?? 60_000); let recovered = 0; let failed = 0; - let skipped = 0; + let skippedMaxRetries = 0; + let deferredBackoff = 0; for (const entry of pending) { const now = Date.now(); if (now >= deadline) { - const deferred = pending.length - recovered - failed - skipped; + const deferred = pending.length - recovered - failed - skippedMaxRetries - deferredBackoff; opts.log.warn(`Recovery time budget exceeded — ${deferred} entries deferred to next restart`); break; } @@ -246,21 +316,17 @@ export async function recoverPendingDeliveries(opts: { } catch (err) { opts.log.error(`Failed to move entry ${entry.id} to failed/: ${String(err)}`); } - skipped += 1; + skippedMaxRetries += 1; continue; } - const backoff = computeBackoffMs(entry.retryCount + 1); - if (backoff > 0) { - if (now + backoff >= deadline) { - const deferred = pending.length - recovered - failed - skipped; - opts.log.warn( - `Recovery time budget exceeded — ${deferred} entries deferred to next restart`, - ); - break; - } - opts.log.info(`Waiting ${backoff}ms before retrying delivery ${entry.id}`); - await delayFn(backoff); + const retryEligibility = isEntryEligibleForRecoveryRetry(entry, now); + if (!retryEligibility.eligible) { + deferredBackoff += 1; + opts.log.info( + `Delivery ${entry.id} not ready for retry yet — backoff 
${retryEligibility.remainingBackoffMs}ms remaining`, + ); + continue; } try { @@ -304,9 +370,9 @@ export async function recoverPendingDeliveries(opts: { } opts.log.info( - `Delivery recovery complete: ${recovered} recovered, ${failed} failed, ${skipped} skipped (max retries)`, + `Delivery recovery complete: ${recovered} recovered, ${failed} failed, ${skippedMaxRetries} skipped (max retries), ${deferredBackoff} deferred (backoff)`, ); - return { recovered, failed, skipped }; + return { recovered, failed, skippedMaxRetries, deferredBackoff }; } export { MAX_RETRIES }; diff --git a/src/infra/outbound/message-action-runner.test.ts b/src/infra/outbound/message-action-runner.test.ts index 6fdec33ab49..cf3ddabcead 100644 --- a/src/infra/outbound/message-action-runner.test.ts +++ b/src/infra/outbound/message-action-runner.test.ts @@ -1021,4 +1021,32 @@ describe("runMessageAction accountId defaults", () => { expect(ctx.accountId).toBe("ops"); expect(ctx.params.accountId).toBe("ops"); }); + + it("falls back to the agent's bound account when accountId is omitted", async () => { + await runMessageAction({ + cfg: { + bindings: [{ agentId: "agent-b", match: { channel: "discord", accountId: "account-b" } }], + } as OpenClawConfig, + action: "send", + params: { + channel: "discord", + target: "channel:123", + message: "hi", + }, + agentId: "agent-b", + }); + + expect(handleAction).toHaveBeenCalled(); + const ctx = (handleAction.mock.calls as unknown as Array<[unknown]>)[0]?.[0] as + | { + accountId?: string | null; + params: Record; + } + | undefined; + if (!ctx) { + throw new Error("expected action context"); + } + expect(ctx.accountId).toBe("account-b"); + expect(ctx.params.accountId).toBe("account-b"); + }); }); diff --git a/src/infra/outbound/message-action-runner.ts b/src/infra/outbound/message-action-runner.ts index 57032e27de8..2693d110306 100644 --- a/src/infra/outbound/message-action-runner.ts +++ b/src/infra/outbound/message-action-runner.ts @@ -14,6 +14,8 @@ import type 
{ } from "../../channels/plugins/types.js"; import type { OpenClawConfig } from "../../config/config.js"; import { getAgentScopedMediaLocalRoots } from "../../media/local-roots.js"; +import { buildChannelAccountBindings } from "../../routing/bindings.js"; +import { normalizeAgentId } from "../../routing/session-key.js"; import { isDeliverableMessageChannel, normalizeMessageChannel, @@ -753,7 +755,14 @@ export async function runMessageAction( } const channel = await resolveChannel(cfg, params); - const accountId = readStringParam(params, "accountId") ?? input.defaultAccountId; + let accountId = readStringParam(params, "accountId") ?? input.defaultAccountId; + if (!accountId && resolvedAgentId) { + const byAgent = buildChannelAccountBindings(cfg).get(channel); + const boundAccountIds = byAgent?.get(normalizeAgentId(resolvedAgentId)); + if (boundAccountIds && boundAccountIds.length > 0) { + accountId = boundAccountIds[0]; + } + } if (accountId) { params.accountId = accountId; } diff --git a/src/infra/outbound/message.channels.test.ts b/src/infra/outbound/message.channels.test.ts index 39e83c8ad70..12b9b120f66 100644 --- a/src/infra/outbound/message.channels.test.ts +++ b/src/infra/outbound/message.channels.test.ts @@ -194,6 +194,35 @@ describe("gateway url override hardening", () => { }), ); }); + + it("forwards explicit agentId in gateway send params", async () => { + setRegistry( + createTestRegistry([ + { + pluginId: "mattermost", + source: "test", + plugin: { + ...createMattermostLikePlugin({ onSendText: () => {} }), + outbound: { deliveryMode: "gateway" }, + }, + }, + ]), + ); + + callGatewayMock.mockResolvedValueOnce({ messageId: "m-agent" }); + await sendMessage({ + cfg: {}, + to: "channel:town-square", + content: "hi", + channel: "mattermost", + agentId: "work", + }); + + const call = callGatewayMock.mock.calls[0]?.[0] as { + params?: Record; + }; + expect(call.params?.agentId).toBe("work"); + }); }); const emptyRegistry = createTestRegistry([]); diff --git 
a/src/infra/outbound/message.test.ts b/src/infra/outbound/message.test.ts index 3714e7ab5ac..36780b99505 100644 --- a/src/infra/outbound/message.test.ts +++ b/src/infra/outbound/message.test.ts @@ -4,6 +4,7 @@ const mocks = vi.hoisted(() => ({ getChannelPlugin: vi.fn(), resolveOutboundTarget: vi.fn(), deliverOutboundPayloads: vi.fn(), + loadOpenClawPlugins: vi.fn(), })); vi.mock("../../channels/plugins/index.js", () => ({ @@ -11,6 +12,19 @@ vi.mock("../../channels/plugins/index.js", () => ({ getChannelPlugin: mocks.getChannelPlugin, })); +vi.mock("../../agents/agent-scope.js", () => ({ + resolveDefaultAgentId: () => "main", + resolveAgentWorkspaceDir: () => "/tmp/openclaw-test-workspace", +})); + +vi.mock("../../config/plugin-auto-enable.js", () => ({ + applyPluginAutoEnable: ({ config }: { config: unknown }) => ({ config, changes: [] }), +})); + +vi.mock("../../plugins/loader.js", () => ({ + loadOpenClawPlugins: mocks.loadOpenClawPlugins, +})); + vi.mock("./targets.js", () => ({ resolveOutboundTarget: mocks.resolveOutboundTarget, })); @@ -19,13 +33,17 @@ vi.mock("./deliver.js", () => ({ deliverOutboundPayloads: mocks.deliverOutboundPayloads, })); +import { setActivePluginRegistry } from "../../plugins/runtime.js"; +import { createTestRegistry } from "../../test-utils/channel-plugins.js"; import { sendMessage } from "./message.js"; describe("sendMessage", () => { beforeEach(() => { + setActivePluginRegistry(createTestRegistry([])); mocks.getChannelPlugin.mockClear(); mocks.resolveOutboundTarget.mockClear(); mocks.deliverOutboundPayloads.mockClear(); + mocks.loadOpenClawPlugins.mockClear(); mocks.getChannelPlugin.mockReturnValue({ outbound: { deliveryMode: "direct" }, @@ -37,18 +55,43 @@ describe("sendMessage", () => { it("passes explicit agentId to outbound delivery for scoped media roots", async () => { await sendMessage({ cfg: {}, - channel: "mattermost", - to: "channel:town-square", + channel: "telegram", + to: "123456", content: "hi", agentId: "work", }); 
expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( expect.objectContaining({ - agentId: "work", - channel: "mattermost", - to: "channel:town-square", + session: expect.objectContaining({ agentId: "work" }), + channel: "telegram", + to: "123456", }), ); }); + + it("recovers telegram plugin resolution so message/send does not fail with Unknown channel: telegram", async () => { + const telegramPlugin = { + outbound: { deliveryMode: "direct" }, + }; + mocks.getChannelPlugin + .mockReturnValueOnce(undefined) + .mockReturnValueOnce(telegramPlugin) + .mockReturnValue(telegramPlugin); + + await expect( + sendMessage({ + cfg: { channels: { telegram: { botToken: "test-token" } } }, + channel: "telegram", + to: "123456", + content: "hi", + }), + ).resolves.toMatchObject({ + channel: "telegram", + to: "123456", + via: "direct", + }); + + expect(mocks.loadOpenClawPlugins).toHaveBeenCalledTimes(1); + }); }); diff --git a/src/infra/outbound/message.ts b/src/infra/outbound/message.ts index 71b36eca6b1..9bee14f45d0 100644 --- a/src/infra/outbound/message.ts +++ b/src/infra/outbound/message.ts @@ -1,4 +1,3 @@ -import { getChannelPlugin, normalizeChannelId } from "../../channels/plugins/index.js"; import type { OpenClawConfig } from "../../config/config.js"; import { loadConfig } from "../../config/config.js"; import { callGatewayLeastPrivilege, randomIdempotencyKey } from "../../gateway/call.js"; @@ -10,6 +9,10 @@ import { type GatewayClientMode, type GatewayClientName, } from "../../utils/message-channel.js"; +import { + normalizeDeliverableOutboundChannel, + resolveOutboundChannelPlugin, +} from "./channel-resolution.js"; import { resolveMessageChannelSelection } from "./channel-selection.js"; import { deliverOutboundPayloads, @@ -17,6 +20,7 @@ import { type OutboundSendDeps, } from "./deliver.js"; import { normalizeReplyPayloadsForDelivery } from "./payloads.js"; +import { buildOutboundSessionContext } from "./session-context.js"; import { resolveOutboundTarget } from 
"./targets.js"; export type MessageGatewayOptions = { @@ -107,17 +111,18 @@ async function resolveRequiredChannel(params: { cfg: OpenClawConfig; channel?: string; }): Promise { - const channel = params.channel?.trim() - ? normalizeChannelId(params.channel) - : (await resolveMessageChannelSelection({ cfg: params.cfg })).channel; - if (!channel) { - throw new Error(`Unknown channel: ${params.channel}`); + if (params.channel?.trim()) { + const normalized = normalizeDeliverableOutboundChannel(params.channel); + if (!normalized) { + throw new Error(`Unknown channel: ${params.channel}`); + } + return normalized; } - return channel; + return (await resolveMessageChannelSelection({ cfg: params.cfg })).channel; } -function resolveRequiredPlugin(channel: string) { - const plugin = getChannelPlugin(channel); +function resolveRequiredPlugin(channel: string, cfg: OpenClawConfig) { + const plugin = resolveOutboundChannelPlugin({ channel, cfg }); if (!plugin) { throw new Error(`Unknown channel: ${channel}`); } @@ -166,7 +171,7 @@ async function callMessageGateway(params: { export async function sendMessage(params: MessageSendParams): Promise { const cfg = params.cfg ?? loadConfig(); const channel = await resolveRequiredChannel({ cfg, channel: params.channel }); - const plugin = resolveRequiredPlugin(channel); + const plugin = resolveRequiredPlugin(channel, cfg); const deliveryMode = plugin.outbound?.deliveryMode ?? 
"direct"; const normalizedPayloads = normalizeReplyPayloadsForDelivery([ { @@ -208,11 +213,16 @@ export async function sendMessage(params: MessageSendParams): Promise { }); describe("failDelivery", () => { - it("increments retryCount and sets lastError", async () => { + it("increments retryCount, records attempt time, and sets lastError", async () => { const id = await enqueueDelivery( { channel: "telegram", @@ -119,6 +120,8 @@ describe("delivery-queue", () => { const queueDir = path.join(tmpDir, "delivery-queue"); const entry = JSON.parse(fs.readFileSync(path.join(queueDir, `${id}.json`), "utf-8")); expect(entry.retryCount).toBe(1); + expect(typeof entry.lastAttemptAt).toBe("number"); + expect(entry.lastAttemptAt).toBeGreaterThan(0); expect(entry.lastError).toBe("connection refused"); }); }); @@ -181,6 +184,25 @@ describe("delivery-queue", () => { const entries = await loadPendingDeliveries(tmpDir); expect(entries).toHaveLength(2); }); + + it("backfills lastAttemptAt for legacy retry entries during load", async () => { + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "legacy" }] }, + tmpDir, + ); + const filePath = path.join(tmpDir, "delivery-queue", `${id}.json`); + const legacyEntry = JSON.parse(fs.readFileSync(filePath, "utf-8")); + legacyEntry.retryCount = 2; + delete legacyEntry.lastAttemptAt; + fs.writeFileSync(filePath, JSON.stringify(legacyEntry), "utf-8"); + + const entries = await loadPendingDeliveries(tmpDir); + expect(entries).toHaveLength(1); + expect(entries[0]?.lastAttemptAt).toBe(entries[0]?.enqueuedAt); + + const persisted = JSON.parse(fs.readFileSync(filePath, "utf-8")); + expect(persisted.lastAttemptAt).toBe(persisted.enqueuedAt); + }); }); describe("computeBackoffMs", () => { @@ -203,29 +225,76 @@ describe("delivery-queue", () => { }); }); + describe("isEntryEligibleForRecoveryRetry", () => { + it("allows first replay after crash for retryCount=0 without lastAttemptAt", () => { + const now = Date.now(); 
+ const result = isEntryEligibleForRecoveryRetry( + { + id: "entry-1", + channel: "whatsapp", + to: "+1", + payloads: [{ text: "a" }], + enqueuedAt: now, + retryCount: 0, + }, + now, + ); + expect(result).toEqual({ eligible: true }); + }); + + it("defers retry entries until backoff window elapses", () => { + const now = Date.now(); + const result = isEntryEligibleForRecoveryRetry( + { + id: "entry-2", + channel: "whatsapp", + to: "+1", + payloads: [{ text: "a" }], + enqueuedAt: now - 30_000, + retryCount: 3, + lastAttemptAt: now, + }, + now, + ); + expect(result.eligible).toBe(false); + if (result.eligible) { + throw new Error("Expected ineligible retry entry"); + } + expect(result.remainingBackoffMs).toBeGreaterThan(0); + }); + }); + describe("recoverPendingDeliveries", () => { - const noopDelay = async () => {}; const baseCfg = {}; const createLog = () => ({ info: vi.fn(), warn: vi.fn(), error: vi.fn() }); const enqueueCrashRecoveryEntries = async () => { await enqueueDelivery({ channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir); await enqueueDelivery({ channel: "telegram", to: "2", payloads: [{ text: "b" }] }, tmpDir); }; - const setEntryRetryCount = (id: string, retryCount: number) => { + const setEntryState = ( + id: string, + state: { retryCount: number; lastAttemptAt?: number; enqueuedAt?: number }, + ) => { const filePath = path.join(tmpDir, "delivery-queue", `${id}.json`); const entry = JSON.parse(fs.readFileSync(filePath, "utf-8")); - entry.retryCount = retryCount; + entry.retryCount = state.retryCount; + if (state.lastAttemptAt === undefined) { + delete entry.lastAttemptAt; + } else { + entry.lastAttemptAt = state.lastAttemptAt; + } + if (state.enqueuedAt !== undefined) { + entry.enqueuedAt = state.enqueuedAt; + } fs.writeFileSync(filePath, JSON.stringify(entry), "utf-8"); }; const runRecovery = async ({ deliver, log = createLog(), - delay = noopDelay, maxRecoveryMs, }: { deliver: ReturnType; log?: ReturnType; - delay?: (ms: number) => 
Promise; maxRecoveryMs?: number; }) => { const result = await recoverPendingDeliveries({ @@ -233,7 +302,6 @@ describe("delivery-queue", () => { log, cfg: baseCfg, stateDir: tmpDir, - delay, ...(maxRecoveryMs === undefined ? {} : { maxRecoveryMs }), }); return { result, log }; @@ -248,7 +316,8 @@ describe("delivery-queue", () => { expect(deliver).toHaveBeenCalledTimes(2); expect(result.recovered).toBe(2); expect(result.failed).toBe(0); - expect(result.skipped).toBe(0); + expect(result.skippedMaxRetries).toBe(0); + expect(result.deferredBackoff).toBe(0); // Queue should be empty after recovery. const remaining = await loadPendingDeliveries(tmpDir); @@ -261,13 +330,14 @@ describe("delivery-queue", () => { { channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir, ); - setEntryRetryCount(id, MAX_RETRIES); + setEntryState(id, { retryCount: MAX_RETRIES }); const deliver = vi.fn(); const { result } = await runRecovery({ deliver }); expect(deliver).not.toHaveBeenCalled(); - expect(result.skipped).toBe(1); + expect(result.skippedMaxRetries).toBe(1); + expect(result.deferredBackoff).toBe(0); // Entry should be in failed/ directory. const failedDir = path.join(tmpDir, "delivery-queue", "failed"); @@ -367,7 +437,8 @@ describe("delivery-queue", () => { expect(deliver).not.toHaveBeenCalled(); expect(result.recovered).toBe(0); expect(result.failed).toBe(0); - expect(result.skipped).toBe(0); + expect(result.skippedMaxRetries).toBe(0); + expect(result.deferredBackoff).toBe(0); // All entries should still be in the queue. 
const remaining = await loadPendingDeliveries(tmpDir); @@ -377,36 +448,114 @@ describe("delivery-queue", () => { expect(log.warn).toHaveBeenCalledWith(expect.stringContaining("deferred to next restart")); }); - it("defers entries when backoff exceeds the recovery budget", async () => { + it("defers entries until backoff becomes eligible", async () => { const id = await enqueueDelivery( { channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir, ); - setEntryRetryCount(id, 3); + setEntryState(id, { retryCount: 3, lastAttemptAt: Date.now() }); const deliver = vi.fn().mockResolvedValue([]); - const delay = vi.fn(async () => {}); const { result, log } = await runRecovery({ deliver, - delay, - maxRecoveryMs: 1000, + maxRecoveryMs: 60_000, }); expect(deliver).not.toHaveBeenCalled(); - expect(delay).not.toHaveBeenCalled(); - expect(result).toEqual({ recovered: 0, failed: 0, skipped: 0 }); + expect(result).toEqual({ + recovered: 0, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 1, + }); const remaining = await loadPendingDeliveries(tmpDir); expect(remaining).toHaveLength(1); - expect(log.warn).toHaveBeenCalledWith(expect.stringContaining("deferred to next restart")); + expect(log.info).toHaveBeenCalledWith(expect.stringContaining("not ready for retry yet")); + }); + + it("continues past high-backoff entries and recovers ready entries behind them", async () => { + const now = Date.now(); + const blockedId = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "blocked" }] }, + tmpDir, + ); + const readyId = await enqueueDelivery( + { channel: "telegram", to: "2", payloads: [{ text: "ready" }] }, + tmpDir, + ); + + setEntryState(blockedId, { retryCount: 3, lastAttemptAt: now, enqueuedAt: now - 30_000 }); + setEntryState(readyId, { retryCount: 0, enqueuedAt: now - 10_000 }); + + const deliver = vi.fn().mockResolvedValue([]); + const { result } = await runRecovery({ deliver, maxRecoveryMs: 60_000 }); + + expect(result).toEqual({ + 
recovered: 1, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 1, + }); + expect(deliver).toHaveBeenCalledTimes(1); + expect(deliver).toHaveBeenCalledWith( + expect.objectContaining({ channel: "telegram", to: "2", skipQueue: true }), + ); + + const remaining = await loadPendingDeliveries(tmpDir); + expect(remaining).toHaveLength(1); + expect(remaining[0]?.id).toBe(blockedId); + }); + + it("recovers deferred entries on a later restart once backoff elapsed", async () => { + vi.useFakeTimers(); + const start = new Date("2026-01-01T00:00:00.000Z"); + vi.setSystemTime(start); + + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "later" }] }, + tmpDir, + ); + setEntryState(id, { retryCount: 3, lastAttemptAt: start.getTime() }); + + const firstDeliver = vi.fn().mockResolvedValue([]); + const firstRun = await runRecovery({ deliver: firstDeliver, maxRecoveryMs: 60_000 }); + expect(firstRun.result).toEqual({ + recovered: 0, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 1, + }); + expect(firstDeliver).not.toHaveBeenCalled(); + + vi.setSystemTime(new Date(start.getTime() + 600_000 + 1)); + const secondDeliver = vi.fn().mockResolvedValue([]); + const secondRun = await runRecovery({ deliver: secondDeliver, maxRecoveryMs: 60_000 }); + expect(secondRun.result).toEqual({ + recovered: 1, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 0, + }); + expect(secondDeliver).toHaveBeenCalledTimes(1); + + const remaining = await loadPendingDeliveries(tmpDir); + expect(remaining).toHaveLength(0); + + vi.useRealTimers(); }); it("returns zeros when queue is empty", async () => { const deliver = vi.fn(); const { result } = await runRecovery({ deliver }); - expect(result).toEqual({ recovered: 0, failed: 0, skipped: 0 }); + expect(result).toEqual({ + recovered: 0, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 0, + }); expect(deliver).not.toHaveBeenCalled(); }); }); diff --git 
a/src/infra/outbound/session-binding-service.test.ts b/src/infra/outbound/session-binding-service.test.ts new file mode 100644 index 00000000000..04a75d6e867 --- /dev/null +++ b/src/infra/outbound/session-binding-service.test.ts @@ -0,0 +1,201 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + __testing, + getSessionBindingService, + isSessionBindingError, + registerSessionBindingAdapter, + type SessionBindingBindInput, + type SessionBindingRecord, +} from "./session-binding-service.js"; + +function createRecord(input: SessionBindingBindInput): SessionBindingRecord { + const conversationId = + input.placement === "child" + ? "thread-created" + : input.conversation.conversationId.trim() || "thread-current"; + return { + bindingId: `default:${conversationId}`, + targetSessionKey: input.targetSessionKey, + targetKind: input.targetKind, + conversation: { + channel: "discord", + accountId: "default", + conversationId, + parentConversationId: input.conversation.parentConversationId?.trim() || undefined, + }, + status: "active", + boundAt: 1, + }; +} + +describe("session binding service", () => { + beforeEach(() => { + __testing.resetSessionBindingAdaptersForTests(); + }); + + it("normalizes conversation refs and infers current placement", async () => { + const bind = vi.fn(async (input: SessionBindingBindInput) => createRecord(input)); + registerSessionBindingAdapter({ + channel: "discord", + accountId: "default", + bind, + listBySession: () => [], + resolveByConversation: () => null, + }); + + const result = await getSessionBindingService().bind({ + targetSessionKey: "agent:main:subagent:child-1", + targetKind: "subagent", + conversation: { + channel: "Discord", + accountId: "DEFAULT", + conversationId: " thread-1 ", + }, + }); + + expect(result.conversation.channel).toBe("discord"); + expect(result.conversation.accountId).toBe("default"); + expect(bind).toHaveBeenCalledWith( + expect.objectContaining({ + placement: "current", + conversation: 
expect.objectContaining({ + channel: "discord", + accountId: "default", + conversationId: "thread-1", + }), + }), + ); + }); + + it("supports explicit child placement when adapter advertises it", async () => { + registerSessionBindingAdapter({ + channel: "discord", + accountId: "default", + capabilities: { placements: ["child"] }, + bind: async (input) => createRecord(input), + listBySession: () => [], + resolveByConversation: () => null, + }); + + const result = await getSessionBindingService().bind({ + targetSessionKey: "agent:codex:acp:1", + targetKind: "session", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + }, + placement: "child", + }); + + expect(result.conversation.conversationId).toBe("thread-created"); + }); + + it("returns structured errors when adapter is unavailable", async () => { + await expect( + getSessionBindingService().bind({ + targetSessionKey: "agent:main:subagent:child-1", + targetKind: "subagent", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + }, + }), + ).rejects.toMatchObject({ + code: "BINDING_ADAPTER_UNAVAILABLE", + }); + }); + + it("returns structured errors for unsupported placement", async () => { + registerSessionBindingAdapter({ + channel: "discord", + accountId: "default", + capabilities: { placements: ["current"] }, + bind: async (input) => createRecord(input), + listBySession: () => [], + resolveByConversation: () => null, + }); + + const rejected = await getSessionBindingService() + .bind({ + targetSessionKey: "agent:codex:acp:1", + targetKind: "session", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + }, + placement: "child", + }) + .catch((error) => error); + + expect(isSessionBindingError(rejected)).toBe(true); + expect(rejected).toMatchObject({ + code: "BINDING_CAPABILITY_UNSUPPORTED", + details: { + placement: "child", + }, + }); + }); + + it("returns structured errors when 
adapter bind fails", async () => { + registerSessionBindingAdapter({ + channel: "discord", + accountId: "default", + bind: async () => null, + listBySession: () => [], + resolveByConversation: () => null, + }); + + await expect( + getSessionBindingService().bind({ + targetSessionKey: "agent:main:subagent:child-1", + targetKind: "subagent", + conversation: { + channel: "discord", + accountId: "default", + conversationId: "thread-1", + }, + }), + ).rejects.toMatchObject({ + code: "BINDING_CREATE_FAILED", + }); + }); + + it("reports adapter capabilities for command preflight messaging", () => { + registerSessionBindingAdapter({ + channel: "discord", + accountId: "default", + capabilities: { + placements: ["current", "child"], + }, + bind: async (input) => createRecord(input), + listBySession: () => [], + resolveByConversation: () => null, + unbind: async () => [], + }); + + const known = getSessionBindingService().getCapabilities({ + channel: "discord", + accountId: "default", + }); + const unknown = getSessionBindingService().getCapabilities({ + channel: "discord", + accountId: "other", + }); + + expect(known).toEqual({ + adapterAvailable: true, + bindSupported: true, + unbindSupported: true, + placements: ["current", "child"], + }); + expect(unknown).toEqual({ + adapterAvailable: false, + bindSupported: false, + unbindSupported: false, + placements: [], + }); + }); +}); diff --git a/src/infra/outbound/session-binding-service.ts b/src/infra/outbound/session-binding-service.ts index 900f4c00301..ffbf04758e6 100644 --- a/src/infra/outbound/session-binding-service.ts +++ b/src/infra/outbound/session-binding-service.ts @@ -2,6 +2,11 @@ import { normalizeAccountId } from "../../routing/session-key.js"; export type BindingTargetKind = "subagent" | "session"; export type BindingStatus = "active" | "ending" | "ended"; +export type SessionBindingPlacement = "current" | "child"; +export type SessionBindingErrorCode = + | "BINDING_ADAPTER_UNAVAILABLE" + | 
"BINDING_CAPABILITY_UNSUPPORTED" + | "BINDING_CREATE_FAILED"; export type ConversationRef = { channel: string; @@ -21,31 +26,66 @@ export type SessionBindingRecord = { metadata?: Record; }; -type SessionBindingBindInput = { +export type SessionBindingBindInput = { targetSessionKey: string; targetKind: BindingTargetKind; conversation: ConversationRef; + placement?: SessionBindingPlacement; metadata?: Record; ttlMs?: number; }; -type SessionBindingUnbindInput = { +export type SessionBindingUnbindInput = { bindingId?: string; targetSessionKey?: string; reason: string; }; +export type SessionBindingCapabilities = { + adapterAvailable: boolean; + bindSupported: boolean; + unbindSupported: boolean; + placements: SessionBindingPlacement[]; +}; + +export class SessionBindingError extends Error { + constructor( + public readonly code: SessionBindingErrorCode, + message: string, + public readonly details?: { + channel?: string; + accountId?: string; + placement?: SessionBindingPlacement; + }, + ) { + super(message); + this.name = "SessionBindingError"; + } +} + +export function isSessionBindingError(error: unknown): error is SessionBindingError { + return error instanceof SessionBindingError; +} + export type SessionBindingService = { bind: (input: SessionBindingBindInput) => Promise; + getCapabilities: (params: { channel: string; accountId: string }) => SessionBindingCapabilities; listBySession: (targetSessionKey: string) => SessionBindingRecord[]; resolveByConversation: (ref: ConversationRef) => SessionBindingRecord | null; touch: (bindingId: string, at?: number) => void; unbind: (input: SessionBindingUnbindInput) => Promise; }; +export type SessionBindingAdapterCapabilities = { + placements?: SessionBindingPlacement[]; + bindSupported?: boolean; + unbindSupported?: boolean; +}; + export type SessionBindingAdapter = { channel: string; accountId: string; + capabilities?: SessionBindingAdapterCapabilities; bind?: (input: SessionBindingBindInput) => Promise; listBySession: 
(targetSessionKey: string) => SessionBindingRecord[]; resolveByConversation: (ref: ConversationRef) => SessionBindingRecord | null; @@ -66,6 +106,45 @@ function toAdapterKey(params: { channel: string; accountId: string }): string { return `${params.channel.trim().toLowerCase()}:${normalizeAccountId(params.accountId)}`; } +function normalizePlacement(raw: unknown): SessionBindingPlacement | undefined { + return raw === "current" || raw === "child" ? raw : undefined; +} + +function inferDefaultPlacement(ref: ConversationRef): SessionBindingPlacement { + return ref.conversationId ? "current" : "child"; +} + +function resolveAdapterPlacements(adapter: SessionBindingAdapter): SessionBindingPlacement[] { + const configured = adapter.capabilities?.placements?.map((value) => normalizePlacement(value)); + const placements = configured?.filter((value): value is SessionBindingPlacement => + Boolean(value), + ); + if (placements && placements.length > 0) { + return [...new Set(placements)]; + } + return ["current", "child"]; +} + +function resolveAdapterCapabilities( + adapter: SessionBindingAdapter | null, +): SessionBindingCapabilities { + if (!adapter) { + return { + adapterAvailable: false, + bindSupported: false, + unbindSupported: false, + placements: [], + }; + } + const bindSupported = adapter.capabilities?.bindSupported ?? Boolean(adapter.bind); + return { + adapterAvailable: true, + bindSupported, + unbindSupported: adapter.capabilities?.unbindSupported ?? Boolean(adapter.unbind), + placements: bindSupported ? 
resolveAdapterPlacements(adapter) : [], + }; +} + const ADAPTERS_BY_CHANNEL_ACCOUNT = new Map(); export function registerSessionBindingAdapter(adapter: SessionBindingAdapter): void { @@ -88,10 +167,19 @@ export function unregisterSessionBindingAdapter(params: { } function resolveAdapterForConversation(ref: ConversationRef): SessionBindingAdapter | null { - const normalized = normalizeConversationRef(ref); + return resolveAdapterForChannelAccount({ + channel: ref.channel, + accountId: ref.accountId, + }); +} + +function resolveAdapterForChannelAccount(params: { + channel: string; + accountId: string; +}): SessionBindingAdapter | null { const key = toAdapterKey({ - channel: normalized.channel, - accountId: normalized.accountId, + channel: params.channel, + accountId: params.accountId, }); return ADAPTERS_BY_CHANNEL_ACCOUNT.get(key) ?? null; } @@ -112,20 +200,65 @@ function createDefaultSessionBindingService(): SessionBindingService { bind: async (input) => { const normalizedConversation = normalizeConversationRef(input.conversation); const adapter = resolveAdapterForConversation(normalizedConversation); - if (!adapter?.bind) { - throw new Error( + if (!adapter) { + throw new SessionBindingError( + "BINDING_ADAPTER_UNAVAILABLE", `Session binding adapter unavailable for ${normalizedConversation.channel}:${normalizedConversation.accountId}`, + { + channel: normalizedConversation.channel, + accountId: normalizedConversation.accountId, + }, + ); + } + if (!adapter.bind) { + throw new SessionBindingError( + "BINDING_CAPABILITY_UNSUPPORTED", + `Session binding adapter does not support binding for ${normalizedConversation.channel}:${normalizedConversation.accountId}`, + { + channel: normalizedConversation.channel, + accountId: normalizedConversation.accountId, + }, + ); + } + const placement = + normalizePlacement(input.placement) ?? 
inferDefaultPlacement(normalizedConversation); + const supportedPlacements = resolveAdapterPlacements(adapter); + if (!supportedPlacements.includes(placement)) { + throw new SessionBindingError( + "BINDING_CAPABILITY_UNSUPPORTED", + `Session binding placement "${placement}" is not supported for ${normalizedConversation.channel}:${normalizedConversation.accountId}`, + { + channel: normalizedConversation.channel, + accountId: normalizedConversation.accountId, + placement, + }, ); } const bound = await adapter.bind({ ...input, conversation: normalizedConversation, + placement, }); if (!bound) { - throw new Error("Session binding adapter failed to bind target conversation"); + throw new SessionBindingError( + "BINDING_CREATE_FAILED", + "Session binding adapter failed to bind target conversation", + { + channel: normalizedConversation.channel, + accountId: normalizedConversation.accountId, + placement, + }, + ); } return bound; }, + getCapabilities: (params) => { + const adapter = resolveAdapterForChannelAccount({ + channel: params.channel, + accountId: params.accountId, + }); + return resolveAdapterCapabilities(adapter); + }, listBySession: (targetSessionKey) => { const key = targetSessionKey.trim(); if (!key) { diff --git a/src/infra/outbound/session-context.ts b/src/infra/outbound/session-context.ts new file mode 100644 index 00000000000..f73cb0e6ed5 --- /dev/null +++ b/src/infra/outbound/session-context.ts @@ -0,0 +1,37 @@ +import { resolveSessionAgentId } from "../../agents/agent-scope.js"; +import type { OpenClawConfig } from "../../config/config.js"; + +export type OutboundSessionContext = { + /** Canonical session key used for internal hook dispatch. */ + key?: string; + /** Active agent id used for workspace-scoped media roots. */ + agentId?: string; +}; + +function normalizeOptionalString(value?: string | null): string | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed.length > 0 ? 
trimmed : undefined; +} + +export function buildOutboundSessionContext(params: { + cfg: OpenClawConfig; + sessionKey?: string | null; + agentId?: string | null; +}): OutboundSessionContext | undefined { + const key = normalizeOptionalString(params.sessionKey); + const explicitAgentId = normalizeOptionalString(params.agentId); + const derivedAgentId = key + ? resolveSessionAgentId({ sessionKey: key, config: params.cfg }) + : undefined; + const agentId = explicitAgentId ?? derivedAgentId; + if (!key && !agentId) { + return undefined; + } + return { + ...(key ? { key } : {}), + ...(agentId ? { agentId } : {}), + }; +} diff --git a/src/infra/outbound/targets.channel-resolution.test.ts b/src/infra/outbound/targets.channel-resolution.test.ts new file mode 100644 index 00000000000..01779d0655c --- /dev/null +++ b/src/infra/outbound/targets.channel-resolution.test.ts @@ -0,0 +1,103 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const mocks = vi.hoisted(() => ({ + getChannelPlugin: vi.fn(), + loadOpenClawPlugins: vi.fn(), +})); + +vi.mock("../../channels/plugins/index.js", () => ({ + getChannelPlugin: mocks.getChannelPlugin, + normalizeChannelId: (channel?: string) => channel?.trim().toLowerCase() ?? 
undefined, +})); + +vi.mock("../../agents/agent-scope.js", () => ({ + resolveDefaultAgentId: () => "main", + resolveAgentWorkspaceDir: () => "/tmp/openclaw-test-workspace", +})); + +vi.mock("../../config/plugin-auto-enable.js", () => ({ + applyPluginAutoEnable: ({ config }: { config: unknown }) => ({ config, changes: [] }), +})); + +vi.mock("../../plugins/loader.js", () => ({ + loadOpenClawPlugins: mocks.loadOpenClawPlugins, +})); + +import { setActivePluginRegistry } from "../../plugins/runtime.js"; +import { createTestRegistry } from "../../test-utils/channel-plugins.js"; +import { resolveOutboundTarget } from "./targets.js"; + +describe("resolveOutboundTarget channel resolution", () => { + let registrySeq = 0; + + beforeEach(() => { + registrySeq += 1; + setActivePluginRegistry(createTestRegistry([]), `targets-test-${registrySeq}`); + mocks.getChannelPlugin.mockReset(); + mocks.loadOpenClawPlugins.mockReset(); + }); + + it("recovers telegram plugin resolution so announce delivery does not fail with Unsupported channel: telegram", () => { + const telegramPlugin = { + id: "telegram", + meta: { label: "Telegram" }, + config: { + listAccountIds: () => [], + resolveAccount: () => ({}), + }, + }; + mocks.getChannelPlugin + .mockReturnValueOnce(undefined) + .mockReturnValueOnce(telegramPlugin) + .mockReturnValue(telegramPlugin); + + const result = resolveOutboundTarget({ + channel: "telegram", + to: "123456", + cfg: { channels: { telegram: { botToken: "test-token" } } }, + mode: "explicit", + }); + + expect(result).toEqual({ ok: true, to: "123456" }); + expect(mocks.loadOpenClawPlugins).toHaveBeenCalledTimes(1); + }); + + it("retries bootstrap on subsequent resolve when the first bootstrap attempt fails", () => { + const telegramPlugin = { + id: "telegram", + meta: { label: "Telegram" }, + config: { + listAccountIds: () => [], + resolveAccount: () => ({}), + }, + }; + mocks.getChannelPlugin + .mockReturnValueOnce(undefined) + .mockReturnValueOnce(undefined) + 
.mockReturnValueOnce(undefined) + .mockReturnValueOnce(telegramPlugin) + .mockReturnValue(telegramPlugin); + mocks.loadOpenClawPlugins + .mockImplementationOnce(() => { + throw new Error("bootstrap failed"); + }) + .mockImplementation(() => undefined); + + const first = resolveOutboundTarget({ + channel: "telegram", + to: "123456", + cfg: { channels: { telegram: { botToken: "test-token" } } }, + mode: "explicit", + }); + const second = resolveOutboundTarget({ + channel: "telegram", + to: "123456", + cfg: { channels: { telegram: { botToken: "test-token" } } }, + mode: "explicit", + }); + + expect(first.ok).toBe(false); + expect(second).toEqual({ ok: true, to: "123456" }); + expect(mocks.loadOpenClawPlugins).toHaveBeenCalledTimes(2); + }); +}); diff --git a/src/infra/outbound/targets.test.ts b/src/infra/outbound/targets.test.ts index 8f120702de0..cbad502cdde 100644 --- a/src/infra/outbound/targets.test.ts +++ b/src/infra/outbound/targets.test.ts @@ -301,7 +301,7 @@ describe("resolveSessionDeliveryTarget", () => { expect(resolved.to).toBe("63448508"); }); - it("blocks heartbeat delivery to Slack DMs and avoids inherited threadId", () => { + it("allows heartbeat delivery to Slack DMs and avoids inherited threadId by default", () => { const cfg: OpenClawConfig = {}; const resolved = resolveHeartbeatDeliveryTarget({ cfg, @@ -317,12 +317,34 @@ describe("resolveSessionDeliveryTarget", () => { }, }); + expect(resolved.channel).toBe("slack"); + expect(resolved.to).toBe("user:U123"); + expect(resolved.threadId).toBeUndefined(); + }); + + it("blocks heartbeat delivery to Slack DMs when directPolicy is block", () => { + const cfg: OpenClawConfig = {}; + const resolved = resolveHeartbeatDeliveryTarget({ + cfg, + entry: { + sessionId: "sess-heartbeat-outbound", + updatedAt: 1, + lastChannel: "slack", + lastTo: "user:U123", + lastThreadId: "1739142736.000100", + }, + heartbeat: { + target: "last", + directPolicy: "block", + }, + }); + expect(resolved.channel).toBe("none"); 
expect(resolved.reason).toBe("dm-blocked"); expect(resolved.threadId).toBeUndefined(); }); - it("blocks heartbeat delivery to Discord DMs", () => { + it("allows heartbeat delivery to Discord DMs by default", () => { const cfg: OpenClawConfig = {}; const resolved = resolveHeartbeatDeliveryTarget({ cfg, @@ -337,11 +359,11 @@ describe("resolveSessionDeliveryTarget", () => { }, }); - expect(resolved.channel).toBe("none"); - expect(resolved.reason).toBe("dm-blocked"); + expect(resolved.channel).toBe("discord"); + expect(resolved.to).toBe("user:12345"); }); - it("blocks heartbeat delivery to Telegram direct chats", () => { + it("allows heartbeat delivery to Telegram direct chats by default", () => { const cfg: OpenClawConfig = {}; const resolved = resolveHeartbeatDeliveryTarget({ cfg, @@ -356,6 +378,26 @@ describe("resolveSessionDeliveryTarget", () => { }, }); + expect(resolved.channel).toBe("telegram"); + expect(resolved.to).toBe("5232990709"); + }); + + it("blocks heartbeat delivery to Telegram direct chats when directPolicy is block", () => { + const cfg: OpenClawConfig = {}; + const resolved = resolveHeartbeatDeliveryTarget({ + cfg, + entry: { + sessionId: "sess-heartbeat-telegram-direct", + updatedAt: 1, + lastChannel: "telegram", + lastTo: "5232990709", + }, + heartbeat: { + target: "last", + directPolicy: "block", + }, + }); + expect(resolved.channel).toBe("none"); expect(resolved.reason).toBe("dm-blocked"); }); @@ -379,7 +421,7 @@ describe("resolveSessionDeliveryTarget", () => { expect(resolved.to).toBe("-1001234567890"); }); - it("blocks heartbeat delivery to WhatsApp direct chats", () => { + it("allows heartbeat delivery to WhatsApp direct chats by default", () => { const cfg: OpenClawConfig = {}; const resolved = resolveHeartbeatDeliveryTarget({ cfg, @@ -394,8 +436,8 @@ describe("resolveSessionDeliveryTarget", () => { }, }); - expect(resolved.channel).toBe("none"); - expect(resolved.reason).toBe("dm-blocked"); + expect(resolved.channel).toBe("whatsapp"); + 
expect(resolved.to).toBe("+15551234567"); }); it("keeps heartbeat delivery to WhatsApp groups", () => { @@ -417,7 +459,7 @@ describe("resolveSessionDeliveryTarget", () => { expect(resolved.to).toBe("120363140186826074@g.us"); }); - it("uses session chatType hint when target parser cannot classify", () => { + it("uses session chatType hint when target parser cannot classify and allows direct by default", () => { const cfg: OpenClawConfig = {}; const resolved = resolveHeartbeatDeliveryTarget({ cfg, @@ -433,6 +475,27 @@ describe("resolveSessionDeliveryTarget", () => { }, }); + expect(resolved.channel).toBe("imessage"); + expect(resolved.to).toBe("chat-guid-unknown-shape"); + }); + + it("blocks session chatType direct hints when directPolicy is block", () => { + const cfg: OpenClawConfig = {}; + const resolved = resolveHeartbeatDeliveryTarget({ + cfg, + entry: { + sessionId: "sess-heartbeat-imessage-direct", + updatedAt: 1, + lastChannel: "imessage", + lastTo: "chat-guid-unknown-shape", + chatType: "direct", + }, + heartbeat: { + target: "last", + directPolicy: "block", + }, + }); + expect(resolved.channel).toBe("none"); expect(resolved.reason).toBe("dm-blocked"); }); diff --git a/src/infra/outbound/targets.ts b/src/infra/outbound/targets.ts index 41baa558653..89e68e57566 100644 --- a/src/infra/outbound/targets.ts +++ b/src/infra/outbound/targets.ts @@ -1,5 +1,4 @@ import { normalizeChatType, type ChatType } from "../../channels/chat-type.js"; -import { getChannelPlugin, normalizeChannelId } from "../../channels/plugins/index.js"; import type { ChannelOutboundTargetMode } from "../../channels/plugins/types.js"; import { formatCliCommand } from "../../cli/command-format.js"; import type { OpenClawConfig } from "../../config/config.js"; @@ -20,6 +19,10 @@ import { normalizeMessageChannel, } from "../../utils/message-channel.js"; import { isWhatsAppGroupJid, normalizeWhatsAppTarget } from "../../whatsapp/normalize.js"; +import { + normalizeDeliverableOutboundChannel, + 
resolveOutboundChannelPlugin, +} from "./channel-resolution.js"; import { missingTargetError } from "./target-errors.js"; export type OutboundChannel = DeliverableMessageChannel | "none"; @@ -181,7 +184,10 @@ export function resolveOutboundTarget(params: { }; } - const plugin = getChannelPlugin(params.channel); + const plugin = resolveOutboundChannelPlugin({ + channel: params.channel, + cfg: params.cfg, + }); if (!plugin) { return { ok: false, @@ -242,7 +248,7 @@ export function resolveHeartbeatDeliveryTarget(params: { if (rawTarget === "none" || rawTarget === "last") { target = rawTarget; } else if (typeof rawTarget === "string") { - const normalized = normalizeChannelId(rawTarget); + const normalized = normalizeDeliverableOutboundChannel(rawTarget); if (normalized) { target = normalized; } @@ -269,7 +275,10 @@ export function resolveHeartbeatDeliveryTarget(params: { let effectiveAccountId = heartbeatAccountId || resolvedTarget.accountId; if (heartbeatAccountId && resolvedTarget.channel) { - const plugin = getChannelPlugin(resolvedTarget.channel); + const plugin = resolveOutboundChannelPlugin({ + channel: resolvedTarget.channel, + cfg, + }); const listAccountIds = plugin?.config.listAccountIds; const accountIds = listAccountIds ? 
listAccountIds(cfg) : []; if (accountIds.length > 0) { @@ -321,7 +330,7 @@ export function resolveHeartbeatDeliveryTarget(params: { to: resolved.to, sessionChatType: sessionChatTypeHint, }); - if (deliveryChatType === "direct") { + if (deliveryChatType === "direct" && heartbeat?.directPolicy === "block") { return buildNoHeartbeatDeliveryTarget({ reason: "dm-blocked", accountId: effectiveAccountId, @@ -331,7 +340,10 @@ export function resolveHeartbeatDeliveryTarget(params: { } let reason: string | undefined; - const plugin = getChannelPlugin(resolvedTarget.channel); + const plugin = resolveOutboundChannelPlugin({ + channel: resolvedTarget.channel, + cfg, + }); if (plugin?.config.resolveAllowFrom) { const explicit = resolveOutboundTarget({ channel: resolvedTarget.channel, @@ -516,7 +528,10 @@ export function resolveHeartbeatSenderContext(params: { params.delivery.accountId ?? (provider === params.delivery.lastChannel ? params.delivery.lastAccountId : undefined); const allowFromRaw = provider - ? (getChannelPlugin(provider)?.config.resolveAllowFrom?.({ + ? (resolveOutboundChannelPlugin({ + channel: provider, + cfg: params.cfg, + })?.config.resolveAllowFrom?.({ cfg: params.cfg, accountId, }) ?? 
[]) diff --git a/src/infra/path-alias-guards.test.ts b/src/infra/path-alias-guards.test.ts new file mode 100644 index 00000000000..abc16c48847 --- /dev/null +++ b/src/infra/path-alias-guards.test.ts @@ -0,0 +1,76 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { assertNoPathAliasEscape } from "./path-alias-guards.js"; + +async function withTempRoot(run: (root: string) => Promise): Promise { + const base = await fs.mkdtemp(path.join(process.cwd(), "openclaw-path-alias-")); + const root = path.join(base, "root"); + await fs.mkdir(root, { recursive: true }); + try { + return await run(root); + } finally { + await fs.rm(base, { recursive: true, force: true }); + } +} + +describe("assertNoPathAliasEscape", () => { + it.runIf(process.platform !== "win32")( + "rejects broken final symlink targets outside root", + async () => { + await withTempRoot(async (root) => { + const outside = path.join(path.dirname(root), "outside"); + await fs.mkdir(outside, { recursive: true }); + const linkPath = path.join(root, "jump"); + await fs.symlink(path.join(outside, "owned.txt"), linkPath); + + await expect( + assertNoPathAliasEscape({ + absolutePath: linkPath, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).rejects.toThrow(/Symlink escapes sandbox root/); + }); + }, + ); + + it.runIf(process.platform !== "win32")( + "allows broken final symlink targets that remain inside root", + async () => { + await withTempRoot(async (root) => { + const linkPath = path.join(root, "jump"); + await fs.symlink(path.join(root, "missing", "owned.txt"), linkPath); + + await expect( + assertNoPathAliasEscape({ + absolutePath: linkPath, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).resolves.toBeUndefined(); + }); + }, + ); + + it.runIf(process.platform !== "win32")( + "rejects broken targets that traverse via an in-root symlink alias", + async () => { + await withTempRoot(async (root) => { + const 
outside = path.join(path.dirname(root), "outside"); + await fs.mkdir(outside, { recursive: true }); + await fs.symlink(outside, path.join(root, "hop")); + const linkPath = path.join(root, "jump"); + await fs.symlink(path.join("hop", "missing", "owned.txt"), linkPath); + + await expect( + assertNoPathAliasEscape({ + absolutePath: linkPath, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).rejects.toThrow(/Symlink escapes sandbox root/); + }); + }, + ); +}); diff --git a/src/infra/path-alias-guards.ts b/src/infra/path-alias-guards.ts new file mode 100644 index 00000000000..e7b0aa42a0e --- /dev/null +++ b/src/infra/path-alias-guards.ts @@ -0,0 +1,34 @@ +import { + BOUNDARY_PATH_ALIAS_POLICIES, + resolveBoundaryPath, + type BoundaryPathAliasPolicy, +} from "./boundary-path.js"; +import { assertNoHardlinkedFinalPath } from "./hardlink-guards.js"; + +export type PathAliasPolicy = BoundaryPathAliasPolicy; + +export const PATH_ALIAS_POLICIES = BOUNDARY_PATH_ALIAS_POLICIES; + +export async function assertNoPathAliasEscape(params: { + absolutePath: string; + rootPath: string; + boundaryLabel: string; + policy?: PathAliasPolicy; +}): Promise { + const resolved = await resolveBoundaryPath({ + absolutePath: params.absolutePath, + rootPath: params.rootPath, + boundaryLabel: params.boundaryLabel, + policy: params.policy, + }); + const allowFinalSymlink = params.policy?.allowFinalSymlinkForUnlink === true; + if (allowFinalSymlink && resolved.kind === "symlink") { + return; + } + await assertNoHardlinkedFinalPath({ + filePath: resolved.absolutePath, + root: resolved.rootPath, + boundaryLabel: params.boundaryLabel, + allowFinalHardlinkForUnlink: params.policy?.allowFinalHardlinkForUnlink, + }); +} diff --git a/src/infra/path-guards.ts b/src/infra/path-guards.ts index 55330fa8bc4..751da0a9db0 100644 --- a/src/infra/path-guards.ts +++ b/src/infra/path-guards.ts @@ -3,6 +3,17 @@ import path from "node:path"; const NOT_FOUND_CODES = new Set(["ENOENT", "ENOTDIR"]); const 
SYMLINK_OPEN_CODES = new Set(["ELOOP", "EINVAL", "ENOTSUP"]); +function normalizeWindowsPathForComparison(input: string): string { + let normalized = path.win32.normalize(input); + if (normalized.startsWith("\\\\?\\")) { + normalized = normalized.slice(4); + if (normalized.toUpperCase().startsWith("UNC\\")) { + normalized = `\\\\${normalized.slice(4)}`; + } + } + return normalized.replaceAll("/", "\\").toLowerCase(); +} + export function isNodeError(value: unknown): value is NodeJS.ErrnoException { return Boolean( value && typeof value === "object" && "code" in (value as Record), @@ -26,7 +37,9 @@ export function isPathInside(root: string, target: string): boolean { const resolvedTarget = path.resolve(target); if (process.platform === "win32") { - const relative = path.win32.relative(resolvedRoot.toLowerCase(), resolvedTarget.toLowerCase()); + const rootForCompare = normalizeWindowsPathForComparison(resolvedRoot); + const targetForCompare = normalizeWindowsPathForComparison(resolvedTarget); + const relative = path.win32.relative(rootForCompare, targetForCompare); return relative === "" || (!relative.startsWith("..") && !path.win32.isAbsolute(relative)); } diff --git a/src/infra/process-respawn.test.ts b/src/infra/process-respawn.test.ts index 90e9b5a9c57..13c1bc6a08d 100644 --- a/src/infra/process-respawn.test.ts +++ b/src/infra/process-respawn.test.ts @@ -1,5 +1,6 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { captureFullEnv } from "../test-utils/env.js"; +import { SUPERVISOR_HINT_ENV_VARS } from "./supervisor-markers.js"; const spawnMock = vi.hoisted(() => vi.fn()); @@ -21,11 +22,9 @@ afterEach(() => { }); function clearSupervisorHints() { - delete process.env.LAUNCH_JOB_LABEL; - delete process.env.LAUNCH_JOB_NAME; - delete process.env.INVOCATION_ID; - delete process.env.SYSTEMD_EXEC_PID; - delete process.env.JOURNAL_STREAM; + for (const key of SUPERVISOR_HINT_ENV_VARS) { + delete process.env[key]; + } } 
describe("restartGatewayProcessWithFreshPid", () => { @@ -63,6 +62,30 @@ describe("restartGatewayProcessWithFreshPid", () => { ); }); + it("returns supervised when OPENCLAW_LAUNCHD_LABEL is set (stock launchd plist)", () => { + clearSupervisorHints(); + process.env.OPENCLAW_LAUNCHD_LABEL = "ai.openclaw.gateway"; + const result = restartGatewayProcessWithFreshPid(); + expect(result.mode).toBe("supervised"); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it("returns supervised when OPENCLAW_SYSTEMD_UNIT is set", () => { + clearSupervisorHints(); + process.env.OPENCLAW_SYSTEMD_UNIT = "openclaw-gateway.service"; + const result = restartGatewayProcessWithFreshPid(); + expect(result.mode).toBe("supervised"); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it("returns supervised when OPENCLAW_SERVICE_MARKER is set", () => { + clearSupervisorHints(); + process.env.OPENCLAW_SERVICE_MARKER = "gateway"; + const result = restartGatewayProcessWithFreshPid(); + expect(result.mode).toBe("supervised"); + expect(spawnMock).not.toHaveBeenCalled(); + }); + it("returns failed when spawn throws", () => { delete process.env.OPENCLAW_NO_RESPAWN; clearSupervisorHints(); diff --git a/src/infra/process-respawn.ts b/src/infra/process-respawn.ts index 3c6ef37106f..d77721cd088 100644 --- a/src/infra/process-respawn.ts +++ b/src/infra/process-respawn.ts @@ -1,4 +1,5 @@ import { spawn } from "node:child_process"; +import { hasSupervisorHint } from "./supervisor-markers.js"; type RespawnMode = "spawned" | "supervised" | "disabled" | "failed"; @@ -8,14 +9,6 @@ export type GatewayRespawnResult = { detail?: string; }; -const SUPERVISOR_HINT_ENV_VARS = [ - "LAUNCH_JOB_LABEL", - "LAUNCH_JOB_NAME", - "INVOCATION_ID", - "SYSTEMD_EXEC_PID", - "JOURNAL_STREAM", -]; - function isTruthy(value: string | undefined): boolean { if (!value) { return false; @@ -25,10 +18,7 @@ function isTruthy(value: string | undefined): boolean { } function isLikelySupervisedProcess(env: NodeJS.ProcessEnv = 
process.env): boolean { - return SUPERVISOR_HINT_ENV_VARS.some((key) => { - const value = env[key]; - return typeof value === "string" && value.trim().length > 0; - }); + return hasSupervisorHint(env); } /** diff --git a/src/infra/restart-stale-pids.ts b/src/infra/restart-stale-pids.ts new file mode 100644 index 00000000000..bbab76f8374 --- /dev/null +++ b/src/infra/restart-stale-pids.ts @@ -0,0 +1,127 @@ +import { spawnSync } from "node:child_process"; +import { resolveGatewayPort } from "../config/paths.js"; +import { createSubsystemLogger } from "../logging/subsystem.js"; +import { resolveLsofCommandSync } from "./ports-lsof.js"; + +const SPAWN_TIMEOUT_MS = 2000; +const STALE_SIGTERM_WAIT_MS = 300; +const STALE_SIGKILL_WAIT_MS = 200; + +const restartLog = createSubsystemLogger("restart"); +let sleepSyncOverride: ((ms: number) => void) | null = null; + +function sleepSync(ms: number): void { + const timeoutMs = Math.max(0, Math.floor(ms)); + if (timeoutMs <= 0) { + return; + } + if (sleepSyncOverride) { + sleepSyncOverride(timeoutMs); + return; + } + try { + const lock = new Int32Array(new SharedArrayBuffer(4)); + Atomics.wait(lock, 0, 0, timeoutMs); + } catch { + const start = Date.now(); + while (Date.now() - start < timeoutMs) { + // Best-effort fallback when Atomics.wait is unavailable. + } + } +} + +/** + * Find PIDs of gateway processes listening on the given port using synchronous lsof. + * Returns only PIDs that belong to openclaw gateway processes (not the current process). 
+ */ +export function findGatewayPidsOnPortSync(port: number): number[] { + if (process.platform === "win32") { + return []; + } + const lsof = resolveLsofCommandSync(); + const res = spawnSync(lsof, ["-nP", `-iTCP:${port}`, "-sTCP:LISTEN", "-Fpc"], { + encoding: "utf8", + timeout: SPAWN_TIMEOUT_MS, + }); + if (res.error || res.status !== 0) { + return []; + } + const pids: number[] = []; + let currentPid: number | undefined; + let currentCmd: string | undefined; + for (const line of res.stdout.split(/\r?\n/).filter(Boolean)) { + if (line.startsWith("p")) { + if (currentPid != null && currentCmd && currentCmd.toLowerCase().includes("openclaw")) { + pids.push(currentPid); + } + const parsed = Number.parseInt(line.slice(1), 10); + currentPid = Number.isFinite(parsed) && parsed > 0 ? parsed : undefined; + currentCmd = undefined; + } else if (line.startsWith("c")) { + currentCmd = line.slice(1); + } + } + if (currentPid != null && currentCmd && currentCmd.toLowerCase().includes("openclaw")) { + pids.push(currentPid); + } + return pids.filter((pid) => pid !== process.pid); +} + +/** + * Synchronously terminate stale gateway processes. + * Sends SIGTERM, waits briefly, then SIGKILL for survivors. + */ +function terminateStaleProcessesSync(pids: number[]): number[] { + if (pids.length === 0) { + return []; + } + const killed: number[] = []; + for (const pid of pids) { + try { + process.kill(pid, "SIGTERM"); + killed.push(pid); + } catch { + // ESRCH — already gone + } + } + if (killed.length === 0) { + return killed; + } + sleepSync(STALE_SIGTERM_WAIT_MS); + for (const pid of killed) { + try { + process.kill(pid, 0); + process.kill(pid, "SIGKILL"); + } catch { + // already gone + } + } + sleepSync(STALE_SIGKILL_WAIT_MS); + return killed; +} + +/** + * Inspect the gateway port and kill any stale gateway processes holding it. + * Called before service restart commands to prevent port conflicts. 
+ */ +export function cleanStaleGatewayProcessesSync(): number[] { + try { + const port = resolveGatewayPort(undefined, process.env); + const stalePids = findGatewayPidsOnPortSync(port); + if (stalePids.length === 0) { + return []; + } + restartLog.warn( + `killing ${stalePids.length} stale gateway process(es) before restart: ${stalePids.join(", ")}`, + ); + return terminateStaleProcessesSync(stalePids); + } catch { + return []; + } +} + +export const __testing = { + setSleepSyncOverride(fn: ((ms: number) => void) | null) { + sleepSyncOverride = fn; + }, +}; diff --git a/src/infra/restart.test.ts b/src/infra/restart.test.ts new file mode 100644 index 00000000000..23795e46f8e --- /dev/null +++ b/src/infra/restart.test.ts @@ -0,0 +1,111 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +const spawnSyncMock = vi.hoisted(() => vi.fn()); +const resolveLsofCommandSyncMock = vi.hoisted(() => vi.fn()); +const resolveGatewayPortMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + spawnSync: (...args: unknown[]) => spawnSyncMock(...args), +})); + +vi.mock("./ports-lsof.js", () => ({ + resolveLsofCommandSync: (...args: unknown[]) => resolveLsofCommandSyncMock(...args), +})); + +vi.mock("../config/paths.js", () => ({ + resolveGatewayPort: (...args: unknown[]) => resolveGatewayPortMock(...args), +})); + +import { + __testing, + cleanStaleGatewayProcessesSync, + findGatewayPidsOnPortSync, +} from "./restart-stale-pids.js"; + +beforeEach(() => { + spawnSyncMock.mockReset(); + resolveLsofCommandSyncMock.mockReset(); + resolveGatewayPortMock.mockReset(); + + resolveLsofCommandSyncMock.mockReturnValue("/usr/sbin/lsof"); + resolveGatewayPortMock.mockReturnValue(18789); + __testing.setSleepSyncOverride(() => {}); +}); + +afterEach(() => { + __testing.setSleepSyncOverride(null); + vi.restoreAllMocks(); +}); + +describe.runIf(process.platform !== "win32")("findGatewayPidsOnPortSync", () => { + it("parses lsof output and filters 
non-openclaw/current processes", () => { + spawnSyncMock.mockReturnValue({ + error: undefined, + status: 0, + stdout: [ + `p${process.pid}`, + "copenclaw", + "p4100", + "copenclaw-gateway", + "p4200", + "cnode", + "p4300", + "cOpenClaw", + ].join("\n"), + }); + + const pids = findGatewayPidsOnPortSync(18789); + + expect(pids).toEqual([4100, 4300]); + expect(spawnSyncMock).toHaveBeenCalledWith( + "/usr/sbin/lsof", + ["-nP", "-iTCP:18789", "-sTCP:LISTEN", "-Fpc"], + expect.objectContaining({ encoding: "utf8", timeout: 2000 }), + ); + }); + + it("returns empty when lsof fails", () => { + spawnSyncMock.mockReturnValue({ + error: undefined, + status: 1, + stdout: "", + stderr: "lsof failed", + }); + + expect(findGatewayPidsOnPortSync(18789)).toEqual([]); + }); +}); + +describe.runIf(process.platform !== "win32")("cleanStaleGatewayProcessesSync", () => { + it("kills stale gateway pids discovered on the gateway port", () => { + spawnSyncMock.mockReturnValue({ + error: undefined, + status: 0, + stdout: ["p6001", "copenclaw", "p6002", "copenclaw-gateway"].join("\n"), + }); + const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); + + const killed = cleanStaleGatewayProcessesSync(); + + expect(killed).toEqual([6001, 6002]); + expect(resolveGatewayPortMock).toHaveBeenCalledWith(undefined, process.env); + expect(killSpy).toHaveBeenCalledWith(6001, "SIGTERM"); + expect(killSpy).toHaveBeenCalledWith(6002, "SIGTERM"); + expect(killSpy).toHaveBeenCalledWith(6001, "SIGKILL"); + expect(killSpy).toHaveBeenCalledWith(6002, "SIGKILL"); + }); + + it("returns empty when no stale listeners are found", () => { + spawnSyncMock.mockReturnValue({ + error: undefined, + status: 0, + stdout: "", + }); + const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); + + const killed = cleanStaleGatewayProcessesSync(); + + expect(killed).toEqual([]); + expect(killSpy).not.toHaveBeenCalled(); + }); +}); diff --git a/src/infra/restart.ts b/src/infra/restart.ts index 
4dd09beaa1a..c84dfc6f7ac 100644 --- a/src/infra/restart.ts +++ b/src/infra/restart.ts @@ -4,6 +4,7 @@ import { resolveGatewaySystemdServiceName, } from "../daemon/constants.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; +import { cleanStaleGatewayProcessesSync, findGatewayPidsOnPortSync } from "./restart-stale-pids.js"; export type RestartAttempt = { ok: boolean; @@ -20,6 +21,8 @@ const RESTART_COOLDOWN_MS = 30_000; const restartLog = createSubsystemLogger("restart"); +export { findGatewayPidsOnPortSync }; + let sigusr1AuthorizedCount = 0; let sigusr1AuthorizedUntil = 0; let sigusr1ExternalAllowed = false; @@ -287,6 +290,9 @@ export function triggerOpenClawRestart(): RestartAttempt { if (process.env.VITEST || process.env.NODE_ENV === "test") { return { ok: true, method: "supervisor", detail: "test mode" }; } + + cleanStaleGatewayProcessesSync(); + const tried: string[] = []; if (process.platform !== "darwin") { if (process.platform === "linux") { diff --git a/src/infra/safe-open-sync.ts b/src/infra/safe-open-sync.ts index 311849ba9fd..b502b04e90b 100644 --- a/src/infra/safe-open-sync.ts +++ b/src/infra/safe-open-sync.ts @@ -7,9 +7,10 @@ export type SafeOpenSyncResult = | { ok: true; path: string; fd: number; stat: fs.Stats } | { ok: false; reason: SafeOpenSyncFailureReason; error?: unknown }; -const OPEN_READ_FLAGS = - fs.constants.O_RDONLY | - (typeof fs.constants.O_NOFOLLOW === "number" ? fs.constants.O_NOFOLLOW : 0); +type SafeOpenSyncFs = Pick< + typeof fs, + "constants" | "lstatSync" | "realpathSync" | "openSync" | "fstatSync" | "closeSync" +>; function isExpectedPathError(error: unknown): boolean { const code = @@ -25,31 +26,43 @@ export function openVerifiedFileSync(params: { filePath: string; resolvedPath?: string; rejectPathSymlink?: boolean; + rejectHardlinks?: boolean; maxBytes?: number; + ioFs?: SafeOpenSyncFs; }): SafeOpenSyncResult { + const ioFs = params.ioFs ?? 
fs; + const openReadFlags = + ioFs.constants.O_RDONLY | + (typeof ioFs.constants.O_NOFOLLOW === "number" ? ioFs.constants.O_NOFOLLOW : 0); let fd: number | null = null; try { if (params.rejectPathSymlink) { - const candidateStat = fs.lstatSync(params.filePath); + const candidateStat = ioFs.lstatSync(params.filePath); if (candidateStat.isSymbolicLink()) { return { ok: false, reason: "validation" }; } } - const realPath = params.resolvedPath ?? fs.realpathSync(params.filePath); - const preOpenStat = fs.lstatSync(realPath); + const realPath = params.resolvedPath ?? ioFs.realpathSync(params.filePath); + const preOpenStat = ioFs.lstatSync(realPath); if (!preOpenStat.isFile()) { return { ok: false, reason: "validation" }; } + if (params.rejectHardlinks && preOpenStat.nlink > 1) { + return { ok: false, reason: "validation" }; + } if (params.maxBytes !== undefined && preOpenStat.size > params.maxBytes) { return { ok: false, reason: "validation" }; } - fd = fs.openSync(realPath, OPEN_READ_FLAGS); - const openedStat = fs.fstatSync(fd); + fd = ioFs.openSync(realPath, openReadFlags); + const openedStat = ioFs.fstatSync(fd); if (!openedStat.isFile()) { return { ok: false, reason: "validation" }; } + if (params.rejectHardlinks && openedStat.nlink > 1) { + return { ok: false, reason: "validation" }; + } if (params.maxBytes !== undefined && openedStat.size > params.maxBytes) { return { ok: false, reason: "validation" }; } @@ -67,7 +80,7 @@ export function openVerifiedFileSync(params: { return { ok: false, reason: "io", error }; } finally { if (fd !== null) { - fs.closeSync(fd); + ioFs.closeSync(fd); } } } diff --git a/src/infra/session-maintenance-warning.test.ts b/src/infra/session-maintenance-warning.test.ts new file mode 100644 index 00000000000..f0e9590c572 --- /dev/null +++ b/src/infra/session-maintenance-warning.test.ts @@ -0,0 +1,93 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +const mocks = vi.hoisted(() => ({ + resolveSessionAgentId: 
vi.fn(() => "agent-from-key"), + resolveSessionDeliveryTarget: vi.fn(() => ({ + channel: "whatsapp", + to: "+15550001", + accountId: "acct-1", + threadId: "thread-1", + })), + normalizeMessageChannel: vi.fn((channel: string) => channel), + isDeliverableMessageChannel: vi.fn(() => true), + deliverOutboundPayloads: vi.fn(async () => []), + enqueueSystemEvent: vi.fn(), +})); + +vi.mock("../agents/agent-scope.js", () => ({ + resolveSessionAgentId: mocks.resolveSessionAgentId, +})); + +vi.mock("../utils/message-channel.js", () => ({ + normalizeMessageChannel: mocks.normalizeMessageChannel, + isDeliverableMessageChannel: mocks.isDeliverableMessageChannel, +})); + +vi.mock("./outbound/targets.js", () => ({ + resolveSessionDeliveryTarget: mocks.resolveSessionDeliveryTarget, +})); + +vi.mock("./outbound/deliver.js", () => ({ + deliverOutboundPayloads: mocks.deliverOutboundPayloads, +})); + +vi.mock("./system-events.js", () => ({ + enqueueSystemEvent: mocks.enqueueSystemEvent, +})); + +const { deliverSessionMaintenanceWarning } = await import("./session-maintenance-warning.js"); + +describe("deliverSessionMaintenanceWarning", () => { + let prevVitest: string | undefined; + let prevNodeEnv: string | undefined; + + beforeEach(() => { + prevVitest = process.env.VITEST; + prevNodeEnv = process.env.NODE_ENV; + delete process.env.VITEST; + process.env.NODE_ENV = "development"; + mocks.resolveSessionAgentId.mockClear(); + mocks.resolveSessionDeliveryTarget.mockClear(); + mocks.normalizeMessageChannel.mockClear(); + mocks.isDeliverableMessageChannel.mockClear(); + mocks.deliverOutboundPayloads.mockClear(); + mocks.enqueueSystemEvent.mockClear(); + }); + + afterEach(() => { + if (prevVitest === undefined) { + delete process.env.VITEST; + } else { + process.env.VITEST = prevVitest; + } + if (prevNodeEnv === undefined) { + delete process.env.NODE_ENV; + } else { + process.env.NODE_ENV = prevNodeEnv; + } + }); + + it("forwards session context to outbound delivery", async () => { + await 
deliverSessionMaintenanceWarning({ + cfg: {}, + sessionKey: "agent:main:main", + entry: {} as never, + warning: { + activeSessionKey: "agent:main:main", + pruneAfterMs: 1_000, + maxEntries: 100, + wouldPrune: true, + wouldCap: false, + } as never, + }); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "whatsapp", + to: "+15550001", + session: { key: "agent:main:main", agentId: "agent-from-key" }, + }), + ); + expect(mocks.enqueueSystemEvent).not.toHaveBeenCalled(); + }); +}); diff --git a/src/infra/session-maintenance-warning.ts b/src/infra/session-maintenance-warning.ts index 804b419ed3a..df803f88411 100644 --- a/src/infra/session-maintenance-warning.ts +++ b/src/infra/session-maintenance-warning.ts @@ -1,8 +1,8 @@ -import { resolveSessionAgentId } from "../agents/agent-scope.js"; import type { OpenClawConfig } from "../config/config.js"; import type { SessionEntry, SessionMaintenanceWarning } from "../config/sessions.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; import { isDeliverableMessageChannel, normalizeMessageChannel } from "../utils/message-channel.js"; +import { buildOutboundSessionContext } from "./outbound/session-context.js"; import { resolveSessionDeliveryTarget } from "./outbound/targets.js"; import { enqueueSystemEvent } from "./system-events.js"; @@ -96,6 +96,10 @@ export async function deliverSessionMaintenanceWarning(params: WarningParams): P try { const { deliverOutboundPayloads } = await import("./outbound/deliver.js"); + const outboundSession = buildOutboundSessionContext({ + cfg: params.cfg, + sessionKey: params.sessionKey, + }); await deliverOutboundPayloads({ cfg: params.cfg, channel, @@ -103,7 +107,7 @@ export async function deliverSessionMaintenanceWarning(params: WarningParams): P accountId: target.accountId, threadId: target.threadId, payloads: [{ text }], - agentId: resolveSessionAgentId({ sessionKey: params.sessionKey, config: params.cfg }), + session: 
outboundSession, }); } catch (err) { log.warn(`Failed to deliver session maintenance warning: ${String(err)}`); diff --git a/src/infra/supervisor-markers.ts b/src/infra/supervisor-markers.ts new file mode 100644 index 00000000000..231bece5e3d --- /dev/null +++ b/src/infra/supervisor-markers.ts @@ -0,0 +1,20 @@ +export const SUPERVISOR_HINT_ENV_VARS = [ + // macOS launchd + "LAUNCH_JOB_LABEL", + "LAUNCH_JOB_NAME", + // OpenClaw service env markers + "OPENCLAW_LAUNCHD_LABEL", + "OPENCLAW_SYSTEMD_UNIT", + "OPENCLAW_SERVICE_MARKER", + // Linux systemd + "INVOCATION_ID", + "SYSTEMD_EXEC_PID", + "JOURNAL_STREAM", +] as const; + +export function hasSupervisorHint(env: NodeJS.ProcessEnv = process.env): boolean { + return SUPERVISOR_HINT_ENV_VARS.some((key) => { + const value = env[key]; + return typeof value === "string" && value.trim().length > 0; + }); +} diff --git a/src/infra/system-message.test.ts b/src/infra/system-message.test.ts new file mode 100644 index 00000000000..b0c32f31c35 --- /dev/null +++ b/src/infra/system-message.test.ts @@ -0,0 +1,19 @@ +import { describe, expect, it } from "vitest"; +import { SYSTEM_MARK, hasSystemMark, prefixSystemMessage } from "./system-message.js"; + +describe("system-message", () => { + it("prepends the system mark once", () => { + expect(prefixSystemMessage("thread notice")).toBe(`${SYSTEM_MARK} thread notice`); + }); + + it("does not double-prefix messages that already have the mark", () => { + expect(prefixSystemMessage(`${SYSTEM_MARK} already prefixed`)).toBe( + `${SYSTEM_MARK} already prefixed`, + ); + }); + + it("detects marked system text after trim normalization", () => { + expect(hasSystemMark(` ${SYSTEM_MARK} hello`)).toBe(true); + expect(hasSystemMark("hello")).toBe(false); + }); +}); diff --git a/src/infra/system-message.ts b/src/infra/system-message.ts new file mode 100644 index 00000000000..0982880a876 --- /dev/null +++ b/src/infra/system-message.ts @@ -0,0 +1,20 @@ +export const SYSTEM_MARK = "⚙️"; + +function 
normalizeSystemText(value: string): string { + return value.trim(); +} + +export function hasSystemMark(text: string): boolean { + return normalizeSystemText(text).startsWith(SYSTEM_MARK); +} + +export function prefixSystemMessage(text: string): string { + const normalized = normalizeSystemText(text); + if (!normalized) { + return normalized; + } + if (hasSystemMark(normalized)) { + return normalized; + } + return `${SYSTEM_MARK} ${normalized}`; +} diff --git a/src/infra/system-run-approval-binding.ts b/src/infra/system-run-approval-binding.ts new file mode 100644 index 00000000000..a760f4948ef --- /dev/null +++ b/src/infra/system-run-approval-binding.ts @@ -0,0 +1,219 @@ +import crypto from "node:crypto"; +import type { SystemRunApprovalBindingV1, SystemRunApprovalPlanV2 } from "./exec-approvals.js"; +import { normalizeEnvVarKey } from "./host-env-security.js"; + +type NormalizedSystemRunEnvEntry = [key: string, value: string]; + +function normalizeString(value: unknown): string | null { + if (typeof value !== "string") { + return null; + } + const trimmed = value.trim(); + return trimmed ? trimmed : null; +} + +function normalizeStringArray(value: unknown): string[] { + return Array.isArray(value) ? 
value.map((entry) => String(entry)) : []; +} + +export function normalizeSystemRunApprovalPlanV2(value: unknown): SystemRunApprovalPlanV2 | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + const candidate = value as Record; + if (candidate.version !== 2) { + return null; + } + const argv = normalizeStringArray(candidate.argv); + if (argv.length === 0) { + return null; + } + return { + version: 2, + argv, + cwd: normalizeString(candidate.cwd), + rawCommand: normalizeString(candidate.rawCommand), + agentId: normalizeString(candidate.agentId), + sessionKey: normalizeString(candidate.sessionKey), + }; +} + +function normalizeSystemRunEnvEntries(env: unknown): NormalizedSystemRunEnvEntry[] { + if (!env || typeof env !== "object" || Array.isArray(env)) { + return []; + } + const entries: NormalizedSystemRunEnvEntry[] = []; + for (const [rawKey, rawValue] of Object.entries(env as Record)) { + if (typeof rawValue !== "string") { + continue; + } + const key = normalizeEnvVarKey(rawKey, { portable: true }); + if (!key) { + continue; + } + entries.push([key, rawValue]); + } + entries.sort((a, b) => a[0].localeCompare(b[0])); + return entries; +} + +function hashSystemRunEnvEntries(entries: NormalizedSystemRunEnvEntry[]): string | null { + if (entries.length === 0) { + return null; + } + return crypto.createHash("sha256").update(JSON.stringify(entries)).digest("hex"); +} + +export function buildSystemRunApprovalEnvBinding(env: unknown): { + envHash: string | null; + envKeys: string[]; +} { + const entries = normalizeSystemRunEnvEntries(env); + return { + envHash: hashSystemRunEnvEntries(entries), + envKeys: entries.map(([key]) => key), + }; +} + +export function buildSystemRunApprovalBindingV1(params: { + argv: unknown; + cwd?: unknown; + agentId?: unknown; + sessionKey?: unknown; + env?: unknown; +}): { binding: SystemRunApprovalBindingV1; envKeys: string[] } { + const envBinding = buildSystemRunApprovalEnvBinding(params.env); + 
return { + binding: { + version: 1, + argv: normalizeStringArray(params.argv), + cwd: normalizeString(params.cwd), + agentId: normalizeString(params.agentId), + sessionKey: normalizeString(params.sessionKey), + envHash: envBinding.envHash, + }, + envKeys: envBinding.envKeys, + }; +} + +function argvMatches(expectedArgv: string[], actualArgv: string[]): boolean { + if (expectedArgv.length === 0 || expectedArgv.length !== actualArgv.length) { + return false; + } + for (let i = 0; i < expectedArgv.length; i += 1) { + if (expectedArgv[i] !== actualArgv[i]) { + return false; + } + } + return true; +} + +export type SystemRunApprovalMatchResult = + | { ok: true } + | { + ok: false; + code: "APPROVAL_REQUEST_MISMATCH" | "APPROVAL_ENV_BINDING_MISSING" | "APPROVAL_ENV_MISMATCH"; + message: string; + details?: Record; + }; + +type SystemRunApprovalMismatch = Extract; + +const APPROVAL_REQUEST_MISMATCH_MESSAGE = "approval id does not match request"; + +function requestMismatch(details?: Record): SystemRunApprovalMatchResult { + return { + ok: false, + code: "APPROVAL_REQUEST_MISMATCH", + message: APPROVAL_REQUEST_MISMATCH_MESSAGE, + details, + }; +} + +export function matchSystemRunApprovalEnvHash(params: { + expectedEnvHash: string | null; + actualEnvHash: string | null; + actualEnvKeys: string[]; +}): SystemRunApprovalMatchResult { + if (!params.expectedEnvHash && !params.actualEnvHash) { + return { ok: true }; + } + if (!params.expectedEnvHash && params.actualEnvHash) { + return { + ok: false, + code: "APPROVAL_ENV_BINDING_MISSING", + message: "approval id missing env binding for requested env overrides", + details: { envKeys: params.actualEnvKeys }, + }; + } + if (params.expectedEnvHash !== params.actualEnvHash) { + return { + ok: false, + code: "APPROVAL_ENV_MISMATCH", + message: "approval id env binding mismatch", + details: { + envKeys: params.actualEnvKeys, + expectedEnvHash: params.expectedEnvHash, + actualEnvHash: params.actualEnvHash, + }, + }; + } + return { ok: 
true }; +} + +export function matchSystemRunApprovalBindingV1(params: { + expected: SystemRunApprovalBindingV1; + actual: SystemRunApprovalBindingV1; + actualEnvKeys: string[]; +}): SystemRunApprovalMatchResult { + if (params.expected.version !== 1 || params.actual.version !== 1) { + return requestMismatch({ + expectedVersion: params.expected.version, + actualVersion: params.actual.version, + }); + } + if (!argvMatches(params.expected.argv, params.actual.argv)) { + return requestMismatch(); + } + if (params.expected.cwd !== params.actual.cwd) { + return requestMismatch(); + } + if (params.expected.agentId !== params.actual.agentId) { + return requestMismatch(); + } + if (params.expected.sessionKey !== params.actual.sessionKey) { + return requestMismatch(); + } + return matchSystemRunApprovalEnvHash({ + expectedEnvHash: params.expected.envHash, + actualEnvHash: params.actual.envHash, + actualEnvKeys: params.actualEnvKeys, + }); +} + +export function missingSystemRunApprovalBindingV1(params: { + actualEnvKeys: string[]; +}): SystemRunApprovalMatchResult { + return requestMismatch({ + requiredBindingVersion: 1, + envKeys: params.actualEnvKeys, + }); +} + +export function toSystemRunApprovalMismatchError(params: { + runId: string; + match: SystemRunApprovalMismatch; +}): { ok: false; message: string; details: Record } { + const details: Record = { + code: params.match.code, + runId: params.runId, + }; + if (params.match.details) { + Object.assign(details, params.match.details); + } + return { + ok: false, + message: params.match.message, + details, + }; +} diff --git a/src/infra/system-run-approval-context.ts b/src/infra/system-run-approval-context.ts new file mode 100644 index 00000000000..25cbee1fcfc --- /dev/null +++ b/src/infra/system-run-approval-context.ts @@ -0,0 +1,123 @@ +import type { SystemRunApprovalPlanV2 } from "./exec-approvals.js"; +import { normalizeSystemRunApprovalPlanV2 } from "./system-run-approval-binding.js"; +import { formatExecCommand, 
resolveSystemRunCommand } from "./system-run-command.js"; + +type PreparedRunPayload = { + cmdText: string; + plan: SystemRunApprovalPlanV2; +}; + +type SystemRunApprovalRequestContext = { + planV2: SystemRunApprovalPlanV2 | null; + commandArgv: string[] | undefined; + commandText: string; + cwd: string | null; + agentId: string | null; + sessionKey: string | null; +}; + +type SystemRunApprovalRuntimeContext = + | { + ok: true; + planV2: SystemRunApprovalPlanV2 | null; + argv: string[]; + cwd: string | null; + agentId: string | null; + sessionKey: string | null; + rawCommand: string | null; + } + | { + ok: false; + message: string; + details?: Record; + }; + +function normalizeString(value: unknown): string | null { + if (typeof value !== "string") { + return null; + } + const trimmed = value.trim(); + return trimmed ? trimmed : null; +} + +function normalizeStringArray(value: unknown): string[] { + return Array.isArray(value) ? value.map((entry) => String(entry)) : []; +} + +function normalizeCommandText(value: unknown): string { + return typeof value === "string" ? value : ""; +} + +export function parsePreparedSystemRunPayload(payload: unknown): PreparedRunPayload | null { + if (!payload || typeof payload !== "object" || Array.isArray(payload)) { + return null; + } + const raw = payload as { cmdText?: unknown; plan?: unknown }; + const cmdText = normalizeString(raw.cmdText); + const plan = normalizeSystemRunApprovalPlanV2(raw.plan); + if (!cmdText || !plan) { + return null; + } + return { cmdText, plan }; +} + +export function resolveSystemRunApprovalRequestContext(params: { + host?: unknown; + command?: unknown; + commandArgv?: unknown; + systemRunPlanV2?: unknown; + cwd?: unknown; + agentId?: unknown; + sessionKey?: unknown; +}): SystemRunApprovalRequestContext { + const host = normalizeString(params.host) ?? ""; + const planV2 = host === "node" ? 
normalizeSystemRunApprovalPlanV2(params.systemRunPlanV2) : null; + const fallbackArgv = normalizeStringArray(params.commandArgv); + const fallbackCommand = normalizeCommandText(params.command); + return { + planV2, + commandArgv: planV2?.argv ?? (fallbackArgv.length > 0 ? fallbackArgv : undefined), + commandText: planV2 ? (planV2.rawCommand ?? formatExecCommand(planV2.argv)) : fallbackCommand, + cwd: planV2?.cwd ?? normalizeString(params.cwd), + agentId: planV2?.agentId ?? normalizeString(params.agentId), + sessionKey: planV2?.sessionKey ?? normalizeString(params.sessionKey), + }; +} + +export function resolveSystemRunApprovalRuntimeContext(params: { + planV2?: unknown; + command?: unknown; + rawCommand?: unknown; + cwd?: unknown; + agentId?: unknown; + sessionKey?: unknown; +}): SystemRunApprovalRuntimeContext { + const normalizedPlan = normalizeSystemRunApprovalPlanV2(params.planV2 ?? null); + if (normalizedPlan) { + return { + ok: true, + planV2: normalizedPlan, + argv: [...normalizedPlan.argv], + cwd: normalizedPlan.cwd, + agentId: normalizedPlan.agentId, + sessionKey: normalizedPlan.sessionKey, + rawCommand: normalizedPlan.rawCommand, + }; + } + const command = resolveSystemRunCommand({ + command: params.command, + rawCommand: params.rawCommand, + }); + if (!command.ok) { + return { ok: false, message: command.message, details: command.details }; + } + return { + ok: true, + planV2: null, + argv: command.argv, + cwd: normalizeString(params.cwd), + agentId: normalizeString(params.agentId), + sessionKey: normalizeString(params.sessionKey), + rawCommand: normalizeString(params.rawCommand), + }; +} diff --git a/src/infra/system-run-approval-mismatch.contract.test.ts b/src/infra/system-run-approval-mismatch.contract.test.ts new file mode 100644 index 00000000000..890e0de1bf9 --- /dev/null +++ b/src/infra/system-run-approval-mismatch.contract.test.ts @@ -0,0 +1,41 @@ +import fs from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; 
+import { describe, expect, test } from "vitest"; +import { + toSystemRunApprovalMismatchError, + type SystemRunApprovalMatchResult, +} from "./system-run-approval-binding.js"; + +type FixtureCase = { + name: string; + runId: string; + match: Extract; + expected: { + ok: false; + message: string; + details: Record; + }; +}; + +type Fixture = { + cases: FixtureCase[]; +}; + +const fixturePath = path.resolve( + path.dirname(fileURLToPath(import.meta.url)), + "../../test/fixtures/system-run-approval-mismatch-contract.json", +); +const fixture = JSON.parse(fs.readFileSync(fixturePath, "utf8")) as Fixture; + +describe("system-run approval mismatch contract fixtures", () => { + for (const entry of fixture.cases) { + test(entry.name, () => { + const result = toSystemRunApprovalMismatchError({ + runId: entry.runId, + match: entry.match, + }); + expect(result).toEqual(entry.expected); + }); + } +}); diff --git a/src/infra/system-run-command.test.ts b/src/infra/system-run-command.test.ts index 7186823d84b..7f7d4fee96c 100644 --- a/src/infra/system-run-command.test.ts +++ b/src/infra/system-run-command.test.ts @@ -21,6 +21,10 @@ describe("system run command helpers", () => { expect(formatExecCommand(["echo", "hi there"])).toBe('echo "hi there"'); }); + test("formatExecCommand preserves trailing whitespace in argv tokens", () => { + expect(formatExecCommand(["runner "])).toBe('"runner "'); + }); + test("extractShellCommandFromArgv extracts sh -lc command", () => { expect(extractShellCommandFromArgv(["/bin/sh", "-lc", "echo hi"])).toBe("echo hi"); }); diff --git a/src/infra/system-run-command.ts b/src/infra/system-run-command.ts index b03d715fc72..dc54bf7b561 100644 --- a/src/infra/system-run-command.ts +++ b/src/infra/system-run-command.ts @@ -35,15 +35,14 @@ export type ResolvedSystemRunCommand = export function formatExecCommand(argv: string[]): string { return argv .map((arg) => { - const trimmed = arg.trim(); - if (!trimmed) { + if (arg.length === 0) { return '""'; } - 
const needsQuotes = /\s|"/.test(trimmed); + const needsQuotes = /\s|"/.test(arg); if (!needsQuotes) { - return trimmed; + return arg; } - return `"${trimmed.replace(/"/g, '\\"')}"`; + return `"${arg.replace(/"/g, '\\"')}"`; }) .join(" "); } diff --git a/src/infra/tmp-openclaw-dir.test.ts b/src/infra/tmp-openclaw-dir.test.ts index 0424e5e0223..4c0a68b9037 100644 --- a/src/infra/tmp-openclaw-dir.test.ts +++ b/src/infra/tmp-openclaw-dir.test.ts @@ -8,24 +8,60 @@ function fallbackTmp(uid = 501) { return path.join("/var/fallback", `openclaw-${uid}`); } +function nodeErrorWithCode(code: string) { + const err = new Error(code) as Error & { code?: string }; + err.code = code; + return err; +} + +function secureDirStat(uid = 501) { + return { + isDirectory: () => true, + isSymbolicLink: () => false, + uid, + mode: 0o40700, + }; +} + function resolveWithMocks(params: { lstatSync: NonNullable; + fallbackLstatSync?: NonNullable; accessSync?: NonNullable; + chmodSync?: NonNullable; + warn?: NonNullable; uid?: number; tmpdirPath?: string; }) { + const uid = params.uid ?? 501; + const fallbackPath = fallbackTmp(uid); const accessSync = params.accessSync ?? vi.fn(); + const chmodSync = params.chmodSync ?? vi.fn(); + const warn = params.warn ?? vi.fn(); + const wrappedLstatSync = vi.fn((target: string) => { + if (target === POSIX_OPENCLAW_TMP_DIR) { + return params.lstatSync(target); + } + if (target === fallbackPath) { + if (params.fallbackLstatSync) { + return params.fallbackLstatSync(target); + } + return secureDirStat(uid); + } + return secureDirStat(uid); + }) as NonNullable; const mkdirSync = vi.fn(); - const getuid = vi.fn(() => params.uid ?? 501); + const getuid = vi.fn(() => uid); const tmpdir = vi.fn(() => params.tmpdirPath ?? 
"/var/fallback"); const resolved = resolvePreferredOpenClawTmpDir({ accessSync, - lstatSync: params.lstatSync, + chmodSync, + lstatSync: wrappedLstatSync, mkdirSync, getuid, tmpdir, + warn, }); - return { resolved, accessSync, lstatSync: params.lstatSync, mkdirSync, tmpdir }; + return { resolved, accessSync, lstatSync: wrappedLstatSync, mkdirSync, tmpdir }; } describe("resolvePreferredOpenClawTmpDir", () => { @@ -45,24 +81,12 @@ describe("resolvePreferredOpenClawTmpDir", () => { }); it("prefers /tmp/openclaw when it does not exist but /tmp is writable", () => { - const lstatSyncMock = vi.fn>(() => { - const err = new Error("missing") as Error & { code?: string }; - err.code = "ENOENT"; - throw err; - }); - - // second lstat call (after mkdir) should succeed - lstatSyncMock.mockImplementationOnce(() => { - const err = new Error("missing") as Error & { code?: string }; - err.code = "ENOENT"; - throw err; - }); - lstatSyncMock.mockImplementationOnce(() => ({ - isDirectory: () => true, - isSymbolicLink: () => false, - uid: 501, - mode: 0o40700, - })); + const lstatSyncMock = vi + .fn>() + .mockImplementationOnce(() => { + throw nodeErrorWithCode("ENOENT"); + }) + .mockImplementationOnce(() => secureDirStat(501)); const { resolved, accessSync, mkdirSync, tmpdir } = resolveWithMocks({ lstatSync: lstatSyncMock, @@ -84,7 +108,7 @@ describe("resolvePreferredOpenClawTmpDir", () => { const { resolved, tmpdir } = resolveWithMocks({ lstatSync }); expect(resolved).toBe(fallbackTmp()); - expect(tmpdir).toHaveBeenCalledTimes(1); + expect(tmpdir).toHaveBeenCalled(); }); it("falls back to os.tmpdir()/openclaw when /tmp is not writable", () => { @@ -94,9 +118,7 @@ describe("resolvePreferredOpenClawTmpDir", () => { } }); const lstatSync = vi.fn(() => { - const err = new Error("missing") as Error & { code?: string }; - err.code = "ENOENT"; - throw err; + throw nodeErrorWithCode("ENOENT"); }); const { resolved, tmpdir } = resolveWithMocks({ accessSync, @@ -104,7 +126,7 @@ 
describe("resolvePreferredOpenClawTmpDir", () => { }); expect(resolved).toBe(fallbackTmp()); - expect(tmpdir).toHaveBeenCalledTimes(1); + expect(tmpdir).toHaveBeenCalled(); }); it("falls back when /tmp/openclaw is a symlink", () => { @@ -118,7 +140,7 @@ describe("resolvePreferredOpenClawTmpDir", () => { const { resolved, tmpdir } = resolveWithMocks({ lstatSync }); expect(resolved).toBe(fallbackTmp()); - expect(tmpdir).toHaveBeenCalledTimes(1); + expect(tmpdir).toHaveBeenCalled(); }); it("falls back when /tmp/openclaw is not owned by the current user", () => { @@ -132,7 +154,7 @@ describe("resolvePreferredOpenClawTmpDir", () => { const { resolved, tmpdir } = resolveWithMocks({ lstatSync }); expect(resolved).toBe(fallbackTmp()); - expect(tmpdir).toHaveBeenCalledTimes(1); + expect(tmpdir).toHaveBeenCalled(); }); it("falls back when /tmp/openclaw is group/other writable", () => { @@ -145,6 +167,142 @@ describe("resolvePreferredOpenClawTmpDir", () => { const { resolved, tmpdir } = resolveWithMocks({ lstatSync }); expect(resolved).toBe(fallbackTmp()); - expect(tmpdir).toHaveBeenCalledTimes(1); + expect(tmpdir).toHaveBeenCalled(); + }); + + it("throws when fallback path is a symlink", () => { + const lstatSync = vi.fn(() => ({ + isDirectory: () => true, + isSymbolicLink: () => true, + uid: 501, + mode: 0o120777, + })); + const fallbackLstatSync = vi.fn(() => ({ + isDirectory: () => true, + isSymbolicLink: () => true, + uid: 501, + mode: 0o120777, + })); + + expect(() => + resolveWithMocks({ + lstatSync, + fallbackLstatSync, + }), + ).toThrow(/Unsafe fallback OpenClaw temp dir/); + }); + + it("creates fallback directory when missing, then validates ownership and mode", () => { + const lstatSync = vi.fn(() => ({ + isDirectory: () => true, + isSymbolicLink: () => true, + uid: 501, + mode: 0o120777, + })); + const fallbackLstatSync = vi + .fn>() + .mockImplementationOnce(() => { + throw nodeErrorWithCode("ENOENT"); + }) + .mockImplementationOnce(() => secureDirStat(501)); + + 
const { resolved, mkdirSync } = resolveWithMocks({ + lstatSync, + fallbackLstatSync, + }); + + expect(resolved).toBe(fallbackTmp()); + expect(mkdirSync).toHaveBeenCalledWith(fallbackTmp(), { recursive: true, mode: 0o700 }); + }); + + it("repairs fallback directory permissions after create when umask makes it group-writable", () => { + const fallbackPath = fallbackTmp(); + let fallbackMode = 0o40775; + const lstatSync = vi.fn>(() => { + throw nodeErrorWithCode("ENOENT"); + }); + const fallbackLstatSync = vi + .fn>() + .mockImplementationOnce(() => { + throw nodeErrorWithCode("ENOENT"); + }) + .mockImplementation(() => ({ + isDirectory: () => true, + isSymbolicLink: () => false, + uid: 501, + mode: fallbackMode, + })); + const chmodSync = vi.fn((target: string, mode: number) => { + if (target === fallbackPath && mode === 0o700) { + fallbackMode = 0o40700; + } + }); + + const resolved = resolvePreferredOpenClawTmpDir({ + accessSync: vi.fn((target: string) => { + if (target === "/tmp") { + throw new Error("read-only"); + } + }), + lstatSync: vi.fn((target: string) => { + if (target === POSIX_OPENCLAW_TMP_DIR) { + return lstatSync(target); + } + if (target === fallbackPath) { + return fallbackLstatSync(target); + } + return secureDirStat(501); + }), + mkdirSync: vi.fn(), + chmodSync, + getuid: vi.fn(() => 501), + tmpdir: vi.fn(() => "/var/fallback"), + warn: vi.fn(), + }); + + expect(resolved).toBe(fallbackPath); + expect(chmodSync).toHaveBeenCalledWith(fallbackPath, 0o700); + }); + + it("repairs existing fallback directory when permissions are too broad", () => { + const fallbackPath = fallbackTmp(); + let fallbackMode = 0o40775; + const chmodSync = vi.fn((target: string, mode: number) => { + if (target === fallbackPath && mode === 0o700) { + fallbackMode = 0o40700; + } + }); + const warn = vi.fn(); + + const resolved = resolvePreferredOpenClawTmpDir({ + accessSync: vi.fn((target: string) => { + if (target === "/tmp") { + throw new Error("read-only"); + } + }), + 
lstatSync: vi.fn((target: string) => { + if (target === POSIX_OPENCLAW_TMP_DIR) { + throw nodeErrorWithCode("ENOENT"); + } + if (target === fallbackPath) { + return { + isDirectory: () => true, + isSymbolicLink: () => false, + uid: 501, + mode: fallbackMode, + }; + } + return secureDirStat(501); + }), + mkdirSync: vi.fn(), + chmodSync, + getuid: vi.fn(() => 501), + tmpdir: vi.fn(() => "/var/fallback"), + warn, + }); + + expect(resolved).toBe(fallbackPath); + expect(chmodSync).toHaveBeenCalledWith(fallbackPath, 0o700); + expect(warn).toHaveBeenCalledWith(expect.stringContaining("tightened permissions on temp dir")); }); }); diff --git a/src/infra/tmp-openclaw-dir.ts b/src/infra/tmp-openclaw-dir.ts index 1e8250b3210..7fc43926c5c 100644 --- a/src/infra/tmp-openclaw-dir.ts +++ b/src/infra/tmp-openclaw-dir.ts @@ -3,9 +3,11 @@ import os from "node:os"; import path from "node:path"; export const POSIX_OPENCLAW_TMP_DIR = "/tmp/openclaw"; +const TMP_DIR_ACCESS_MODE = fs.constants.W_OK | fs.constants.X_OK; type ResolvePreferredOpenClawTmpDirOptions = { accessSync?: (path: string, mode?: number) => void; + chmodSync?: (path: string, mode: number) => void; lstatSync?: (path: string) => { isDirectory(): boolean; isSymbolicLink(): boolean; @@ -15,6 +17,7 @@ type ResolvePreferredOpenClawTmpDirOptions = { mkdirSync?: (path: string, opts: { recursive: boolean; mode?: number }) => void; getuid?: () => number | undefined; tmpdir?: () => string; + warn?: (message: string) => void; }; type MaybeNodeError = { code?: string }; @@ -32,8 +35,10 @@ export function resolvePreferredOpenClawTmpDir( options: ResolvePreferredOpenClawTmpDirOptions = {}, ): string { const accessSync = options.accessSync ?? fs.accessSync; + const chmodSync = options.chmodSync ?? fs.chmodSync; const lstatSync = options.lstatSync ?? fs.lstatSync; const mkdirSync = options.mkdirSync ?? fs.mkdirSync; + const warn = options.warn ?? ((message: string) => console.warn(message)); const getuid = options.getuid ?? 
(() => { @@ -66,7 +71,7 @@ export function resolvePreferredOpenClawTmpDir( return path.join(base, suffix); }; - const isTrustedPreferredDir = (st: { + const isTrustedTmpDir = (st: { isDirectory(): boolean; isSymbolicLink(): boolean; mode?: number; @@ -75,17 +80,13 @@ export function resolvePreferredOpenClawTmpDir( return st.isDirectory() && !st.isSymbolicLink() && isSecureDirForUser(st); }; - const resolvePreferredState = ( - requireWritableAccess: boolean, - ): "available" | "missing" | "invalid" => { + const resolveDirState = (candidatePath: string): "available" | "missing" | "invalid" => { try { - const preferred = lstatSync(POSIX_OPENCLAW_TMP_DIR); - if (!isTrustedPreferredDir(preferred)) { + const candidate = lstatSync(candidatePath); + if (!isTrustedTmpDir(candidate)) { return "invalid"; } - if (requireWritableAccess) { - accessSync(POSIX_OPENCLAW_TMP_DIR, fs.constants.W_OK | fs.constants.X_OK); - } + accessSync(candidatePath, TMP_DIR_ACCESS_MODE); return "available"; } catch (err) { if (isNodeErrorWithCode(err, "ENOENT")) { @@ -95,23 +96,74 @@ export function resolvePreferredOpenClawTmpDir( } }; - const existingPreferredState = resolvePreferredState(true); + const tryRepairWritableBits = (candidatePath: string): boolean => { + try { + const st = lstatSync(candidatePath); + if (!st.isDirectory() || st.isSymbolicLink()) { + return false; + } + if (uid !== undefined && typeof st.uid === "number" && st.uid !== uid) { + return false; + } + if (typeof st.mode !== "number" || (st.mode & 0o022) === 0) { + return false; + } + chmodSync(candidatePath, 0o700); + warn(`[openclaw] tightened permissions on temp dir: ${candidatePath}`); + return resolveDirState(candidatePath) === "available"; + } catch { + return false; + } + }; + + const ensureTrustedFallbackDir = (): string => { + const fallbackPath = fallback(); + const state = resolveDirState(fallbackPath); + if (state === "available") { + return fallbackPath; + } + if (state === "invalid") { + if 
(tryRepairWritableBits(fallbackPath)) { + return fallbackPath; + } + throw new Error(`Unsafe fallback OpenClaw temp dir: ${fallbackPath}`); + } + try { + mkdirSync(fallbackPath, { recursive: true, mode: 0o700 }); + chmodSync(fallbackPath, 0o700); + } catch { + throw new Error(`Unable to create fallback OpenClaw temp dir: ${fallbackPath}`); + } + if (resolveDirState(fallbackPath) !== "available" && !tryRepairWritableBits(fallbackPath)) { + throw new Error(`Unsafe fallback OpenClaw temp dir: ${fallbackPath}`); + } + return fallbackPath; + }; + + const existingPreferredState = resolveDirState(POSIX_OPENCLAW_TMP_DIR); if (existingPreferredState === "available") { return POSIX_OPENCLAW_TMP_DIR; } if (existingPreferredState === "invalid") { - return fallback(); + if (tryRepairWritableBits(POSIX_OPENCLAW_TMP_DIR)) { + return POSIX_OPENCLAW_TMP_DIR; + } + return ensureTrustedFallbackDir(); } try { - accessSync("/tmp", fs.constants.W_OK | fs.constants.X_OK); + accessSync("/tmp", TMP_DIR_ACCESS_MODE); // Create with a safe default; subsequent callers expect it exists. 
mkdirSync(POSIX_OPENCLAW_TMP_DIR, { recursive: true, mode: 0o700 }); - if (resolvePreferredState(true) !== "available") { - return fallback(); + chmodSync(POSIX_OPENCLAW_TMP_DIR, 0o700); + if ( + resolveDirState(POSIX_OPENCLAW_TMP_DIR) !== "available" && + !tryRepairWritableBits(POSIX_OPENCLAW_TMP_DIR) + ) { + return ensureTrustedFallbackDir(); } return POSIX_OPENCLAW_TMP_DIR; } catch { - return fallback(); + return ensureTrustedFallbackDir(); } } diff --git a/src/infra/update-global.ts b/src/infra/update-global.ts index e85949f3cab..03a405b8f70 100644 --- a/src/infra/update-global.ts +++ b/src/infra/update-global.ts @@ -14,6 +14,10 @@ const PRIMARY_PACKAGE_NAME = "openclaw"; const ALL_PACKAGE_NAMES = [PRIMARY_PACKAGE_NAME] as const; const GLOBAL_RENAME_PREFIX = "."; const NPM_GLOBAL_INSTALL_QUIET_FLAGS = ["--no-fund", "--no-audit", "--loglevel=error"] as const; +const NPM_GLOBAL_INSTALL_OMIT_OPTIONAL_FLAGS = [ + "--omit=optional", + ...NPM_GLOBAL_INSTALL_QUIET_FLAGS, +] as const; async function tryRealpath(targetPath: string): Promise { try { @@ -139,6 +143,16 @@ export function globalInstallArgs(manager: GlobalInstallManager, spec: string): return ["npm", "i", "-g", spec, ...NPM_GLOBAL_INSTALL_QUIET_FLAGS]; } +export function globalInstallFallbackArgs( + manager: GlobalInstallManager, + spec: string, +): string[] | null { + if (manager !== "npm") { + return null; + } + return ["npm", "i", "-g", spec, ...NPM_GLOBAL_INSTALL_OMIT_OPTIONAL_FLAGS]; +} + export async function cleanupGlobalRenameDirs(params: { globalRoot: string; packageName: string; diff --git a/src/infra/update-runner.test.ts b/src/infra/update-runner.test.ts index 2ad84305794..26ae50a86a7 100644 --- a/src/infra/update-runner.test.ts +++ b/src/infra/update-runner.test.ts @@ -417,6 +417,51 @@ describe("runGatewayUpdate", () => { expect(await pathExists(staleDir)).toBe(false); }); + it("retries global npm update with --omit=optional when initial install fails", async () => { + const nodeModules = 
path.join(tempDir, "node_modules"); + const pkgRoot = path.join(nodeModules, "openclaw"); + await seedGlobalPackageRoot(pkgRoot); + + let firstAttempt = true; + const runCommand = async (argv: string[]) => { + const key = argv.join(" "); + if (key === `git -C ${pkgRoot} rev-parse --show-toplevel`) { + return { stdout: "", stderr: "not a git repository", code: 128 }; + } + if (key === "npm root -g") { + return { stdout: nodeModules, stderr: "", code: 0 }; + } + if (key === "pnpm root -g") { + return { stdout: "", stderr: "", code: 1 }; + } + if (key === "npm i -g openclaw@latest --no-fund --no-audit --loglevel=error") { + firstAttempt = false; + return { stdout: "", stderr: "node-gyp failed", code: 1 }; + } + if ( + key === "npm i -g openclaw@latest --omit=optional --no-fund --no-audit --loglevel=error" + ) { + await fs.writeFile( + path.join(pkgRoot, "package.json"), + JSON.stringify({ name: "openclaw", version: "2.0.0" }), + "utf-8", + ); + return { stdout: "ok", stderr: "", code: 0 }; + } + return { stdout: "", stderr: "", code: 0 }; + }; + + const result = await runWithCommand(runCommand, { cwd: pkgRoot }); + + expect(firstAttempt).toBe(false); + expect(result.status).toBe("ok"); + expect(result.mode).toBe("npm"); + expect(result.steps.map((s) => s.name)).toEqual([ + "global update", + "global update (omit optional)", + ]); + }); + it("updates global bun installs when detected", async () => { const bunInstall = path.join(tempDir, "bun-install"); await withEnvAsync({ BUN_INSTALL: bunInstall }, async () => { diff --git a/src/infra/update-runner.ts b/src/infra/update-runner.ts index 6631b6dd35f..8a9d56158b8 100644 --- a/src/infra/update-runner.ts +++ b/src/infra/update-runner.ts @@ -22,6 +22,7 @@ import { cleanupGlobalRenameDirs, detectGlobalInstallManagerForRoot, globalInstallArgs, + globalInstallFallbackArgs, } from "./update-global.js"; export type UpdateStepResult = { @@ -875,6 +876,7 @@ export async function runGatewayUpdate(opts: UpdateRunnerOptions = {}): 
Promise< const channel = opts.channel ?? DEFAULT_PACKAGE_CHANNEL; const tag = normalizeTag(opts.tag ?? channelToNpmTag(channel)); const spec = `${packageName}@${tag}`; + const steps: UpdateStepResult[] = []; const updateStep = await runStep({ runCommand, name: "global update", @@ -885,13 +887,33 @@ export async function runGatewayUpdate(opts: UpdateRunnerOptions = {}): Promise< stepIndex: 0, totalSteps: 1, }); - const steps = [updateStep]; + steps.push(updateStep); + + let finalStep = updateStep; + if (updateStep.exitCode !== 0) { + const fallbackArgv = globalInstallFallbackArgs(globalManager, spec); + if (fallbackArgv) { + const fallbackStep = await runStep({ + runCommand, + name: "global update (omit optional)", + argv: fallbackArgv, + cwd: pkgRoot, + timeoutMs, + progress, + stepIndex: 0, + totalSteps: 1, + }); + steps.push(fallbackStep); + finalStep = fallbackStep; + } + } + const afterVersion = await readPackageVersion(pkgRoot); return { - status: updateStep.exitCode === 0 ? "ok" : "error", + status: finalStep.exitCode === 0 ? "ok" : "error", mode: globalManager, root: pkgRoot, - reason: updateStep.exitCode === 0 ? undefined : updateStep.name, + reason: finalStep.exitCode === 0 ? 
undefined : finalStep.name, before: { version: beforeVersion }, after: { version: afterVersion }, steps, diff --git a/src/line/bot-access.ts b/src/line/bot-access.ts index fa7d87ae48c..461b9cb444c 100644 --- a/src/line/bot-access.ts +++ b/src/line/bot-access.ts @@ -1,4 +1,8 @@ -import { firstDefined, isSenderIdAllowed, mergeAllowFromSources } from "../channels/allow-from.js"; +import { + firstDefined, + isSenderIdAllowed, + mergeDmAllowFromSources, +} from "../channels/allow-from.js"; export type NormalizedAllowFrom = { entries: string[]; @@ -27,11 +31,11 @@ export const normalizeAllowFrom = (list?: Array): NormalizedAll }; }; -export const normalizeAllowFromWithStore = (params: { +export const normalizeDmAllowFromWithStore = (params: { allowFrom?: Array; storeAllowFrom?: string[]; dmPolicy?: string; -}): NormalizedAllowFrom => normalizeAllowFrom(mergeAllowFromSources(params)); +}): NormalizedAllowFrom => normalizeAllowFrom(mergeDmAllowFromSources(params)); export const isSenderAllowed = (params: { allow: NormalizedAllowFrom; diff --git a/src/line/bot-handlers.test.ts b/src/line/bot-handlers.test.ts index 32eaab80a61..54125e5c65c 100644 --- a/src/line/bot-handlers.test.ts +++ b/src/line/bot-handlers.test.ts @@ -182,6 +182,41 @@ describe("handleLineWebhookEvents", () => { expect(processMessage).toHaveBeenCalledTimes(1); }); + it("blocks group sender that is only present in pairing-store allowlist", async () => { + const processMessage = vi.fn(); + readAllowFromStoreMock.mockResolvedValueOnce(["user-paired"]); + const event = { + type: "message", + message: { id: "m3b", type: "text", text: "hi" }, + replyToken: "reply-token", + timestamp: Date.now(), + source: { type: "group", groupId: "group-1", userId: "user-paired" }, + mode: "active", + webhookEventId: "evt-3b", + deliveryContext: { isRedelivery: false }, + } as MessageEvent; + + await handleLineWebhookEvents([event], { + cfg: { + channels: { line: { groupPolicy: "allowlist", groupAllowFrom: ["user-owner"] } }, + 
}, + account: { + accountId: "default", + enabled: true, + channelAccessToken: "token", + channelSecret: "secret", + tokenSource: "config", + config: { groupPolicy: "allowlist", groupAllowFrom: ["user-owner"] }, + }, + runtime: createRuntime(), + mediaMaxBytes: 1, + processMessage, + }); + + expect(buildLineMessageContextMock).not.toHaveBeenCalled(); + expect(processMessage).not.toHaveBeenCalled(); + }); + it("blocks group messages when wildcard group config disables groups", async () => { const processMessage = vi.fn(); const event = { diff --git a/src/line/bot-handlers.ts b/src/line/bot-handlers.ts index e6b30f42d20..ae432bcc599 100644 --- a/src/line/bot-handlers.ts +++ b/src/line/bot-handlers.ts @@ -21,7 +21,12 @@ import { upsertChannelPairingRequest, } from "../pairing/pairing-store.js"; import type { RuntimeEnv } from "../runtime.js"; -import { firstDefined, isSenderAllowed, normalizeAllowFromWithStore } from "./bot-access.js"; +import { + firstDefined, + isSenderAllowed, + normalizeAllowFrom, + normalizeDmAllowFromWithStore, +} from "./bot-access.js"; import { getLineSourceInfo, buildLineMessageContext, @@ -69,6 +74,7 @@ async function sendLinePairingReply(params: { const { code, created } = await upsertChannelPairingRequest({ channel: "line", id: senderId, + accountId: context.account.accountId, }); if (!created) { return; @@ -116,8 +122,12 @@ async function shouldProcessLineEvent( const senderId = userId ?? ""; const dmPolicy = account.config.dmPolicy ?? 
"pairing"; - const storeAllowFrom = await readChannelAllowFromStore("line").catch(() => []); - const effectiveDmAllow = normalizeAllowFromWithStore({ + const storeAllowFrom = await readChannelAllowFromStore( + "line", + process.env, + account.accountId, + ).catch(() => []); + const effectiveDmAllow = normalizeDmAllowFromWithStore({ allowFrom: account.config.allowFrom, storeAllowFrom, dmPolicy, @@ -132,11 +142,9 @@ async function shouldProcessLineEvent( account.config.groupAllowFrom, fallbackGroupAllowFrom, ); - const effectiveGroupAllow = normalizeAllowFromWithStore({ - allowFrom: groupAllowFrom, - storeAllowFrom, - dmPolicy, - }); + // Group authorization stays explicit to group allowlists and must not + // inherit DM pairing-store identities. + const effectiveGroupAllow = normalizeAllowFrom(groupAllowFrom); const defaultGroupPolicy = resolveDefaultGroupPolicy(cfg); const { groupPolicy, providerMissingFallbackApplied } = resolveAllowlistProviderRuntimeGroupPolicy({ diff --git a/src/line/monitor.lifecycle.test.ts b/src/line/monitor.lifecycle.test.ts new file mode 100644 index 00000000000..635d921e7ad --- /dev/null +++ b/src/line/monitor.lifecycle.test.ts @@ -0,0 +1,92 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import type { RuntimeEnv } from "../runtime.js"; + +const { createLineBotMock, registerPluginHttpRouteMock, unregisterHttpMock } = vi.hoisted(() => ({ + createLineBotMock: vi.fn(() => ({ + account: { accountId: "default" }, + handleWebhook: vi.fn(), + })), + registerPluginHttpRouteMock: vi.fn(), + unregisterHttpMock: vi.fn(), +})); + +vi.mock("./bot.js", () => ({ + createLineBot: createLineBotMock, +})); + +vi.mock("../plugins/http-path.js", () => ({ + normalizePluginHttpPath: (_path: string | undefined, fallback: string) => fallback, +})); + +vi.mock("../plugins/http-registry.js", () => ({ + registerPluginHttpRoute: registerPluginHttpRouteMock, +})); + 
+vi.mock("./webhook-node.js", () => ({ + createLineNodeWebhookHandler: vi.fn(() => vi.fn()), +})); + +describe("monitorLineProvider lifecycle", () => { + beforeEach(() => { + createLineBotMock.mockClear(); + unregisterHttpMock.mockClear(); + registerPluginHttpRouteMock.mockClear().mockReturnValue(unregisterHttpMock); + }); + + it("waits for abort before resolving", async () => { + const { monitorLineProvider } = await import("./monitor.js"); + const abort = new AbortController(); + let resolved = false; + + const task = monitorLineProvider({ + channelAccessToken: "token", + channelSecret: "secret", + config: {} as OpenClawConfig, + runtime: {} as RuntimeEnv, + abortSignal: abort.signal, + }).then((monitor) => { + resolved = true; + return monitor; + }); + + await vi.waitFor(() => expect(registerPluginHttpRouteMock).toHaveBeenCalledTimes(1)); + expect(resolved).toBe(false); + + abort.abort(); + await task; + expect(unregisterHttpMock).toHaveBeenCalledTimes(1); + }); + + it("stops immediately when signal is already aborted", async () => { + const { monitorLineProvider } = await import("./monitor.js"); + const abort = new AbortController(); + abort.abort(); + + await monitorLineProvider({ + channelAccessToken: "token", + channelSecret: "secret", + config: {} as OpenClawConfig, + runtime: {} as RuntimeEnv, + abortSignal: abort.signal, + }); + + expect(unregisterHttpMock).toHaveBeenCalledTimes(1); + }); + + it("returns immediately without abort signal and stop is idempotent", async () => { + const { monitorLineProvider } = await import("./monitor.js"); + + const monitor = await monitorLineProvider({ + channelAccessToken: "token", + channelSecret: "secret", + config: {} as OpenClawConfig, + runtime: {} as RuntimeEnv, + }); + + expect(unregisterHttpMock).not.toHaveBeenCalled(); + monitor.stop(); + monitor.stop(); + expect(unregisterHttpMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/src/line/monitor.ts b/src/line/monitor.ts index 07a995c4eed..49fcc518a3f 100644 
--- a/src/line/monitor.ts +++ b/src/line/monitor.ts @@ -4,6 +4,7 @@ import { dispatchReplyWithBufferedBlockDispatcher } from "../auto-reply/reply/pr import { createReplyPrefixOptions } from "../channels/reply-prefix.js"; import type { OpenClawConfig } from "../config/config.js"; import { danger, logVerbose } from "../globals.js"; +import { waitForAbortSignal } from "../infra/abort-signal.js"; import { normalizePluginHttpPath } from "../plugins/http-path.js"; import { registerPluginHttpRoute } from "../plugins/http-registry.js"; import type { RuntimeEnv } from "../runtime.js"; @@ -296,7 +297,12 @@ export async function monitorLineProvider( logVerbose(`line: registered webhook handler at ${normalizedPath}`); // Handle abort signal + let stopped = false; const stopHandler = () => { + if (stopped) { + return; + } + stopped = true; logVerbose(`line: stopping provider for account ${resolvedAccountId}`); unregisterHttp(); recordChannelRuntimeState({ @@ -309,7 +315,12 @@ export async function monitorLineProvider( }); }; - abortSignal?.addEventListener("abort", stopHandler); + if (abortSignal?.aborted) { + stopHandler(); + } else if (abortSignal) { + abortSignal.addEventListener("abort", stopHandler, { once: true }); + await waitForAbortSignal(abortSignal); + } return { account: bot.account, diff --git a/src/media/fetch.ts b/src/media/fetch.ts index 158e6d88d57..2991cda5bea 100644 --- a/src/media/fetch.ts +++ b/src/media/fetch.ts @@ -27,6 +27,7 @@ export type FetchLike = (input: RequestInfo | URL, init?: RequestInit) => Promis type FetchMediaOptions = { url: string; fetchImpl?: FetchLike; + requestInit?: RequestInit; filePathHint?: string; maxBytes?: number; maxRedirects?: number; @@ -79,7 +80,16 @@ async function readErrorBodySnippet(res: Response, maxChars = 200): Promise { - const { url, fetchImpl, filePathHint, maxBytes, maxRedirects, ssrfPolicy, lookupFn } = options; + const { + url, + fetchImpl, + requestInit, + filePathHint, + maxBytes, + maxRedirects, + ssrfPolicy, 
+ lookupFn, + } = options; let res: Response; let finalUrl = url; @@ -88,6 +98,7 @@ export async function fetchRemoteMedia(options: FetchMediaOptions): Promise; + security: ExecSecurity; + safeBins: ReturnType["safeBins"]; + safeBinProfiles: ReturnType["safeBinProfiles"]; + trustedSafeBinDirs: ReturnType["trustedSafeBinDirs"]; + cwd: string | undefined; + env: Record | undefined; + skillBins: SkillBinTrustEntry[]; + autoAllowSkills: boolean; +}): SystemRunAllowlistAnalysis { + if (params.shellCommand) { + const allowlistEval = evaluateShellAllowlist({ + command: params.shellCommand, + allowlist: params.approvals.allowlist, + safeBins: params.safeBins, + safeBinProfiles: params.safeBinProfiles, + cwd: params.cwd, + env: params.env, + trustedSafeBinDirs: params.trustedSafeBinDirs, + skillBins: params.skillBins, + autoAllowSkills: params.autoAllowSkills, + platform: process.platform, + }); + return { + analysisOk: allowlistEval.analysisOk, + allowlistMatches: allowlistEval.allowlistMatches, + allowlistSatisfied: + params.security === "allowlist" && allowlistEval.analysisOk + ? allowlistEval.allowlistSatisfied + : false, + segments: allowlistEval.segments, + }; + } + + const analysis = analyzeArgvCommand({ argv: params.argv, cwd: params.cwd, env: params.env }); + const allowlistEval = evaluateExecAllowlist({ + analysis, + allowlist: params.approvals.allowlist, + safeBins: params.safeBins, + safeBinProfiles: params.safeBinProfiles, + cwd: params.cwd, + trustedSafeBinDirs: params.trustedSafeBinDirs, + skillBins: params.skillBins, + autoAllowSkills: params.autoAllowSkills, + }); + return { + analysisOk: analysis.ok, + allowlistMatches: allowlistEval.allowlistMatches, + allowlistSatisfied: + params.security === "allowlist" && analysis.ok ? 
allowlistEval.allowlistSatisfied : false, + segments: analysis.segments, + }; +} + +export function resolvePlannedAllowlistArgv(params: { + security: ExecSecurity; + shellCommand: string | null; + policy: { + approvedByAsk: boolean; + analysisOk: boolean; + allowlistSatisfied: boolean; + }; + segments: ExecCommandSegment[]; +}): string[] | undefined | null { + if ( + params.security !== "allowlist" || + params.policy.approvedByAsk || + params.shellCommand || + !params.policy.analysisOk || + !params.policy.allowlistSatisfied || + params.segments.length !== 1 + ) { + return undefined; + } + const plannedAllowlistArgv = params.segments[0]?.resolution?.effectiveArgv; + return plannedAllowlistArgv && plannedAllowlistArgv.length > 0 ? plannedAllowlistArgv : null; +} + +export function resolveSystemRunExecArgv(params: { + plannedAllowlistArgv: string[] | undefined; + argv: string[]; + security: ExecSecurity; + isWindows: boolean; + policy: { + approvedByAsk: boolean; + analysisOk: boolean; + allowlistSatisfied: boolean; + }; + shellCommand: string | null; + segments: ExecCommandSegment[]; +}): string[] { + let execArgv = params.plannedAllowlistArgv ?? params.argv; + if ( + params.security === "allowlist" && + params.isWindows && + !params.policy.approvedByAsk && + params.shellCommand && + params.policy.analysisOk && + params.policy.allowlistSatisfied && + params.segments.length === 1 && + params.segments[0]?.argv.length > 0 + ) { + execArgv = params.segments[0].argv; + } + return execArgv; +} + +export function applyOutputTruncation(result: RunResult): void { + if (!result.truncated) { + return; + } + const suffix = "... 
(truncated)"; + if (result.stderr.trim().length > 0) { + result.stderr = `${result.stderr}\n${suffix}`; + } else { + result.stdout = `${result.stdout}\n${suffix}`; + } +} diff --git a/src/node-host/invoke-system-run-plan.ts b/src/node-host/invoke-system-run-plan.ts new file mode 100644 index 00000000000..27af0f8bbf3 --- /dev/null +++ b/src/node-host/invoke-system-run-plan.ts @@ -0,0 +1,193 @@ +import fs from "node:fs"; +import path from "node:path"; +import type { SystemRunApprovalPlanV2 } from "../infra/exec-approvals.js"; +import { sameFileIdentity } from "../infra/file-identity.js"; +import { resolveSystemRunCommand } from "../infra/system-run-command.js"; + +function normalizeString(value: unknown): string | null { + if (typeof value !== "string") { + return null; + } + const trimmed = value.trim(); + return trimmed ? trimmed : null; +} + +function isPathLikeExecutableToken(value: string): boolean { + if (!value) { + return false; + } + if (value.startsWith(".") || value.startsWith("/") || value.startsWith("\\")) { + return true; + } + if (value.includes("/") || value.includes("\\")) { + return true; + } + if (process.platform === "win32" && /^[a-zA-Z]:[\\/]/.test(value)) { + return true; + } + return false; +} + +function pathComponentsFromRootSync(targetPath: string): string[] { + const absolute = path.resolve(targetPath); + const parts: string[] = []; + let cursor = absolute; + while (true) { + parts.unshift(cursor); + const parent = path.dirname(cursor); + if (parent === cursor) { + return parts; + } + cursor = parent; + } +} + +function isWritableByCurrentProcessSync(candidate: string): boolean { + try { + fs.accessSync(candidate, fs.constants.W_OK); + return true; + } catch { + return false; + } +} + +function hasMutableSymlinkPathComponentSync(targetPath: string): boolean { + for (const component of pathComponentsFromRootSync(targetPath)) { + try { + if (!fs.lstatSync(component).isSymbolicLink()) { + continue; + } + const parentDir = 
path.dirname(component); + if (isWritableByCurrentProcessSync(parentDir)) { + return true; + } + } catch { + return true; + } + } + return false; +} + +export function hardenApprovedExecutionPaths(params: { + approvedByAsk: boolean; + argv: string[]; + shellCommand: string | null; + cwd: string | undefined; +}): { ok: true; argv: string[]; cwd: string | undefined } | { ok: false; message: string } { + if (!params.approvedByAsk) { + return { ok: true, argv: params.argv, cwd: params.cwd }; + } + + let hardenedCwd = params.cwd; + if (hardenedCwd) { + const requestedCwd = path.resolve(hardenedCwd); + let cwdLstat: fs.Stats; + let cwdStat: fs.Stats; + let cwdReal: string; + let cwdRealStat: fs.Stats; + try { + cwdLstat = fs.lstatSync(requestedCwd); + cwdStat = fs.statSync(requestedCwd); + cwdReal = fs.realpathSync(requestedCwd); + cwdRealStat = fs.statSync(cwdReal); + } catch { + return { + ok: false, + message: "SYSTEM_RUN_DENIED: approval requires an existing canonical cwd", + }; + } + if (!cwdStat.isDirectory()) { + return { + ok: false, + message: "SYSTEM_RUN_DENIED: approval requires cwd to be a directory", + }; + } + if (hasMutableSymlinkPathComponentSync(requestedCwd)) { + return { + ok: false, + message: "SYSTEM_RUN_DENIED: approval requires canonical cwd (no symlink path components)", + }; + } + if (cwdLstat.isSymbolicLink()) { + return { + ok: false, + message: "SYSTEM_RUN_DENIED: approval requires canonical cwd (no symlink cwd)", + }; + } + if ( + !sameFileIdentity(cwdStat, cwdLstat) || + !sameFileIdentity(cwdStat, cwdRealStat) || + !sameFileIdentity(cwdLstat, cwdRealStat) + ) { + return { + ok: false, + message: "SYSTEM_RUN_DENIED: approval cwd identity mismatch", + }; + } + hardenedCwd = cwdReal; + } + + if (params.shellCommand !== null || params.argv.length === 0) { + return { ok: true, argv: params.argv, cwd: hardenedCwd }; + } + + const argv = [...params.argv]; + const rawExecutable = argv[0] ?? 
""; + if (!isPathLikeExecutableToken(rawExecutable)) { + return { ok: true, argv, cwd: hardenedCwd }; + } + + const base = hardenedCwd ?? process.cwd(); + const candidate = path.isAbsolute(rawExecutable) + ? rawExecutable + : path.resolve(base, rawExecutable); + try { + argv[0] = fs.realpathSync(candidate); + } catch { + return { + ok: false, + message: "SYSTEM_RUN_DENIED: approval requires a stable executable path", + }; + } + return { ok: true, argv, cwd: hardenedCwd }; +} + +export function buildSystemRunApprovalPlanV2(params: { + command?: unknown; + rawCommand?: unknown; + cwd?: unknown; + agentId?: unknown; + sessionKey?: unknown; +}): { ok: true; plan: SystemRunApprovalPlanV2; cmdText: string } | { ok: false; message: string } { + const command = resolveSystemRunCommand({ + command: params.command, + rawCommand: params.rawCommand, + }); + if (!command.ok) { + return { ok: false, message: command.message }; + } + if (command.argv.length === 0) { + return { ok: false, message: "command required" }; + } + const hardening = hardenApprovedExecutionPaths({ + approvedByAsk: true, + argv: command.argv, + shellCommand: command.shellCommand, + cwd: normalizeString(params.cwd) ?? undefined, + }); + if (!hardening.ok) { + return { ok: false, message: hardening.message }; + } + return { + ok: true, + plan: { + version: 2, + argv: hardening.argv, + cwd: hardening.cwd ?? 
null, + rawCommand: command.cmdText.trim() || null, + agentId: normalizeString(params.agentId), + sessionKey: normalizeString(params.sessionKey), + }, + cmdText: command.cmdText, + }; +} diff --git a/src/node-host/invoke-system-run.test.ts b/src/node-host/invoke-system-run.test.ts index 2d939c7726e..1ad04cc4b38 100644 --- a/src/node-host/invoke-system-run.test.ts +++ b/src/node-host/invoke-system-run.test.ts @@ -49,6 +49,7 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { preferMacAppExecHost: boolean; runViaResponse?: ExecHostResponse | null; command?: string[]; + cwd?: string; security?: "full" | "allowlist"; ask?: "off" | "on-miss" | "always"; approved?: boolean; @@ -70,6 +71,7 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { client: {} as never, params: { command: params.command ?? ["echo", "ok"], + cwd: params.cwd, approved: params.approved ?? false, sessionKey: "agent:main:main", }, @@ -214,6 +216,104 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { }), ); }); + + it.runIf(process.platform !== "win32")( + "denies approval-based execution when cwd is a symlink", + async () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-approval-cwd-link-")); + const safeDir = path.join(tmp, "safe"); + const linkDir = path.join(tmp, "cwd-link"); + const script = path.join(safeDir, "run.sh"); + fs.mkdirSync(safeDir, { recursive: true }); + fs.writeFileSync(script, "#!/bin/sh\necho SAFE\n"); + fs.chmodSync(script, 0o755); + fs.symlinkSync(safeDir, linkDir, "dir"); + try { + const { runCommand, sendInvokeResult } = await runSystemInvoke({ + preferMacAppExecHost: false, + command: ["./run.sh"], + cwd: linkDir, + approved: true, + security: "full", + ask: "off", + }); + expect(runCommand).not.toHaveBeenCalled(); + expect(sendInvokeResult).toHaveBeenCalledWith( + expect.objectContaining({ + ok: false, + error: expect.objectContaining({ + message: expect.stringContaining("canonical cwd"), + }), + }), 
+ ); + } finally { + fs.rmSync(tmp, { recursive: true, force: true }); + } + }, + ); + + it.runIf(process.platform !== "win32")( + "denies approval-based execution when cwd contains a symlink parent component", + async () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-approval-cwd-parent-link-")); + const safeRoot = path.join(tmp, "safe-root"); + const safeSub = path.join(safeRoot, "sub"); + const linkRoot = path.join(tmp, "approved-link"); + fs.mkdirSync(safeSub, { recursive: true }); + fs.symlinkSync(safeRoot, linkRoot, "dir"); + try { + const { runCommand, sendInvokeResult } = await runSystemInvoke({ + preferMacAppExecHost: false, + command: ["./run.sh"], + cwd: path.join(linkRoot, "sub"), + approved: true, + security: "full", + ask: "off", + }); + expect(runCommand).not.toHaveBeenCalled(); + expect(sendInvokeResult).toHaveBeenCalledWith( + expect.objectContaining({ + ok: false, + error: expect.objectContaining({ + message: expect.stringContaining("no symlink path components"), + }), + }), + ); + } finally { + fs.rmSync(tmp, { recursive: true, force: true }); + } + }, + ); + + it("uses canonical executable path for approval-based relative command execution", async () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-approval-cwd-real-")); + const script = path.join(tmp, "run.sh"); + fs.writeFileSync(script, "#!/bin/sh\necho SAFE\n"); + fs.chmodSync(script, 0o755); + try { + const { runCommand, sendInvokeResult } = await runSystemInvoke({ + preferMacAppExecHost: false, + command: ["./run.sh", "--flag"], + cwd: tmp, + approved: true, + security: "full", + ask: "off", + }); + expect(runCommand).toHaveBeenCalledWith( + [fs.realpathSync(script), "--flag"], + fs.realpathSync(tmp), + undefined, + undefined, + ); + expect(sendInvokeResult).toHaveBeenCalledWith( + expect.objectContaining({ + ok: true, + }), + ); + } finally { + fs.rmSync(tmp, { recursive: true, force: true }); + } + }); it("denies ./sh wrapper spoof in allowlist 
on-miss mode before execution", async () => { const marker = path.join(os.tmpdir(), `openclaw-wrapper-spoof-${process.pid}-${Date.now()}`); const runCommand = vi.fn(async () => { @@ -365,6 +465,31 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { ); }); + it("denies semicolon-chained shell payloads in allowlist mode without explicit approval", async () => { + const payloads = ["openclaw status; id", "openclaw status; cat /etc/passwd"]; + for (const payload of payloads) { + const command = + process.platform === "win32" + ? ["cmd.exe", "/d", "/s", "/c", payload] + : ["/bin/sh", "-lc", payload]; + const { runCommand, sendInvokeResult } = await runSystemInvoke({ + preferMacAppExecHost: false, + security: "allowlist", + ask: "on-miss", + command, + }); + expect(runCommand, payload).not.toHaveBeenCalled(); + expect(sendInvokeResult, payload).toHaveBeenCalledWith( + expect.objectContaining({ + ok: false, + error: expect.objectContaining({ + message: "SYSTEM_RUN_DENIED: approval required", + }), + }), + ); + } + }); + it("denies nested env shell payloads when wrapper depth is exceeded", async () => { if (process.platform === "win32") { return; diff --git a/src/node-host/invoke-system-run.ts b/src/node-host/invoke-system-run.ts index 39e6766f7d5..ab325321fe2 100644 --- a/src/node-host/invoke-system-run.ts +++ b/src/node-host/invoke-system-run.ts @@ -4,9 +4,6 @@ import { loadConfig } from "../config/config.js"; import type { GatewayClient } from "../gateway/client.js"; import { addAllowlistEntry, - analyzeArgvCommand, - evaluateExecAllowlist, - evaluateShellAllowlist, recordAllowlistUse, resolveAllowAlwaysPatterns, resolveExecApprovals, @@ -14,13 +11,19 @@ import { type ExecAsk, type ExecCommandSegment, type ExecSecurity, - type SkillBinTrustEntry, } from "../infra/exec-approvals.js"; import type { ExecHostRequest, ExecHostResponse, ExecHostRunResult } from "../infra/exec-host.js"; import { resolveExecSafeBinRuntimePolicy } from 
"../infra/exec-safe-bin-runtime-policy.js"; import { sanitizeSystemRunEnvOverrides } from "../infra/host-env-security.js"; import { resolveSystemRunCommand } from "../infra/system-run-command.js"; import { evaluateSystemRunPolicy, resolveExecApprovalDecision } from "./exec-policy.js"; +import { + applyOutputTruncation, + evaluateSystemRunAllowlist, + resolvePlannedAllowlistArgv, + resolveSystemRunExecArgv, +} from "./invoke-system-run-allowlist.js"; +import { hardenApprovedExecutionPaths } from "./invoke-system-run-plan.js"; import type { ExecEventPayload, RunResult, @@ -48,13 +51,6 @@ type SystemRunExecutionContext = { cmdText: string; }; -type SystemRunAllowlistAnalysis = { - analysisOk: boolean; - allowlistMatches: ExecAllowlistEntry[]; - allowlistSatisfied: boolean; - segments: ExecCommandSegment[]; -}; - type ResolvedExecApprovals = ReturnType; type SystemRunParsePhase = { @@ -177,129 +173,8 @@ async function sendSystemRunDenied( }); } -function evaluateSystemRunAllowlist(params: { - shellCommand: string | null; - argv: string[]; - approvals: ReturnType; - security: ExecSecurity; - safeBins: ReturnType["safeBins"]; - safeBinProfiles: ReturnType["safeBinProfiles"]; - trustedSafeBinDirs: ReturnType["trustedSafeBinDirs"]; - cwd: string | undefined; - env: Record | undefined; - skillBins: SkillBinTrustEntry[]; - autoAllowSkills: boolean; -}): SystemRunAllowlistAnalysis { - if (params.shellCommand) { - const allowlistEval = evaluateShellAllowlist({ - command: params.shellCommand, - allowlist: params.approvals.allowlist, - safeBins: params.safeBins, - safeBinProfiles: params.safeBinProfiles, - cwd: params.cwd, - env: params.env, - trustedSafeBinDirs: params.trustedSafeBinDirs, - skillBins: params.skillBins, - autoAllowSkills: params.autoAllowSkills, - platform: process.platform, - }); - return { - analysisOk: allowlistEval.analysisOk, - allowlistMatches: allowlistEval.allowlistMatches, - allowlistSatisfied: - params.security === "allowlist" && 
allowlistEval.analysisOk - ? allowlistEval.allowlistSatisfied - : false, - segments: allowlistEval.segments, - }; - } - - const analysis = analyzeArgvCommand({ argv: params.argv, cwd: params.cwd, env: params.env }); - const allowlistEval = evaluateExecAllowlist({ - analysis, - allowlist: params.approvals.allowlist, - safeBins: params.safeBins, - safeBinProfiles: params.safeBinProfiles, - cwd: params.cwd, - trustedSafeBinDirs: params.trustedSafeBinDirs, - skillBins: params.skillBins, - autoAllowSkills: params.autoAllowSkills, - }); - return { - analysisOk: analysis.ok, - allowlistMatches: allowlistEval.allowlistMatches, - allowlistSatisfied: - params.security === "allowlist" && analysis.ok ? allowlistEval.allowlistSatisfied : false, - segments: analysis.segments, - }; -} - -function resolvePlannedAllowlistArgv(params: { - security: ExecSecurity; - shellCommand: string | null; - policy: { - approvedByAsk: boolean; - analysisOk: boolean; - allowlistSatisfied: boolean; - }; - segments: ExecCommandSegment[]; -}): string[] | undefined | null { - if ( - params.security !== "allowlist" || - params.policy.approvedByAsk || - params.shellCommand || - !params.policy.analysisOk || - !params.policy.allowlistSatisfied || - params.segments.length !== 1 - ) { - return undefined; - } - const plannedAllowlistArgv = params.segments[0]?.resolution?.effectiveArgv; - return plannedAllowlistArgv && plannedAllowlistArgv.length > 0 ? plannedAllowlistArgv : null; -} - -function resolveSystemRunExecArgv(params: { - plannedAllowlistArgv: string[] | undefined; - argv: string[]; - security: ExecSecurity; - isWindows: boolean; - policy: { - approvedByAsk: boolean; - analysisOk: boolean; - allowlistSatisfied: boolean; - }; - shellCommand: string | null; - segments: ExecCommandSegment[]; -}): string[] { - let execArgv = params.plannedAllowlistArgv ?? 
params.argv; - if ( - params.security === "allowlist" && - params.isWindows && - !params.policy.approvedByAsk && - params.shellCommand && - params.policy.analysisOk && - params.policy.allowlistSatisfied && - params.segments.length === 1 && - params.segments[0]?.argv.length > 0 - ) { - execArgv = params.segments[0].argv; - } - return execArgv; -} - -function applyOutputTruncation(result: RunResult) { - if (!result.truncated) { - return; - } - const suffix = "... (truncated)"; - if (result.stderr.trim().length > 0) { - result.stderr = `${result.stderr}\n${suffix}`; - } else { - result.stdout = `${result.stdout}\n${suffix}`; - } -} - export { formatSystemRunAllowlistMissMessage } from "./exec-policy.js"; +export { buildSystemRunApprovalPlanV2 } from "./invoke-system-run-plan.js"; async function parseSystemRunPhase( opts: HandleSystemRunInvokeOptions, @@ -422,6 +297,20 @@ async function evaluateSystemRunPolicyPhase( return null; } + const hardenedPaths = hardenApprovedExecutionPaths({ + approvedByAsk: policy.approvedByAsk, + argv: parsed.argv, + shellCommand: parsed.shellCommand, + cwd: parsed.cwd, + }); + if (!hardenedPaths.ok) { + await sendSystemRunDenied(opts, parsed.execution, { + reason: "approval-required", + message: hardenedPaths.message, + }); + return null; + } + const plannedAllowlistArgv = resolvePlannedAllowlistArgv({ security, shellCommand: parsed.shellCommand, @@ -437,6 +326,8 @@ async function evaluateSystemRunPolicyPhase( } return { ...parsed, + argv: hardenedPaths.argv, + cwd: hardenedPaths.cwd, approvals, security, policy, diff --git a/src/node-host/invoke.ts b/src/node-host/invoke.ts index c6d5d2ccc8a..7d7b21ad474 100644 --- a/src/node-host/invoke.ts +++ b/src/node-host/invoke.ts @@ -20,7 +20,7 @@ import { } from "../infra/exec-host.js"; import { sanitizeHostExecEnv } from "../infra/host-env-security.js"; import { runBrowserProxyCommand } from "./invoke-browser.js"; -import { handleSystemRunInvoke } from "./invoke-system-run.js"; +import { 
buildSystemRunApprovalPlanV2, handleSystemRunInvoke } from "./invoke-system-run.js"; import type { ExecEventPayload, RunResult, @@ -420,6 +420,30 @@ export async function handleInvoke( return; } + if (command === "system.run.prepare") { + try { + const params = decodeParams<{ + command?: unknown; + rawCommand?: unknown; + cwd?: unknown; + agentId?: unknown; + sessionKey?: unknown; + }>(frame.paramsJSON); + const prepared = buildSystemRunApprovalPlanV2(params); + if (!prepared.ok) { + await sendErrorResult(client, frame, "INVALID_REQUEST", prepared.message); + return; + } + await sendJsonPayloadResult(client, frame, { + cmdText: prepared.cmdText, + plan: prepared.plan, + }); + } catch (err) { + await sendInvalidRequestResult(client, frame, err); + } + return; + } + if (command !== "system.run") { await sendErrorResult(client, frame, "UNAVAILABLE", "command not supported"); return; diff --git a/src/node-host/runner.ts b/src/node-host/runner.ts index edf2cc12215..e3b593f61ba 100644 --- a/src/node-host/runner.ts +++ b/src/node-host/runner.ts @@ -6,6 +6,11 @@ import { GatewayClient } from "../gateway/client.js"; import { loadOrCreateDeviceIdentity } from "../infra/device-identity.js"; import type { SkillBinTrustEntry } from "../infra/exec-approvals.js"; import { getMachineDisplayName } from "../infra/machine-name.js"; +import { + NODE_BROWSER_PROXY_COMMAND, + NODE_EXEC_APPROVALS_COMMANDS, + NODE_SYSTEM_RUN_COMMANDS, +} from "../infra/node-commands.js"; import { ensureOpenClawCliOnPath } from "../infra/path-env.js"; import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js"; import { VERSION } from "../version.js"; @@ -189,11 +194,9 @@ export async function runNodeHost(opts: NodeHostRunOptions): Promise { scopes: [], caps: ["system", ...(browserProxyEnabled ? ["browser"] : [])], commands: [ - "system.run", - "system.which", - "system.execApprovals.get", - "system.execApprovals.set", - ...(browserProxyEnabled ? 
["browser.proxy"] : []), + ...NODE_SYSTEM_RUN_COMMANDS, + ...NODE_EXEC_APPROVALS_COMMANDS, + ...(browserProxyEnabled ? [NODE_BROWSER_PROXY_COMMAND] : []), ], pathEnv, permissions: undefined, diff --git a/src/pairing/pairing-challenge.ts b/src/pairing/pairing-challenge.ts new file mode 100644 index 00000000000..8bf068f8d23 --- /dev/null +++ b/src/pairing/pairing-challenge.ts @@ -0,0 +1,48 @@ +import { buildPairingReply } from "./pairing-messages.js"; + +type PairingMeta = Record; + +export type PairingChallengeParams = { + channel: string; + senderId: string; + senderIdLine: string; + meta?: PairingMeta; + upsertPairingRequest: (params: { + id: string; + meta?: PairingMeta; + }) => Promise<{ code: string; created: boolean }>; + sendPairingReply: (text: string) => Promise; + buildReplyText?: (params: { code: string; senderIdLine: string }) => string; + onCreated?: (params: { code: string }) => void; + onReplyError?: (err: unknown) => void; +}; + +/** + * Shared pairing challenge issuance for DM pairing policy pathways. + * Ensures every channel follows the same create-if-missing + reply flow. + */ +export async function issuePairingChallenge( + params: PairingChallengeParams, +): Promise<{ created: boolean; code?: string }> { + const { code, created } = await params.upsertPairingRequest({ + id: params.senderId, + meta: params.meta, + }); + if (!created) { + return { created: false }; + } + params.onCreated?.({ code }); + const replyText = + params.buildReplyText?.({ code, senderIdLine: params.senderIdLine }) ?? 
+ buildPairingReply({ + channel: params.channel, + idLine: params.senderIdLine, + code, + }); + try { + await params.sendPairingReply(replyText); + } catch (err) { + params.onReplyError?.(err); + } + return { created: true, code }; +} diff --git a/src/pairing/pairing-store.test.ts b/src/pairing/pairing-store.test.ts index e44dd391eaf..9f0ba535711 100644 --- a/src/pairing/pairing-store.test.ts +++ b/src/pairing/pairing-store.test.ts @@ -4,12 +4,15 @@ import os from "node:os"; import path from "node:path"; import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; import { resolveOAuthDir } from "../config/paths.js"; +import { DEFAULT_ACCOUNT_ID } from "../routing/session-key.js"; import { withEnvAsync } from "../test-utils/env.js"; import { addChannelAllowFromStoreEntry, approveChannelPairingCode, listChannelPairingRequests, readChannelAllowFromStore, + readLegacyChannelAllowFromStore, + readLegacyChannelAllowFromStoreSync, readChannelAllowFromStoreSync, removeChannelAllowFromStoreEntry, upsertChannelPairingRequest, @@ -35,6 +38,7 @@ async function withTempStateDir(fn: (stateDir: string) => Promise) { } async function writeJsonFixture(filePath: string, value: unknown) { + await fs.mkdir(path.dirname(filePath), { recursive: true }); await fs.writeFile(filePath, `${JSON.stringify(value, null, 2)}\n`, "utf8"); } @@ -42,6 +46,11 @@ function resolvePairingFilePath(stateDir: string, channel: string) { return path.join(resolveOAuthDir(process.env, stateDir), `${channel}-pairing.json`); } +function resolveAllowFromFilePath(stateDir: string, channel: string, accountId?: string) { + const suffix = accountId ? 
`-${accountId}` : ""; + return path.join(resolveOAuthDir(process.env, stateDir), `${channel}${suffix}-allowFrom.json`); +} + async function writeAllowFromFixture(params: { stateDir: string; channel: string; @@ -63,10 +72,12 @@ describe("pairing store", () => { const first = await upsertChannelPairingRequest({ channel: "discord", id: "u1", + accountId: DEFAULT_ACCOUNT_ID, }); const second = await upsertChannelPairingRequest({ channel: "discord", id: "u1", + accountId: DEFAULT_ACCOUNT_ID, }); expect(first.created).toBe(true); expect(second.created).toBe(false); @@ -83,6 +94,7 @@ describe("pairing store", () => { const created = await upsertChannelPairingRequest({ channel: "signal", id: "+15550001111", + accountId: DEFAULT_ACCOUNT_ID, }); expect(created.created).toBe(true); @@ -105,6 +117,7 @@ describe("pairing store", () => { const next = await upsertChannelPairingRequest({ channel: "signal", id: "+15550001111", + accountId: DEFAULT_ACCOUNT_ID, }); expect(next.created).toBe(true); }); @@ -122,6 +135,7 @@ describe("pairing store", () => { const first = await upsertChannelPairingRequest({ channel: "telegram", id: "123", + accountId: DEFAULT_ACCOUNT_ID, }); expect(first.code).toBe("AAAAAAAA"); @@ -131,6 +145,7 @@ describe("pairing store", () => { const second = await upsertChannelPairingRequest({ channel: "telegram", id: "456", + accountId: DEFAULT_ACCOUNT_ID, }); expect(second.code).toBe("BBBBBBBB"); } finally { @@ -146,6 +161,7 @@ describe("pairing store", () => { const created = await upsertChannelPairingRequest({ channel: "whatsapp", id, + accountId: DEFAULT_ACCOUNT_ID, }); expect(created.created).toBe(true); } @@ -153,6 +169,7 @@ describe("pairing store", () => { const blocked = await upsertChannelPairingRequest({ channel: "whatsapp", id: "+15550000004", + accountId: DEFAULT_ACCOUNT_ID, }); expect(blocked.created).toBe(false); @@ -175,7 +192,7 @@ describe("pairing store", () => { }); const accountScoped = await readChannelAllowFromStore("telegram", process.env, 
"yy"); - const channelScoped = await readChannelAllowFromStore("telegram"); + const channelScoped = await readLegacyChannelAllowFromStore("telegram"); expect(accountScoped).toContain("12345"); expect(channelScoped).not.toContain("12345"); }); @@ -197,7 +214,7 @@ describe("pairing store", () => { expect(approved?.id).toBe("12345"); const accountScoped = await readChannelAllowFromStore("telegram", process.env, "yy"); - const channelScoped = await readChannelAllowFromStore("telegram"); + const channelScoped = await readLegacyChannelAllowFromStore("telegram"); expect(accountScoped).toContain("12345"); expect(channelScoped).not.toContain("12345"); }); @@ -257,7 +274,7 @@ describe("pairing store", () => { }); }); - it("reads sync allowFrom with scoped + legacy dedupe and wildcard filtering", async () => { + it("reads sync allowFrom with account-scoped isolation and wildcard filtering", async () => { await withTempStateDir(async (stateDir) => { await writeAllowFromFixture({ stateDir, @@ -272,9 +289,95 @@ describe("pairing store", () => { }); const scoped = readChannelAllowFromStoreSync("telegram", process.env, "yy"); - const channelScoped = readChannelAllowFromStoreSync("telegram"); + const channelScoped = readLegacyChannelAllowFromStoreSync("telegram"); expect(scoped).toEqual(["1002", "1001"]); - expect(channelScoped).toEqual(["1001", "1001"]); + expect(channelScoped).toEqual(["1001"]); + }); + }); + + it("does not read legacy channel-scoped allowFrom for non-default account ids", async () => { + await withTempStateDir(async (stateDir) => { + await writeAllowFromFixture({ + stateDir, + channel: "telegram", + allowFrom: ["1001", "*", "1002", "1001"], + }); + await writeAllowFromFixture({ + stateDir, + channel: "telegram", + accountId: "yy", + allowFrom: ["1003"], + }); + + const asyncScoped = await readChannelAllowFromStore("telegram", process.env, "yy"); + const syncScoped = readChannelAllowFromStoreSync("telegram", process.env, "yy"); + 
expect(asyncScoped).toEqual(["1003"]); + expect(syncScoped).toEqual(["1003"]); + }); + }); + + it("does not fall back to legacy allowFrom when scoped file exists but is empty", async () => { + await withTempStateDir(async (stateDir) => { + await writeAllowFromFixture({ + stateDir, + channel: "telegram", + allowFrom: ["1001"], + }); + await writeAllowFromFixture({ + stateDir, + channel: "telegram", + accountId: "yy", + allowFrom: [], + }); + + const asyncScoped = await readChannelAllowFromStore("telegram", process.env, "yy"); + const syncScoped = readChannelAllowFromStoreSync("telegram", process.env, "yy"); + expect(asyncScoped).toEqual([]); + expect(syncScoped).toEqual([]); + }); + }); + + it("keeps async and sync reads aligned for malformed scoped allowFrom files", async () => { + await withTempStateDir(async (stateDir) => { + await writeAllowFromFixture({ + stateDir, + channel: "telegram", + allowFrom: ["1001"], + }); + const malformedScopedPath = resolveAllowFromFilePath(stateDir, "telegram", "yy"); + await fs.mkdir(path.dirname(malformedScopedPath), { recursive: true }); + await fs.writeFile(malformedScopedPath, "{ this is not json\n", "utf8"); + + const asyncScoped = await readChannelAllowFromStore("telegram", process.env, "yy"); + const syncScoped = readChannelAllowFromStoreSync("telegram", process.env, "yy"); + expect(asyncScoped).toEqual([]); + expect(syncScoped).toEqual([]); + }); + }); + + it("does not reuse pairing requests across accounts for the same sender id", async () => { + await withTempStateDir(async () => { + const first = await upsertChannelPairingRequest({ + channel: "telegram", + accountId: "alpha", + id: "12345", + }); + const second = await upsertChannelPairingRequest({ + channel: "telegram", + accountId: "beta", + id: "12345", + }); + + expect(first.created).toBe(true); + expect(second.created).toBe(true); + expect(second.code).not.toBe(first.code); + + const alpha = await listChannelPairingRequests("telegram", process.env, "alpha"); + 
const beta = await listChannelPairingRequests("telegram", process.env, "beta"); + expect(alpha).toHaveLength(1); + expect(beta).toHaveLength(1); + expect(alpha[0]?.code).toBe(first.code); + expect(beta[0]?.code).toBe(second.code); }); }); @@ -288,7 +391,7 @@ describe("pairing store", () => { allowFrom: ["1002"], }); - const scoped = await readChannelAllowFromStore("telegram", process.env, "default"); + const scoped = await readChannelAllowFromStore("telegram", process.env, DEFAULT_ACCOUNT_ID); expect(scoped).toEqual(["1002", "1001"]); }); }); diff --git a/src/pairing/pairing-store.ts b/src/pairing/pairing-store.ts index eb0b52b308b..fe373b3ea1f 100644 --- a/src/pairing/pairing-store.ts +++ b/src/pairing/pairing-store.ts @@ -8,6 +8,7 @@ import { resolveOAuthDir, resolveStateDir } from "../config/paths.js"; import { withFileLock as withPathLock } from "../infra/file-lock.js"; import { resolveRequiredHomeDir } from "../infra/home-dir.js"; import { readJsonFileWithFallback, writeJsonFileAtomically } from "../plugin-sdk/json-store.js"; +import { DEFAULT_ACCOUNT_ID } from "../routing/session-key.js"; const PAIRING_CODE_LENGTH = 8; const PAIRING_CODE_ALPHABET = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"; @@ -218,6 +219,12 @@ function requestMatchesAccountId(entry: PairingRequest, normalizedAccountId: str ); } +function shouldIncludeLegacyAllowFromEntries(normalizedAccountId: string): boolean { + // Keep backward compatibility for legacy channel-scoped allowFrom only on default account. + // Non-default accounts should remain isolated to avoid cross-account implicit approvals. + return !normalizedAccountId || normalizedAccountId === DEFAULT_ACCOUNT_ID; +} + function normalizeId(value: string | number): string { return String(value).trim(); } @@ -237,7 +244,9 @@ function normalizeAllowEntry(channel: PairingChannel, entry: string): string { function normalizeAllowFromList(channel: PairingChannel, store: AllowFromStore): string[] { const list = Array.isArray(store.allowFrom) ? 
store.allowFrom : []; - return list.map((v) => normalizeAllowEntry(channel, String(v))).filter(Boolean); + return dedupePreserveOrder( + list.map((v) => normalizeAllowEntry(channel, String(v))).filter(Boolean), + ); } function normalizeAllowFromInput(channel: PairingChannel, entry: string | number): string { @@ -262,20 +271,46 @@ async function readAllowFromStateForPath( channel: PairingChannel, filePath: string, ): Promise { - const { value } = await readJsonFile(filePath, { + return (await readAllowFromStateForPathWithExists(channel, filePath)).entries; +} + +async function readAllowFromStateForPathWithExists( + channel: PairingChannel, + filePath: string, +): Promise<{ entries: string[]; exists: boolean }> { + const { value, exists } = await readJsonFile(filePath, { version: 1, allowFrom: [], }); - return normalizeAllowFromList(channel, value); + const entries = normalizeAllowFromList(channel, value); + return { entries, exists }; } function readAllowFromStateForPathSync(channel: PairingChannel, filePath: string): string[] { + return readAllowFromStateForPathSyncWithExists(channel, filePath).entries; +} + +function readAllowFromStateForPathSyncWithExists( + channel: PairingChannel, + filePath: string, +): { entries: string[]; exists: boolean } { + let raw = ""; + try { + raw = fs.readFileSync(filePath, "utf8"); + } catch (err) { + const code = (err as { code?: string }).code; + if (code === "ENOENT") { + return { entries: [], exists: false }; + } + return { entries: [], exists: false }; + } try { - const raw = fs.readFileSync(filePath, "utf8"); const parsed = JSON.parse(raw) as AllowFromStore; - return normalizeAllowFromList(channel, parsed); + const entries = normalizeAllowFromList(channel, parsed); + return { entries, exists: true }; } catch { - return []; + // Keep parity with async reads: malformed JSON still means the file exists. 
+ return { entries: [], exists: true }; } } @@ -300,6 +335,24 @@ async function writeAllowFromState(filePath: string, allowFrom: string[]): Promi } satisfies AllowFromStore); } +async function readNonDefaultAccountAllowFrom(params: { + channel: PairingChannel; + env: NodeJS.ProcessEnv; + accountId: string; +}): Promise { + const scopedPath = resolveAllowFromPath(params.channel, params.env, params.accountId); + return await readAllowFromStateForPath(params.channel, scopedPath); +} + +function readNonDefaultAccountAllowFromSync(params: { + channel: PairingChannel; + env: NodeJS.ProcessEnv; + accountId: string; +}): string[] { + const scopedPath = resolveAllowFromPath(params.channel, params.env, params.accountId); + return readAllowFromStateForPathSync(params.channel, scopedPath); +} + async function updateAllowFromStoreEntry(params: { channel: PairingChannel; entry: string | number; @@ -331,38 +384,62 @@ async function updateAllowFromStoreEntry(params: { ); } +export async function readLegacyChannelAllowFromStore( + channel: PairingChannel, + env: NodeJS.ProcessEnv = process.env, +): Promise { + const filePath = resolveAllowFromPath(channel, env); + return await readAllowFromStateForPath(channel, filePath); +} + export async function readChannelAllowFromStore( channel: PairingChannel, env: NodeJS.ProcessEnv = process.env, - accountId?: string, + accountId: string, ): Promise { - const normalizedAccountId = accountId?.trim().toLowerCase() ?? 
""; - if (!normalizedAccountId) { - const filePath = resolveAllowFromPath(channel, env); - return await readAllowFromStateForPath(channel, filePath); - } + const normalizedAccountId = accountId.trim().toLowerCase(); + const resolvedAccountId = normalizedAccountId || DEFAULT_ACCOUNT_ID; - const scopedPath = resolveAllowFromPath(channel, env, accountId); + if (!shouldIncludeLegacyAllowFromEntries(resolvedAccountId)) { + return await readNonDefaultAccountAllowFrom({ + channel, + env, + accountId: resolvedAccountId, + }); + } + const scopedPath = resolveAllowFromPath(channel, env, resolvedAccountId); const scopedEntries = await readAllowFromStateForPath(channel, scopedPath); // Backward compatibility: legacy channel-level allowFrom store was unscoped. - // Keep honoring it alongside account-scoped files to prevent re-pair prompts after upgrades. + // Keep honoring it for default account to prevent re-pair prompts after upgrades. const legacyPath = resolveAllowFromPath(channel, env); const legacyEntries = await readAllowFromStateForPath(channel, legacyPath); return dedupePreserveOrder([...scopedEntries, ...legacyEntries]); } +export function readLegacyChannelAllowFromStoreSync( + channel: PairingChannel, + env: NodeJS.ProcessEnv = process.env, +): string[] { + const filePath = resolveAllowFromPath(channel, env); + return readAllowFromStateForPathSync(channel, filePath); +} + export function readChannelAllowFromStoreSync( channel: PairingChannel, env: NodeJS.ProcessEnv = process.env, - accountId?: string, + accountId: string, ): string[] { - const normalizedAccountId = accountId?.trim().toLowerCase() ?? 
""; - if (!normalizedAccountId) { - const filePath = resolveAllowFromPath(channel, env); - return readAllowFromStateForPathSync(channel, filePath); - } + const normalizedAccountId = accountId.trim().toLowerCase(); + const resolvedAccountId = normalizedAccountId || DEFAULT_ACCOUNT_ID; - const scopedPath = resolveAllowFromPath(channel, env, accountId); + if (!shouldIncludeLegacyAllowFromEntries(resolvedAccountId)) { + return readNonDefaultAccountAllowFromSync({ + channel, + env, + accountId: resolvedAccountId, + }); + } + const scopedPath = resolveAllowFromPath(channel, env, resolvedAccountId); const scopedEntries = readAllowFromStateForPathSync(channel, scopedPath); const legacyPath = resolveAllowFromPath(channel, env); const legacyEntries = readAllowFromStateForPathSync(channel, legacyPath); @@ -471,7 +548,7 @@ export async function listChannelPairingRequests( export async function upsertChannelPairingRequest(params: { channel: PairingChannel; id: string | number; - accountId?: string; + accountId: string; meta?: Record; env?: NodeJS.ProcessEnv; /** Extension channels can pass their adapter directly to bypass registry lookup. */ @@ -486,7 +563,7 @@ export async function upsertChannelPairingRequest(params: { const now = new Date().toISOString(); const nowMs = Date.now(); const id = normalizeId(params.id); - const normalizedAccountId = params.accountId?.trim(); + const normalizedAccountId = normalizePairingAccountId(params.accountId) || DEFAULT_ACCOUNT_ID; const baseMeta = params.meta && typeof params.meta === "object" ? Object.fromEntries( @@ -495,7 +572,7 @@ export async function upsertChannelPairingRequest(params: { .filter(([_, v]) => Boolean(v)), ) : undefined; - const meta = normalizedAccountId ? 
{ ...baseMeta, accountId: normalizedAccountId } : baseMeta; + const meta = { ...baseMeta, accountId: normalizedAccountId }; let reqs = await readPairingRequests(filePath); const { requests: prunedExpired, removed: expiredRemoved } = pruneExpiredRequests( @@ -503,7 +580,13 @@ export async function upsertChannelPairingRequest(params: { nowMs, ); reqs = prunedExpired; - const existingIdx = reqs.findIndex((r) => r.id === id); + const normalizedMatchingAccountId = normalizedAccountId; + const existingIdx = reqs.findIndex((r) => { + if (r.id !== id) { + return false; + } + return requestMatchesAccountId(r, normalizedMatchingAccountId); + }); const existingCodes = new Set( reqs.map((req) => String(req.code ?? "") diff --git a/src/plugin-sdk/command-auth.test.ts b/src/plugin-sdk/command-auth.test.ts new file mode 100644 index 00000000000..c3ba8c2e8ca --- /dev/null +++ b/src/plugin-sdk/command-auth.test.ts @@ -0,0 +1,51 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { resolveSenderCommandAuthorization } from "./command-auth.js"; + +const baseCfg = { + commands: { useAccessGroups: true }, +} as unknown as OpenClawConfig; + +describe("plugin-sdk/command-auth", () => { + it("authorizes group commands from explicit group allowlist", async () => { + const result = await resolveSenderCommandAuthorization({ + cfg: baseCfg, + rawBody: "/status", + isGroup: true, + dmPolicy: "pairing", + configuredAllowFrom: ["dm-owner"], + configuredGroupAllowFrom: ["group-owner"], + senderId: "group-owner", + isSenderAllowed: (senderId, allowFrom) => allowFrom.includes(senderId), + readAllowFromStore: async () => ["paired-user"], + shouldComputeCommandAuthorized: () => true, + resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => + useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), + }); + expect(result.commandAuthorized).toBe(true); + 
expect(result.senderAllowedForCommands).toBe(true); + expect(result.effectiveAllowFrom).toEqual(["dm-owner"]); + expect(result.effectiveGroupAllowFrom).toEqual(["group-owner"]); + }); + + it("keeps pairing-store identities DM-only for group command auth", async () => { + const result = await resolveSenderCommandAuthorization({ + cfg: baseCfg, + rawBody: "/status", + isGroup: true, + dmPolicy: "pairing", + configuredAllowFrom: ["dm-owner"], + configuredGroupAllowFrom: ["group-owner"], + senderId: "paired-user", + isSenderAllowed: (senderId, allowFrom) => allowFrom.includes(senderId), + readAllowFromStore: async () => ["paired-user"], + shouldComputeCommandAuthorized: () => true, + resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => + useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), + }); + expect(result.commandAuthorized).toBe(false); + expect(result.senderAllowedForCommands).toBe(false); + expect(result.effectiveAllowFrom).toEqual(["dm-owner"]); + expect(result.effectiveGroupAllowFrom).toEqual(["group-owner"]); + }); +}); diff --git a/src/plugin-sdk/command-auth.ts b/src/plugin-sdk/command-auth.ts index 287f1398da4..cc7d9d2207a 100644 --- a/src/plugin-sdk/command-auth.ts +++ b/src/plugin-sdk/command-auth.ts @@ -1,4 +1,5 @@ import type { OpenClawConfig } from "../config/config.js"; +import { resolveDmGroupAccessWithLists } from "../security/dm-policy-shared.js"; export type ResolveSenderCommandAuthorizationParams = { cfg: OpenClawConfig; @@ -6,6 +7,7 @@ export type ResolveSenderCommandAuthorizationParams = { isGroup: boolean; dmPolicy: string; configuredAllowFrom: string[]; + configuredGroupAllowFrom?: string[]; senderId: string; isSenderAllowed: (senderId: string, allowFrom: string[]) => boolean; readAllowFromStore: () => Promise; @@ -21,6 +23,7 @@ export async function resolveSenderCommandAuthorization( ): Promise<{ shouldComputeAuth: boolean; effectiveAllowFrom: string[]; + effectiveGroupAllowFrom: 
string[]; senderAllowedForCommands: boolean; commandAuthorized: boolean | undefined; }> { @@ -31,14 +34,30 @@ export async function resolveSenderCommandAuthorization( (params.dmPolicy !== "open" || shouldComputeAuth) ? await params.readAllowFromStore().catch(() => []) : []; - const effectiveAllowFrom = [...params.configuredAllowFrom, ...storeAllowFrom]; + const access = resolveDmGroupAccessWithLists({ + isGroup: params.isGroup, + dmPolicy: params.dmPolicy, + groupPolicy: "allowlist", + allowFrom: params.configuredAllowFrom, + groupAllowFrom: params.configuredGroupAllowFrom ?? [], + storeAllowFrom, + isSenderAllowed: (allowFrom) => params.isSenderAllowed(params.senderId, allowFrom), + }); + const effectiveAllowFrom = access.effectiveAllowFrom; + const effectiveGroupAllowFrom = access.effectiveGroupAllowFrom; const useAccessGroups = params.cfg.commands?.useAccessGroups !== false; - const senderAllowedForCommands = params.isSenderAllowed(params.senderId, effectiveAllowFrom); + const senderAllowedForCommands = params.isSenderAllowed( + params.senderId, + params.isGroup ? effectiveGroupAllowFrom : effectiveAllowFrom, + ); + const ownerAllowedForCommands = params.isSenderAllowed(params.senderId, effectiveAllowFrom); + const groupAllowedForCommands = params.isSenderAllowed(params.senderId, effectiveGroupAllowFrom); const commandAuthorized = shouldComputeAuth ? 
params.resolveCommandAuthorizedFromAuthorizers({ useAccessGroups, authorizers: [ - { configured: effectiveAllowFrom.length > 0, allowed: senderAllowedForCommands }, + { configured: effectiveAllowFrom.length > 0, allowed: ownerAllowedForCommands }, + { configured: effectiveGroupAllowFrom.length > 0, allowed: groupAllowedForCommands }, ], }) : undefined; @@ -46,6 +65,7 @@ export async function resolveSenderCommandAuthorization( return { shouldComputeAuth, effectiveAllowFrom, + effectiveGroupAllowFrom, senderAllowedForCommands, commandAuthorized, }; diff --git a/src/plugin-sdk/fetch-auth.test.ts b/src/plugin-sdk/fetch-auth.test.ts new file mode 100644 index 00000000000..abf4aac80c2 --- /dev/null +++ b/src/plugin-sdk/fetch-auth.test.ts @@ -0,0 +1,96 @@ +import { describe, expect, it, vi } from "vitest"; +import { fetchWithBearerAuthScopeFallback } from "./fetch-auth.js"; + +const asFetch = (fn: unknown): typeof fetch => fn as typeof fetch; + +describe("fetchWithBearerAuthScopeFallback", () => { + it("rejects non-https urls when https is required", async () => { + await expect( + fetchWithBearerAuthScopeFallback({ + url: "http://example.com/file", + scopes: [], + requireHttps: true, + }), + ).rejects.toThrow("URL must use HTTPS"); + }); + + it("returns immediately when the first attempt succeeds", async () => { + const fetchFn = vi.fn(async () => new Response("ok", { status: 200 })); + const tokenProvider = { getAccessToken: vi.fn(async () => "unused") }; + + const response = await fetchWithBearerAuthScopeFallback({ + url: "https://example.com/file", + scopes: ["https://graph.microsoft.com"], + fetchFn: asFetch(fetchFn), + tokenProvider, + }); + + expect(response.status).toBe(200); + expect(fetchFn).toHaveBeenCalledTimes(1); + expect(tokenProvider.getAccessToken).not.toHaveBeenCalled(); + }); + + it("retries with auth scopes after a 401 response", async () => { + const fetchFn = vi + .fn() + .mockResolvedValueOnce(new Response("unauthorized", { status: 401 })) + 
.mockResolvedValueOnce(new Response("ok", { status: 200 })); + const tokenProvider = { getAccessToken: vi.fn(async () => "token-1") }; + + const response = await fetchWithBearerAuthScopeFallback({ + url: "https://graph.microsoft.com/v1.0/me", + scopes: ["https://graph.microsoft.com", "https://api.botframework.com"], + fetchFn: asFetch(fetchFn), + tokenProvider, + }); + + expect(response.status).toBe(200); + expect(fetchFn).toHaveBeenCalledTimes(2); + expect(tokenProvider.getAccessToken).toHaveBeenCalledWith("https://graph.microsoft.com"); + const secondCall = fetchFn.mock.calls[1] as [string, RequestInit | undefined]; + const secondHeaders = new Headers(secondCall[1]?.headers); + expect(secondHeaders.get("authorization")).toBe("Bearer token-1"); + }); + + it("does not attach auth when host predicate rejects url", async () => { + const fetchFn = vi.fn(async () => new Response("unauthorized", { status: 401 })); + const tokenProvider = { getAccessToken: vi.fn(async () => "token-1") }; + + const response = await fetchWithBearerAuthScopeFallback({ + url: "https://example.com/file", + scopes: ["https://graph.microsoft.com"], + fetchFn: asFetch(fetchFn), + tokenProvider, + shouldAttachAuth: () => false, + }); + + expect(response.status).toBe(401); + expect(fetchFn).toHaveBeenCalledTimes(1); + expect(tokenProvider.getAccessToken).not.toHaveBeenCalled(); + }); + + it("continues across scopes when token retrieval fails", async () => { + const fetchFn = vi + .fn() + .mockResolvedValueOnce(new Response("unauthorized", { status: 401 })) + .mockResolvedValueOnce(new Response("ok", { status: 200 })); + const tokenProvider = { + getAccessToken: vi + .fn() + .mockRejectedValueOnce(new Error("first scope failed")) + .mockResolvedValueOnce("token-2"), + }; + + const response = await fetchWithBearerAuthScopeFallback({ + url: "https://graph.microsoft.com/v1.0/me", + scopes: ["https://first.example", "https://second.example"], + fetchFn: asFetch(fetchFn), + tokenProvider, + }); + + 
expect(response.status).toBe(200); + expect(tokenProvider.getAccessToken).toHaveBeenCalledTimes(2); + expect(tokenProvider.getAccessToken).toHaveBeenNthCalledWith(1, "https://first.example"); + expect(tokenProvider.getAccessToken).toHaveBeenNthCalledWith(2, "https://second.example"); + }); +}); diff --git a/src/plugin-sdk/fetch-auth.ts b/src/plugin-sdk/fetch-auth.ts new file mode 100644 index 00000000000..fc04e4aa910 --- /dev/null +++ b/src/plugin-sdk/fetch-auth.ts @@ -0,0 +1,71 @@ +export type ScopeTokenProvider = { + getAccessToken: (scope: string) => Promise; +}; + +function isAuthFailureStatus(status: number): boolean { + return status === 401 || status === 403; +} + +export async function fetchWithBearerAuthScopeFallback(params: { + url: string; + scopes: readonly string[]; + tokenProvider?: ScopeTokenProvider; + fetchFn?: typeof fetch; + requestInit?: RequestInit; + requireHttps?: boolean; + shouldAttachAuth?: (url: string) => boolean; + shouldRetry?: (response: Response) => boolean; +}): Promise { + const fetchFn = params.fetchFn ?? fetch; + let parsedUrl: URL; + try { + parsedUrl = new URL(params.url); + } catch { + throw new Error(`Invalid URL: ${params.url}`); + } + if (params.requireHttps === true && parsedUrl.protocol !== "https:") { + throw new Error(`URL must use HTTPS: ${params.url}`); + } + + const fetchOnce = (headers?: Headers): Promise => + fetchFn(params.url, { + ...params.requestInit, + ...(headers ? { headers } : {}), + }); + + const firstAttempt = await fetchOnce(); + if (firstAttempt.ok) { + return firstAttempt; + } + if (!params.tokenProvider) { + return firstAttempt; + } + + const shouldRetry = + params.shouldRetry ?? 
((response: Response) => isAuthFailureStatus(response.status)); + if (!shouldRetry(firstAttempt)) { + return firstAttempt; + } + if (params.shouldAttachAuth && !params.shouldAttachAuth(params.url)) { + return firstAttempt; + } + + for (const scope of params.scopes) { + try { + const token = await params.tokenProvider.getAccessToken(scope); + const authHeaders = new Headers(params.requestInit?.headers); + authHeaders.set("Authorization", `Bearer ${token}`); + const authAttempt = await fetchOnce(authHeaders); + if (authAttempt.ok) { + return authAttempt; + } + if (!shouldRetry(authAttempt)) { + continue; + } + } catch { + // Ignore token/fetch errors and continue trying remaining scopes. + } + } + + return firstAttempt; +} diff --git a/src/plugin-sdk/index.ts b/src/plugin-sdk/index.ts index 7faa2341dc0..6a0829c0b9f 100644 --- a/src/plugin-sdk/index.ts +++ b/src/plugin-sdk/index.ts @@ -71,11 +71,35 @@ export { listThreadBindingsBySessionKey, unbindThreadBindingsBySessionKey, } from "../discord/monitor/thread-bindings.js"; +export type { + AcpRuntimeCapabilities, + AcpRuntimeControl, + AcpRuntimeDoctorReport, + AcpRuntime, + AcpRuntimeEnsureInput, + AcpRuntimeEvent, + AcpRuntimeHandle, + AcpRuntimePromptMode, + AcpRuntimeSessionMode, + AcpRuntimeStatus, + AcpRuntimeTurnInput, +} from "../acp/runtime/types.js"; +export type { AcpRuntimeBackend } from "../acp/runtime/registry.js"; +export { + getAcpRuntimeBackend, + registerAcpRuntimeBackend, + requireAcpRuntimeBackend, + unregisterAcpRuntimeBackend, +} from "../acp/runtime/registry.js"; +export { ACP_ERROR_CODES, AcpRuntimeError } from "../acp/runtime/errors.js"; +export type { AcpRuntimeErrorCode } from "../acp/runtime/errors.js"; export type { AnyAgentTool, + OpenClawPluginConfigSchema, OpenClawPluginApi, OpenClawPluginService, OpenClawPluginServiceContext, + PluginLogger, ProviderAuthContext, ProviderAuthResult, } from "../plugins/types.js"; @@ -192,6 +216,8 @@ export { type SenderGroupAccessReason, } from 
"./group-access.js"; export { resolveSenderCommandAuthorization } from "./command-auth.js"; +export { createScopedPairingAccess } from "./pairing-access.js"; +export { issuePairingChallenge } from "../pairing/pairing-challenge.js"; export { handleSlackMessageAction } from "./slack-message-actions.js"; export { extractToolSend } from "./tool-send.js"; export { @@ -244,6 +270,11 @@ export type { PersistentDedupeOptions, } from "./persistent-dedupe.js"; export { formatErrorMessage } from "../infra/errors.js"; +export { + formatUtcTimestamp, + formatZonedTimestamp, + resolveTimezone, +} from "../infra/format-time/format-datetime.js"; export { DEFAULT_WEBHOOK_BODY_TIMEOUT_MS, DEFAULT_WEBHOOK_MAX_BODY_BYTES, @@ -263,6 +294,13 @@ export { isPrivateIpAddress, } from "../infra/net/ssrf.js"; export type { LookupFn, SsrFPolicy } from "../infra/net/ssrf.js"; +export { + buildHostnameAllowlistPolicyFromSuffixAllowlist, + isHttpsUrlAllowedByHostnameSuffixAllowlist, + normalizeHostnameSuffixAllowlist, +} from "./ssrf-policy.js"; +export { fetchWithBearerAuthScopeFallback } from "./fetch-auth.js"; +export type { ScopeTokenProvider } from "./fetch-auth.js"; export { rawDataToString } from "../infra/ws.js"; export { isWSLSync, isWSL2Sync, isWSLEnv } from "../infra/wsl.js"; export { isTruthyEnvValue } from "../infra/env.js"; @@ -373,10 +411,15 @@ export { } from "../agents/tools/common.js"; export { formatDocsLink } from "../terminal/links.js"; export { + DM_GROUP_ACCESS_REASON, + readStoreAllowFromForDmPolicy, resolveDmAllowState, resolveDmGroupAccessDecision, + resolveDmGroupAccessWithCommandGate, + resolveDmGroupAccessWithLists, resolveEffectiveAllowFromLists, } from "../security/dm-policy-shared.js"; +export type { DmGroupAccessReasonCode } from "../security/dm-policy-shared.js"; export type { HookEntry } from "../hooks/types.js"; export { clamp, escapeRegExp, normalizeE164, safeParseJson, sleep } from "../utils.js"; export { stripAnsi } from "../terminal/ansi.js"; diff --git 
a/src/plugin-sdk/pairing-access.ts b/src/plugin-sdk/pairing-access.ts new file mode 100644 index 00000000000..31f0cd4d3a7 --- /dev/null +++ b/src/plugin-sdk/pairing-access.ts @@ -0,0 +1,36 @@ +import type { ChannelId } from "../channels/plugins/types.js"; +import type { PluginRuntime } from "../plugins/runtime/types.js"; +import { normalizeAccountId } from "../routing/session-key.js"; + +type PairingApi = PluginRuntime["channel"]["pairing"]; +type ScopedUpsertInput = Omit< + Parameters[0], + "channel" | "accountId" +>; + +export function createScopedPairingAccess(params: { + core: PluginRuntime; + channel: ChannelId; + accountId: string; +}) { + const resolvedAccountId = normalizeAccountId(params.accountId); + return { + accountId: resolvedAccountId, + readAllowFromStore: () => + params.core.channel.pairing.readAllowFromStore({ + channel: params.channel, + accountId: resolvedAccountId, + }), + readStoreForDmPolicy: (provider: ChannelId, accountId: string) => + params.core.channel.pairing.readAllowFromStore({ + channel: provider, + accountId: normalizeAccountId(accountId), + }), + upsertPairingRequest: (input: ScopedUpsertInput) => + params.core.channel.pairing.upsertPairingRequest({ + channel: params.channel, + accountId: resolvedAccountId, + ...input, + }), + }; +} diff --git a/src/plugin-sdk/ssrf-policy.test.ts b/src/plugin-sdk/ssrf-policy.test.ts new file mode 100644 index 00000000000..20247e7bc2a --- /dev/null +++ b/src/plugin-sdk/ssrf-policy.test.ts @@ -0,0 +1,84 @@ +import { describe, expect, it } from "vitest"; +import { + buildHostnameAllowlistPolicyFromSuffixAllowlist, + isHttpsUrlAllowedByHostnameSuffixAllowlist, + normalizeHostnameSuffixAllowlist, +} from "./ssrf-policy.js"; + +describe("normalizeHostnameSuffixAllowlist", () => { + it("uses defaults when input is missing", () => { + expect(normalizeHostnameSuffixAllowlist(undefined, ["GRAPH.MICROSOFT.COM"])).toEqual([ + "graph.microsoft.com", + ]); + }); + + it("normalizes wildcard prefixes and 
deduplicates", () => { + expect( + normalizeHostnameSuffixAllowlist([ + "*.TrafficManager.NET", + ".trafficmanager.net.", + " * ", + "x", + ]), + ).toEqual(["*"]); + }); +}); + +describe("isHttpsUrlAllowedByHostnameSuffixAllowlist", () => { + it("requires https", () => { + expect( + isHttpsUrlAllowedByHostnameSuffixAllowlist("http://a.example.com/x", ["example.com"]), + ).toBe(false); + }); + + it("supports exact and suffix match", () => { + expect( + isHttpsUrlAllowedByHostnameSuffixAllowlist("https://example.com/x", ["example.com"]), + ).toBe(true); + expect( + isHttpsUrlAllowedByHostnameSuffixAllowlist("https://a.example.com/x", ["example.com"]), + ).toBe(true); + expect(isHttpsUrlAllowedByHostnameSuffixAllowlist("https://evil.com/x", ["example.com"])).toBe( + false, + ); + }); + + it("supports wildcard allowlist", () => { + expect(isHttpsUrlAllowedByHostnameSuffixAllowlist("https://evil.com/x", ["*"])).toBe(true); + }); +}); + +describe("buildHostnameAllowlistPolicyFromSuffixAllowlist", () => { + it("returns undefined when allowHosts is empty", () => { + expect(buildHostnameAllowlistPolicyFromSuffixAllowlist()).toBeUndefined(); + expect(buildHostnameAllowlistPolicyFromSuffixAllowlist([])).toBeUndefined(); + }); + + it("returns undefined when wildcard host is present", () => { + expect(buildHostnameAllowlistPolicyFromSuffixAllowlist(["*"])).toBeUndefined(); + expect(buildHostnameAllowlistPolicyFromSuffixAllowlist(["example.com", "*"])).toBeUndefined(); + }); + + it("expands a suffix entry to exact + wildcard hostname allowlist patterns", () => { + expect(buildHostnameAllowlistPolicyFromSuffixAllowlist(["sharepoint.com"])).toEqual({ + hostnameAllowlist: ["sharepoint.com", "*.sharepoint.com"], + }); + }); + + it("normalizes wildcard prefixes, leading/trailing dots, and deduplicates patterns", () => { + expect( + buildHostnameAllowlistPolicyFromSuffixAllowlist([ + "*.TrafficManager.NET", + ".trafficmanager.net.", + " blob.core.windows.net ", + ]), + ).toEqual({ + 
hostnameAllowlist: [ + "trafficmanager.net", + "*.trafficmanager.net", + "blob.core.windows.net", + "*.blob.core.windows.net", + ], + }); + }); +}); diff --git a/src/plugin-sdk/ssrf-policy.ts b/src/plugin-sdk/ssrf-policy.ts new file mode 100644 index 00000000000..351938d0456 --- /dev/null +++ b/src/plugin-sdk/ssrf-policy.ts @@ -0,0 +1,85 @@ +import type { SsrFPolicy } from "../infra/net/ssrf.js"; + +function normalizeHostnameSuffix(value: string): string { + const trimmed = value.trim().toLowerCase(); + if (!trimmed) { + return ""; + } + if (trimmed === "*" || trimmed === "*.") { + return "*"; + } + const withoutWildcard = trimmed.replace(/^\*\.?/, ""); + const withoutLeadingDot = withoutWildcard.replace(/^\.+/, ""); + return withoutLeadingDot.replace(/\.+$/, ""); +} + +function isHostnameAllowedBySuffixAllowlist( + hostname: string, + allowlist: readonly string[], +): boolean { + if (allowlist.includes("*")) { + return true; + } + const normalized = hostname.toLowerCase(); + return allowlist.some((entry) => normalized === entry || normalized.endsWith(`.${entry}`)); +} + +export function normalizeHostnameSuffixAllowlist( + input?: readonly string[], + defaults?: readonly string[], +): string[] { + const source = input && input.length > 0 ? input : defaults; + if (!source || source.length === 0) { + return []; + } + const normalized = source.map(normalizeHostnameSuffix).filter(Boolean); + if (normalized.includes("*")) { + return ["*"]; + } + return Array.from(new Set(normalized)); +} + +export function isHttpsUrlAllowedByHostnameSuffixAllowlist( + url: string, + allowlist: readonly string[], +): boolean { + try { + const parsed = new URL(url); + if (parsed.protocol !== "https:") { + return false; + } + return isHostnameAllowedBySuffixAllowlist(parsed.hostname, allowlist); + } catch { + return false; + } +} + +/** + * Converts suffix-style host allowlists (for example "example.com") into SSRF + * hostname allowlist patterns used by the shared fetch guard. 
+ * + * Suffix semantics: + * - "example.com" allows "example.com" and "*.example.com" + * - "*" disables hostname allowlist restrictions + */ +export function buildHostnameAllowlistPolicyFromSuffixAllowlist( + allowHosts?: readonly string[], +): SsrFPolicy | undefined { + const normalizedAllowHosts = normalizeHostnameSuffixAllowlist(allowHosts); + if (normalizedAllowHosts.length === 0) { + return undefined; + } + const patterns = new Set(); + for (const normalized of normalizedAllowHosts) { + if (normalized === "*") { + return undefined; + } + patterns.add(normalized); + patterns.add(`*.${normalized}`); + } + + if (patterns.size === 0) { + return undefined; + } + return { hostnameAllowlist: Array.from(patterns) }; +} diff --git a/src/plugins/bundled-sources.test.ts b/src/plugins/bundled-sources.test.ts new file mode 100644 index 00000000000..437b06c193e --- /dev/null +++ b/src/plugins/bundled-sources.test.ts @@ -0,0 +1,97 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { findBundledPluginByNpmSpec, resolveBundledPluginSources } from "./bundled-sources.js"; + +const discoverOpenClawPluginsMock = vi.fn(); +const loadPluginManifestMock = vi.fn(); + +vi.mock("./discovery.js", () => ({ + discoverOpenClawPlugins: (...args: unknown[]) => discoverOpenClawPluginsMock(...args), +})); + +vi.mock("./manifest.js", () => ({ + loadPluginManifest: (...args: unknown[]) => loadPluginManifestMock(...args), +})); + +describe("bundled plugin sources", () => { + beforeEach(() => { + discoverOpenClawPluginsMock.mockReset(); + loadPluginManifestMock.mockReset(); + }); + + it("resolves bundled sources keyed by plugin id", () => { + discoverOpenClawPluginsMock.mockReturnValue({ + candidates: [ + { + origin: "global", + rootDir: "/global/feishu", + packageName: "@openclaw/feishu", + packageManifest: { install: { npmSpec: "@openclaw/feishu" } }, + }, + { + origin: "bundled", + rootDir: "/app/extensions/feishu", + packageName: "@openclaw/feishu", + packageManifest: 
{ install: { npmSpec: "@openclaw/feishu" } }, + }, + { + origin: "bundled", + rootDir: "/app/extensions/feishu-dup", + packageName: "@openclaw/feishu", + packageManifest: { install: { npmSpec: "@openclaw/feishu" } }, + }, + { + origin: "bundled", + rootDir: "/app/extensions/msteams", + packageName: "@openclaw/msteams", + packageManifest: { install: { npmSpec: "@openclaw/msteams" } }, + }, + ], + diagnostics: [], + }); + + loadPluginManifestMock.mockImplementation((rootDir: string) => { + if (rootDir === "/app/extensions/feishu") { + return { ok: true, manifest: { id: "feishu" } }; + } + if (rootDir === "/app/extensions/msteams") { + return { ok: true, manifest: { id: "msteams" } }; + } + return { + ok: false, + error: "invalid manifest", + manifestPath: `${rootDir}/openclaw.plugin.json`, + }; + }); + + const map = resolveBundledPluginSources({}); + + expect(Array.from(map.keys())).toEqual(["feishu", "msteams"]); + expect(map.get("feishu")).toEqual({ + pluginId: "feishu", + localPath: "/app/extensions/feishu", + npmSpec: "@openclaw/feishu", + }); + }); + + it("finds bundled source by npm spec", () => { + discoverOpenClawPluginsMock.mockReturnValue({ + candidates: [ + { + origin: "bundled", + rootDir: "/app/extensions/feishu", + packageName: "@openclaw/feishu", + packageManifest: { install: { npmSpec: "@openclaw/feishu" } }, + }, + ], + diagnostics: [], + }); + loadPluginManifestMock.mockReturnValue({ ok: true, manifest: { id: "feishu" } }); + + const resolved = findBundledPluginByNpmSpec({ spec: "@openclaw/feishu" }); + const missing = findBundledPluginByNpmSpec({ spec: "@openclaw/not-found" }); + + expect(resolved?.pluginId).toBe("feishu"); + expect(resolved?.localPath).toBe("/app/extensions/feishu"); + expect(missing).toBeUndefined(); + }); +}); diff --git a/src/plugins/bundled-sources.ts b/src/plugins/bundled-sources.ts new file mode 100644 index 00000000000..44ac618f211 --- /dev/null +++ b/src/plugins/bundled-sources.ts @@ -0,0 +1,59 @@ +import { 
discoverOpenClawPlugins } from "./discovery.js"; +import { loadPluginManifest } from "./manifest.js"; + +export type BundledPluginSource = { + pluginId: string; + localPath: string; + npmSpec?: string; +}; + +export function resolveBundledPluginSources(params: { + workspaceDir?: string; +}): Map { + const discovery = discoverOpenClawPlugins({ workspaceDir: params.workspaceDir }); + const bundled = new Map(); + + for (const candidate of discovery.candidates) { + if (candidate.origin !== "bundled") { + continue; + } + const manifest = loadPluginManifest(candidate.rootDir); + if (!manifest.ok) { + continue; + } + const pluginId = manifest.manifest.id; + if (bundled.has(pluginId)) { + continue; + } + + const npmSpec = + candidate.packageManifest?.install?.npmSpec?.trim() || + candidate.packageName?.trim() || + undefined; + + bundled.set(pluginId, { + pluginId, + localPath: candidate.rootDir, + npmSpec, + }); + } + + return bundled; +} + +export function findBundledPluginByNpmSpec(params: { + spec: string; + workspaceDir?: string; +}): BundledPluginSource | undefined { + const targetSpec = params.spec.trim(); + if (!targetSpec) { + return undefined; + } + const bundled = resolveBundledPluginSources({ workspaceDir: params.workspaceDir }); + for (const source of bundled.values()) { + if (source.npmSpec === targetSpec) { + return source; + } + } + return undefined; +} diff --git a/src/plugins/discovery.test.ts b/src/plugins/discovery.test.ts index 180ab87cc8c..68cd0c83915 100644 --- a/src/plugins/discovery.test.ts +++ b/src/plugins/discovery.test.ts @@ -231,6 +231,82 @@ describe("discoverOpenClawPlugins", () => { ); }); + it("rejects package extension entries that are hardlinked aliases", async () => { + if (process.platform === "win32") { + return; + } + const stateDir = makeTempDir(); + const globalExt = path.join(stateDir, "extensions", "pack"); + const outsideDir = path.join(stateDir, "outside"); + const outsideFile = path.join(outsideDir, "escape.ts"); + const 
linkedFile = path.join(globalExt, "escape.ts"); + fs.mkdirSync(globalExt, { recursive: true }); + fs.mkdirSync(outsideDir, { recursive: true }); + fs.writeFileSync(outsideFile, "export default {}", "utf-8"); + try { + fs.linkSync(outsideFile, linkedFile); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + fs.writeFileSync( + path.join(globalExt, "package.json"), + JSON.stringify({ + name: "@openclaw/pack", + openclaw: { extensions: ["./escape.ts"] }, + }), + "utf-8", + ); + + const { candidates, diagnostics } = await withStateDir(stateDir, async () => { + return discoverOpenClawPlugins({}); + }); + + expect(candidates.some((candidate) => candidate.idHint === "pack")).toBe(false); + expect(diagnostics.some((entry) => entry.message.includes("escapes package directory"))).toBe( + true, + ); + }); + + it("ignores package manifests that are hardlinked aliases", async () => { + if (process.platform === "win32") { + return; + } + const stateDir = makeTempDir(); + const globalExt = path.join(stateDir, "extensions", "pack"); + const outsideDir = path.join(stateDir, "outside"); + const outsideManifest = path.join(outsideDir, "package.json"); + const linkedManifest = path.join(globalExt, "package.json"); + fs.mkdirSync(globalExt, { recursive: true }); + fs.mkdirSync(outsideDir, { recursive: true }); + fs.writeFileSync(path.join(globalExt, "entry.ts"), "export default {}", "utf-8"); + fs.writeFileSync( + outsideManifest, + JSON.stringify({ + name: "@openclaw/pack", + openclaw: { extensions: ["./entry.ts"] }, + }), + "utf-8", + ); + try { + fs.linkSync(outsideManifest, linkedManifest); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const { candidates } = await withStateDir(stateDir, async () => { + return discoverOpenClawPlugins({}); + }); + + expect(candidates.some((candidate) => candidate.idHint === "pack")).toBe(false); + }); + it.runIf(process.platform 
!== "win32")("blocks world-writable plugin paths", async () => { const stateDir = makeTempDir(); const globalExt = path.join(stateDir, "extensions"); diff --git a/src/plugins/discovery.ts b/src/plugins/discovery.ts index 1df727fabfa..44759ed6903 100644 --- a/src/plugins/discovery.ts +++ b/src/plugins/discovery.ts @@ -1,6 +1,6 @@ import fs from "node:fs"; import path from "node:path"; -import { isPathInsideWithRealpath } from "../security/scan-paths.js"; +import { openBoundaryFileSync } from "../infra/boundary-file-read.js"; import { resolveConfigDir, resolveUserPath } from "../utils.js"; import { resolveBundledPluginsDir } from "./bundled-dir.js"; import { @@ -225,14 +225,21 @@ function shouldIgnoreScannedDirectory(dirName: string): boolean { function readPackageManifest(dir: string): PackageManifest | null { const manifestPath = path.join(dir, "package.json"); - if (!fs.existsSync(manifestPath)) { + const opened = openBoundaryFileSync({ + absolutePath: manifestPath, + rootPath: dir, + boundaryLabel: "plugin package directory", + }); + if (!opened.ok) { return null; } try { - const raw = fs.readFileSync(manifestPath, "utf-8"); + const raw = fs.readFileSync(opened.fd, "utf-8"); return JSON.parse(raw) as PackageManifest; } catch { return null; + } finally { + fs.closeSync(opened.fd); } } @@ -284,7 +291,7 @@ function addCandidate(params: { if (params.seen.has(resolved)) { return; } - const resolvedRoot = path.resolve(params.rootDir); + const resolvedRoot = safeRealpathSync(params.rootDir) ?? 
path.resolve(params.rootDir); if ( isUnsafePluginCandidate({ source: resolved, @@ -319,11 +326,12 @@ function resolvePackageEntrySource(params: { diagnostics: PluginDiagnostic[]; }): string | null { const source = path.resolve(params.packageDir, params.entryPath); - if ( - !isPathInsideWithRealpath(params.packageDir, source, { - requireRealpath: true, - }) - ) { + const opened = openBoundaryFileSync({ + absolutePath: source, + rootPath: params.packageDir, + boundaryLabel: "plugin package directory", + }); + if (!opened.ok) { params.diagnostics.push({ level: "error", message: `extension entry escapes package directory: ${params.entryPath}`, @@ -331,7 +339,9 @@ function resolvePackageEntrySource(params: { }); return null; } - return source; + const safeSource = opened.path; + fs.closeSync(opened.fd); + return safeSource; } function discoverInDirectory(params: { diff --git a/src/plugins/loader.test.ts b/src/plugins/loader.test.ts index 5a43702570e..d7390306ac7 100644 --- a/src/plugins/loader.test.ts +++ b/src/plugins/loader.test.ts @@ -4,7 +4,7 @@ import os from "node:os"; import path from "node:path"; import { afterAll, afterEach, describe, expect, it } from "vitest"; import { withEnv } from "../test-utils/env.js"; -import { loadOpenClawPlugins } from "./loader.js"; +import { __testing, loadOpenClawPlugins } from "./loader.js"; type TempPlugin = { dir: string; file: string; id: string }; @@ -295,6 +295,32 @@ describe("loadOpenClawPlugins", () => { expect(Object.keys(registry.gatewayHandlers)).toContain("allowed.ping"); }); + it("loads plugins when source and root differ only by realpath alias", () => { + process.env.OPENCLAW_BUNDLED_PLUGINS_DIR = "/nonexistent/bundled/plugins"; + const plugin = writePlugin({ + id: "alias-safe", + body: `export default { id: "alias-safe", register() {} };`, + }); + const realRoot = fs.realpathSync(plugin.dir); + if (realRoot === plugin.dir) { + return; + } + + const registry = loadOpenClawPlugins({ + cache: false, + workspaceDir: 
plugin.dir, + config: { + plugins: { + load: { paths: [plugin.file] }, + allow: ["alias-safe"], + }, + }, + }); + + const loaded = registry.plugins.find((entry) => entry.id === "alias-safe"); + expect(loaded?.status).toBe("loaded"); + }); + it("denylist disables plugins even if allowed", () => { process.env.OPENCLAW_BUNDLED_PLUGINS_DIR = "/nonexistent/bundled/plugins"; const plugin = writePlugin({ @@ -650,4 +676,90 @@ describe("loadOpenClawPlugins", () => { expect(record?.status).not.toBe("loaded"); expect(registry.diagnostics.some((entry) => entry.message.includes("escapes"))).toBe(true); }); + + it("rejects plugin entry files that escape plugin root via hardlink", () => { + if (process.platform === "win32") { + return; + } + process.env.OPENCLAW_BUNDLED_PLUGINS_DIR = "/nonexistent/bundled/plugins"; + const pluginDir = makeTempDir(); + const outsideDir = makeTempDir(); + const outsideEntry = path.join(outsideDir, "outside.js"); + const linkedEntry = path.join(pluginDir, "entry.js"); + fs.writeFileSync( + outsideEntry, + 'export default { id: "hardlinked", register() { throw new Error("should not run"); } };', + "utf-8", + ); + fs.writeFileSync( + path.join(pluginDir, "openclaw.plugin.json"), + JSON.stringify( + { + id: "hardlinked", + configSchema: EMPTY_PLUGIN_SCHEMA, + }, + null, + 2, + ), + "utf-8", + ); + try { + fs.linkSync(outsideEntry, linkedEntry); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const registry = loadOpenClawPlugins({ + cache: false, + config: { + plugins: { + load: { paths: [linkedEntry] }, + allow: ["hardlinked"], + }, + }, + }); + + const record = registry.plugins.find((entry) => entry.id === "hardlinked"); + expect(record?.status).not.toBe("loaded"); + expect(registry.diagnostics.some((entry) => entry.message.includes("escapes"))).toBe(true); + }); + + it("prefers dist plugin-sdk alias when loader runs from dist", () => { + const root = makeTempDir(); + const srcFile = 
path.join(root, "src", "plugin-sdk", "index.ts"); + const distFile = path.join(root, "dist", "plugin-sdk", "index.js"); + fs.mkdirSync(path.dirname(srcFile), { recursive: true }); + fs.mkdirSync(path.dirname(distFile), { recursive: true }); + fs.writeFileSync(srcFile, "export {};\n", "utf-8"); + fs.writeFileSync(distFile, "export {};\n", "utf-8"); + + const resolved = __testing.resolvePluginSdkAliasFile({ + srcFile: "index.ts", + distFile: "index.js", + modulePath: path.join(root, "dist", "plugins", "loader.js"), + }); + expect(resolved).toBe(distFile); + }); + + it("prefers src plugin-sdk alias when loader runs from src in non-production", () => { + const root = makeTempDir(); + const srcFile = path.join(root, "src", "plugin-sdk", "index.ts"); + const distFile = path.join(root, "dist", "plugin-sdk", "index.js"); + fs.mkdirSync(path.dirname(srcFile), { recursive: true }); + fs.mkdirSync(path.dirname(distFile), { recursive: true }); + fs.writeFileSync(srcFile, "export {};\n", "utf-8"); + fs.writeFileSync(distFile, "export {};\n", "utf-8"); + + const resolved = withEnv({ NODE_ENV: undefined }, () => + __testing.resolvePluginSdkAliasFile({ + srcFile: "index.ts", + distFile: "index.js", + modulePath: path.join(root, "src", "plugins", "loader.ts"), + }), + ); + expect(resolved).toBe(srcFile); + }); }); diff --git a/src/plugins/loader.ts b/src/plugins/loader.ts index c6cf256bc68..c60acba7396 100644 --- a/src/plugins/loader.ts +++ b/src/plugins/loader.ts @@ -4,8 +4,8 @@ import { fileURLToPath } from "node:url"; import { createJiti } from "jiti"; import type { OpenClawConfig } from "../config/config.js"; import type { GatewayRequestHandler } from "../gateway/server-methods/types.js"; +import { openBoundaryFileSync } from "../infra/boundary-file-read.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; -import { isPathInsideWithRealpath } from "../security/scan-paths.js"; import { resolveUserPath } from "../utils.js"; import { clearPluginCommands } from 
"./commands.js"; import { @@ -48,20 +48,25 @@ const defaultLogger = () => createSubsystemLogger("plugins"); const resolvePluginSdkAliasFile = (params: { srcFile: string; distFile: string; + modulePath?: string; }): string | null => { try { - const modulePath = fileURLToPath(import.meta.url); + const modulePath = params.modulePath ?? fileURLToPath(import.meta.url); const isProduction = process.env.NODE_ENV === "production"; const isTest = process.env.VITEST || process.env.NODE_ENV === "test"; + const normalizedModulePath = modulePath.replace(/\\/g, "/"); + const isDistRuntime = normalizedModulePath.includes("/dist/"); let cursor = path.dirname(modulePath); for (let i = 0; i < 6; i += 1) { const srcCandidate = path.join(cursor, "src", "plugin-sdk", params.srcFile); const distCandidate = path.join(cursor, "dist", "plugin-sdk", params.distFile); - const orderedCandidates = isProduction - ? isTest - ? [distCandidate, srcCandidate] - : [distCandidate] - : [srcCandidate, distCandidate]; + const orderedCandidates = isDistRuntime + ? [distCandidate, srcCandidate] + : isProduction + ? isTest + ? 
[distCandidate, srcCandidate] + : [distCandidate] + : [srcCandidate, distCandidate]; for (const candidate of orderedCandidates) { if (fs.existsSync(candidate)) { return candidate; @@ -86,6 +91,10 @@ const resolvePluginSdkAccountIdAlias = (): string | null => { return resolvePluginSdkAliasFile({ srcFile: "account-id.ts", distFile: "account-id.js" }); }; +export const __testing = { + resolvePluginSdkAliasFile, +}; + function buildCacheKey(params: { workspaceDir?: string; plugins: NormalizedPluginsConfig; @@ -516,13 +525,19 @@ export function loadOpenClawPlugins(options: PluginLoadOptions = {}): PluginRegi continue; } - if ( - !isPathInsideWithRealpath(candidate.rootDir, candidate.source, { - requireRealpath: true, - }) - ) { + const pluginRoot = safeRealpathOrResolve(candidate.rootDir); + const opened = openBoundaryFileSync({ + absolutePath: candidate.source, + rootPath: pluginRoot, + boundaryLabel: "plugin root", + // Discovery stores rootDir as realpath but source may still be a lexical alias + // (e.g. /var/... vs /private/var/... on macOS). Canonical boundary checks + // still enforce containment; skip lexical pre-check to avoid false escapes. 
+ skipLexicalRootCheck: true, + }); + if (!opened.ok) { record.status = "error"; - record.error = "plugin entry path escapes plugin root"; + record.error = "plugin entry path escapes plugin root or fails alias checks"; registry.plugins.push(record); seenIds.set(pluginId, candidate.origin); registry.diagnostics.push({ @@ -533,10 +548,12 @@ export function loadOpenClawPlugins(options: PluginLoadOptions = {}): PluginRegi }); continue; } + const safeSource = opened.path; + fs.closeSync(opened.fd); let mod: OpenClawPluginModule | null = null; try { - mod = getJiti()(candidate.source) as OpenClawPluginModule; + mod = getJiti()(safeSource) as OpenClawPluginModule; } catch (err) { recordPluginError({ logger, @@ -698,3 +715,11 @@ export function loadOpenClawPlugins(options: PluginLoadOptions = {}): PluginRegi initializeGlobalHookRunner(registry); return registry; } + +function safeRealpathOrResolve(value: string): string { + try { + return fs.realpathSync(value); + } catch { + return path.resolve(value); + } +} diff --git a/src/plugins/manifest-registry.test.ts b/src/plugins/manifest-registry.test.ts index 75ae9ef418c..356ca1f2074 100644 --- a/src/plugins/manifest-registry.test.ts +++ b/src/plugins/manifest-registry.test.ts @@ -167,4 +167,70 @@ describe("loadPluginManifestRegistry", () => { expect(registry.plugins.length).toBe(1); expect(registry.plugins[0]?.origin).toBe("config"); }); + + it("rejects manifest paths that escape plugin root via symlink", () => { + const rootDir = makeTempDir(); + const outsideDir = makeTempDir(); + const outsideManifest = path.join(outsideDir, "openclaw.plugin.json"); + const linkedManifest = path.join(rootDir, "openclaw.plugin.json"); + fs.writeFileSync(path.join(rootDir, "index.ts"), "export default function () {}", "utf-8"); + fs.writeFileSync( + outsideManifest, + JSON.stringify({ id: "unsafe-symlink", configSchema: { type: "object" } }), + "utf-8", + ); + try { + fs.symlinkSync(outsideManifest, linkedManifest); + } catch { + return; + } 
+ + const registry = loadRegistry([ + createPluginCandidate({ + idHint: "unsafe-symlink", + rootDir, + origin: "workspace", + }), + ]); + expect(registry.plugins).toHaveLength(0); + expect( + registry.diagnostics.some((diag) => diag.message.includes("unsafe plugin manifest path")), + ).toBe(true); + }); + + it("rejects manifest paths that escape plugin root via hardlink", () => { + if (process.platform === "win32") { + return; + } + const rootDir = makeTempDir(); + const outsideDir = makeTempDir(); + const outsideManifest = path.join(outsideDir, "openclaw.plugin.json"); + const linkedManifest = path.join(rootDir, "openclaw.plugin.json"); + fs.writeFileSync(path.join(rootDir, "index.ts"), "export default function () {}", "utf-8"); + fs.writeFileSync( + outsideManifest, + JSON.stringify({ id: "unsafe-hardlink", configSchema: { type: "object" } }), + "utf-8", + ); + try { + fs.linkSync(outsideManifest, linkedManifest); + } catch (err) { + if ((err as NodeJS.ErrnoException).code === "EXDEV") { + return; + } + throw err; + } + + const registry = loadRegistry([ + createPluginCandidate({ + idHint: "unsafe-hardlink", + rootDir, + origin: "workspace", + }), + ]); + expect(registry.plugins).toHaveLength(0); + expect( + registry.diagnostics.some((diag) => diag.message.includes("unsafe plugin manifest path")), + ).toBe(true); + }); }); diff --git a/src/plugins/manifest.ts b/src/plugins/manifest.ts index 7840733f10f..b507ffd11f3 100644 --- a/src/plugins/manifest.ts +++ b/src/plugins/manifest.ts @@ -1,6 +1,7 @@ import fs from "node:fs"; import path from "node:path"; import { MANIFEST_KEY } from "../compat/legacy-names.js"; +import { openBoundaryFileSync } from "../infra/boundary-file-read.js"; import { isRecord } from "../utils.js"; import type { PluginConfigUiHint, PluginKind } from "./types.js"; @@ -43,18 +44,32 @@ export function resolvePluginManifestPath(rootDir: string): string { export function loadPluginManifest(rootDir: string): PluginManifestLoadResult { const 
manifestPath = resolvePluginManifestPath(rootDir); - if (!fs.existsSync(manifestPath)) { - return { ok: false, error: `plugin manifest not found: ${manifestPath}`, manifestPath }; + const opened = openBoundaryFileSync({ + absolutePath: manifestPath, + rootPath: rootDir, + boundaryLabel: "plugin root", + }); + if (!opened.ok) { + if (opened.reason === "path") { + return { ok: false, error: `plugin manifest not found: ${manifestPath}`, manifestPath }; + } + return { + ok: false, + error: `unsafe plugin manifest path: ${manifestPath} (${opened.reason})`, + manifestPath, + }; } let raw: unknown; try { - raw = JSON.parse(fs.readFileSync(manifestPath, "utf-8")) as unknown; + raw = JSON.parse(fs.readFileSync(opened.fd, "utf-8")) as unknown; } catch (err) { return { ok: false, error: `failed to parse plugin manifest: ${String(err)}`, manifestPath, }; + } finally { + fs.closeSync(opened.fd); } if (!isRecord(raw)) { return { ok: false, error: "plugin manifest must be an object", manifestPath }; diff --git a/src/plugins/path-safety.ts b/src/plugins/path-safety.ts index 48c2da8e6fa..7935312cbe4 100644 --- a/src/plugins/path-safety.ts +++ b/src/plugins/path-safety.ts @@ -1,12 +1,8 @@ import fs from "node:fs"; -import path from "node:path"; +import { isPathInside as isBoundaryPathInside } from "../infra/path-guards.js"; export function isPathInside(baseDir: string, targetPath: string): boolean { - const rel = path.relative(baseDir, targetPath); - if (!rel) { - return true; - } - return !rel.startsWith("..") && !path.isAbsolute(rel); + return isBoundaryPathInside(baseDir, targetPath); } export function safeRealpathSync(targetPath: string, cache?: Map): string | null { diff --git a/src/plugins/runtime/index.ts b/src/plugins/runtime/index.ts index edfae611e7f..cba4e9f6d00 100644 --- a/src/plugins/runtime/index.ts +++ b/src/plugins/runtime/index.ts @@ -17,6 +17,7 @@ import { shouldComputeCommandAuthorized, } from "../../auto-reply/command-detection.js"; import { 
shouldHandleTextCommands } from "../../auto-reply/commands-registry.js"; +import { withReplyDispatcher } from "../../auto-reply/dispatch.js"; import { formatAgentEnvelope, formatInboundEnvelope, @@ -304,6 +305,7 @@ function createRuntimeChannel(): PluginRuntime["channel"] { resolveEffectiveMessagesConfig, resolveHumanDelayConfig, dispatchReplyFromConfig, + withReplyDispatcher, finalizeInboundContext, formatAgentEnvelope, /** @deprecated Prefer `BodyForAgent` + structured user-context blocks (do not build plaintext envelopes for prompts). */ @@ -315,8 +317,17 @@ function createRuntimeChannel(): PluginRuntime["channel"] { }, pairing: { buildPairingReply, - readAllowFromStore: readChannelAllowFromStore, - upsertPairingRequest: upsertChannelPairingRequest, + readAllowFromStore: ({ channel, accountId, env }) => + readChannelAllowFromStore(channel, env, accountId), + upsertPairingRequest: ({ channel, id, accountId, meta, env, pairingAdapter }) => + upsertChannelPairingRequest({ + channel, + id, + accountId, + meta, + env, + pairingAdapter, + }), }, media: { fetchRemoteMedia, diff --git a/src/plugins/runtime/types.ts b/src/plugins/runtime/types.ts index 71b85d6f12a..39ada4cd431 100644 --- a/src/plugins/runtime/types.ts +++ b/src/plugins/runtime/types.ts @@ -14,6 +14,14 @@ type ReadChannelAllowFromStore = typeof import("../../pairing/pairing-store.js").readChannelAllowFromStore; type UpsertChannelPairingRequest = typeof import("../../pairing/pairing-store.js").upsertChannelPairingRequest; +type ReadChannelAllowFromStoreForAccount = (params: { + channel: Parameters[0]; + accountId: string; + env?: Parameters[1]; +}) => ReturnType; +type UpsertChannelPairingRequestForAccount = ( + params: Omit[0], "accountId"> & { accountId: string }, +) => ReturnType; type FetchRemoteMedia = typeof import("../../media/fetch.js").fetchRemoteMedia; type SaveMediaBuffer = typeof import("../../media/store.js").saveMediaBuffer; type TextToSpeechTelephony = typeof 
import("../../tts/tts.js").textToSpeechTelephony; @@ -55,6 +63,7 @@ type ShouldHandleTextCommands = typeof import("../../auto-reply/commands-registry.js").shouldHandleTextCommands; type DispatchReplyFromConfig = typeof import("../../auto-reply/reply/dispatch-from-config.js").dispatchReplyFromConfig; +type WithReplyDispatcher = typeof import("../../auto-reply/dispatch.js").withReplyDispatcher; type FinalizeInboundContext = typeof import("../../auto-reply/reply/inbound-context.js").finalizeInboundContext; type FormatAgentEnvelope = typeof import("../../auto-reply/envelope.js").formatAgentEnvelope; @@ -222,6 +231,7 @@ export type PluginRuntime = { resolveEffectiveMessagesConfig: ResolveEffectiveMessagesConfig; resolveHumanDelayConfig: ResolveHumanDelayConfig; dispatchReplyFromConfig: DispatchReplyFromConfig; + withReplyDispatcher: WithReplyDispatcher; finalizeInboundContext: FinalizeInboundContext; formatAgentEnvelope: FormatAgentEnvelope; /** @deprecated Prefer `BodyForAgent` + structured user-context blocks (do not build plaintext envelopes for prompts). 
*/ @@ -233,8 +243,8 @@ export type PluginRuntime = { }; pairing: { buildPairingReply: BuildPairingReply; - readAllowFromStore: ReadChannelAllowFromStore; - upsertPairingRequest: UpsertChannelPairingRequest; + readAllowFromStore: ReadChannelAllowFromStoreForAccount; + upsertPairingRequest: UpsertChannelPairingRequestForAccount; }; media: { fetchRemoteMedia: FetchRemoteMedia; diff --git a/src/plugins/update.ts b/src/plugins/update.ts index 78568e54c57..2ba71158065 100644 --- a/src/plugins/update.ts +++ b/src/plugins/update.ts @@ -1,11 +1,12 @@ -import fs from "node:fs/promises"; +import fsSync from "node:fs"; +import path from "node:path"; import type { OpenClawConfig } from "../config/config.js"; +import { openBoundaryFileSync } from "../infra/boundary-file-read.js"; import type { UpdateChannel } from "../infra/update-channels.js"; import { resolveUserPath } from "../utils.js"; -import { discoverOpenClawPlugins } from "./discovery.js"; +import { resolveBundledPluginSources } from "./bundled-sources.js"; import { installPluginFromNpmSpec, resolvePluginInstallDir } from "./install.js"; import { buildNpmResolutionInstallFields, recordPluginInstall } from "./installs.js"; -import { loadPluginManifest } from "./manifest.js"; export type PluginUpdateLogger = { info?: (message: string) => void; @@ -52,12 +53,6 @@ export type PluginChannelSyncResult = { summary: PluginChannelSyncSummary; }; -type BundledPluginSource = { - pluginId: string; - localPath: string; - npmSpec?: string; -}; - type InstallIntegrityDrift = { spec: string; expectedIntegrity: string; @@ -69,49 +64,26 @@ type InstallIntegrityDrift = { }; async function readInstalledPackageVersion(dir: string): Promise { + const manifestPath = path.join(dir, "package.json"); + const opened = openBoundaryFileSync({ + absolutePath: manifestPath, + rootPath: dir, + boundaryLabel: "installed plugin directory", + }); + if (!opened.ok) { + return undefined; + } try { - const raw = await fs.readFile(`${dir}/package.json`, 
"utf-8"); + const raw = fsSync.readFileSync(opened.fd, "utf-8"); const parsed = JSON.parse(raw) as { version?: unknown }; return typeof parsed.version === "string" ? parsed.version : undefined; } catch { return undefined; + } finally { + fsSync.closeSync(opened.fd); } } -function resolveBundledPluginSources(params: { - workspaceDir?: string; -}): Map { - const discovery = discoverOpenClawPlugins({ workspaceDir: params.workspaceDir }); - const bundled = new Map(); - - for (const candidate of discovery.candidates) { - if (candidate.origin !== "bundled") { - continue; - } - const manifest = loadPluginManifest(candidate.rootDir); - if (!manifest.ok) { - continue; - } - const pluginId = manifest.manifest.id; - if (bundled.has(pluginId)) { - continue; - } - - const npmSpec = - candidate.packageManifest?.install?.npmSpec?.trim() || - candidate.packageName?.trim() || - undefined; - - bundled.set(pluginId, { - pluginId, - localPath: candidate.rootDir, - npmSpec, - }); - } - - return bundled; -} - function pathsEqual(left?: string, right?: string): boolean { if (!left || !right) { return false; diff --git a/src/plugins/wired-hooks-compaction.test.ts b/src/plugins/wired-hooks-compaction.test.ts index 05e63a2b2f9..7ba3c3ad090 100644 --- a/src/plugins/wired-hooks-compaction.test.ts +++ b/src/plugins/wired-hooks-compaction.test.ts @@ -2,6 +2,8 @@ * Test: before_compaction & after_compaction hook wiring */ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { makeZeroUsageSnapshot } from "../agents/usage.js"; +import { emitAgentEvent } from "../infra/agent-events.js"; const hookMocks = vi.hoisted(() => ({ runner: { @@ -35,6 +37,7 @@ describe("compaction hook wiring", () => { hookMocks.runner.runBeforeCompaction.mockResolvedValue(undefined); hookMocks.runner.runAfterCompaction.mockClear(); hookMocks.runner.runAfterCompaction.mockResolvedValue(undefined); + vi.mocked(emitAgentEvent).mockClear(); }); it("calls runBeforeCompaction in 
handleAutoCompactionStart", () => { @@ -45,6 +48,7 @@ describe("compaction hook wiring", () => { runId: "r1", sessionKey: "agent:main:web-abc123", session: { messages: [1, 2, 3], sessionFile: "/tmp/test.jsonl" }, + onAgentEvent: vi.fn(), }, state: { compactionInFlight: false }, log: { debug: vi.fn(), warn: vi.fn() }, @@ -67,6 +71,16 @@ describe("compaction hook wiring", () => { expect(event?.sessionFile).toBe("/tmp/test.jsonl"); const hookCtx = beforeCalls[0]?.[1] as { sessionKey?: string } | undefined; expect(hookCtx?.sessionKey).toBe("agent:main:web-abc123"); + expect(ctx.ensureCompactionPromise).toHaveBeenCalledTimes(1); + expect(emitAgentEvent).toHaveBeenCalledWith({ + runId: "r1", + stream: "compaction", + data: { phase: "start" }, + }); + expect(ctx.params.onAgentEvent).toHaveBeenCalledWith({ + stream: "compaction", + data: { phase: "start" }, + }); }); it("calls runAfterCompaction when willRetry is false", () => { @@ -77,6 +91,7 @@ describe("compaction hook wiring", () => { state: { compactionInFlight: true }, log: { debug: vi.fn(), warn: vi.fn() }, maybeResolveCompactionWait: vi.fn(), + incrementCompactionCount: vi.fn(), getCompactionCount: () => 1, }; @@ -98,6 +113,13 @@ describe("compaction hook wiring", () => { | undefined; expect(event?.messageCount).toBe(2); expect(event?.compactedCount).toBe(1); + expect(ctx.incrementCompactionCount).toHaveBeenCalledTimes(1); + expect(ctx.maybeResolveCompactionWait).toHaveBeenCalledTimes(1); + expect(emitAgentEvent).toHaveBeenCalledWith({ + runId: "r2", + stream: "compaction", + data: { phase: "end", willRetry: false }, + }); }); it("does not call runAfterCompaction when willRetry is true", () => { @@ -109,6 +131,82 @@ describe("compaction hook wiring", () => { log: { debug: vi.fn(), warn: vi.fn() }, noteCompactionRetry: vi.fn(), resetForCompactionRetry: vi.fn(), + maybeResolveCompactionWait: vi.fn(), + getCompactionCount: () => 0, + }; + + handleAutoCompactionEnd( + ctx as never, + { + type: "auto_compaction_end", + 
willRetry: true, + } as never, + ); + + expect(hookMocks.runner.runAfterCompaction).not.toHaveBeenCalled(); + expect(ctx.noteCompactionRetry).toHaveBeenCalledTimes(1); + expect(ctx.resetForCompactionRetry).toHaveBeenCalledTimes(1); + expect(ctx.maybeResolveCompactionWait).not.toHaveBeenCalled(); + expect(emitAgentEvent).toHaveBeenCalledWith({ + runId: "r3", + stream: "compaction", + data: { phase: "end", willRetry: true }, + }); + }); + + it("resets stale assistant usage after final compaction", () => { + const messages = [ + { role: "user", content: "hello" }, + { + role: "assistant", + content: "response one", + usage: { totalTokens: 180_000, input: 100, output: 50 }, + }, + { + role: "assistant", + content: "response two", + usage: { totalTokens: 181_000, input: 120, output: 60 }, + }, + ]; + + const ctx = { + params: { runId: "r4", session: { messages } }, + state: { compactionInFlight: true }, + log: { debug: vi.fn(), warn: vi.fn() }, + maybeResolveCompactionWait: vi.fn(), + getCompactionCount: () => 1, + incrementCompactionCount: vi.fn(), + }; + + handleAutoCompactionEnd( + ctx as never, + { + type: "auto_compaction_end", + willRetry: false, + } as never, + ); + + const assistantOne = messages[1] as { usage?: unknown }; + const assistantTwo = messages[2] as { usage?: unknown }; + expect(assistantOne.usage).toEqual(makeZeroUsageSnapshot()); + expect(assistantTwo.usage).toEqual(makeZeroUsageSnapshot()); + }); + + it("does not clear assistant usage while compaction is retrying", () => { + const messages = [ + { + role: "assistant", + content: "response", + usage: { totalTokens: 184_297, input: 130_000, output: 2_000 }, + }, + ]; + + const ctx = { + params: { runId: "r5", session: { messages } }, + state: { compactionInFlight: true }, + log: { debug: vi.fn(), warn: vi.fn() }, + noteCompactionRetry: vi.fn(), + resetForCompactionRetry: vi.fn(), getCompactionCount: () => 0, }; @@ -120,6 +218,7 @@ describe("compaction hook wiring", () => { } as never, ); - 
expect(hookMocks.runner.runAfterCompaction).not.toHaveBeenCalled(); + const assistant = messages[0] as { usage?: unknown }; + expect(assistant.usage).toEqual({ totalTokens: 184_297, input: 130_000, output: 2_000 }); }); }); diff --git a/src/process/command-queue.test.ts b/src/process/command-queue.test.ts index 6c0a1f57f91..16766eabcd3 100644 --- a/src/process/command-queue.test.ts +++ b/src/process/command-queue.test.ts @@ -21,8 +21,10 @@ import { CommandLaneClearedError, enqueueCommand, enqueueCommandInLane, + GatewayDrainingError, getActiveTaskCount, getQueueSize, + markGatewayDraining, resetAllLanes, setCommandLaneConcurrency, waitForActiveTasks, @@ -52,6 +54,7 @@ function enqueueBlockedMainTask( describe("command queue", () => { beforeEach(() => { + resetAllLanes(); diagnosticMocks.logLaneEnqueue.mockClear(); diagnosticMocks.logLaneDequeue.mockClear(); diagnosticMocks.diag.debug.mockClear(); @@ -288,4 +291,47 @@ describe("command queue", () => { release(); await expect(first).resolves.toBe("first"); }); + + it("keeps draining functional after synchronous onWait failure", async () => { + const lane = `drain-sync-throw-${Date.now()}-${Math.random().toString(16).slice(2)}`; + setCommandLaneConcurrency(lane, 1); + + const deferred = createDeferred(); + const first = enqueueCommandInLane(lane, async () => { + await deferred.promise; + return "first"; + }); + const second = enqueueCommandInLane(lane, async () => "second", { + warnAfterMs: 0, + onWait: () => { + throw new Error("onWait exploded"); + }, + }); + await Promise.resolve(); + expect(getQueueSize(lane)).toBeGreaterThanOrEqual(2); + + deferred.resolve(); + await expect(first).resolves.toBe("first"); + await expect(second).resolves.toBe("second"); + }); + + it("rejects new enqueues with GatewayDrainingError after markGatewayDraining", async () => { + markGatewayDraining(); + await expect(enqueueCommand(async () => "blocked")).rejects.toBeInstanceOf( + GatewayDrainingError, + ); + }); + + it("does not affect 
already-active tasks after markGatewayDraining", async () => { + const { task, release } = enqueueBlockedMainTask(async () => "ok"); + markGatewayDraining(); + release(); + await expect(task).resolves.toBe("ok"); + }); + + it("resetAllLanes clears gateway draining flag and re-allows enqueue", async () => { + markGatewayDraining(); + resetAllLanes(); + await expect(enqueueCommand(async () => "ok")).resolves.toBe("ok"); + }); }); diff --git a/src/process/command-queue.ts b/src/process/command-queue.ts index 9ee4c741719..7b4a386bdad 100644 --- a/src/process/command-queue.ts +++ b/src/process/command-queue.ts @@ -12,6 +12,20 @@ export class CommandLaneClearedError extends Error { } } +/** + * Dedicated error type thrown when a new command is rejected because the + * gateway is currently draining for restart. + */ +export class GatewayDrainingError extends Error { + constructor() { + super("Gateway is draining for restart; new tasks are not accepted"); + this.name = "GatewayDrainingError"; + } +} + +// Set while gateway is draining for restart; new enqueues are rejected. +let gatewayDraining = false; + // Minimal in-process queue to serialize command executions. // Default lane ("main") preserves the existing behavior. Additional lanes allow // low-risk parallelism (e.g. 
cron jobs) without interleaving stdin / logs for @@ -66,57 +80,77 @@ function completeTask(state: LaneState, taskId: number, taskGeneration: number): function drainLane(lane: string) { const state = getLaneState(lane); if (state.draining) { + if (state.activeTaskIds.size === 0 && state.queue.length > 0) { + diag.warn( + `drainLane blocked: lane=${lane} draining=true active=0 queue=${state.queue.length}`, + ); + } return; } state.draining = true; const pump = () => { - while (state.activeTaskIds.size < state.maxConcurrent && state.queue.length > 0) { - const entry = state.queue.shift() as QueueEntry; - const waitedMs = Date.now() - entry.enqueuedAt; - if (waitedMs >= entry.warnAfterMs) { - entry.onWait?.(waitedMs, state.queue.length); - diag.warn( - `lane wait exceeded: lane=${lane} waitedMs=${waitedMs} queueAhead=${state.queue.length}`, - ); - } - logLaneDequeue(lane, waitedMs, state.queue.length); - const taskId = nextTaskId++; - const taskGeneration = state.generation; - state.activeTaskIds.add(taskId); - void (async () => { - const startTime = Date.now(); - try { - const result = await entry.task(); - const completedCurrentGeneration = completeTask(state, taskId, taskGeneration); - if (completedCurrentGeneration) { - diag.debug( - `lane task done: lane=${lane} durationMs=${Date.now() - startTime} active=${state.activeTaskIds.size} queued=${state.queue.length}`, - ); - pump(); + try { + while (state.activeTaskIds.size < state.maxConcurrent && state.queue.length > 0) { + const entry = state.queue.shift() as QueueEntry; + const waitedMs = Date.now() - entry.enqueuedAt; + if (waitedMs >= entry.warnAfterMs) { + try { + entry.onWait?.(waitedMs, state.queue.length); + } catch (err) { + diag.error(`lane onWait callback failed: lane=${lane} error="${String(err)}"`); } - entry.resolve(result); - } catch (err) { - const completedCurrentGeneration = completeTask(state, taskId, taskGeneration); - const isProbeLane = lane.startsWith("auth-probe:") || 
lane.startsWith("session:probe-"); - if (!isProbeLane) { - diag.error( - `lane task error: lane=${lane} durationMs=${Date.now() - startTime} error="${String(err)}"`, - ); - } - if (completedCurrentGeneration) { - pump(); - } - entry.reject(err); + diag.warn( + `lane wait exceeded: lane=${lane} waitedMs=${waitedMs} queueAhead=${state.queue.length}`, + ); } - })(); + logLaneDequeue(lane, waitedMs, state.queue.length); + const taskId = nextTaskId++; + const taskGeneration = state.generation; + state.activeTaskIds.add(taskId); + void (async () => { + const startTime = Date.now(); + try { + const result = await entry.task(); + const completedCurrentGeneration = completeTask(state, taskId, taskGeneration); + if (completedCurrentGeneration) { + diag.debug( + `lane task done: lane=${lane} durationMs=${Date.now() - startTime} active=${state.activeTaskIds.size} queued=${state.queue.length}`, + ); + pump(); + } + entry.resolve(result); + } catch (err) { + const completedCurrentGeneration = completeTask(state, taskId, taskGeneration); + const isProbeLane = lane.startsWith("auth-probe:") || lane.startsWith("session:probe-"); + if (!isProbeLane) { + diag.error( + `lane task error: lane=${lane} durationMs=${Date.now() - startTime} error="${String(err)}"`, + ); + } + if (completedCurrentGeneration) { + pump(); + } + entry.reject(err); + } + })(); + } + } finally { + state.draining = false; } - state.draining = false; }; pump(); } +/** + * Mark gateway as draining for restart so new enqueues fail fast with + * `GatewayDrainingError` instead of being silently killed on shutdown. 
+ */ +export function markGatewayDraining(): void { + gatewayDraining = true; +} + export function setCommandLaneConcurrency(lane: string, maxConcurrent: number) { const cleaned = lane.trim() || CommandLane.Main; const state = getLaneState(cleaned); @@ -132,6 +166,9 @@ export function enqueueCommandInLane( onWait?: (waitMs: number, queuedAhead: number) => void; }, ): Promise { + if (gatewayDraining) { + return Promise.reject(new GatewayDrainingError()); + } const cleaned = lane.trim() || CommandLane.Main; const warnAfterMs = opts?.warnAfterMs ?? 2_000; const state = getLaneState(cleaned); @@ -205,6 +242,7 @@ export function clearCommandLane(lane: string = CommandLane.Main) { * `enqueueCommandInLane()` call (which may never come). */ export function resetAllLanes(): void { + gatewayDraining = false; const lanesToDrain: string[] = []; for (const state of lanes.values()) { state.generation += 1; diff --git a/src/process/exec.test.ts b/src/process/exec.test.ts index 67c443cb2e2..22f6dbf7e43 100644 --- a/src/process/exec.test.ts +++ b/src/process/exec.test.ts @@ -110,7 +110,8 @@ describe("runCommandWithTimeout", () => { ], { timeoutMs: 7_000, - noOutputTimeoutMs: 450, + // Keep a generous idle budget; CI event-loop stalls can exceed 450ms. 
+ noOutputTimeoutMs: 900, }, ); diff --git a/src/process/exec.ts b/src/process/exec.ts index 6c4609e178e..9b42dfbf59c 100644 --- a/src/process/exec.ts +++ b/src/process/exec.ts @@ -46,7 +46,7 @@ export function shouldSpawnWithShell(params: { export async function runExec( command: string, args: string[], - opts: number | { timeoutMs?: number; maxBuffer?: number } = 10_000, + opts: number | { timeoutMs?: number; maxBuffer?: number; cwd?: string } = 10_000, ): Promise<{ stdout: string; stderr: string }> { const options = typeof opts === "number" @@ -54,6 +54,7 @@ export async function runExec( : { timeout: opts.timeoutMs, maxBuffer: opts.maxBuffer, + cwd: opts.cwd, encoding: "utf8" as const, }; try { diff --git a/src/routing/session-key.test.ts b/src/routing/session-key.test.ts index 41e659a9ff6..044b7b8a743 100644 --- a/src/routing/session-key.test.ts +++ b/src/routing/session-key.test.ts @@ -4,7 +4,11 @@ import { getSubagentDepth, isCronSessionKey, } from "../sessions/session-key-utils.js"; -import { classifySessionKeyShape } from "./session-key.js"; +import { + classifySessionKeyShape, + parseAgentSessionKey, + toAgentStoreSessionKey, +} from "./session-key.js"; describe("classifySessionKeyShape", () => { it("classifies empty keys as missing", () => { @@ -93,3 +97,21 @@ describe("deriveSessionChatType", () => { expect(deriveSessionChatType("")).toBe("unknown"); }); }); + +describe("session key canonicalization", () => { + it("parses agent keys case-insensitively and returns lowercase tokens", () => { + expect(parseAgentSessionKey("AGENT:Main:Hook:Webhook:42")).toEqual({ + agentId: "main", + rest: "hook:webhook:42", + }); + }); + + it("does not double-prefix already-qualified agent keys", () => { + expect( + toAgentStoreSessionKey({ + agentId: "main", + requestKey: "agent:main:main", + }), + ).toBe("agent:main:main"); + }); +}); diff --git a/src/routing/session-key.ts b/src/routing/session-key.ts index 73b10dfeb7c..50481e4bded 100644 --- 
a/src/routing/session-key.ts +++ b/src/routing/session-key.ts @@ -49,16 +49,17 @@ export function toAgentStoreSessionKey(params: { mainKey?: string | undefined; }): string { const raw = (params.requestKey ?? "").trim(); - if (!raw || raw === DEFAULT_MAIN_KEY) { + if (!raw || raw.toLowerCase() === DEFAULT_MAIN_KEY) { return buildAgentMainSessionKey({ agentId: params.agentId, mainKey: params.mainKey }); } + const parsed = parseAgentSessionKey(raw); + if (parsed) { + return `agent:${parsed.agentId}:${parsed.rest}`; + } const lowered = raw.toLowerCase(); if (lowered.startsWith("agent:")) { return lowered; } - if (lowered.startsWith("subagent:")) { - return `agent:${normalizeAgentId(params.agentId)}:${lowered}`; - } return `agent:${normalizeAgentId(params.agentId)}:${lowered}`; } diff --git a/src/secrets/apply.test.ts b/src/secrets/apply.test.ts new file mode 100644 index 00000000000..3395d6411b3 --- /dev/null +++ b/src/secrets/apply.test.ts @@ -0,0 +1,447 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, beforeEach, describe, expect, it } from "vitest"; +import { runSecretsApply } from "./apply.js"; +import type { SecretsApplyPlan } from "./plan.js"; + +function stripVolatileConfigMeta(input: string): Record { + const parsed = JSON.parse(input) as Record; + const meta = + parsed.meta && typeof parsed.meta === "object" && !Array.isArray(parsed.meta) + ? 
{ ...(parsed.meta as Record) } + : undefined; + if (meta && "lastTouchedAt" in meta) { + delete meta.lastTouchedAt; + } + if (meta) { + parsed.meta = meta; + } + return parsed; +} + +describe("secrets apply", () => { + let rootDir = ""; + let stateDir = ""; + let configPath = ""; + let authStorePath = ""; + let authJsonPath = ""; + let envPath = ""; + let env: NodeJS.ProcessEnv; + + beforeEach(async () => { + rootDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-apply-")); + stateDir = path.join(rootDir, ".openclaw"); + configPath = path.join(stateDir, "openclaw.json"); + authStorePath = path.join(stateDir, "agents", "main", "agent", "auth-profiles.json"); + authJsonPath = path.join(stateDir, "agents", "main", "agent", "auth.json"); + envPath = path.join(stateDir, ".env"); + env = { + OPENCLAW_STATE_DIR: stateDir, + OPENCLAW_CONFIG_PATH: configPath, + OPENAI_API_KEY: "sk-live-env", + }; + + await fs.mkdir(path.dirname(configPath), { recursive: true }); + await fs.mkdir(path.dirname(authStorePath), { recursive: true }); + + await fs.writeFile( + configPath, + `${JSON.stringify( + { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: "sk-openai-plaintext", + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + + await fs.writeFile( + authStorePath, + `${JSON.stringify( + { + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + key: "sk-openai-plaintext", + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + + await fs.writeFile( + authJsonPath, + `${JSON.stringify( + { + openai: { + type: "api_key", + key: "sk-openai-plaintext", + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + await fs.writeFile(envPath, "OPENAI_API_KEY=sk-openai-plaintext\nUNRELATED=value\n", "utf8"); + }); + + afterEach(async () => { + await fs.rm(rootDir, { recursive: true, force: true }); + }); + + it("preflights and applies one-way 
scrub without plaintext backups", async () => { + const plan: SecretsApplyPlan = { + version: 1, + protocolVersion: 1, + generatedAt: new Date().toISOString(), + generatedBy: "manual", + targets: [ + { + type: "models.providers.apiKey", + path: "models.providers.openai.apiKey", + providerId: "openai", + ref: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + ], + options: { + scrubEnv: true, + scrubAuthProfilesForProviderTargets: true, + scrubLegacyAuthJson: true, + }, + }; + + const dryRun = await runSecretsApply({ plan, env, write: false }); + expect(dryRun.mode).toBe("dry-run"); + expect(dryRun.changed).toBe(true); + + const applied = await runSecretsApply({ plan, env, write: true }); + expect(applied.mode).toBe("write"); + expect(applied.changed).toBe(true); + + const nextConfig = JSON.parse(await fs.readFile(configPath, "utf8")) as { + models: { providers: { openai: { apiKey: unknown } } }; + }; + expect(nextConfig.models.providers.openai.apiKey).toEqual({ + source: "env", + provider: "default", + id: "OPENAI_API_KEY", + }); + + const nextAuthStore = JSON.parse(await fs.readFile(authStorePath, "utf8")) as { + profiles: { "openai:default": { key?: string; keyRef?: unknown } }; + }; + expect(nextAuthStore.profiles["openai:default"].key).toBeUndefined(); + expect(nextAuthStore.profiles["openai:default"].keyRef).toBeUndefined(); + + const nextAuthJson = JSON.parse(await fs.readFile(authJsonPath, "utf8")) as Record< + string, + unknown + >; + expect(nextAuthJson.openai).toBeUndefined(); + + const nextEnv = await fs.readFile(envPath, "utf8"); + expect(nextEnv).not.toContain("sk-openai-plaintext"); + expect(nextEnv).toContain("UNRELATED=value"); + }); + + it("is idempotent on repeated write applies", async () => { + const plan: SecretsApplyPlan = { + version: 1, + protocolVersion: 1, + generatedAt: new Date().toISOString(), + generatedBy: "manual", + targets: [ + { + type: "models.providers.apiKey", + path: "models.providers.openai.apiKey", + 
providerId: "openai", + ref: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + ], + options: { + scrubEnv: true, + scrubAuthProfilesForProviderTargets: true, + scrubLegacyAuthJson: true, + }, + }; + + const first = await runSecretsApply({ plan, env, write: true }); + expect(first.changed).toBe(true); + const configAfterFirst = await fs.readFile(configPath, "utf8"); + const authStoreAfterFirst = await fs.readFile(authStorePath, "utf8"); + const authJsonAfterFirst = await fs.readFile(authJsonPath, "utf8"); + const envAfterFirst = await fs.readFile(envPath, "utf8"); + + // Second apply should be a true no-op and avoid file writes entirely. + await fs.chmod(configPath, 0o400); + await fs.chmod(authStorePath, 0o400); + + const second = await runSecretsApply({ plan, env, write: true }); + expect(second.mode).toBe("write"); + const configAfterSecond = await fs.readFile(configPath, "utf8"); + expect(stripVolatileConfigMeta(configAfterSecond)).toEqual( + stripVolatileConfigMeta(configAfterFirst), + ); + await expect(fs.readFile(authStorePath, "utf8")).resolves.toBe(authStoreAfterFirst); + await expect(fs.readFile(authJsonPath, "utf8")).resolves.toBe(authJsonAfterFirst); + await expect(fs.readFile(envPath, "utf8")).resolves.toBe(envAfterFirst); + }); + + it("applies targets safely when map keys contain dots", async () => { + await fs.writeFile( + configPath, + `${JSON.stringify( + { + models: { + providers: { + "openai.dev": { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: "sk-openai-plaintext", + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + + const plan: SecretsApplyPlan = { + version: 1, + protocolVersion: 1, + generatedAt: new Date().toISOString(), + generatedBy: "manual", + targets: [ + { + type: "models.providers.apiKey", + path: "models.providers.openai.dev.apiKey", + pathSegments: ["models", "providers", "openai.dev", "apiKey"], + providerId: "openai.dev", + ref: 
{ source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + ], + options: { + scrubEnv: false, + scrubAuthProfilesForProviderTargets: false, + scrubLegacyAuthJson: false, + }, + }; + + const result = await runSecretsApply({ plan, env, write: true }); + expect(result.changed).toBe(true); + + const nextConfig = JSON.parse(await fs.readFile(configPath, "utf8")) as { + models?: { + providers?: Record; + }; + }; + expect(nextConfig.models?.providers?.["openai.dev"]?.apiKey).toEqual({ + source: "env", + provider: "default", + id: "OPENAI_API_KEY", + }); + expect(nextConfig.models?.providers?.openai).toBeUndefined(); + }); + + it("migrates skills entries apiKey targets alongside provider api keys", async () => { + await fs.writeFile( + configPath, + `${JSON.stringify( + { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: "sk-openai-plaintext", + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + skills: { + entries: { + "qa-secret-test": { + enabled: true, + apiKey: "sk-skill-plaintext", + }, + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + + const plan: SecretsApplyPlan = { + version: 1, + protocolVersion: 1, + generatedAt: new Date().toISOString(), + generatedBy: "manual", + targets: [ + { + type: "models.providers.apiKey", + path: "models.providers.openai.apiKey", + pathSegments: ["models", "providers", "openai", "apiKey"], + providerId: "openai", + ref: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + { + type: "skills.entries.apiKey", + path: "skills.entries.qa-secret-test.apiKey", + pathSegments: ["skills", "entries", "qa-secret-test", "apiKey"], + ref: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + ], + options: { + scrubEnv: true, + scrubAuthProfilesForProviderTargets: true, + scrubLegacyAuthJson: true, + }, + }; + + const result = await runSecretsApply({ plan, env, write: true }); + expect(result.changed).toBe(true); + + const 
nextConfig = JSON.parse(await fs.readFile(configPath, "utf8")) as { + models: { providers: { openai: { apiKey: unknown } } }; + skills: { entries: { "qa-secret-test": { apiKey: unknown } } }; + }; + expect(nextConfig.models.providers.openai.apiKey).toEqual({ + source: "env", + provider: "default", + id: "OPENAI_API_KEY", + }); + expect(nextConfig.skills.entries["qa-secret-test"].apiKey).toEqual({ + source: "env", + provider: "default", + id: "OPENAI_API_KEY", + }); + + const rawConfig = await fs.readFile(configPath, "utf8"); + expect(rawConfig).not.toContain("sk-openai-plaintext"); + expect(rawConfig).not.toContain("sk-skill-plaintext"); + }); + + it("rejects plan targets that do not match allowed secret-bearing paths", async () => { + const plan: SecretsApplyPlan = { + version: 1, + protocolVersion: 1, + generatedAt: new Date().toISOString(), + generatedBy: "manual", + targets: [ + { + type: "models.providers.apiKey", + path: "models.providers.openai.baseUrl", + pathSegments: ["models", "providers", "openai", "baseUrl"], + providerId: "openai", + ref: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + ], + }; + + await expect(runSecretsApply({ plan, env, write: false })).rejects.toThrow( + "Invalid plan target path", + ); + }); + + it("rejects plan targets with forbidden prototype-like path segments", async () => { + const plan: SecretsApplyPlan = { + version: 1, + protocolVersion: 1, + generatedAt: new Date().toISOString(), + generatedBy: "manual", + targets: [ + { + type: "skills.entries.apiKey", + path: "skills.entries.__proto__.apiKey", + pathSegments: ["skills", "entries", "__proto__", "apiKey"], + ref: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + ], + }; + + await expect(runSecretsApply({ plan, env, write: false })).rejects.toThrow( + "Invalid plan target path", + ); + }); + + it("applies provider upserts and deletes from plan", async () => { + await fs.writeFile( + configPath, + `${JSON.stringify( + { + secrets: { + 
providers: { + envmain: { source: "env" }, + fileold: { source: "file", path: "/tmp/old-secrets.json", mode: "json" }, + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + + const plan: SecretsApplyPlan = { + version: 1, + protocolVersion: 1, + generatedAt: new Date().toISOString(), + generatedBy: "manual", + providerUpserts: { + filemain: { + source: "file", + path: "/tmp/new-secrets.json", + mode: "json", + }, + }, + providerDeletes: ["fileold"], + targets: [], + }; + + const result = await runSecretsApply({ plan, env, write: true }); + expect(result.changed).toBe(true); + + const nextConfig = JSON.parse(await fs.readFile(configPath, "utf8")) as { + secrets?: { + providers?: Record; + }; + }; + expect(nextConfig.secrets?.providers?.fileold).toBeUndefined(); + expect(nextConfig.secrets?.providers?.filemain).toEqual({ + source: "file", + path: "/tmp/new-secrets.json", + mode: "json", + }); + }); +}); diff --git a/src/secrets/apply.ts b/src/secrets/apply.ts new file mode 100644 index 00000000000..18208ffe972 --- /dev/null +++ b/src/secrets/apply.ts @@ -0,0 +1,593 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { isDeepStrictEqual } from "node:util"; +import { listAgentIds, resolveAgentDir } from "../agents/agent-scope.js"; +import { loadAuthProfileStoreForSecretsRuntime } from "../agents/auth-profiles.js"; +import { resolveAuthStorePath } from "../agents/auth-profiles/paths.js"; +import { normalizeProviderId } from "../agents/model-selection.js"; +import { resolveStateDir, type OpenClawConfig } from "../config/config.js"; +import type { ConfigWriteOptions } from "../config/io.js"; +import type { SecretProviderConfig } from "../config/types.secrets.js"; +import { resolveConfigDir, resolveUserPath } from "../utils.js"; +import { createSecretsConfigIO } 
from "./config-io.js"; +import { + type SecretsApplyPlan, + type SecretsPlanTarget, + normalizeSecretsPlanOptions, + resolveValidatedTargetPathSegments, +} from "./plan.js"; +import { listKnownSecretEnvVarNames } from "./provider-env-vars.js"; +import { resolveSecretRefValue } from "./resolve.js"; +import { prepareSecretsRuntimeSnapshot } from "./runtime.js"; +import { isNonEmptyString, isRecord, writeTextFileAtomic } from "./shared.js"; + +type FileSnapshot = { + existed: boolean; + content: string; + mode: number; +}; + +type ApplyWrite = { + path: string; + content: string; + mode: number; +}; + +type ProjectedState = { + nextConfig: OpenClawConfig; + configPath: string; + configWriteOptions: ConfigWriteOptions; + authStoreByPath: Map>; + authJsonByPath: Map>; + envRawByPath: Map; + changedFiles: Set; + warnings: string[]; +}; + +export type SecretsApplyResult = { + mode: "dry-run" | "write"; + changed: boolean; + changedFiles: string[]; + warningCount: number; + warnings: string[]; +}; + +function getByPathSegments(root: unknown, segments: string[]): unknown { + if (segments.length === 0) { + return undefined; + } + let cursor: unknown = root; + for (const segment of segments) { + if (!isRecord(cursor)) { + return undefined; + } + cursor = cursor[segment]; + } + return cursor; +} + +function setByPathSegments(root: OpenClawConfig, segments: string[], value: unknown): boolean { + if (segments.length === 0) { + throw new Error("Target path is empty."); + } + let cursor: Record = root as unknown as Record; + let changed = false; + for (const segment of segments.slice(0, -1)) { + const existing = cursor[segment]; + if (!isRecord(existing)) { + cursor[segment] = {}; + changed = true; + } + cursor = cursor[segment] as Record; + } + const leaf = segments[segments.length - 1] ?? 
""; + const previous = cursor[leaf]; + if (!isDeepStrictEqual(previous, value)) { + cursor[leaf] = value; + changed = true; + } + return changed; +} + +function deleteByPathSegments(root: OpenClawConfig, segments: string[]): boolean { + if (segments.length === 0) { + return false; + } + let cursor: Record = root as unknown as Record; + for (const segment of segments.slice(0, -1)) { + const existing = cursor[segment]; + if (!isRecord(existing)) { + return false; + } + cursor = existing; + } + const leaf = segments[segments.length - 1] ?? ""; + if (!Object.prototype.hasOwnProperty.call(cursor, leaf)) { + return false; + } + delete cursor[leaf]; + return true; +} + +function resolveTargetPathSegments(target: SecretsPlanTarget): string[] { + const resolved = resolveValidatedTargetPathSegments(target); + if (!resolved) { + throw new Error(`Invalid plan target path for ${target.type}: ${target.path}`); + } + return resolved; +} + +function parseEnvValue(raw: string): string { + const trimmed = raw.trim(); + if ( + (trimmed.startsWith('"') && trimmed.endsWith('"')) || + (trimmed.startsWith("'") && trimmed.endsWith("'")) + ) { + return trimmed.slice(1, -1); + } + return trimmed; +} + +function scrubEnvRaw( + raw: string, + migratedValues: Set, + allowedEnvKeys: Set, +): { + nextRaw: string; + removed: number; +} { + if (migratedValues.size === 0 || allowedEnvKeys.size === 0) { + return { nextRaw: raw, removed: 0 }; + } + const lines = raw.split(/\r?\n/); + const nextLines: string[] = []; + let removed = 0; + for (const line of lines) { + const match = line.match(/^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=\s*(.*)$/); + if (!match) { + nextLines.push(line); + continue; + } + const envKey = match[1] ?? ""; + if (!allowedEnvKeys.has(envKey)) { + nextLines.push(line); + continue; + } + const parsedValue = parseEnvValue(match[2] ?? 
""); + if (migratedValues.has(parsedValue)) { + removed += 1; + continue; + } + nextLines.push(line); + } + const hadTrailingNewline = raw.endsWith("\n"); + const joined = nextLines.join("\n"); + return { + nextRaw: + hadTrailingNewline || joined.length === 0 + ? `${joined}${joined.endsWith("\n") ? "" : "\n"}` + : joined, + removed, + }; +} + +function collectAuthStorePaths(config: OpenClawConfig, stateDir: string): string[] { + const paths = new Set(); + // Scope default auth store discovery to the provided stateDir instead of + // ambient process env, so apply does not touch unrelated host-global stores. + paths.add(path.join(resolveUserPath(stateDir), "agents", "main", "agent", "auth-profiles.json")); + + const agentsRoot = path.join(resolveUserPath(stateDir), "agents"); + if (fs.existsSync(agentsRoot)) { + for (const entry of fs.readdirSync(agentsRoot, { withFileTypes: true })) { + if (!entry.isDirectory()) { + continue; + } + paths.add(path.join(agentsRoot, entry.name, "agent", "auth-profiles.json")); + } + } + + for (const agentId of listAgentIds(config)) { + if (agentId === "main") { + paths.add( + path.join(resolveUserPath(stateDir), "agents", "main", "agent", "auth-profiles.json"), + ); + continue; + } + const agentDir = resolveAgentDir(config, agentId); + paths.add(resolveUserPath(resolveAuthStorePath(agentDir))); + } + + return [...paths]; +} + +function collectAuthJsonPaths(stateDir: string): string[] { + const out: string[] = []; + const agentsRoot = path.join(resolveUserPath(stateDir), "agents"); + if (!fs.existsSync(agentsRoot)) { + return out; + } + for (const entry of fs.readdirSync(agentsRoot, { withFileTypes: true })) { + if (!entry.isDirectory()) { + continue; + } + const candidate = path.join(agentsRoot, entry.name, "agent", "auth.json"); + if (fs.existsSync(candidate)) { + out.push(candidate); + } + } + return out; +} + +function resolveGoogleChatRefPathSegments(pathSegments: string[]): string[] { + if (pathSegments.at(-1) === 
"serviceAccount") { + return [...pathSegments.slice(0, -1), "serviceAccountRef"]; + } + throw new Error( + `Google Chat target path must end with "serviceAccount": ${pathSegments.join(".")}`, + ); +} + +function applyProviderPlanMutations(params: { + config: OpenClawConfig; + upserts: Record | undefined; + deletes: string[] | undefined; +}): boolean { + const currentProviders = isRecord(params.config.secrets?.providers) + ? structuredClone(params.config.secrets?.providers) + : {}; + let changed = false; + + for (const providerAlias of params.deletes ?? []) { + if (!Object.prototype.hasOwnProperty.call(currentProviders, providerAlias)) { + continue; + } + delete currentProviders[providerAlias]; + changed = true; + } + + for (const [providerAlias, providerConfig] of Object.entries(params.upserts ?? {})) { + const previous = currentProviders[providerAlias]; + if (isDeepStrictEqual(previous, providerConfig)) { + continue; + } + currentProviders[providerAlias] = structuredClone(providerConfig); + changed = true; + } + + if (!changed) { + return false; + } + + params.config.secrets ??= {}; + if (Object.keys(currentProviders).length === 0) { + if ("providers" in params.config.secrets) { + delete params.config.secrets.providers; + } + return true; + } + params.config.secrets.providers = currentProviders; + return true; +} + +async function projectPlanState(params: { + plan: SecretsApplyPlan; + env: NodeJS.ProcessEnv; +}): Promise { + const io = createSecretsConfigIO({ env: params.env }); + const { snapshot, writeOptions } = await io.readConfigFileSnapshotForWrite(); + if (!snapshot.valid) { + throw new Error("Cannot apply secrets plan: config is invalid."); + } + const options = normalizeSecretsPlanOptions(params.plan.options); + const nextConfig = structuredClone(snapshot.config); + const stateDir = resolveStateDir(params.env, os.homedir); + const changedFiles = new Set(); + const warnings: string[] = []; + const scrubbedValues = new Set(); + const providerTargets = new 
Set(); + const configPath = resolveUserPath(snapshot.path); + + const providerConfigChanged = applyProviderPlanMutations({ + config: nextConfig, + upserts: params.plan.providerUpserts, + deletes: params.plan.providerDeletes, + }); + if (providerConfigChanged) { + changedFiles.add(configPath); + } + + for (const target of params.plan.targets) { + const targetPathSegments = resolveTargetPathSegments(target); + if (target.type === "channels.googlechat.serviceAccount") { + const previous = getByPathSegments(nextConfig, targetPathSegments); + if (isNonEmptyString(previous)) { + scrubbedValues.add(previous.trim()); + } + const refPathSegments = resolveGoogleChatRefPathSegments(targetPathSegments); + const wroteRef = setByPathSegments(nextConfig, refPathSegments, target.ref); + const deletedLegacy = deleteByPathSegments(nextConfig, targetPathSegments); + if (wroteRef || deletedLegacy) { + changedFiles.add(configPath); + } + continue; + } + + const previous = getByPathSegments(nextConfig, targetPathSegments); + if (isNonEmptyString(previous)) { + scrubbedValues.add(previous.trim()); + } + const wroteRef = setByPathSegments(nextConfig, targetPathSegments, target.ref); + if (wroteRef) { + changedFiles.add(configPath); + } + if (target.type === "models.providers.apiKey" && target.providerId) { + providerTargets.add(normalizeProviderId(target.providerId)); + } + } + + const authStoreByPath = new Map>(); + if (options.scrubAuthProfilesForProviderTargets && providerTargets.size > 0) { + for (const authStorePath of collectAuthStorePaths(nextConfig, stateDir)) { + if (!fs.existsSync(authStorePath)) { + continue; + } + const raw = fs.readFileSync(authStorePath, "utf8"); + const parsed = JSON.parse(raw) as unknown; + if (!isRecord(parsed) || !isRecord(parsed.profiles)) { + continue; + } + const nextStore = structuredClone(parsed) as Record & { + profiles: Record; + }; + let mutated = false; + for (const profileValue of Object.values(nextStore.profiles)) { + if 
(!isRecord(profileValue) || !isNonEmptyString(profileValue.provider)) { + continue; + } + const provider = normalizeProviderId(String(profileValue.provider)); + if (!providerTargets.has(provider)) { + continue; + } + if (profileValue.type === "api_key") { + if (isNonEmptyString(profileValue.key)) { + scrubbedValues.add(profileValue.key.trim()); + } + if ("key" in profileValue) { + delete profileValue.key; + mutated = true; + } + if ("keyRef" in profileValue) { + delete profileValue.keyRef; + mutated = true; + } + continue; + } + if (profileValue.type === "token") { + if (isNonEmptyString(profileValue.token)) { + scrubbedValues.add(profileValue.token.trim()); + } + if ("token" in profileValue) { + delete profileValue.token; + mutated = true; + } + if ("tokenRef" in profileValue) { + delete profileValue.tokenRef; + mutated = true; + } + continue; + } + if (profileValue.type === "oauth") { + warnings.push( + `Provider "${provider}" has OAuth credentials in ${authStorePath}; those still take precedence and are out of scope for static SecretRef migration.`, + ); + } + } + if (mutated) { + authStoreByPath.set(authStorePath, nextStore); + changedFiles.add(authStorePath); + } + } + } + + const authJsonByPath = new Map>(); + if (options.scrubLegacyAuthJson) { + for (const authJsonPath of collectAuthJsonPaths(stateDir)) { + const raw = fs.readFileSync(authJsonPath, "utf8"); + const parsed = JSON.parse(raw) as unknown; + if (!isRecord(parsed)) { + continue; + } + let mutated = false; + const nextParsed = structuredClone(parsed); + for (const [providerId, value] of Object.entries(nextParsed)) { + if (!isRecord(value)) { + continue; + } + if (value.type === "api_key" && isNonEmptyString(value.key)) { + delete nextParsed[providerId]; + mutated = true; + } + } + if (mutated) { + authJsonByPath.set(authJsonPath, nextParsed); + changedFiles.add(authJsonPath); + } + } + } + + const envRawByPath = new Map(); + if (options.scrubEnv && scrubbedValues.size > 0) { + const envPath = 
path.join(resolveConfigDir(params.env, os.homedir), ".env"); + if (fs.existsSync(envPath)) { + const current = fs.readFileSync(envPath, "utf8"); + const scrubbed = scrubEnvRaw(current, scrubbedValues, new Set(listKnownSecretEnvVarNames())); + if (scrubbed.removed > 0 && scrubbed.nextRaw !== current) { + envRawByPath.set(envPath, scrubbed.nextRaw); + changedFiles.add(envPath); + } + } + } + + const cache = {}; + for (const target of params.plan.targets) { + const resolved = await resolveSecretRefValue(target.ref, { + config: nextConfig, + env: params.env, + cache, + }); + if (target.type === "channels.googlechat.serviceAccount") { + if (!(isNonEmptyString(resolved) || isRecord(resolved))) { + throw new Error( + `Ref ${target.ref.source}:${target.ref.provider}:${target.ref.id} is not string/object.`, + ); + } + continue; + } + if (!isNonEmptyString(resolved)) { + throw new Error( + `Ref ${target.ref.source}:${target.ref.provider}:${target.ref.id} is not a non-empty string.`, + ); + } + } + + const authStoreLookup = new Map>(); + for (const [authStorePath, store] of authStoreByPath.entries()) { + authStoreLookup.set(resolveUserPath(authStorePath), store); + } + await prepareSecretsRuntimeSnapshot({ + config: nextConfig, + env: params.env, + loadAuthStore: (agentDir?: string) => { + const storePath = resolveUserPath(resolveAuthStorePath(agentDir)); + const override = authStoreLookup.get(storePath); + if (override) { + return structuredClone(override) as unknown as ReturnType< + typeof loadAuthProfileStoreForSecretsRuntime + >; + } + return loadAuthProfileStoreForSecretsRuntime(agentDir); + }, + }); + + return { + nextConfig, + configPath, + configWriteOptions: writeOptions, + authStoreByPath, + authJsonByPath, + envRawByPath, + changedFiles, + warnings, + }; +} + +function captureFileSnapshot(pathname: string): FileSnapshot { + if (!fs.existsSync(pathname)) { + return { existed: false, content: "", mode: 0o600 }; + } + const stat = fs.statSync(pathname); + return { + 
existed: true, + content: fs.readFileSync(pathname, "utf8"), + mode: stat.mode & 0o777, + }; +} + +function restoreFileSnapshot(pathname: string, snapshot: FileSnapshot): void { + if (!snapshot.existed) { + if (fs.existsSync(pathname)) { + fs.rmSync(pathname, { force: true }); + } + return; + } + writeTextFileAtomic(pathname, snapshot.content, snapshot.mode || 0o600); +} + +function toJsonWrite(pathname: string, value: Record): ApplyWrite { + return { + path: pathname, + content: `${JSON.stringify(value, null, 2)}\n`, + mode: 0o600, + }; +} + +export async function runSecretsApply(params: { + plan: SecretsApplyPlan; + env?: NodeJS.ProcessEnv; + write?: boolean; +}): Promise { + const env = params.env ?? process.env; + const projected = await projectPlanState({ plan: params.plan, env }); + const changedFiles = [...projected.changedFiles].toSorted(); + if (!params.write) { + return { + mode: "dry-run", + changed: changedFiles.length > 0, + changedFiles, + warningCount: projected.warnings.length, + warnings: projected.warnings, + }; + } + if (changedFiles.length === 0) { + return { + mode: "write", + changed: false, + changedFiles: [], + warningCount: projected.warnings.length, + warnings: projected.warnings, + }; + } + + const io = createSecretsConfigIO({ env }); + const snapshots = new Map(); + const capture = (pathname: string) => { + if (!snapshots.has(pathname)) { + snapshots.set(pathname, captureFileSnapshot(pathname)); + } + }; + + capture(projected.configPath); + const writes: ApplyWrite[] = []; + for (const [pathname, value] of projected.authStoreByPath.entries()) { + capture(pathname); + writes.push(toJsonWrite(pathname, value)); + } + for (const [pathname, value] of projected.authJsonByPath.entries()) { + capture(pathname); + writes.push(toJsonWrite(pathname, value)); + } + for (const [pathname, raw] of projected.envRawByPath.entries()) { + capture(pathname); + writes.push({ + path: pathname, + content: raw, + mode: 0o600, + }); + } + + try { + await 
io.writeConfigFile(projected.nextConfig, projected.configWriteOptions); + for (const write of writes) { + writeTextFileAtomic(write.path, write.content, write.mode); + } + } catch (err) { + for (const [pathname, snapshot] of snapshots.entries()) { + try { + restoreFileSnapshot(pathname, snapshot); + } catch { + // Best effort only; preserve original error. + } + } + throw new Error(`Secrets apply failed: ${String(err)}`, { cause: err }); + } + + return { + mode: "write", + changed: changedFiles.length > 0, + changedFiles, + warningCount: projected.warnings.length, + warnings: projected.warnings, + }; +} diff --git a/src/secrets/audit.test.ts b/src/secrets/audit.test.ts new file mode 100644 index 00000000000..44d4f385981 --- /dev/null +++ b/src/secrets/audit.test.ts @@ -0,0 +1,186 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, beforeEach, describe, expect, it } from "vitest"; +import { runSecretsAudit } from "./audit.js"; + +describe("secrets audit", () => { + let rootDir = ""; + let stateDir = ""; + let configPath = ""; + let authStorePath = ""; + let authJsonPath = ""; + let envPath = ""; + let env: NodeJS.ProcessEnv; + + beforeEach(async () => { + rootDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-audit-")); + stateDir = path.join(rootDir, ".openclaw"); + configPath = path.join(stateDir, "openclaw.json"); + authStorePath = path.join(stateDir, "agents", "main", "agent", "auth-profiles.json"); + authJsonPath = path.join(stateDir, "agents", "main", "agent", "auth.json"); + envPath = path.join(stateDir, ".env"); + env = { + OPENCLAW_STATE_DIR: stateDir, + OPENCLAW_CONFIG_PATH: configPath, + OPENAI_API_KEY: "env-openai-key", + ...(typeof process.env.PATH === "string" && process.env.PATH.trim().length > 0 + ? 
{ PATH: process.env.PATH } + : { PATH: "/usr/bin:/bin" }), + }; + + await fs.mkdir(path.dirname(configPath), { recursive: true }); + await fs.mkdir(path.dirname(authStorePath), { recursive: true }); + await fs.writeFile( + configPath, + `${JSON.stringify( + { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + await fs.writeFile( + authStorePath, + `${JSON.stringify( + { + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + key: "sk-openai-plaintext", + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + await fs.writeFile(envPath, "OPENAI_API_KEY=sk-openai-plaintext\n", "utf8"); + }); + + afterEach(async () => { + await fs.rm(rootDir, { recursive: true, force: true }); + }); + + it("reports plaintext + shadowing findings", async () => { + const report = await runSecretsAudit({ env }); + expect(report.status).toBe("findings"); + expect(report.summary.plaintextCount).toBeGreaterThan(0); + expect(report.summary.shadowedRefCount).toBeGreaterThan(0); + expect(report.findings.some((entry) => entry.code === "REF_SHADOWED")).toBe(true); + expect(report.findings.some((entry) => entry.code === "PLAINTEXT_FOUND")).toBe(true); + }); + + it("does not mutate legacy auth.json during audit", async () => { + await fs.rm(authStorePath, { force: true }); + await fs.writeFile( + authJsonPath, + `${JSON.stringify( + { + openai: { + type: "api_key", + key: "sk-legacy-auth-json", + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + + const report = await runSecretsAudit({ env }); + expect(report.findings.some((entry) => entry.code === "LEGACY_RESIDUE")).toBe(true); + await expect(fs.stat(authJsonPath)).resolves.toBeTruthy(); + await expect(fs.stat(authStorePath)).rejects.toMatchObject({ code: "ENOENT" }); + }); + + 
it("reports malformed sidecar JSON as findings instead of crashing", async () => { + await fs.writeFile(authStorePath, "{invalid-json", "utf8"); + await fs.writeFile(authJsonPath, "{invalid-json", "utf8"); + + const report = await runSecretsAudit({ env }); + expect(report.findings.some((entry) => entry.file === authStorePath)).toBe(true); + expect(report.findings.some((entry) => entry.file === authJsonPath)).toBe(true); + expect(report.findings.some((entry) => entry.code === "REF_UNRESOLVED")).toBe(true); + }); + + it("batches ref resolution per provider during audit", async () => { + if (process.platform === "win32") { + return; + } + const execLogPath = path.join(rootDir, "exec-calls.log"); + const execScriptPath = path.join(rootDir, "resolver.mjs"); + await fs.writeFile( + execScriptPath, + [ + "#!/usr/bin/env node", + "import fs from 'node:fs';", + "const req = JSON.parse(fs.readFileSync(0, 'utf8'));", + `fs.appendFileSync(${JSON.stringify(execLogPath)}, 'x\\n');`, + "const values = Object.fromEntries((req.ids ?? 
[]).map((id) => [id, `value:${id}`]));", + "process.stdout.write(JSON.stringify({ protocolVersion: 1, values }));", + ].join("\n"), + { encoding: "utf8", mode: 0o700 }, + ); + + await fs.writeFile( + configPath, + `${JSON.stringify( + { + secrets: { + providers: { + execmain: { + source: "exec", + command: execScriptPath, + jsonOnly: true, + passEnv: ["PATH"], + }, + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: { source: "exec", provider: "execmain", id: "providers/openai/apiKey" }, + models: [{ id: "gpt-5", name: "gpt-5" }], + }, + moonshot: { + baseUrl: "https://api.moonshot.cn/v1", + api: "openai-completions", + apiKey: { source: "exec", provider: "execmain", id: "providers/moonshot/apiKey" }, + models: [{ id: "moonshot-v1-8k", name: "moonshot-v1-8k" }], + }, + }, + }, + }, + null, + 2, + )}\n`, + "utf8", + ); + await fs.rm(authStorePath, { force: true }); + await fs.writeFile(envPath, "", "utf8"); + + const report = await runSecretsAudit({ env }); + expect(report.summary.unresolvedRefCount).toBe(0); + + const callLog = await fs.readFile(execLogPath, "utf8"); + const callCount = callLog.split("\n").filter((line) => line.trim().length > 0).length; + expect(callCount).toBe(1); + }); +}); diff --git a/src/secrets/audit.ts b/src/secrets/audit.ts new file mode 100644 index 00000000000..4cd71e12c9a --- /dev/null +++ b/src/secrets/audit.ts @@ -0,0 +1,755 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { listAgentIds, resolveAgentDir } from "../agents/agent-scope.js"; +import { resolveAuthStorePath } from "../agents/auth-profiles/paths.js"; +import { normalizeProviderId } from "../agents/model-selection.js"; +import { resolveStateDir, type OpenClawConfig } from "../config/config.js"; +import { coerceSecretRef, type SecretRef } from "../config/types.secrets.js"; +import { resolveConfigDir, resolveUserPath } from "../utils.js"; +import { 
createSecretsConfigIO } from "./config-io.js"; +import { listKnownSecretEnvVarNames } from "./provider-env-vars.js"; +import { secretRefKey } from "./ref-contract.js"; +import { + resolveSecretRefValue, + resolveSecretRefValues, + type SecretRefResolveCache, +} from "./resolve.js"; +import { isNonEmptyString, isRecord } from "./shared.js"; + +export type SecretsAuditCode = + | "PLAINTEXT_FOUND" + | "REF_UNRESOLVED" + | "REF_SHADOWED" + | "LEGACY_RESIDUE"; + +export type SecretsAuditSeverity = "info" | "warn" | "error"; + +export type SecretsAuditFinding = { + code: SecretsAuditCode; + severity: SecretsAuditSeverity; + file: string; + jsonPath: string; + message: string; + provider?: string; + profileId?: string; +}; + +export type SecretsAuditStatus = "clean" | "findings" | "unresolved"; + +export type SecretsAuditReport = { + version: 1; + status: SecretsAuditStatus; + filesScanned: string[]; + summary: { + plaintextCount: number; + unresolvedRefCount: number; + shadowedRefCount: number; + legacyResidueCount: number; + }; + findings: SecretsAuditFinding[]; +}; + +type RefAssignment = { + file: string; + path: string; + ref: SecretRef; + expected: "string" | "string-or-object"; + provider?: string; +}; + +type ProviderAuthState = { + hasUsableStaticOrOAuth: boolean; + modes: Set<"api_key" | "token" | "oauth">; +}; + +type SecretDefaults = { + env?: string; + file?: string; + exec?: string; +}; + +type AuditCollector = { + findings: SecretsAuditFinding[]; + refAssignments: RefAssignment[]; + configProviderRefPaths: Map; + authProviderState: Map; + filesScanned: Set; +}; + +function addFinding(collector: AuditCollector, finding: SecretsAuditFinding): void { + collector.findings.push(finding); +} + +function collectProviderRefPath( + collector: AuditCollector, + providerId: string, + configPath: string, +): void { + const key = normalizeProviderId(providerId); + const existing = collector.configProviderRefPaths.get(key); + if (existing) { + existing.push(configPath); 
+ return; + } + collector.configProviderRefPaths.set(key, [configPath]); +} + +function trackAuthProviderState( + collector: AuditCollector, + provider: string, + mode: "api_key" | "token" | "oauth", +): void { + const key = normalizeProviderId(provider); + const existing = collector.authProviderState.get(key); + if (existing) { + existing.hasUsableStaticOrOAuth = true; + existing.modes.add(mode); + return; + } + collector.authProviderState.set(key, { + hasUsableStaticOrOAuth: true, + modes: new Set([mode]), + }); +} + +function parseDotPath(pathname: string): string[] { + return pathname.split(".").filter(Boolean); +} + +function parseEnvValue(raw: string): string { + const trimmed = raw.trim(); + if ( + (trimmed.startsWith('"') && trimmed.endsWith('"')) || + (trimmed.startsWith("'") && trimmed.endsWith("'")) + ) { + return trimmed.slice(1, -1); + } + return trimmed; +} + +function collectEnvPlaintext(params: { envPath: string; collector: AuditCollector }): void { + if (!fs.existsSync(params.envPath)) { + return; + } + params.collector.filesScanned.add(params.envPath); + const knownKeys = new Set(listKnownSecretEnvVarNames()); + const raw = fs.readFileSync(params.envPath, "utf8"); + const lines = raw.split(/\r?\n/); + for (const line of lines) { + const match = line.match(/^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=\s*(.*)$/); + if (!match) { + continue; + } + const key = match[1] ?? ""; + if (!knownKeys.has(key)) { + continue; + } + const value = parseEnvValue(match[2] ?? 
""); + if (!value) { + continue; + } + addFinding(params.collector, { + code: "PLAINTEXT_FOUND", + severity: "warn", + file: params.envPath, + jsonPath: `$env.${key}`, + message: `Potential secret found in .env (${key}).`, + }); + } +} + +function readJsonObject(filePath: string): { + value: Record | null; + error?: string; +} { + if (!fs.existsSync(filePath)) { + return { value: null }; + } + try { + const raw = fs.readFileSync(filePath, "utf8"); + const parsed = JSON.parse(raw) as unknown; + if (!isRecord(parsed)) { + return { value: null }; + } + return { value: parsed }; + } catch (err) { + return { + value: null, + error: err instanceof Error ? err.message : String(err), + }; + } +} + +function collectConfigSecrets(params: { + config: OpenClawConfig; + configPath: string; + collector: AuditCollector; +}): void { + const defaults = params.config.secrets?.defaults; + const providers = params.config.models?.providers as + | Record + | undefined; + if (providers) { + for (const [providerId, provider] of Object.entries(providers)) { + const pathLabel = `models.providers.${providerId}.apiKey`; + const ref = coerceSecretRef(provider.apiKey, defaults); + if (ref) { + params.collector.refAssignments.push({ + file: params.configPath, + path: pathLabel, + ref, + expected: "string", + provider: providerId, + }); + collectProviderRefPath(params.collector, providerId, pathLabel); + continue; + } + if (isNonEmptyString(provider.apiKey)) { + addFinding(params.collector, { + code: "PLAINTEXT_FOUND", + severity: "warn", + file: params.configPath, + jsonPath: pathLabel, + message: "Provider apiKey is stored as plaintext.", + provider: providerId, + }); + } + } + } + + const entries = params.config.skills?.entries as Record | undefined; + if (entries) { + for (const [entryId, entry] of Object.entries(entries)) { + const pathLabel = `skills.entries.${entryId}.apiKey`; + const ref = coerceSecretRef(entry.apiKey, defaults); + if (ref) { + params.collector.refAssignments.push({ + 
file: params.configPath, + path: pathLabel, + ref, + expected: "string", + }); + continue; + } + if (isNonEmptyString(entry.apiKey)) { + addFinding(params.collector, { + code: "PLAINTEXT_FOUND", + severity: "warn", + file: params.configPath, + jsonPath: pathLabel, + message: "Skill apiKey is stored as plaintext.", + }); + } + } + } + + const googlechat = params.config.channels?.googlechat as + | { + serviceAccount?: unknown; + serviceAccountRef?: unknown; + accounts?: Record; + } + | undefined; + if (!googlechat) { + return; + } + + const collectGoogleChatValue = ( + value: unknown, + refValue: unknown, + pathLabel: string, + accountId?: string, + ) => { + const explicitRef = coerceSecretRef(refValue, defaults); + const inlineRef = explicitRef ? null : coerceSecretRef(value, defaults); + const ref = explicitRef ?? inlineRef; + if (ref) { + params.collector.refAssignments.push({ + file: params.configPath, + path: pathLabel, + ref, + expected: "string-or-object", + provider: accountId ? "googlechat" : undefined, + }); + return; + } + if (isNonEmptyString(value) || (isRecord(value) && Object.keys(value).length > 0)) { + addFinding(params.collector, { + code: "PLAINTEXT_FOUND", + severity: "warn", + file: params.configPath, + jsonPath: pathLabel, + message: "Google Chat serviceAccount is stored as plaintext.", + }); + } + }; + + collectGoogleChatValue( + googlechat.serviceAccount, + googlechat.serviceAccountRef, + "channels.googlechat.serviceAccount", + ); + if (!isRecord(googlechat.accounts)) { + return; + } + for (const [accountId, accountValue] of Object.entries(googlechat.accounts)) { + if (!isRecord(accountValue)) { + continue; + } + collectGoogleChatValue( + accountValue.serviceAccount, + accountValue.serviceAccountRef, + `channels.googlechat.accounts.${accountId}.serviceAccount`, + accountId, + ); + } +} + +function collectAuthStorePaths(config: OpenClawConfig, stateDir: string): string[] { + const paths = new Set(); + // Scope default auth store discovery to 
the provided stateDir instead of + // ambient process env, so audits do not include unrelated host-global stores. + paths.add(path.join(resolveUserPath(stateDir), "agents", "main", "agent", "auth-profiles.json")); + + const agentsRoot = path.join(resolveUserPath(stateDir), "agents"); + if (fs.existsSync(agentsRoot)) { + for (const entry of fs.readdirSync(agentsRoot, { withFileTypes: true })) { + if (!entry.isDirectory()) { + continue; + } + paths.add(path.join(agentsRoot, entry.name, "agent", "auth-profiles.json")); + } + } + + for (const agentId of listAgentIds(config)) { + if (agentId === "main") { + paths.add( + path.join(resolveUserPath(stateDir), "agents", "main", "agent", "auth-profiles.json"), + ); + continue; + } + const agentDir = resolveAgentDir(config, agentId); + paths.add(resolveUserPath(resolveAuthStorePath(agentDir))); + } + + return [...paths]; +} + +function collectAuthStoreSecrets(params: { + authStorePath: string; + collector: AuditCollector; + defaults?: SecretDefaults; +}): void { + if (!fs.existsSync(params.authStorePath)) { + return; + } + params.collector.filesScanned.add(params.authStorePath); + const parsedResult = readJsonObject(params.authStorePath); + if (parsedResult.error) { + addFinding(params.collector, { + code: "REF_UNRESOLVED", + severity: "error", + file: params.authStorePath, + jsonPath: "", + message: `Invalid JSON in auth-profiles store: ${parsedResult.error}`, + }); + return; + } + const parsed = parsedResult.value; + if (!parsed || !isRecord(parsed.profiles)) { + return; + } + for (const [profileId, profileValue] of Object.entries(parsed.profiles)) { + if (!isRecord(profileValue) || !isNonEmptyString(profileValue.provider)) { + continue; + } + const provider = String(profileValue.provider); + if (profileValue.type === "api_key") { + const keyRef = coerceSecretRef(profileValue.keyRef, params.defaults); + const inlineRef = keyRef ? null : coerceSecretRef(profileValue.key, params.defaults); + const ref = keyRef ?? 
inlineRef; + if (ref) { + params.collector.refAssignments.push({ + file: params.authStorePath, + path: `profiles.${profileId}.key`, + ref, + expected: "string", + provider, + }); + trackAuthProviderState(params.collector, provider, "api_key"); + } + if (isNonEmptyString(profileValue.key)) { + addFinding(params.collector, { + code: "PLAINTEXT_FOUND", + severity: "warn", + file: params.authStorePath, + jsonPath: `profiles.${profileId}.key`, + message: "Auth profile API key is stored as plaintext.", + provider, + profileId, + }); + trackAuthProviderState(params.collector, provider, "api_key"); + } + continue; + } + if (profileValue.type === "token") { + const tokenRef = coerceSecretRef(profileValue.tokenRef, params.defaults); + const inlineRef = tokenRef ? null : coerceSecretRef(profileValue.token, params.defaults); + const ref = tokenRef ?? inlineRef; + if (ref) { + params.collector.refAssignments.push({ + file: params.authStorePath, + path: `profiles.${profileId}.token`, + ref, + expected: "string", + provider, + }); + trackAuthProviderState(params.collector, provider, "token"); + } + if (isNonEmptyString(profileValue.token)) { + addFinding(params.collector, { + code: "PLAINTEXT_FOUND", + severity: "warn", + file: params.authStorePath, + jsonPath: `profiles.${profileId}.token`, + message: "Auth profile token is stored as plaintext.", + provider, + profileId, + }); + trackAuthProviderState(params.collector, provider, "token"); + } + continue; + } + if (profileValue.type === "oauth") { + const hasAccess = isNonEmptyString(profileValue.access); + const hasRefresh = isNonEmptyString(profileValue.refresh); + if (hasAccess || hasRefresh) { + addFinding(params.collector, { + code: "LEGACY_RESIDUE", + severity: "info", + file: params.authStorePath, + jsonPath: `profiles.${profileId}`, + message: "OAuth credentials are present (out of scope for static SecretRef migration).", + provider, + profileId, + }); + trackAuthProviderState(params.collector, provider, "oauth"); + } + } 
+ } +} + +function collectAuthJsonResidue(params: { stateDir: string; collector: AuditCollector }): void { + const agentsRoot = path.join(resolveUserPath(params.stateDir), "agents"); + if (!fs.existsSync(agentsRoot)) { + return; + } + for (const entry of fs.readdirSync(agentsRoot, { withFileTypes: true })) { + if (!entry.isDirectory()) { + continue; + } + const authJsonPath = path.join(agentsRoot, entry.name, "agent", "auth.json"); + if (!fs.existsSync(authJsonPath)) { + continue; + } + params.collector.filesScanned.add(authJsonPath); + const parsedResult = readJsonObject(authJsonPath); + if (parsedResult.error) { + addFinding(params.collector, { + code: "REF_UNRESOLVED", + severity: "error", + file: authJsonPath, + jsonPath: "", + message: `Invalid JSON in legacy auth.json: ${parsedResult.error}`, + }); + continue; + } + const parsed = parsedResult.value; + if (!parsed) { + continue; + } + for (const [providerId, value] of Object.entries(parsed)) { + if (!isRecord(value)) { + continue; + } + if (value.type === "api_key" && isNonEmptyString(value.key)) { + addFinding(params.collector, { + code: "LEGACY_RESIDUE", + severity: "warn", + file: authJsonPath, + jsonPath: providerId, + message: "Legacy auth.json contains static api_key credentials.", + provider: providerId, + }); + } + } + } +} + +async function collectUnresolvedRefFindings(params: { + collector: AuditCollector; + config: OpenClawConfig; + env: NodeJS.ProcessEnv; +}): Promise { + const cache: SecretRefResolveCache = {}; + const refsByProvider = new Map>(); + for (const assignment of params.collector.refAssignments) { + const providerKey = `${assignment.ref.source}:${assignment.ref.provider}`; + let refsForProvider = refsByProvider.get(providerKey); + if (!refsForProvider) { + refsForProvider = new Map(); + refsByProvider.set(providerKey, refsForProvider); + } + refsForProvider.set(secretRefKey(assignment.ref), assignment.ref); + } + + const resolvedByRefKey = new Map(); + const errorsByRefKey = new Map(); 
+ + for (const refsForProvider of refsByProvider.values()) { + const refs = [...refsForProvider.values()]; + try { + const resolved = await resolveSecretRefValues(refs, { + config: params.config, + env: params.env, + cache, + }); + for (const [key, value] of resolved.entries()) { + resolvedByRefKey.set(key, value); + } + continue; + } catch { + // Fall back to per-ref resolution for provider-specific pinpoint errors. + } + + for (const ref of refs) { + const key = secretRefKey(ref); + try { + const resolved = await resolveSecretRefValue(ref, { + config: params.config, + env: params.env, + cache, + }); + resolvedByRefKey.set(key, resolved); + } catch (err) { + errorsByRefKey.set(key, err); + } + } + } + + for (const assignment of params.collector.refAssignments) { + const key = secretRefKey(assignment.ref); + const resolveErr = errorsByRefKey.get(key); + if (resolveErr) { + addFinding(params.collector, { + code: "REF_UNRESOLVED", + severity: "error", + file: assignment.file, + jsonPath: assignment.path, + message: `Failed to resolve ${assignment.ref.source}:${assignment.ref.provider}:${assignment.ref.id} (${describeUnknownError(resolveErr)}).`, + provider: assignment.provider, + }); + continue; + } + + if (!resolvedByRefKey.has(key)) { + addFinding(params.collector, { + code: "REF_UNRESOLVED", + severity: "error", + file: assignment.file, + jsonPath: assignment.path, + message: `Failed to resolve ${assignment.ref.source}:${assignment.ref.provider}:${assignment.ref.id} (resolved value is missing).`, + provider: assignment.provider, + }); + continue; + } + + const resolved = resolvedByRefKey.get(key); + if (assignment.expected === "string") { + if (!isNonEmptyString(resolved)) { + addFinding(params.collector, { + code: "REF_UNRESOLVED", + severity: "error", + file: assignment.file, + jsonPath: assignment.path, + message: `Failed to resolve ${assignment.ref.source}:${assignment.ref.provider}:${assignment.ref.id} (resolved value is not a non-empty string).`, + provider: 
assignment.provider, + }); + } + continue; + } + if (!(isNonEmptyString(resolved) || isRecord(resolved))) { + addFinding(params.collector, { + code: "REF_UNRESOLVED", + severity: "error", + file: assignment.file, + jsonPath: assignment.path, + message: `Failed to resolve ${assignment.ref.source}:${assignment.ref.provider}:${assignment.ref.id} (resolved value is not a string/object).`, + provider: assignment.provider, + }); + } + } +} + +function collectShadowingFindings(collector: AuditCollector): void { + for (const [provider, paths] of collector.configProviderRefPaths.entries()) { + const authState = collector.authProviderState.get(provider); + if (!authState?.hasUsableStaticOrOAuth) { + continue; + } + const modeText = [...authState.modes].join("/"); + for (const configPath of paths) { + addFinding(collector, { + code: "REF_SHADOWED", + severity: "warn", + file: "openclaw.json", + jsonPath: configPath, + message: `Auth profile credentials (${modeText}) take precedence for provider "${provider}", so this config ref may never be used.`, + provider, + }); + } + } +} + +function describeUnknownError(err: unknown): string { + if (err instanceof Error && err.message.trim().length > 0) { + return err.message; + } + if (typeof err === "string" && err.trim().length > 0) { + return err; + } + try { + const serialized = JSON.stringify(err); + return serialized ?? 
"unknown error"; + } catch { + return "unknown error"; + } +} + +function summarizeFindings(findings: SecretsAuditFinding[]): SecretsAuditReport["summary"] { + return { + plaintextCount: findings.filter((entry) => entry.code === "PLAINTEXT_FOUND").length, + unresolvedRefCount: findings.filter((entry) => entry.code === "REF_UNRESOLVED").length, + shadowedRefCount: findings.filter((entry) => entry.code === "REF_SHADOWED").length, + legacyResidueCount: findings.filter((entry) => entry.code === "LEGACY_RESIDUE").length, + }; +} + +export async function runSecretsAudit( + params: { + env?: NodeJS.ProcessEnv; + } = {}, +): Promise { + const env = params.env ?? process.env; + const previousAuthStoreReadOnly = process.env.OPENCLAW_AUTH_STORE_READONLY; + process.env.OPENCLAW_AUTH_STORE_READONLY = "1"; + try { + const io = createSecretsConfigIO({ env }); + const snapshot = await io.readConfigFileSnapshot(); + const configPath = resolveUserPath(snapshot.path); + const defaults = snapshot.valid ? snapshot.config.secrets?.defaults : undefined; + + const collector: AuditCollector = { + findings: [], + refAssignments: [], + configProviderRefPaths: new Map(), + authProviderState: new Map(), + filesScanned: new Set([configPath]), + }; + + const stateDir = resolveStateDir(env, os.homedir); + const envPath = path.join(resolveConfigDir(env, os.homedir), ".env"); + const config = snapshot.valid ? 
snapshot.config : ({} as OpenClawConfig); + + if (snapshot.valid) { + collectConfigSecrets({ + config, + configPath, + collector, + }); + for (const authStorePath of collectAuthStorePaths(config, stateDir)) { + collectAuthStoreSecrets({ + authStorePath, + collector, + defaults, + }); + } + await collectUnresolvedRefFindings({ + collector, + config, + env, + }); + collectShadowingFindings(collector); + } else { + addFinding(collector, { + code: "REF_UNRESOLVED", + severity: "error", + file: configPath, + jsonPath: "", + message: "Config is invalid; cannot validate secret references reliably.", + }); + } + + collectEnvPlaintext({ + envPath, + collector, + }); + collectAuthJsonResidue({ + stateDir, + collector, + }); + + const summary = summarizeFindings(collector.findings); + const status: SecretsAuditStatus = + summary.unresolvedRefCount > 0 + ? "unresolved" + : collector.findings.length > 0 + ? "findings" + : "clean"; + + return { + version: 1, + status, + filesScanned: [...collector.filesScanned].toSorted(), + summary, + findings: collector.findings, + }; + } finally { + if (previousAuthStoreReadOnly === undefined) { + delete process.env.OPENCLAW_AUTH_STORE_READONLY; + } else { + process.env.OPENCLAW_AUTH_STORE_READONLY = previousAuthStoreReadOnly; + } + } +} + +export function resolveSecretsAuditExitCode(report: SecretsAuditReport, check: boolean): number { + if (report.summary.unresolvedRefCount > 0) { + return 2; + } + if (check && report.findings.length > 0) { + return 1; + } + return 0; +} + +export function applySecretsPlanTarget( + config: OpenClawConfig, + pathLabel: string, + value: unknown, +): void { + const segments = parseDotPath(pathLabel); + if (segments.length === 0) { + throw new Error("Invalid target path."); + } + let cursor: Record = config as unknown as Record; + for (const segment of segments.slice(0, -1)) { + const existing = cursor[segment]; + if (!isRecord(existing)) { + cursor[segment] = {}; + } + cursor = cursor[segment] as Record; + } + 
cursor[segments[segments.length - 1]] = value; +} diff --git a/src/secrets/config-io.ts b/src/secrets/config-io.ts new file mode 100644 index 00000000000..1dafcac9253 --- /dev/null +++ b/src/secrets/config-io.ts @@ -0,0 +1,14 @@ +import { createConfigIO } from "../config/config.js"; + +const silentConfigIoLogger = { + error: () => {}, + warn: () => {}, +} as const; + +export function createSecretsConfigIO(params: { env: NodeJS.ProcessEnv }) { + // Secrets command output is owned by the CLI command so --json stays machine-parseable. + return createConfigIO({ + env: params.env, + logger: silentConfigIoLogger, + }); +} diff --git a/src/secrets/configure.ts b/src/secrets/configure.ts new file mode 100644 index 00000000000..518f95926d9 --- /dev/null +++ b/src/secrets/configure.ts @@ -0,0 +1,876 @@ +import path from "node:path"; +import { isDeepStrictEqual } from "node:util"; +import { confirm, select, text } from "@clack/prompts"; +import type { OpenClawConfig } from "../config/config.js"; +import type { SecretProviderConfig, SecretRef, SecretRefSource } from "../config/types.secrets.js"; +import { isSafeExecutableValue } from "../infra/exec-safety.js"; +import { runSecretsApply, type SecretsApplyResult } from "./apply.js"; +import { createSecretsConfigIO } from "./config-io.js"; +import { type SecretsApplyPlan } from "./plan.js"; +import { resolveDefaultSecretProviderAlias } from "./ref-contract.js"; +import { isRecord } from "./shared.js"; + +type ConfigureCandidate = { + type: "models.providers.apiKey" | "skills.entries.apiKey" | "channels.googlechat.serviceAccount"; + path: string; + pathSegments: string[]; + label: string; + providerId?: string; + accountId?: string; +}; + +export type SecretsConfigureResult = { + plan: SecretsApplyPlan; + preflight: SecretsApplyResult; +}; + +const PROVIDER_ALIAS_PATTERN = /^[a-z][a-z0-9_-]{0,63}$/; +const ENV_NAME_PATTERN = /^[A-Z][A-Z0-9_]{0,127}$/; +const WINDOWS_ABS_PATH_PATTERN = /^[A-Za-z]:[\\/]/; +const 
WINDOWS_UNC_PATH_PATTERN = /^\\\\[^\\]+\\[^\\]+/; + +function isAbsolutePathValue(value: string): boolean { + return ( + path.isAbsolute(value) || + WINDOWS_ABS_PATH_PATTERN.test(value) || + WINDOWS_UNC_PATH_PATTERN.test(value) + ); +} + +function parseCsv(value: string): string[] { + return value + .split(",") + .map((entry) => entry.trim()) + .filter((entry) => entry.length > 0); +} + +function parseOptionalPositiveInt(value: string, max: number): number | undefined { + const trimmed = value.trim(); + if (!trimmed) { + return undefined; + } + if (!/^\d+$/.test(trimmed)) { + return undefined; + } + const parsed = Number.parseInt(trimmed, 10); + if (!Number.isFinite(parsed) || parsed <= 0 || parsed > max) { + return undefined; + } + return parsed; +} + +function getSecretProviders(config: OpenClawConfig): Record { + if (!isRecord(config.secrets?.providers)) { + return {}; + } + return config.secrets.providers; +} + +function setSecretProvider( + config: OpenClawConfig, + providerAlias: string, + providerConfig: SecretProviderConfig, +): void { + config.secrets ??= {}; + if (!isRecord(config.secrets.providers)) { + config.secrets.providers = {}; + } + config.secrets.providers[providerAlias] = providerConfig; +} + +function removeSecretProvider(config: OpenClawConfig, providerAlias: string): boolean { + if (!isRecord(config.secrets?.providers)) { + return false; + } + const providers = config.secrets.providers; + if (!Object.prototype.hasOwnProperty.call(providers, providerAlias)) { + return false; + } + delete providers[providerAlias]; + if (Object.keys(providers).length === 0) { + delete config.secrets?.providers; + } + + if (isRecord(config.secrets?.defaults)) { + const defaults = config.secrets.defaults; + if (defaults?.env === providerAlias) { + delete defaults.env; + } + if (defaults?.file === providerAlias) { + delete defaults.file; + } + if (defaults?.exec === providerAlias) { + delete defaults.exec; + } + if ( + defaults && + defaults.env === undefined && + 
defaults.file === undefined && + defaults.exec === undefined + ) { + delete config.secrets?.defaults; + } + } + return true; +} + +function providerHint(provider: SecretProviderConfig): string { + if (provider.source === "env") { + return provider.allowlist?.length ? `env (${provider.allowlist.length} allowlisted)` : "env"; + } + if (provider.source === "file") { + return `file (${provider.mode ?? "json"})`; + } + return `exec (${provider.jsonOnly === false ? "json+text" : "json"})`; +} + +function buildCandidates(config: OpenClawConfig): ConfigureCandidate[] { + const out: ConfigureCandidate[] = []; + const providers = config.models?.providers as Record | undefined; + if (providers) { + for (const [providerId, providerValue] of Object.entries(providers)) { + if (!isRecord(providerValue)) { + continue; + } + out.push({ + type: "models.providers.apiKey", + path: `models.providers.${providerId}.apiKey`, + pathSegments: ["models", "providers", providerId, "apiKey"], + label: `Provider API key: ${providerId}`, + providerId, + }); + } + } + + const entries = config.skills?.entries as Record | undefined; + if (entries) { + for (const [entryId, entryValue] of Object.entries(entries)) { + if (!isRecord(entryValue)) { + continue; + } + out.push({ + type: "skills.entries.apiKey", + path: `skills.entries.${entryId}.apiKey`, + pathSegments: ["skills", "entries", entryId, "apiKey"], + label: `Skill API key: ${entryId}`, + }); + } + } + + const googlechat = config.channels?.googlechat; + if (isRecord(googlechat)) { + out.push({ + type: "channels.googlechat.serviceAccount", + path: "channels.googlechat.serviceAccount", + pathSegments: ["channels", "googlechat", "serviceAccount"], + label: "Google Chat serviceAccount (default)", + }); + const accounts = googlechat.accounts; + if (isRecord(accounts)) { + for (const [accountId, value] of Object.entries(accounts)) { + if (!isRecord(value)) { + continue; + } + out.push({ + type: "channels.googlechat.serviceAccount", + path: 
`channels.googlechat.accounts.${accountId}.serviceAccount`, + pathSegments: ["channels", "googlechat", "accounts", accountId, "serviceAccount"], + label: `Google Chat serviceAccount (${accountId})`, + accountId, + }); + } + } + } + + return out; +} + +function toSourceChoices(config: OpenClawConfig): Array<{ value: SecretRefSource; label: string }> { + const hasSource = (source: SecretRefSource) => + Object.values(config.secrets?.providers ?? {}).some((provider) => provider?.source === source); + const choices: Array<{ value: SecretRefSource; label: string }> = [ + { + value: "env", + label: "env", + }, + ]; + if (hasSource("file")) { + choices.push({ value: "file", label: "file" }); + } + if (hasSource("exec")) { + choices.push({ value: "exec", label: "exec" }); + } + return choices; +} + +function assertNoCancel(value: T | symbol, message: string): T { + if (typeof value === "symbol") { + throw new Error(message); + } + return value; +} + +async function promptProviderAlias(params: { existingAliases: Set }): Promise { + const alias = assertNoCancel( + await text({ + message: "Provider alias", + initialValue: "default", + validate: (value) => { + const trimmed = String(value ?? 
"").trim(); + if (!trimmed) { + return "Required"; + } + if (!PROVIDER_ALIAS_PATTERN.test(trimmed)) { + return "Must match /^[a-z][a-z0-9_-]{0,63}$/"; + } + if (params.existingAliases.has(trimmed)) { + return "Alias already exists"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + return String(alias).trim(); +} + +async function promptProviderSource(initial?: SecretRefSource): Promise { + const source = assertNoCancel( + await select({ + message: "Provider source", + options: [ + { value: "env", label: "env" }, + { value: "file", label: "file" }, + { value: "exec", label: "exec" }, + ], + initialValue: initial, + }), + "Secrets configure cancelled.", + ); + return source as SecretRefSource; +} + +async function promptEnvProvider( + base?: Extract, +): Promise> { + const allowlistRaw = assertNoCancel( + await text({ + message: "Env allowlist (comma-separated, blank for unrestricted)", + initialValue: base?.allowlist?.join(",") ?? "", + validate: (value) => { + const entries = parseCsv(String(value ?? "")); + for (const entry of entries) { + if (!ENV_NAME_PATTERN.test(entry)) { + return `Invalid env name: ${entry}`; + } + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + const allowlist = parseCsv(String(allowlistRaw ?? "")); + return { + source: "env", + ...(allowlist.length > 0 ? { allowlist } : {}), + }; +} + +async function promptFileProvider( + base?: Extract, +): Promise> { + const filePath = assertNoCancel( + await text({ + message: "File path (absolute)", + initialValue: base?.path ?? "", + validate: (value) => { + const trimmed = String(value ?? 
"").trim(); + if (!trimmed) { + return "Required"; + } + if (!isAbsolutePathValue(trimmed)) { + return "Must be an absolute path"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + + const mode = assertNoCancel( + await select({ + message: "File mode", + options: [ + { value: "json", label: "json" }, + { value: "singleValue", label: "singleValue" }, + ], + initialValue: base?.mode ?? "json", + }), + "Secrets configure cancelled.", + ); + + const timeoutMsRaw = assertNoCancel( + await text({ + message: "Timeout ms (blank for default)", + initialValue: base?.timeoutMs ? String(base.timeoutMs) : "", + validate: (value) => { + const trimmed = String(value ?? "").trim(); + if (!trimmed) { + return undefined; + } + if (parseOptionalPositiveInt(trimmed, 120000) === undefined) { + return "Must be an integer between 1 and 120000"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + const maxBytesRaw = assertNoCancel( + await text({ + message: "Max bytes (blank for default)", + initialValue: base?.maxBytes ? String(base.maxBytes) : "", + validate: (value) => { + const trimmed = String(value ?? "").trim(); + if (!trimmed) { + return undefined; + } + if (parseOptionalPositiveInt(trimmed, 20 * 1024 * 1024) === undefined) { + return "Must be an integer between 1 and 20971520"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + + const timeoutMs = parseOptionalPositiveInt(String(timeoutMsRaw ?? ""), 120000); + const maxBytes = parseOptionalPositiveInt(String(maxBytesRaw ?? ""), 20 * 1024 * 1024); + + return { + source: "file", + path: String(filePath).trim(), + mode, + ...(timeoutMs ? { timeoutMs } : {}), + ...(maxBytes ? 
{ maxBytes } : {}), + }; +} + +async function parseArgsInput(rawValue: string): Promise { + const trimmed = rawValue.trim(); + if (!trimmed) { + return undefined; + } + const parsed = JSON.parse(trimmed) as unknown; + if (!Array.isArray(parsed) || !parsed.every((entry) => typeof entry === "string")) { + throw new Error("args must be a JSON array of strings"); + } + return parsed; +} + +async function promptExecProvider( + base?: Extract, +): Promise> { + const command = assertNoCancel( + await text({ + message: "Command path (absolute)", + initialValue: base?.command ?? "", + validate: (value) => { + const trimmed = String(value ?? "").trim(); + if (!trimmed) { + return "Required"; + } + if (!isAbsolutePathValue(trimmed)) { + return "Must be an absolute path"; + } + if (!isSafeExecutableValue(trimmed)) { + return "Command value is not allowed"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + + const argsRaw = assertNoCancel( + await text({ + message: "Args JSON array (blank for none)", + initialValue: JSON.stringify(base?.args ?? []), + validate: (value) => { + const trimmed = String(value ?? "").trim(); + if (!trimmed) { + return undefined; + } + try { + const parsed = JSON.parse(trimmed) as unknown; + if (!Array.isArray(parsed) || !parsed.every((entry) => typeof entry === "string")) { + return "Must be a JSON array of strings"; + } + return undefined; + } catch { + return "Must be valid JSON"; + } + }, + }), + "Secrets configure cancelled.", + ); + + const timeoutMsRaw = assertNoCancel( + await text({ + message: "Timeout ms (blank for default)", + initialValue: base?.timeoutMs ? String(base.timeoutMs) : "", + validate: (value) => { + const trimmed = String(value ?? 
"").trim(); + if (!trimmed) { + return undefined; + } + if (parseOptionalPositiveInt(trimmed, 120000) === undefined) { + return "Must be an integer between 1 and 120000"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + + const noOutputTimeoutMsRaw = assertNoCancel( + await text({ + message: "No-output timeout ms (blank for default)", + initialValue: base?.noOutputTimeoutMs ? String(base.noOutputTimeoutMs) : "", + validate: (value) => { + const trimmed = String(value ?? "").trim(); + if (!trimmed) { + return undefined; + } + if (parseOptionalPositiveInt(trimmed, 120000) === undefined) { + return "Must be an integer between 1 and 120000"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + + const maxOutputBytesRaw = assertNoCancel( + await text({ + message: "Max output bytes (blank for default)", + initialValue: base?.maxOutputBytes ? String(base.maxOutputBytes) : "", + validate: (value) => { + const trimmed = String(value ?? "").trim(); + if (!trimmed) { + return undefined; + } + if (parseOptionalPositiveInt(trimmed, 20 * 1024 * 1024) === undefined) { + return "Must be an integer between 1 and 20971520"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + + const jsonOnly = assertNoCancel( + await confirm({ + message: "Require JSON-only response?", + initialValue: base?.jsonOnly ?? true, + }), + "Secrets configure cancelled.", + ); + + const passEnvRaw = assertNoCancel( + await text({ + message: "Pass-through env vars (comma-separated, blank for none)", + initialValue: base?.passEnv?.join(",") ?? "", + validate: (value) => { + const entries = parseCsv(String(value ?? 
"")); + for (const entry of entries) { + if (!ENV_NAME_PATTERN.test(entry)) { + return `Invalid env name: ${entry}`; + } + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + + const trustedDirsRaw = assertNoCancel( + await text({ + message: "Trusted dirs (comma-separated absolute paths, blank for none)", + initialValue: base?.trustedDirs?.join(",") ?? "", + validate: (value) => { + const entries = parseCsv(String(value ?? "")); + for (const entry of entries) { + if (!isAbsolutePathValue(entry)) { + return `Trusted dir must be absolute: ${entry}`; + } + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + + const allowInsecurePath = assertNoCancel( + await confirm({ + message: "Allow insecure command path checks?", + initialValue: base?.allowInsecurePath ?? false, + }), + "Secrets configure cancelled.", + ); + const allowSymlinkCommand = assertNoCancel( + await confirm({ + message: "Allow symlink command path?", + initialValue: base?.allowSymlinkCommand ?? false, + }), + "Secrets configure cancelled.", + ); + + const args = await parseArgsInput(String(argsRaw ?? "")); + const timeoutMs = parseOptionalPositiveInt(String(timeoutMsRaw ?? ""), 120000); + const noOutputTimeoutMs = parseOptionalPositiveInt(String(noOutputTimeoutMsRaw ?? ""), 120000); + const maxOutputBytes = parseOptionalPositiveInt( + String(maxOutputBytesRaw ?? ""), + 20 * 1024 * 1024, + ); + const passEnv = parseCsv(String(passEnvRaw ?? "")); + const trustedDirs = parseCsv(String(trustedDirsRaw ?? "")); + + return { + source: "exec", + command: String(command).trim(), + ...(args && args.length > 0 ? { args } : {}), + ...(timeoutMs ? { timeoutMs } : {}), + ...(noOutputTimeoutMs ? { noOutputTimeoutMs } : {}), + ...(maxOutputBytes ? { maxOutputBytes } : {}), + ...(jsonOnly ? { jsonOnly } : { jsonOnly: false }), + ...(passEnv.length > 0 ? { passEnv } : {}), + ...(trustedDirs.length > 0 ? { trustedDirs } : {}), + ...(allowInsecurePath ? 
{ allowInsecurePath: true } : {}), + ...(allowSymlinkCommand ? { allowSymlinkCommand: true } : {}), + ...(isRecord(base?.env) ? { env: base.env } : {}), + }; +} + +async function promptProviderConfig( + source: SecretRefSource, + current?: SecretProviderConfig, +): Promise { + if (source === "env") { + return await promptEnvProvider(current?.source === "env" ? current : undefined); + } + if (source === "file") { + return await promptFileProvider(current?.source === "file" ? current : undefined); + } + return await promptExecProvider(current?.source === "exec" ? current : undefined); +} + +async function configureProvidersInteractive(config: OpenClawConfig): Promise { + while (true) { + const providers = getSecretProviders(config); + const providerEntries = Object.entries(providers).toSorted(([left], [right]) => + left.localeCompare(right), + ); + + const actionOptions: Array<{ value: string; label: string; hint?: string }> = [ + { + value: "add", + label: "Add provider", + hint: "Define a new env/file/exec provider", + }, + ]; + if (providerEntries.length > 0) { + actionOptions.push({ + value: "edit", + label: "Edit provider", + hint: "Update an existing provider", + }); + actionOptions.push({ + value: "remove", + label: "Remove provider", + hint: "Delete a provider alias", + }); + } + actionOptions.push({ + value: "continue", + label: "Continue", + hint: "Move to credential mapping", + }); + + const action = assertNoCancel( + await select({ + message: + providerEntries.length > 0 + ? 
"Configure secret providers" + : "Configure secret providers (only env refs are available until file/exec providers are added)", + options: actionOptions, + }), + "Secrets configure cancelled.", + ); + + if (action === "continue") { + return; + } + + if (action === "add") { + const source = await promptProviderSource(); + const alias = await promptProviderAlias({ + existingAliases: new Set(providerEntries.map(([providerAlias]) => providerAlias)), + }); + const providerConfig = await promptProviderConfig(source); + setSecretProvider(config, alias, providerConfig); + continue; + } + + if (action === "edit") { + const alias = assertNoCancel( + await select({ + message: "Select provider to edit", + options: providerEntries.map(([providerAlias, providerConfig]) => ({ + value: providerAlias, + label: providerAlias, + hint: providerHint(providerConfig), + })), + }), + "Secrets configure cancelled.", + ); + const current = providers[alias]; + if (!current) { + continue; + } + const source = await promptProviderSource(current.source); + const nextProviderConfig = await promptProviderConfig(source, current); + if (!isDeepStrictEqual(current, nextProviderConfig)) { + setSecretProvider(config, alias, nextProviderConfig); + } + continue; + } + + if (action === "remove") { + const alias = assertNoCancel( + await select({ + message: "Select provider to remove", + options: providerEntries.map(([providerAlias, providerConfig]) => ({ + value: providerAlias, + label: providerAlias, + hint: providerHint(providerConfig), + })), + }), + "Secrets configure cancelled.", + ); + + const shouldRemove = assertNoCancel( + await confirm({ + message: `Remove provider "${alias}"?`, + initialValue: false, + }), + "Secrets configure cancelled.", + ); + if (shouldRemove) { + removeSecretProvider(config, alias); + } + } + } +} + +function collectProviderPlanChanges(params: { original: OpenClawConfig; next: OpenClawConfig }): { + upserts: Record; + deletes: string[]; +} { + const originalProviders = 
getSecretProviders(params.original); + const nextProviders = getSecretProviders(params.next); + + const upserts: Record = {}; + const deletes: string[] = []; + + for (const [providerAlias, nextProviderConfig] of Object.entries(nextProviders)) { + const current = originalProviders[providerAlias]; + if (isDeepStrictEqual(current, nextProviderConfig)) { + continue; + } + upserts[providerAlias] = structuredClone(nextProviderConfig); + } + + for (const providerAlias of Object.keys(originalProviders)) { + if (!Object.prototype.hasOwnProperty.call(nextProviders, providerAlias)) { + deletes.push(providerAlias); + } + } + + return { + upserts, + deletes: deletes.toSorted(), + }; +} + +export async function runSecretsConfigureInteractive( + params: { + env?: NodeJS.ProcessEnv; + providersOnly?: boolean; + skipProviderSetup?: boolean; + } = {}, +): Promise { + if (!process.stdin.isTTY) { + throw new Error("secrets configure requires an interactive TTY."); + } + if (params.providersOnly && params.skipProviderSetup) { + throw new Error("Cannot combine --providers-only with --skip-provider-setup."); + } + + const env = params.env ?? 
process.env; + const io = createSecretsConfigIO({ env }); + const { snapshot } = await io.readConfigFileSnapshotForWrite(); + if (!snapshot.valid) { + throw new Error("Cannot run interactive secrets configure because config is invalid."); + } + + const stagedConfig = structuredClone(snapshot.config); + if (!params.skipProviderSetup) { + await configureProvidersInteractive(stagedConfig); + } + + const providerChanges = collectProviderPlanChanges({ + original: snapshot.config, + next: stagedConfig, + }); + + const selectedByPath = new Map(); + if (!params.providersOnly) { + const candidates = buildCandidates(stagedConfig); + if (candidates.length === 0) { + throw new Error("No configurable secret-bearing fields found in openclaw.json."); + } + + const sourceChoices = toSourceChoices(stagedConfig); + + while (true) { + const options = candidates.map((candidate) => ({ + value: candidate.path, + label: candidate.label, + hint: candidate.path, + })); + if (selectedByPath.size > 0) { + options.unshift({ + value: "__done__", + label: "Done", + hint: "Finish and run preflight", + }); + } + + const selectedPath = assertNoCancel( + await select({ + message: "Select credential field", + options, + }), + "Secrets configure cancelled.", + ); + + if (selectedPath === "__done__") { + break; + } + + const candidate = candidates.find((entry) => entry.path === selectedPath); + if (!candidate) { + throw new Error(`Unknown configure target: ${selectedPath}`); + } + + const source = assertNoCancel( + await select({ + message: "Secret source", + options: sourceChoices, + }), + "Secrets configure cancelled.", + ) as SecretRefSource; + + const defaultAlias = resolveDefaultSecretProviderAlias(stagedConfig, source, { + preferFirstProviderForSource: true, + }); + const provider = assertNoCancel( + await text({ + message: "Provider alias", + initialValue: defaultAlias, + validate: (value) => { + const trimmed = String(value ?? 
"").trim(); + if (!trimmed) { + return "Required"; + } + if (!PROVIDER_ALIAS_PATTERN.test(trimmed)) { + return "Must match /^[a-z][a-z0-9_-]{0,63}$/"; + } + return undefined; + }, + }), + "Secrets configure cancelled.", + ); + const id = assertNoCancel( + await text({ + message: "Secret id", + validate: (value) => (String(value ?? "").trim().length > 0 ? undefined : "Required"), + }), + "Secrets configure cancelled.", + ); + const ref: SecretRef = { + source, + provider: String(provider).trim(), + id: String(id).trim(), + }; + + const next = { + ...candidate, + ref, + }; + selectedByPath.set(candidate.path, next); + + const addMore = assertNoCancel( + await confirm({ + message: "Configure another credential?", + initialValue: true, + }), + "Secrets configure cancelled.", + ); + if (!addMore) { + break; + } + } + } + + if ( + selectedByPath.size === 0 && + Object.keys(providerChanges.upserts).length === 0 && + providerChanges.deletes.length === 0 + ) { + throw new Error("No secrets changes were selected."); + } + + const plan: SecretsApplyPlan = { + version: 1, + protocolVersion: 1, + generatedAt: new Date().toISOString(), + generatedBy: "openclaw secrets configure", + targets: [...selectedByPath.values()].map((entry) => ({ + type: entry.type, + path: entry.path, + pathSegments: [...entry.pathSegments], + ref: entry.ref, + ...(entry.providerId ? { providerId: entry.providerId } : {}), + ...(entry.accountId ? { accountId: entry.accountId } : {}), + })), + ...(Object.keys(providerChanges.upserts).length > 0 + ? { providerUpserts: providerChanges.upserts } + : {}), + ...(providerChanges.deletes.length > 0 ? 
{ providerDeletes: providerChanges.deletes } : {}), + options: { + scrubEnv: true, + scrubAuthProfilesForProviderTargets: true, + scrubLegacyAuthJson: true, + }, + }; + + const preflight = await runSecretsApply({ + plan, + env, + write: false, + }); + + return { plan, preflight }; +} diff --git a/src/secrets/json-pointer.ts b/src/secrets/json-pointer.ts new file mode 100644 index 00000000000..c9761af61c5 --- /dev/null +++ b/src/secrets/json-pointer.ts @@ -0,0 +1,94 @@ +function failOrUndefined(params: { onMissing: "throw" | "undefined"; message: string }): undefined { + if (params.onMissing === "throw") { + throw new Error(params.message); + } + return undefined; +} + +export function decodeJsonPointerToken(token: string): string { + return token.replace(/~1/g, "/").replace(/~0/g, "~"); +} + +export function encodeJsonPointerToken(token: string): string { + return token.replace(/~/g, "~0").replace(/\//g, "~1"); +} + +export function readJsonPointer( + root: unknown, + pointer: string, + options: { onMissing?: "throw" | "undefined" } = {}, +): unknown { + const onMissing = options.onMissing ?? 
"throw"; + if (!pointer.startsWith("/")) { + return failOrUndefined({ + onMissing, + message: + 'File-backed secret ids must be absolute JSON pointers (for example: "/providers/openai/apiKey").', + }); + } + + const tokens = pointer + .slice(1) + .split("/") + .map((token) => decodeJsonPointerToken(token)); + + let current: unknown = root; + for (const token of tokens) { + if (Array.isArray(current)) { + const index = Number.parseInt(token, 10); + if (!Number.isFinite(index) || index < 0 || index >= current.length) { + return failOrUndefined({ + onMissing, + message: `JSON pointer segment "${token}" is out of bounds.`, + }); + } + current = current[index]; + continue; + } + if (typeof current !== "object" || current === null || Array.isArray(current)) { + return failOrUndefined({ + onMissing, + message: `JSON pointer segment "${token}" does not exist.`, + }); + } + const record = current as Record; + if (!Object.hasOwn(record, token)) { + return failOrUndefined({ + onMissing, + message: `JSON pointer segment "${token}" does not exist.`, + }); + } + current = record[token]; + } + return current; +} + +export function setJsonPointer( + root: Record, + pointer: string, + value: unknown, +): void { + if (!pointer.startsWith("/")) { + throw new Error(`Invalid JSON pointer "${pointer}".`); + } + + const tokens = pointer + .slice(1) + .split("/") + .map((token) => decodeJsonPointerToken(token)); + + let current: Record = root; + for (let index = 0; index < tokens.length; index += 1) { + const token = tokens[index]; + const isLast = index === tokens.length - 1; + if (isLast) { + current[token] = value; + return; + } + const child = current[token]; + if (typeof child !== "object" || child === null || Array.isArray(child)) { + current[token] = {}; + } + current = current[token] as Record; + } +} diff --git a/src/secrets/plan.ts b/src/secrets/plan.ts new file mode 100644 index 00000000000..0956f9677de --- /dev/null +++ b/src/secrets/plan.ts @@ -0,0 +1,238 @@ +import type { 
SecretProviderConfig, SecretRef } from "../config/types.secrets.js"; +import { SecretProviderSchema } from "../config/zod-schema.core.js"; + +export type SecretsPlanTargetType = + | "models.providers.apiKey" + | "skills.entries.apiKey" + | "channels.googlechat.serviceAccount"; + +export type SecretsPlanTarget = { + type: SecretsPlanTargetType; + /** + * Dot path in openclaw.json for operator readability. + * Example: "models.providers.openai.apiKey" + */ + path: string; + /** + * Canonical path segments used for safe mutation. + * Example: ["models", "providers", "openai", "apiKey"] + */ + pathSegments?: string[]; + ref: SecretRef; + /** + * For provider targets, used to scrub auth-profile/static residues. + */ + providerId?: string; + /** + * For googlechat account-scoped targets. + */ + accountId?: string; +}; + +export type SecretsApplyPlan = { + version: 1; + protocolVersion: 1; + generatedAt: string; + generatedBy: "openclaw secrets configure" | "manual"; + providerUpserts?: Record; + providerDeletes?: string[]; + targets: SecretsPlanTarget[]; + options?: { + scrubEnv?: boolean; + scrubAuthProfilesForProviderTargets?: boolean; + scrubLegacyAuthJson?: boolean; + }; +}; + +const PROVIDER_ALIAS_PATTERN = /^[a-z][a-z0-9_-]{0,63}$/; +const FORBIDDEN_PATH_SEGMENTS = new Set(["__proto__", "prototype", "constructor"]); + +function isSecretsPlanTargetType(value: unknown): value is SecretsPlanTargetType { + return ( + value === "models.providers.apiKey" || + value === "skills.entries.apiKey" || + value === "channels.googlechat.serviceAccount" + ); +} + +function isObjectRecord(value: unknown): value is Record { + return Boolean(value) && typeof value === "object" && !Array.isArray(value); +} + +function isSecretProviderConfigShape(value: unknown): value is SecretProviderConfig { + return SecretProviderSchema.safeParse(value).success; +} + +function parseDotPath(pathname: string): string[] { + return pathname + .split(".") + .map((segment) => segment.trim()) + 
.filter((segment) => segment.length > 0); +} + +function hasForbiddenPathSegment(segments: string[]): boolean { + return segments.some((segment) => FORBIDDEN_PATH_SEGMENTS.has(segment)); +} + +function hasMatchingPathShape( + candidate: Pick, + segments: string[], +): boolean { + if (candidate.type === "models.providers.apiKey") { + if ( + segments.length !== 4 || + segments[0] !== "models" || + segments[1] !== "providers" || + segments[3] !== "apiKey" + ) { + return false; + } + return ( + candidate.providerId === undefined || + candidate.providerId.trim().length === 0 || + candidate.providerId === segments[2] + ); + } + if (candidate.type === "skills.entries.apiKey") { + return ( + segments.length === 4 && + segments[0] === "skills" && + segments[1] === "entries" && + segments[3] === "apiKey" + ); + } + if ( + segments.length === 3 && + segments[0] === "channels" && + segments[1] === "googlechat" && + segments[2] === "serviceAccount" + ) { + return candidate.accountId === undefined || candidate.accountId.trim().length === 0; + } + if ( + segments.length === 5 && + segments[0] === "channels" && + segments[1] === "googlechat" && + segments[2] === "accounts" && + segments[4] === "serviceAccount" + ) { + return ( + candidate.accountId === undefined || + candidate.accountId.trim().length === 0 || + candidate.accountId === segments[3] + ); + } + return false; +} + +export function resolveValidatedTargetPathSegments(candidate: { + type?: SecretsPlanTargetType; + path?: string; + pathSegments?: string[]; + providerId?: string; + accountId?: string; +}): string[] | null { + if (!isSecretsPlanTargetType(candidate.type)) { + return null; + } + const path = typeof candidate.path === "string" ? candidate.path.trim() : ""; + if (!path) { + return null; + } + const segments = + Array.isArray(candidate.pathSegments) && candidate.pathSegments.length > 0 + ? 
candidate.pathSegments.map((segment) => String(segment).trim()).filter(Boolean) + : parseDotPath(path); + if ( + segments.length === 0 || + hasForbiddenPathSegment(segments) || + path !== segments.join(".") || + !hasMatchingPathShape( + { + type: candidate.type, + providerId: candidate.providerId, + accountId: candidate.accountId, + }, + segments, + ) + ) { + return null; + } + return segments; +} + +export function isSecretsApplyPlan(value: unknown): value is SecretsApplyPlan { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return false; + } + const typed = value as Partial; + if (typed.version !== 1 || typed.protocolVersion !== 1 || !Array.isArray(typed.targets)) { + return false; + } + for (const target of typed.targets) { + if (!target || typeof target !== "object") { + return false; + } + const candidate = target as Partial; + const ref = candidate.ref as Partial | undefined; + if ( + (candidate.type !== "models.providers.apiKey" && + candidate.type !== "skills.entries.apiKey" && + candidate.type !== "channels.googlechat.serviceAccount") || + typeof candidate.path !== "string" || + !candidate.path.trim() || + (candidate.pathSegments !== undefined && !Array.isArray(candidate.pathSegments)) || + !resolveValidatedTargetPathSegments({ + type: candidate.type, + path: candidate.path, + pathSegments: candidate.pathSegments, + providerId: candidate.providerId, + accountId: candidate.accountId, + }) || + !ref || + typeof ref !== "object" || + (ref.source !== "env" && ref.source !== "file" && ref.source !== "exec") || + typeof ref.provider !== "string" || + ref.provider.trim().length === 0 || + typeof ref.id !== "string" || + ref.id.trim().length === 0 + ) { + return false; + } + } + if (typed.providerUpserts !== undefined) { + if (!isObjectRecord(typed.providerUpserts)) { + return false; + } + for (const [providerAlias, providerValue] of Object.entries(typed.providerUpserts)) { + if (!PROVIDER_ALIAS_PATTERN.test(providerAlias)) { + return false; 
+ } + if (!isSecretProviderConfigShape(providerValue)) { + return false; + } + } + } + if (typed.providerDeletes !== undefined) { + if ( + !Array.isArray(typed.providerDeletes) || + typed.providerDeletes.some( + (providerAlias) => + typeof providerAlias !== "string" || !PROVIDER_ALIAS_PATTERN.test(providerAlias), + ) + ) { + return false; + } + } + return true; +} + +export function normalizeSecretsPlanOptions( + options: SecretsApplyPlan["options"] | undefined, +): Required> { + return { + scrubEnv: options?.scrubEnv ?? true, + scrubAuthProfilesForProviderTargets: options?.scrubAuthProfilesForProviderTargets ?? true, + scrubLegacyAuthJson: options?.scrubLegacyAuthJson ?? true, + }; +} diff --git a/src/secrets/provider-env-vars.ts b/src/secrets/provider-env-vars.ts new file mode 100644 index 00000000000..9d2100d1852 --- /dev/null +++ b/src/secrets/provider-env-vars.ts @@ -0,0 +1,30 @@ +export const PROVIDER_ENV_VARS: Record = { + openai: ["OPENAI_API_KEY"], + anthropic: ["ANTHROPIC_API_KEY"], + google: ["GEMINI_API_KEY"], + minimax: ["MINIMAX_API_KEY"], + "minimax-cn": ["MINIMAX_API_KEY"], + moonshot: ["MOONSHOT_API_KEY"], + "kimi-coding": ["KIMI_API_KEY", "KIMICODE_API_KEY"], + synthetic: ["SYNTHETIC_API_KEY"], + venice: ["VENICE_API_KEY"], + zai: ["ZAI_API_KEY", "Z_AI_API_KEY"], + xiaomi: ["XIAOMI_API_KEY"], + openrouter: ["OPENROUTER_API_KEY"], + "cloudflare-ai-gateway": ["CLOUDFLARE_AI_GATEWAY_API_KEY"], + litellm: ["LITELLM_API_KEY"], + "vercel-ai-gateway": ["AI_GATEWAY_API_KEY"], + opencode: ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"], + together: ["TOGETHER_API_KEY"], + huggingface: ["HUGGINGFACE_HUB_TOKEN", "HF_TOKEN"], + qianfan: ["QIANFAN_API_KEY"], + xai: ["XAI_API_KEY"], + mistral: ["MISTRAL_API_KEY"], + kilocode: ["KILOCODE_API_KEY"], + volcengine: ["VOLCANO_ENGINE_API_KEY"], + byteplus: ["BYTEPLUS_API_KEY"], +}; + +export function listKnownSecretEnvVarNames(): string[] { + return [...new Set(Object.values(PROVIDER_ENV_VARS).flatMap((keys) => keys))]; 
+} diff --git a/src/secrets/ref-contract.ts b/src/secrets/ref-contract.ts new file mode 100644 index 00000000000..5366b814999 --- /dev/null +++ b/src/secrets/ref-contract.ts @@ -0,0 +1,66 @@ +import { + DEFAULT_SECRET_PROVIDER_ALIAS, + type SecretRef, + type SecretRefSource, +} from "../config/types.secrets.js"; + +const FILE_SECRET_REF_SEGMENT_PATTERN = /^(?:[^~]|~0|~1)*$/; + +export const SINGLE_VALUE_FILE_REF_ID = "value"; + +export type SecretRefDefaultsCarrier = { + secrets?: { + defaults?: { + env?: string; + file?: string; + exec?: string; + }; + providers?: Record; + }; +}; + +export function secretRefKey(ref: SecretRef): string { + return `${ref.source}:${ref.provider}:${ref.id}`; +} + +export function resolveDefaultSecretProviderAlias( + config: SecretRefDefaultsCarrier, + source: SecretRefSource, + options?: { preferFirstProviderForSource?: boolean }, +): string { + const configured = + source === "env" + ? config.secrets?.defaults?.env + : source === "file" + ? config.secrets?.defaults?.file + : config.secrets?.defaults?.exec; + if (configured?.trim()) { + return configured.trim(); + } + + if (options?.preferFirstProviderForSource) { + const providers = config.secrets?.providers; + if (providers) { + for (const [providerName, provider] of Object.entries(providers)) { + if (provider?.source === source) { + return providerName; + } + } + } + } + + return DEFAULT_SECRET_PROVIDER_ALIAS; +} + +export function isValidFileSecretRefId(value: string): boolean { + if (value === SINGLE_VALUE_FILE_REF_ID) { + return true; + } + if (!value.startsWith("/")) { + return false; + } + return value + .slice(1) + .split("/") + .every((segment) => FILE_SECRET_REF_SEGMENT_PATTERN.test(segment)); +} diff --git a/src/secrets/resolve.test.ts b/src/secrets/resolve.test.ts new file mode 100644 index 00000000000..0c9119cb947 --- /dev/null +++ b/src/secrets/resolve.test.ts @@ -0,0 +1,536 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from 
"node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { resolveSecretRefString, resolveSecretRefValue } from "./resolve.js"; + +async function writeSecureFile(filePath: string, content: string, mode = 0o600): Promise { + await fs.mkdir(path.dirname(filePath), { recursive: true }); + await fs.writeFile(filePath, content, "utf8"); + await fs.chmod(filePath, mode); +} + +describe("secret ref resolver", () => { + const cleanupRoots: string[] = []; + + afterEach(async () => { + vi.restoreAllMocks(); + while (cleanupRoots.length > 0) { + const root = cleanupRoots.pop(); + if (!root) { + continue; + } + await fs.rm(root, { recursive: true, force: true }); + } + }); + + it("resolves env refs via implicit default env provider", async () => { + const config: OpenClawConfig = {}; + const value = await resolveSecretRefString( + { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + { + config, + env: { OPENAI_API_KEY: "sk-env-value" }, + }, + ); + expect(value).toBe("sk-env-value"); + }); + + it("resolves file refs in json mode", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-file-")); + cleanupRoots.push(root); + const filePath = path.join(root, "secrets.json"); + await writeSecureFile( + filePath, + JSON.stringify({ + providers: { + openai: { + apiKey: "sk-file-value", + }, + }, + }), + ); + + const value = await resolveSecretRefString( + { source: "file", provider: "filemain", id: "/providers/openai/apiKey" }, + { + config: { + secrets: { + providers: { + filemain: { + source: "file", + path: filePath, + mode: "json", + }, + }, + }, + }, + }, + ); + expect(value).toBe("sk-file-value"); + }); + + it("resolves exec refs with protocolVersion 1 response", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), 
"openclaw-secrets-resolve-exec-")); + cleanupRoots.push(root); + const scriptPath = path.join(root, "resolver.mjs"); + await writeSecureFile( + scriptPath, + [ + "#!/usr/bin/env node", + "import fs from 'node:fs';", + "const req = JSON.parse(fs.readFileSync(0, 'utf8'));", + "const values = Object.fromEntries((req.ids ?? []).map((id) => [id, `value:${id}`]));", + "process.stdout.write(JSON.stringify({ protocolVersion: 1, values }));", + ].join("\n"), + 0o700, + ); + + const value = await resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: scriptPath, + passEnv: ["PATH"], + }, + }, + }, + }, + }, + ); + expect(value).toBe("value:openai/api-key"); + }); + + it("supports non-JSON single-value exec output when jsonOnly is false", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-exec-plain-")); + cleanupRoots.push(root); + const scriptPath = path.join(root, "resolver-plain.mjs"); + await writeSecureFile( + scriptPath, + ["#!/usr/bin/env node", "process.stdout.write('plain-secret');"].join("\n"), + 0o700, + ); + + const value = await resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: scriptPath, + passEnv: ["PATH"], + jsonOnly: false, + }, + }, + }, + }, + }, + ); + expect(value).toBe("plain-secret"); + }); + + it("rejects symlink command paths unless allowSymlinkCommand is enabled", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-exec-link-")); + cleanupRoots.push(root); + const scriptPath = path.join(root, "resolver-target.mjs"); + const symlinkPath = path.join(root, "resolver-link.mjs"); + await writeSecureFile( + scriptPath, + 
["#!/usr/bin/env node", "process.stdout.write('plain-secret');"].join("\n"), + 0o700, + ); + await fs.symlink(scriptPath, symlinkPath); + + await expect( + resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: symlinkPath, + passEnv: ["PATH"], + jsonOnly: false, + }, + }, + }, + }, + }, + ), + ).rejects.toThrow("must not be a symlink"); + }); + + it("allows symlink command paths when allowSymlinkCommand is enabled", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-exec-link-")); + cleanupRoots.push(root); + const scriptPath = path.join(root, "resolver-target.mjs"); + const symlinkPath = path.join(root, "resolver-link.mjs"); + await writeSecureFile( + scriptPath, + ["#!/usr/bin/env node", "process.stdout.write('plain-secret');"].join("\n"), + 0o700, + ); + await fs.symlink(scriptPath, symlinkPath); + const trustedRoot = await fs.realpath(root); + + const value = await resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: symlinkPath, + passEnv: ["PATH"], + jsonOnly: false, + allowSymlinkCommand: true, + trustedDirs: [trustedRoot], + }, + }, + }, + }, + }, + ); + expect(value).toBe("plain-secret"); + }); + + it("handles Homebrew-style symlinked exec commands with args only when explicitly allowed", async () => { + if (process.platform === "win32") { + return; + } + + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-homebrew-")); + cleanupRoots.push(root); + const binDir = path.join(root, "opt", "homebrew", "bin"); + const cellarDir = path.join(root, "opt", "homebrew", "Cellar", "node", "25.0.0", "bin"); + await fs.mkdir(binDir, { recursive: true }); + await fs.mkdir(cellarDir, { recursive: true }); + 
+ const targetCommand = path.join(cellarDir, "node"); + const symlinkCommand = path.join(binDir, "node"); + await writeSecureFile( + targetCommand, + [ + `#!${process.execPath}`, + "import fs from 'node:fs';", + "const req = JSON.parse(fs.readFileSync(0, 'utf8'));", + "const suffix = process.argv[2] ?? 'missing';", + "const values = Object.fromEntries((req.ids ?? []).map((id) => [id, `${suffix}:${id}`]));", + "process.stdout.write(JSON.stringify({ protocolVersion: 1, values }));", + ].join("\n"), + 0o700, + ); + await fs.symlink(targetCommand, symlinkCommand); + const trustedRoot = await fs.realpath(root); + + await expect( + resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: symlinkCommand, + args: ["brew"], + passEnv: ["PATH"], + }, + }, + }, + }, + }, + ), + ).rejects.toThrow("must not be a symlink"); + + const value = await resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: symlinkCommand, + args: ["brew"], + allowSymlinkCommand: true, + trustedDirs: [trustedRoot], + }, + }, + }, + }, + }, + ); + expect(value).toBe("brew:openai/api-key"); + }); + + it("checks trustedDirs against resolved symlink target", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-exec-link-")); + const outside = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-exec-out-")); + cleanupRoots.push(root); + cleanupRoots.push(outside); + const scriptPath = path.join(outside, "resolver-target.mjs"); + const symlinkPath = path.join(root, "resolver-link.mjs"); + await writeSecureFile( + scriptPath, + ["#!/usr/bin/env node", "process.stdout.write('plain-secret');"].join("\n"), + 0o700, + ); + await fs.symlink(scriptPath, symlinkPath); + + 
await expect( + resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: symlinkPath, + passEnv: ["PATH"], + jsonOnly: false, + allowSymlinkCommand: true, + trustedDirs: [root], + }, + }, + }, + }, + }, + ), + ).rejects.toThrow("outside trustedDirs"); + }); + + it("rejects exec refs when protocolVersion is not 1", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp( + path.join(os.tmpdir(), "openclaw-secrets-resolve-exec-protocol-"), + ); + cleanupRoots.push(root); + const scriptPath = path.join(root, "resolver-protocol.mjs"); + await writeSecureFile( + scriptPath, + [ + "#!/usr/bin/env node", + "process.stdout.write(JSON.stringify({ protocolVersion: 2, values: { 'openai/api-key': 'x' } }));", + ].join("\n"), + 0o700, + ); + + await expect( + resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: scriptPath, + passEnv: ["PATH"], + }, + }, + }, + }, + }, + ), + ).rejects.toThrow("protocolVersion must be 1"); + }); + + it("rejects exec refs when response omits requested id", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-exec-id-")); + cleanupRoots.push(root); + const scriptPath = path.join(root, "resolver-missing-id.mjs"); + await writeSecureFile( + scriptPath, + [ + "#!/usr/bin/env node", + "process.stdout.write(JSON.stringify({ protocolVersion: 1, values: {} }));", + ].join("\n"), + 0o700, + ); + + await expect( + resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: scriptPath, + passEnv: ["PATH"], + }, + }, + }, + }, + }, + ), + ).rejects.toThrow('response 
missing id "openai/api-key"'); + }); + + it("rejects exec refs with invalid JSON when jsonOnly is true", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-exec-json-")); + cleanupRoots.push(root); + const scriptPath = path.join(root, "resolver-invalid-json.mjs"); + await writeSecureFile( + scriptPath, + ["#!/usr/bin/env node", "process.stdout.write('not-json');"].join("\n"), + 0o700, + ); + + await expect( + resolveSecretRefString( + { source: "exec", provider: "execmain", id: "openai/api-key" }, + { + config: { + secrets: { + providers: { + execmain: { + source: "exec", + command: scriptPath, + passEnv: ["PATH"], + jsonOnly: true, + }, + }, + }, + }, + }, + ), + ).rejects.toThrow("returned invalid JSON"); + }); + + it("supports file singleValue mode with id=value", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-single-value-")); + cleanupRoots.push(root); + const filePath = path.join(root, "token.txt"); + await writeSecureFile(filePath, "raw-token-value\n"); + + const value = await resolveSecretRefString( + { source: "file", provider: "rawfile", id: "value" }, + { + config: { + secrets: { + providers: { + rawfile: { + source: "file", + path: filePath, + mode: "singleValue", + }, + }, + }, + }, + }, + ); + expect(value).toBe("raw-token-value"); + }); + + it("times out file provider reads when timeoutMs elapses", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-resolve-timeout-")); + cleanupRoots.push(root); + const filePath = path.join(root, "secrets.json"); + await writeSecureFile( + filePath, + JSON.stringify({ + providers: { + openai: { + apiKey: "sk-file-value", + }, + }, + }), + ); + + const originalReadFile = fs.readFile.bind(fs); + vi.spyOn(fs, 
"readFile").mockImplementation((( + targetPath: Parameters[0], + options?: Parameters[1], + ) => { + if (typeof targetPath === "string" && targetPath === filePath) { + return new Promise(() => {}); + } + return originalReadFile(targetPath, options); + }) as typeof fs.readFile); + + await expect( + resolveSecretRefString( + { source: "file", provider: "filemain", id: "/providers/openai/apiKey" }, + { + config: { + secrets: { + providers: { + filemain: { + source: "file", + path: filePath, + mode: "json", + timeoutMs: 5, + }, + }, + }, + }, + }, + ), + ).rejects.toThrow('File provider "filemain" timed out'); + }); + + it("rejects misconfigured provider source mismatches", async () => { + await expect( + resolveSecretRefValue( + { source: "exec", provider: "default", id: "abc" }, + { + config: { + secrets: { + providers: { + default: { + source: "env", + }, + }, + }, + }, + }, + ), + ).rejects.toThrow('has source "env" but ref requests "exec"'); + }); +}); diff --git a/src/secrets/resolve.ts b/src/secrets/resolve.ts new file mode 100644 index 00000000000..9d81486ac0a --- /dev/null +++ b/src/secrets/resolve.ts @@ -0,0 +1,714 @@ +import { spawn } from "node:child_process"; +import fs from "node:fs/promises"; +import path from "node:path"; +import type { OpenClawConfig } from "../config/config.js"; +import type { + ExecSecretProviderConfig, + FileSecretProviderConfig, + SecretProviderConfig, + SecretRef, + SecretRefSource, +} from "../config/types.secrets.js"; +import { inspectPathPermissions, safeStat } from "../security/audit-fs.js"; +import { isPathInside } from "../security/scan-paths.js"; +import { resolveUserPath } from "../utils.js"; +import { runTasksWithConcurrency } from "../utils/run-with-concurrency.js"; +import { readJsonPointer } from "./json-pointer.js"; +import { + SINGLE_VALUE_FILE_REF_ID, + resolveDefaultSecretProviderAlias, + secretRefKey, +} from "./ref-contract.js"; +import { isNonEmptyString, isRecord, normalizePositiveInt } from "./shared.js"; + 
+const DEFAULT_PROVIDER_CONCURRENCY = 4; +const DEFAULT_MAX_REFS_PER_PROVIDER = 512; +const DEFAULT_MAX_BATCH_BYTES = 256 * 1024; +const DEFAULT_FILE_MAX_BYTES = 1024 * 1024; +const DEFAULT_FILE_TIMEOUT_MS = 5_000; +const DEFAULT_EXEC_TIMEOUT_MS = 5_000; +const DEFAULT_EXEC_NO_OUTPUT_TIMEOUT_MS = 2_000; +const DEFAULT_EXEC_MAX_OUTPUT_BYTES = 1024 * 1024; +const WINDOWS_ABS_PATH_PATTERN = /^[A-Za-z]:[\\/]/; +const WINDOWS_UNC_PATH_PATTERN = /^\\\\[^\\]+\\[^\\]+/; + +export type SecretRefResolveCache = { + resolvedByRefKey?: Map>; + filePayloadByProvider?: Map>; +}; + +type ResolveSecretRefOptions = { + config: OpenClawConfig; + env?: NodeJS.ProcessEnv; + cache?: SecretRefResolveCache; +}; + +type ResolutionLimits = { + maxProviderConcurrency: number; + maxRefsPerProvider: number; + maxBatchBytes: number; +}; + +type ProviderResolutionOutput = Map; + +function isAbsolutePathname(value: string): boolean { + return ( + path.isAbsolute(value) || + WINDOWS_ABS_PATH_PATTERN.test(value) || + WINDOWS_UNC_PATH_PATTERN.test(value) + ); +} + +function resolveResolutionLimits(config: OpenClawConfig): ResolutionLimits { + const resolution = config.secrets?.resolution; + return { + maxProviderConcurrency: normalizePositiveInt( + resolution?.maxProviderConcurrency, + DEFAULT_PROVIDER_CONCURRENCY, + ), + maxRefsPerProvider: normalizePositiveInt( + resolution?.maxRefsPerProvider, + DEFAULT_MAX_REFS_PER_PROVIDER, + ), + maxBatchBytes: normalizePositiveInt(resolution?.maxBatchBytes, DEFAULT_MAX_BATCH_BYTES), + }; +} + +function toProviderKey(source: SecretRefSource, provider: string): string { + return `${source}:${provider}`; +} + +function resolveConfiguredProvider(ref: SecretRef, config: OpenClawConfig): SecretProviderConfig { + const providerConfig = config.secrets?.providers?.[ref.provider]; + if (!providerConfig) { + if (ref.source === "env" && ref.provider === resolveDefaultSecretProviderAlias(config, "env")) { + return { source: "env" }; + } + throw new Error( + `Secret 
provider "${ref.provider}" is not configured (ref: ${ref.source}:${ref.provider}:${ref.id}).`, + ); + } + if (providerConfig.source !== ref.source) { + throw new Error( + `Secret provider "${ref.provider}" has source "${providerConfig.source}" but ref requests "${ref.source}".`, + ); + } + return providerConfig; +} + +async function assertSecurePath(params: { + targetPath: string; + label: string; + trustedDirs?: string[]; + allowInsecurePath?: boolean; + allowReadableByOthers?: boolean; + allowSymlinkPath?: boolean; +}): Promise { + if (!isAbsolutePathname(params.targetPath)) { + throw new Error(`${params.label} must be an absolute path.`); + } + + let effectivePath = params.targetPath; + let stat = await safeStat(effectivePath); + if (!stat.ok) { + throw new Error(`${params.label} is not readable: ${effectivePath}`); + } + if (stat.isDir) { + throw new Error(`${params.label} must be a file: ${effectivePath}`); + } + if (stat.isSymlink) { + if (!params.allowSymlinkPath) { + throw new Error(`${params.label} must not be a symlink: ${effectivePath}`); + } + try { + effectivePath = await fs.realpath(effectivePath); + } catch { + throw new Error(`${params.label} symlink target is not readable: ${params.targetPath}`); + } + if (!isAbsolutePathname(effectivePath)) { + throw new Error(`${params.label} resolved symlink target must be an absolute path.`); + } + stat = await safeStat(effectivePath); + if (!stat.ok) { + throw new Error(`${params.label} is not readable: ${effectivePath}`); + } + if (stat.isDir) { + throw new Error(`${params.label} must be a file: ${effectivePath}`); + } + if (stat.isSymlink) { + throw new Error(`${params.label} symlink target must not be a symlink: ${effectivePath}`); + } + } + + if (params.trustedDirs && params.trustedDirs.length > 0) { + const trusted = params.trustedDirs.map((entry) => resolveUserPath(entry)); + const inTrustedDir = trusted.some((dir) => isPathInside(dir, effectivePath)); + if (!inTrustedDir) { + throw new 
Error(`${params.label} is outside trustedDirs: ${effectivePath}`); + } + } + if (params.allowInsecurePath) { + return effectivePath; + } + + const perms = await inspectPathPermissions(effectivePath); + if (!perms.ok) { + throw new Error(`${params.label} permissions could not be verified: ${effectivePath}`); + } + const writableByOthers = perms.worldWritable || perms.groupWritable; + const readableByOthers = perms.worldReadable || perms.groupReadable; + if (writableByOthers || (!params.allowReadableByOthers && readableByOthers)) { + throw new Error(`${params.label} permissions are too open: ${effectivePath}`); + } + + if (process.platform === "win32" && perms.source === "unknown") { + throw new Error( + `${params.label} ACL verification unavailable on Windows for ${effectivePath}.`, + ); + } + + if (process.platform !== "win32" && typeof process.getuid === "function" && stat.uid != null) { + const uid = process.getuid(); + if (stat.uid !== uid) { + throw new Error( + `${params.label} must be owned by the current user (uid=${uid}): ${effectivePath}`, + ); + } + } + return effectivePath; +} + +async function readFileProviderPayload(params: { + providerName: string; + providerConfig: FileSecretProviderConfig; + cache?: SecretRefResolveCache; +}): Promise { + const cacheKey = params.providerName; + const cache = params.cache; + if (cache?.filePayloadByProvider?.has(cacheKey)) { + return await (cache.filePayloadByProvider.get(cacheKey) as Promise); + } + + const filePath = resolveUserPath(params.providerConfig.path); + const readPromise = (async () => { + const secureFilePath = await assertSecurePath({ + targetPath: filePath, + label: `secrets.providers.${params.providerName}.path`, + }); + const timeoutMs = normalizePositiveInt( + params.providerConfig.timeoutMs, + DEFAULT_FILE_TIMEOUT_MS, + ); + const maxBytes = normalizePositiveInt(params.providerConfig.maxBytes, DEFAULT_FILE_MAX_BYTES); + const abortController = new AbortController(); + const timeoutErrorMessage = 
`File provider "${params.providerName}" timed out after ${timeoutMs}ms.`; + let timeoutHandle: NodeJS.Timeout | null = null; + const timeoutPromise = new Promise((_resolve, reject) => { + timeoutHandle = setTimeout(() => { + abortController.abort(); + reject(new Error(timeoutErrorMessage)); + }, timeoutMs); + }); + try { + const payload = await Promise.race([ + fs.readFile(secureFilePath, { signal: abortController.signal }), + timeoutPromise, + ]); + if (payload.byteLength > maxBytes) { + throw new Error(`File provider "${params.providerName}" exceeded maxBytes (${maxBytes}).`); + } + const text = payload.toString("utf8"); + if (params.providerConfig.mode === "singleValue") { + return text.replace(/\r?\n$/, ""); + } + const parsed = JSON.parse(text) as unknown; + if (!isRecord(parsed)) { + throw new Error(`File provider "${params.providerName}" payload is not a JSON object.`); + } + return parsed; + } catch (error) { + if (error instanceof Error && error.name === "AbortError") { + throw new Error(timeoutErrorMessage, { cause: error }); + } + throw error; + } finally { + if (timeoutHandle) { + clearTimeout(timeoutHandle); + } + } + })(); + + if (cache) { + cache.filePayloadByProvider ??= new Map(); + cache.filePayloadByProvider.set(cacheKey, readPromise); + } + return await readPromise; +} + +async function resolveEnvRefs(params: { + refs: SecretRef[]; + providerName: string; + providerConfig: Extract; + env: NodeJS.ProcessEnv; +}): Promise { + const resolved = new Map(); + const allowlist = params.providerConfig.allowlist + ? new Set(params.providerConfig.allowlist) + : null; + for (const ref of params.refs) { + if (allowlist && !allowlist.has(ref.id)) { + throw new Error( + `Environment variable "${ref.id}" is not allowlisted in secrets.providers.${params.providerName}.allowlist.`, + ); + } + const envValue = params.env[ref.id] ?? 
process.env[ref.id]; + if (!isNonEmptyString(envValue)) { + throw new Error(`Environment variable "${ref.id}" is missing or empty.`); + } + resolved.set(ref.id, envValue); + } + return resolved; +} + +async function resolveFileRefs(params: { + refs: SecretRef[]; + providerName: string; + providerConfig: FileSecretProviderConfig; + cache?: SecretRefResolveCache; +}): Promise { + const payload = await readFileProviderPayload({ + providerName: params.providerName, + providerConfig: params.providerConfig, + cache: params.cache, + }); + const mode = params.providerConfig.mode ?? "json"; + const resolved = new Map(); + if (mode === "singleValue") { + for (const ref of params.refs) { + if (ref.id !== SINGLE_VALUE_FILE_REF_ID) { + throw new Error( + `singleValue file provider "${params.providerName}" expects ref id "${SINGLE_VALUE_FILE_REF_ID}".`, + ); + } + resolved.set(ref.id, payload); + } + return resolved; + } + for (const ref of params.refs) { + resolved.set(ref.id, readJsonPointer(payload, ref.id, { onMissing: "throw" })); + } + return resolved; +} + +type ExecRunResult = { + stdout: string; + stderr: string; + code: number | null; + signal: NodeJS.Signals | null; + termination: "exit" | "timeout" | "no-output-timeout"; +}; + +async function runExecResolver(params: { + command: string; + args: string[]; + cwd: string; + env: NodeJS.ProcessEnv; + input: string; + timeoutMs: number; + noOutputTimeoutMs: number; + maxOutputBytes: number; +}): Promise { + return await new Promise((resolve, reject) => { + const child = spawn(params.command, params.args, { + cwd: params.cwd, + env: params.env, + stdio: ["pipe", "pipe", "pipe"], + shell: false, + windowsHide: true, + }); + + let settled = false; + let stdout = ""; + let stderr = ""; + let timedOut = false; + let noOutputTimedOut = false; + let outputBytes = 0; + let noOutputTimer: NodeJS.Timeout | null = null; + const timeoutTimer = setTimeout(() => { + timedOut = true; + child.kill("SIGKILL"); + }, params.timeoutMs); + + 
const clearTimers = () => { + clearTimeout(timeoutTimer); + if (noOutputTimer) { + clearTimeout(noOutputTimer); + noOutputTimer = null; + } + }; + + const armNoOutputTimer = () => { + if (noOutputTimer) { + clearTimeout(noOutputTimer); + } + noOutputTimer = setTimeout(() => { + noOutputTimedOut = true; + child.kill("SIGKILL"); + }, params.noOutputTimeoutMs); + }; + + const append = (chunk: Buffer | string, target: "stdout" | "stderr") => { + const text = typeof chunk === "string" ? chunk : chunk.toString("utf8"); + outputBytes += Buffer.byteLength(text, "utf8"); + if (outputBytes > params.maxOutputBytes) { + child.kill("SIGKILL"); + if (!settled) { + settled = true; + clearTimers(); + reject( + new Error(`Exec provider output exceeded maxOutputBytes (${params.maxOutputBytes}).`), + ); + } + return; + } + if (target === "stdout") { + stdout += text; + } else { + stderr += text; + } + armNoOutputTimer(); + }; + + armNoOutputTimer(); + child.on("error", (error) => { + if (settled) { + return; + } + settled = true; + clearTimers(); + reject(error); + }); + child.stdout?.on("data", (chunk) => append(chunk, "stdout")); + child.stderr?.on("data", (chunk) => append(chunk, "stderr")); + child.on("close", (code, signal) => { + if (settled) { + return; + } + settled = true; + clearTimers(); + resolve({ + stdout, + stderr, + code, + signal, + termination: noOutputTimedOut ? "no-output-timeout" : timedOut ? 
"timeout" : "exit", + }); + }); + + child.stdin?.end(params.input); + }); +} + +function parseExecValues(params: { + providerName: string; + ids: string[]; + stdout: string; + jsonOnly: boolean; +}): Record { + const trimmed = params.stdout.trim(); + if (!trimmed) { + throw new Error(`Exec provider "${params.providerName}" returned empty stdout.`); + } + + let parsed: unknown; + if (!params.jsonOnly && params.ids.length === 1) { + try { + parsed = JSON.parse(trimmed) as unknown; + } catch { + return { [params.ids[0]]: trimmed }; + } + } else { + try { + parsed = JSON.parse(trimmed) as unknown; + } catch { + throw new Error(`Exec provider "${params.providerName}" returned invalid JSON.`); + } + } + + if (!isRecord(parsed)) { + if (!params.jsonOnly && params.ids.length === 1 && typeof parsed === "string") { + return { [params.ids[0]]: parsed }; + } + throw new Error(`Exec provider "${params.providerName}" response must be an object.`); + } + if (parsed.protocolVersion !== 1) { + throw new Error(`Exec provider "${params.providerName}" protocolVersion must be 1.`); + } + const responseValues = parsed.values; + if (!isRecord(responseValues)) { + throw new Error(`Exec provider "${params.providerName}" response missing "values".`); + } + const responseErrors = isRecord(parsed.errors) ? 
parsed.errors : null; + const out: Record = {}; + for (const id of params.ids) { + if (responseErrors && id in responseErrors) { + const entry = responseErrors[id]; + if (isRecord(entry) && typeof entry.message === "string" && entry.message.trim()) { + throw new Error( + `Exec provider "${params.providerName}" failed for id "${id}" (${entry.message.trim()}).`, + ); + } + throw new Error(`Exec provider "${params.providerName}" failed for id "${id}".`); + } + if (!(id in responseValues)) { + throw new Error(`Exec provider "${params.providerName}" response missing id "${id}".`); + } + out[id] = responseValues[id]; + } + return out; +} + +async function resolveExecRefs(params: { + refs: SecretRef[]; + providerName: string; + providerConfig: ExecSecretProviderConfig; + env: NodeJS.ProcessEnv; + limits: ResolutionLimits; +}): Promise { + const ids = [...new Set(params.refs.map((ref) => ref.id))]; + if (ids.length > params.limits.maxRefsPerProvider) { + throw new Error( + `Exec provider "${params.providerName}" exceeded maxRefsPerProvider (${params.limits.maxRefsPerProvider}).`, + ); + } + + const commandPath = resolveUserPath(params.providerConfig.command); + const secureCommandPath = await assertSecurePath({ + targetPath: commandPath, + label: `secrets.providers.${params.providerName}.command`, + trustedDirs: params.providerConfig.trustedDirs, + allowInsecurePath: params.providerConfig.allowInsecurePath, + allowReadableByOthers: true, + allowSymlinkPath: params.providerConfig.allowSymlinkCommand, + }); + + const requestPayload = { + protocolVersion: 1, + provider: params.providerName, + ids, + }; + const input = JSON.stringify(requestPayload); + if (Buffer.byteLength(input, "utf8") > params.limits.maxBatchBytes) { + throw new Error( + `Exec provider "${params.providerName}" request exceeded maxBatchBytes (${params.limits.maxBatchBytes}).`, + ); + } + + const childEnv: NodeJS.ProcessEnv = {}; + for (const key of params.providerConfig.passEnv ?? 
[]) { + const value = params.env[key] ?? process.env[key]; + if (value !== undefined) { + childEnv[key] = value; + } + } + for (const [key, value] of Object.entries(params.providerConfig.env ?? {})) { + childEnv[key] = value; + } + + const timeoutMs = normalizePositiveInt(params.providerConfig.timeoutMs, DEFAULT_EXEC_TIMEOUT_MS); + const noOutputTimeoutMs = normalizePositiveInt( + params.providerConfig.noOutputTimeoutMs, + DEFAULT_EXEC_NO_OUTPUT_TIMEOUT_MS, + ); + const maxOutputBytes = normalizePositiveInt( + params.providerConfig.maxOutputBytes, + DEFAULT_EXEC_MAX_OUTPUT_BYTES, + ); + const jsonOnly = params.providerConfig.jsonOnly ?? true; + + const result = await runExecResolver({ + command: secureCommandPath, + args: params.providerConfig.args ?? [], + cwd: path.dirname(secureCommandPath), + env: childEnv, + input, + timeoutMs, + noOutputTimeoutMs, + maxOutputBytes, + }); + if (result.termination === "timeout") { + throw new Error(`Exec provider "${params.providerName}" timed out after ${timeoutMs}ms.`); + } + if (result.termination === "no-output-timeout") { + throw new Error( + `Exec provider "${params.providerName}" produced no output for ${noOutputTimeoutMs}ms.`, + ); + } + if (result.code !== 0) { + throw new Error( + `Exec provider "${params.providerName}" exited with code ${String(result.code)}.`, + ); + } + + const values = parseExecValues({ + providerName: params.providerName, + ids, + stdout: result.stdout, + jsonOnly, + }); + const resolved = new Map(); + for (const id of ids) { + resolved.set(id, values[id]); + } + return resolved; +} + +async function resolveProviderRefs(params: { + refs: SecretRef[]; + source: SecretRefSource; + providerName: string; + providerConfig: SecretProviderConfig; + options: ResolveSecretRefOptions; + limits: ResolutionLimits; +}): Promise { + if (params.providerConfig.source === "env") { + return await resolveEnvRefs({ + refs: params.refs, + providerName: params.providerName, + providerConfig: params.providerConfig, + 
env: params.options.env ?? process.env, + }); + } + if (params.providerConfig.source === "file") { + return await resolveFileRefs({ + refs: params.refs, + providerName: params.providerName, + providerConfig: params.providerConfig, + cache: params.options.cache, + }); + } + if (params.providerConfig.source === "exec") { + return await resolveExecRefs({ + refs: params.refs, + providerName: params.providerName, + providerConfig: params.providerConfig, + env: params.options.env ?? process.env, + limits: params.limits, + }); + } + throw new Error( + `Unsupported secret provider source "${String((params.providerConfig as { source?: unknown }).source)}".`, + ); +} + +export async function resolveSecretRefValues( + refs: SecretRef[], + options: ResolveSecretRefOptions, +): Promise> { + if (refs.length === 0) { + return new Map(); + } + const limits = resolveResolutionLimits(options.config); + const uniqueRefs = new Map(); + for (const ref of refs) { + const id = ref.id.trim(); + if (!id) { + throw new Error("Secret reference id is empty."); + } + uniqueRefs.set(secretRefKey(ref), { ...ref, id }); + } + + const grouped = new Map< + string, + { source: SecretRefSource; providerName: string; refs: SecretRef[] } + >(); + for (const ref of uniqueRefs.values()) { + const key = toProviderKey(ref.source, ref.provider); + const existing = grouped.get(key); + if (existing) { + existing.refs.push(ref); + continue; + } + grouped.set(key, { source: ref.source, providerName: ref.provider, refs: [ref] }); + } + + const tasks = [...grouped.values()].map( + (group) => async (): Promise<{ group: typeof group; values: ProviderResolutionOutput }> => { + if (group.refs.length > limits.maxRefsPerProvider) { + throw new Error( + `Secret provider "${group.providerName}" exceeded maxRefsPerProvider (${limits.maxRefsPerProvider}).`, + ); + } + const providerConfig = resolveConfiguredProvider(group.refs[0], options.config); + const values = await resolveProviderRefs({ + refs: group.refs, + source: 
group.source, + providerName: group.providerName, + providerConfig, + options, + limits, + }); + return { group, values }; + }, + ); + + const taskResults = await runTasksWithConcurrency({ + tasks, + limit: limits.maxProviderConcurrency, + errorMode: "stop", + }); + if (taskResults.hasError) { + throw taskResults.firstError; + } + + const resolved = new Map(); + for (const result of taskResults.results) { + for (const ref of result.group.refs) { + if (!result.values.has(ref.id)) { + throw new Error( + `Secret provider "${result.group.providerName}" did not return id "${ref.id}".`, + ); + } + resolved.set(secretRefKey(ref), result.values.get(ref.id)); + } + } + return resolved; +} + +export async function resolveSecretRefValue( + ref: SecretRef, + options: ResolveSecretRefOptions, +): Promise { + const cache = options.cache; + const key = secretRefKey(ref); + if (cache?.resolvedByRefKey?.has(key)) { + return await (cache.resolvedByRefKey.get(key) as Promise); + } + + const promise = (async () => { + const resolved = await resolveSecretRefValues([ref], options); + if (!resolved.has(key)) { + throw new Error(`Secret reference "${key}" resolved to no value.`); + } + return resolved.get(key); + })(); + + if (cache) { + cache.resolvedByRefKey ??= new Map(); + cache.resolvedByRefKey.set(key, promise); + } + return await promise; +} + +export async function resolveSecretRefString( + ref: SecretRef, + options: ResolveSecretRefOptions, +): Promise { + const resolved = await resolveSecretRefValue(ref, options); + if (!isNonEmptyString(resolved)) { + throw new Error( + `Secret reference "${ref.source}:${ref.provider}:${ref.id}" resolved to a non-string or empty value.`, + ); + } + return resolved; +} diff --git a/src/secrets/runtime.test.ts b/src/secrets/runtime.test.ts new file mode 100644 index 00000000000..00d11c7392a --- /dev/null +++ b/src/secrets/runtime.test.ts @@ -0,0 +1,270 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from 
"node:path"; +import { afterEach, describe, expect, it } from "vitest"; +import { ensureAuthProfileStore } from "../agents/auth-profiles.js"; +import { loadConfig, type OpenClawConfig } from "../config/config.js"; +import { + activateSecretsRuntimeSnapshot, + clearSecretsRuntimeSnapshot, + prepareSecretsRuntimeSnapshot, +} from "./runtime.js"; + +describe("secrets runtime snapshot", () => { + afterEach(() => { + clearSecretsRuntimeSnapshot(); + }); + + it("resolves env refs for config and auth profiles", async () => { + const config: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + models: [], + }, + }, + }, + skills: { + entries: { + "review-pr": { + enabled: true, + apiKey: { source: "env", provider: "default", id: "REVIEW_SKILL_API_KEY" }, + }, + }, + }, + }; + + const snapshot = await prepareSecretsRuntimeSnapshot({ + config, + env: { + OPENAI_API_KEY: "sk-env-openai", + GITHUB_TOKEN: "ghp-env-token", + REVIEW_SKILL_API_KEY: "sk-skill-ref", + }, + agentDirs: ["/tmp/openclaw-agent-main"], + loadAuthStore: () => ({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + key: "old-openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + "github-copilot:default": { + type: "token", + provider: "github-copilot", + token: "old-gh", + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, + }, + "openai:inline": { + type: "api_key", + provider: "openai", + key: "${OPENAI_API_KEY}", + }, + }, + }), + }); + + expect(snapshot.config.models?.providers?.openai?.apiKey).toBe("sk-env-openai"); + expect(snapshot.config.skills?.entries?.["review-pr"]?.apiKey).toBe("sk-skill-ref"); + expect(snapshot.warnings).toHaveLength(2); + expect(snapshot.authStores[0]?.store.profiles["openai:default"]).toMatchObject({ + type: "api_key", + key: "sk-env-openai", + }); + 
expect(snapshot.authStores[0]?.store.profiles["github-copilot:default"]).toMatchObject({ + type: "token", + token: "ghp-env-token", + }); + expect(snapshot.authStores[0]?.store.profiles["openai:inline"]).toMatchObject({ + type: "api_key", + key: "sk-env-openai", + }); + }); + + it("resolves file refs via configured file provider", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-file-provider-")); + const secretsPath = path.join(root, "secrets.json"); + try { + await fs.writeFile( + secretsPath, + JSON.stringify( + { + providers: { + openai: { + apiKey: "sk-from-file-provider", + }, + }, + }, + null, + 2, + ), + "utf8", + ); + await fs.chmod(secretsPath, 0o600); + + const config: OpenClawConfig = { + secrets: { + providers: { + default: { + source: "file", + path: secretsPath, + mode: "json", + }, + }, + defaults: { + file: "default", + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "file", provider: "default", id: "/providers/openai/apiKey" }, + models: [], + }, + }, + }, + }; + + const snapshot = await prepareSecretsRuntimeSnapshot({ + config, + agentDirs: ["/tmp/openclaw-agent-main"], + loadAuthStore: () => ({ version: 1, profiles: {} }), + }); + + expect(snapshot.config.models?.providers?.openai?.apiKey).toBe("sk-from-file-provider"); + } finally { + await fs.rm(root, { recursive: true, force: true }); + } + }); + + it("fails when file provider payload is not a JSON object", async () => { + if (process.platform === "win32") { + return; + } + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-file-provider-bad-")); + const secretsPath = path.join(root, "secrets.json"); + try { + await fs.writeFile(secretsPath, JSON.stringify(["not-an-object"]), "utf8"); + await fs.chmod(secretsPath, 0o600); + + await expect( + prepareSecretsRuntimeSnapshot({ + config: { + secrets: { + providers: { + default: { + 
source: "file", + path: secretsPath, + mode: "json", + }, + }, + }, + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "file", provider: "default", id: "/providers/openai/apiKey" }, + models: [], + }, + }, + }, + }, + agentDirs: ["/tmp/openclaw-agent-main"], + loadAuthStore: () => ({ version: 1, profiles: {} }), + }), + ).rejects.toThrow("payload is not a JSON object"); + } finally { + await fs.rm(root, { recursive: true, force: true }); + } + }); + + it("activates runtime snapshots for loadConfig and ensureAuthProfileStore", async () => { + const prepared = await prepareSecretsRuntimeSnapshot({ + config: { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + models: [], + }, + }, + }, + }, + env: { OPENAI_API_KEY: "sk-runtime" }, + agentDirs: ["/tmp/openclaw-agent-main"], + loadAuthStore: () => ({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + }, + }), + }); + + activateSecretsRuntimeSnapshot(prepared); + + expect(loadConfig().models?.providers?.openai?.apiKey).toBe("sk-runtime"); + const store = ensureAuthProfileStore("/tmp/openclaw-agent-main"); + expect(store.profiles["openai:default"]).toMatchObject({ + type: "api_key", + key: "sk-runtime", + }); + }); + + it("does not write inherited auth stores during runtime secret activation", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-secrets-runtime-")); + const stateDir = path.join(root, ".openclaw"); + const mainAgentDir = path.join(stateDir, "agents", "main", "agent"); + const workerStorePath = path.join(stateDir, "agents", "worker", "agent", "auth-profiles.json"); + const prevStateDir = process.env.OPENCLAW_STATE_DIR; + + try { + await fs.mkdir(mainAgentDir, { recursive: true }); + await fs.writeFile( + 
path.join(mainAgentDir, "auth-profiles.json"), + JSON.stringify({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + }, + }), + "utf8", + ); + process.env.OPENCLAW_STATE_DIR = stateDir; + + await prepareSecretsRuntimeSnapshot({ + config: { + agents: { + list: [{ id: "worker" }], + }, + }, + env: { OPENAI_API_KEY: "sk-runtime-worker" }, + }); + + await expect(fs.access(workerStorePath)).rejects.toMatchObject({ code: "ENOENT" }); + } finally { + if (prevStateDir === undefined) { + delete process.env.OPENCLAW_STATE_DIR; + } else { + process.env.OPENCLAW_STATE_DIR = prevStateDir; + } + await fs.rm(root, { recursive: true, force: true }); + } + }); +}); diff --git a/src/secrets/runtime.ts b/src/secrets/runtime.ts new file mode 100644 index 00000000000..cb79fbc355c --- /dev/null +++ b/src/secrets/runtime.ts @@ -0,0 +1,426 @@ +import { resolveOpenClawAgentDir } from "../agents/agent-paths.js"; +import { listAgentIds, resolveAgentDir } from "../agents/agent-scope.js"; +import type { AuthProfileCredential, AuthProfileStore } from "../agents/auth-profiles.js"; +import { + clearRuntimeAuthProfileStoreSnapshots, + loadAuthProfileStoreForSecretsRuntime, + replaceRuntimeAuthProfileStoreSnapshots, +} from "../agents/auth-profiles.js"; +import { + clearRuntimeConfigSnapshot, + setRuntimeConfigSnapshot, + type OpenClawConfig, +} from "../config/config.js"; +import { coerceSecretRef, type SecretRef } from "../config/types.secrets.js"; +import { resolveUserPath } from "../utils.js"; +import { secretRefKey } from "./ref-contract.js"; +import { resolveSecretRefValues, type SecretRefResolveCache } from "./resolve.js"; +import { isNonEmptyString, isRecord } from "./shared.js"; + +type SecretResolverWarningCode = "SECRETS_REF_OVERRIDES_PLAINTEXT"; + +export type SecretResolverWarning = { + code: SecretResolverWarningCode; + path: string; + message: string; +}; + +export 
type PreparedSecretsRuntimeSnapshot = { + sourceConfig: OpenClawConfig; + config: OpenClawConfig; + authStores: Array<{ agentDir: string; store: AuthProfileStore }>; + warnings: SecretResolverWarning[]; +}; + +type ProviderLike = { + apiKey?: unknown; +}; + +type SkillEntryLike = { + apiKey?: unknown; +}; + +type GoogleChatAccountLike = { + serviceAccount?: unknown; + serviceAccountRef?: unknown; + accounts?: Record; +}; + +type ApiKeyCredentialLike = AuthProfileCredential & { + type: "api_key"; + key?: string; + keyRef?: unknown; +}; + +type TokenCredentialLike = AuthProfileCredential & { + type: "token"; + token?: string; + tokenRef?: unknown; +}; + +type SecretAssignment = { + ref: SecretRef; + path: string; + expected: "string" | "string-or-object"; + apply: (value: unknown) => void; +}; + +type ResolverContext = { + sourceConfig: OpenClawConfig; + env: NodeJS.ProcessEnv; + cache: SecretRefResolveCache; + warnings: SecretResolverWarning[]; + assignments: SecretAssignment[]; +}; + +type SecretDefaults = NonNullable["defaults"]; + +let activeSnapshot: PreparedSecretsRuntimeSnapshot | null = null; + +function cloneSnapshot(snapshot: PreparedSecretsRuntimeSnapshot): PreparedSecretsRuntimeSnapshot { + return { + sourceConfig: structuredClone(snapshot.sourceConfig), + config: structuredClone(snapshot.config), + authStores: snapshot.authStores.map((entry) => ({ + agentDir: entry.agentDir, + store: structuredClone(entry.store), + })), + warnings: snapshot.warnings.map((warning) => ({ ...warning })), + }; +} + +function pushAssignment(context: ResolverContext, assignment: SecretAssignment): void { + context.assignments.push(assignment); +} + +function collectModelProviderAssignments(params: { + providers: Record; + defaults: SecretDefaults | undefined; + context: ResolverContext; +}): void { + for (const [providerId, provider] of Object.entries(params.providers)) { + const ref = coerceSecretRef(provider.apiKey, params.defaults); + if (!ref) { + continue; + } + 
pushAssignment(params.context, { + ref, + path: `models.providers.${providerId}.apiKey`, + expected: "string", + apply: (value) => { + provider.apiKey = value; + }, + }); + } +} + +function collectSkillAssignments(params: { + entries: Record; + defaults: SecretDefaults | undefined; + context: ResolverContext; +}): void { + for (const [skillKey, entry] of Object.entries(params.entries)) { + const ref = coerceSecretRef(entry.apiKey, params.defaults); + if (!ref) { + continue; + } + pushAssignment(params.context, { + ref, + path: `skills.entries.${skillKey}.apiKey`, + expected: "string", + apply: (value) => { + entry.apiKey = value; + }, + }); + } +} + +function collectGoogleChatAccountAssignment(params: { + target: GoogleChatAccountLike; + path: string; + defaults: SecretDefaults | undefined; + context: ResolverContext; +}): void { + const explicitRef = coerceSecretRef(params.target.serviceAccountRef, params.defaults); + const inlineRef = coerceSecretRef(params.target.serviceAccount, params.defaults); + const ref = explicitRef ?? 
inlineRef; + if (!ref) { + return; + } + if ( + explicitRef && + params.target.serviceAccount !== undefined && + !coerceSecretRef(params.target.serviceAccount, params.defaults) + ) { + params.context.warnings.push({ + code: "SECRETS_REF_OVERRIDES_PLAINTEXT", + path: params.path, + message: `${params.path}: serviceAccountRef is set; runtime will ignore plaintext serviceAccount.`, + }); + } + pushAssignment(params.context, { + ref, + path: `${params.path}.serviceAccount`, + expected: "string-or-object", + apply: (value) => { + params.target.serviceAccount = value; + }, + }); +} + +function collectGoogleChatAssignments(params: { + googleChat: GoogleChatAccountLike; + defaults: SecretDefaults | undefined; + context: ResolverContext; +}): void { + collectGoogleChatAccountAssignment({ + target: params.googleChat, + path: "channels.googlechat", + defaults: params.defaults, + context: params.context, + }); + if (!isRecord(params.googleChat.accounts)) { + return; + } + for (const [accountId, account] of Object.entries(params.googleChat.accounts)) { + if (!isRecord(account)) { + continue; + } + collectGoogleChatAccountAssignment({ + target: account as GoogleChatAccountLike, + path: `channels.googlechat.accounts.${accountId}`, + defaults: params.defaults, + context: params.context, + }); + } +} + +function collectConfigAssignments(params: { + config: OpenClawConfig; + context: ResolverContext; +}): void { + const defaults = params.context.sourceConfig.secrets?.defaults; + const providers = params.config.models?.providers as Record | undefined; + if (providers) { + collectModelProviderAssignments({ + providers, + defaults, + context: params.context, + }); + } + + const skillEntries = params.config.skills?.entries as Record | undefined; + if (skillEntries) { + collectSkillAssignments({ + entries: skillEntries, + defaults, + context: params.context, + }); + } + + const googleChat = params.config.channels?.googlechat as GoogleChatAccountLike | undefined; + if (googleChat) { + 
collectGoogleChatAssignments({ + googleChat, + defaults, + context: params.context, + }); + } +} + +function collectApiKeyProfileAssignment(params: { + profile: ApiKeyCredentialLike; + profileId: string; + agentDir: string; + defaults: SecretDefaults | undefined; + context: ResolverContext; +}): void { + const keyRef = coerceSecretRef(params.profile.keyRef, params.defaults); + const inlineKeyRef = keyRef ? null : coerceSecretRef(params.profile.key, params.defaults); + const resolvedKeyRef = keyRef ?? inlineKeyRef; + if (!resolvedKeyRef) { + return; + } + if (keyRef && isNonEmptyString(params.profile.key)) { + params.context.warnings.push({ + code: "SECRETS_REF_OVERRIDES_PLAINTEXT", + path: `${params.agentDir}.auth-profiles.${params.profileId}.key`, + message: `auth-profiles ${params.profileId}: keyRef is set; runtime will ignore plaintext key.`, + }); + } + pushAssignment(params.context, { + ref: resolvedKeyRef, + path: `${params.agentDir}.auth-profiles.${params.profileId}.key`, + expected: "string", + apply: (value) => { + params.profile.key = String(value); + }, + }); +} + +function collectTokenProfileAssignment(params: { + profile: TokenCredentialLike; + profileId: string; + agentDir: string; + defaults: SecretDefaults | undefined; + context: ResolverContext; +}): void { + const tokenRef = coerceSecretRef(params.profile.tokenRef, params.defaults); + const inlineTokenRef = tokenRef ? null : coerceSecretRef(params.profile.token, params.defaults); + const resolvedTokenRef = tokenRef ?? 
inlineTokenRef; + if (!resolvedTokenRef) { + return; + } + if (tokenRef && isNonEmptyString(params.profile.token)) { + params.context.warnings.push({ + code: "SECRETS_REF_OVERRIDES_PLAINTEXT", + path: `${params.agentDir}.auth-profiles.${params.profileId}.token`, + message: `auth-profiles ${params.profileId}: tokenRef is set; runtime will ignore plaintext token.`, + }); + } + pushAssignment(params.context, { + ref: resolvedTokenRef, + path: `${params.agentDir}.auth-profiles.${params.profileId}.token`, + expected: "string", + apply: (value) => { + params.profile.token = String(value); + }, + }); +} + +function collectAuthStoreAssignments(params: { + store: AuthProfileStore; + context: ResolverContext; + agentDir: string; +}): void { + const defaults = params.context.sourceConfig.secrets?.defaults; + for (const [profileId, profile] of Object.entries(params.store.profiles)) { + if (profile.type === "api_key") { + collectApiKeyProfileAssignment({ + profile: profile as ApiKeyCredentialLike, + profileId, + agentDir: params.agentDir, + defaults, + context: params.context, + }); + continue; + } + if (profile.type === "token") { + collectTokenProfileAssignment({ + profile: profile as TokenCredentialLike, + profileId, + agentDir: params.agentDir, + defaults, + context: params.context, + }); + } + } +} + +function applyAssignments(params: { + assignments: SecretAssignment[]; + resolved: Map; +}): void { + for (const assignment of params.assignments) { + const key = secretRefKey(assignment.ref); + if (!params.resolved.has(key)) { + throw new Error(`Secret reference "${key}" resolved to no value.`); + } + const value = params.resolved.get(key); + if (assignment.expected === "string") { + if (!isNonEmptyString(value)) { + throw new Error(`${assignment.path} resolved to a non-string or empty value.`); + } + assignment.apply(value); + continue; + } + if (!(isNonEmptyString(value) || isRecord(value))) { + throw new Error(`${assignment.path} resolved to an unsupported value type.`); 
+ } + assignment.apply(value); + } +} + +function collectCandidateAgentDirs(config: OpenClawConfig): string[] { + const dirs = new Set(); + dirs.add(resolveUserPath(resolveOpenClawAgentDir())); + for (const agentId of listAgentIds(config)) { + dirs.add(resolveUserPath(resolveAgentDir(config, agentId))); + } + return [...dirs]; +} + +export async function prepareSecretsRuntimeSnapshot(params: { + config: OpenClawConfig; + env?: NodeJS.ProcessEnv; + agentDirs?: string[]; + loadAuthStore?: (agentDir?: string) => AuthProfileStore; +}): Promise { + const sourceConfig = structuredClone(params.config); + const resolvedConfig = structuredClone(params.config); + const context: ResolverContext = { + sourceConfig, + env: params.env ?? process.env, + cache: {}, + warnings: [], + assignments: [], + }; + + collectConfigAssignments({ + config: resolvedConfig, + context, + }); + + const loadAuthStore = params.loadAuthStore ?? loadAuthProfileStoreForSecretsRuntime; + const candidateDirs = params.agentDirs?.length + ? 
[...new Set(params.agentDirs.map((entry) => resolveUserPath(entry)))] + : collectCandidateAgentDirs(resolvedConfig); + + const authStores: Array<{ agentDir: string; store: AuthProfileStore }> = []; + for (const agentDir of candidateDirs) { + const store = structuredClone(loadAuthStore(agentDir)); + collectAuthStoreAssignments({ + store, + context, + agentDir, + }); + authStores.push({ agentDir, store }); + } + + if (context.assignments.length > 0) { + const refs = context.assignments.map((assignment) => assignment.ref); + const resolved = await resolveSecretRefValues(refs, { + config: sourceConfig, + env: context.env, + cache: context.cache, + }); + applyAssignments({ + assignments: context.assignments, + resolved, + }); + } + + return { + sourceConfig, + config: resolvedConfig, + authStores, + warnings: context.warnings, + }; +} + +export function activateSecretsRuntimeSnapshot(snapshot: PreparedSecretsRuntimeSnapshot): void { + const next = cloneSnapshot(snapshot); + setRuntimeConfigSnapshot(next.config, next.sourceConfig); + replaceRuntimeAuthProfileStoreSnapshots(next.authStores); + activeSnapshot = next; +} + +export function getActiveSecretsRuntimeSnapshot(): PreparedSecretsRuntimeSnapshot | null { + return activeSnapshot ? 
cloneSnapshot(activeSnapshot) : null; +} + +export function clearSecretsRuntimeSnapshot(): void { + activeSnapshot = null; + clearRuntimeConfigSnapshot(); + clearRuntimeAuthProfileStoreSnapshots(); +} diff --git a/src/secrets/shared.ts b/src/secrets/shared.ts new file mode 100644 index 00000000000..d576ae1cdba --- /dev/null +++ b/src/secrets/shared.ts @@ -0,0 +1,42 @@ +import fs from "node:fs"; +import path from "node:path"; + +export function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null && !Array.isArray(value); +} + +export function isNonEmptyString(value: unknown): value is string { + return typeof value === "string" && value.trim().length > 0; +} + +export function normalizePositiveInt(value: unknown, fallback: number): number { + if (typeof value === "number" && Number.isFinite(value)) { + return Math.max(1, Math.floor(value)); + } + return Math.max(1, Math.floor(fallback)); +} + +export function ensureDirForFile(filePath: string): void { + fs.mkdirSync(path.dirname(filePath), { recursive: true, mode: 0o700 }); +} + +export function writeJsonFileSecure(pathname: string, value: unknown): void { + ensureDirForFile(pathname); + fs.writeFileSync(pathname, `${JSON.stringify(value, null, 2)}\n`, "utf8"); + fs.chmodSync(pathname, 0o600); +} + +export function readTextFileIfExists(pathname: string): string | null { + if (!fs.existsSync(pathname)) { + return null; + } + return fs.readFileSync(pathname, "utf8"); +} + +export function writeTextFileAtomic(pathname: string, value: string, mode = 0o600): void { + ensureDirForFile(pathname); + const tempPath = `${pathname}.tmp-${process.pid}-${Date.now()}`; + fs.writeFileSync(tempPath, value, "utf8"); + fs.chmodSync(tempPath, mode); + fs.renameSync(tempPath, pathname); +} diff --git a/src/security/audit-channel.ts b/src/security/audit-channel.ts index dcf344891cf..551437ffdce 100644 --- a/src/security/audit-channel.ts +++ b/src/security/audit-channel.ts @@ -115,6 +115,7 @@ 
export async function collectChannelSecurityFindings(params: { const warnDmPolicy = async (input: { label: string; provider: ChannelId; + accountId: string; dmPolicy: string; allowFrom?: Array | null; policyPath?: string; @@ -124,6 +125,7 @@ export async function collectChannelSecurityFindings(params: { const policyPath = input.policyPath ?? `${input.allowFromPath}policy`; const { hasWildcard, isMultiUserDm } = await resolveDmAllowState({ provider: input.provider, + accountId: input.accountId, allowFrom: input.allowFrom, normalizeEntry: input.normalizeEntry, }); @@ -224,7 +226,11 @@ export async function collectChannelSecurityFindings(params: { (account as { config?: Record } | null)?.config ?? ({} as Record); const dangerousNameMatchingEnabled = isDangerousNameMatchingEnabled(discordCfg); - const storeAllowFrom = await readChannelAllowFromStore("discord").catch(() => []); + const storeAllowFrom = await readChannelAllowFromStore( + "discord", + process.env, + accountId, + ).catch(() => []); const discordNameBasedAllowEntries = new Set(); const discordPathPrefix = orderedAccountIds.length > 1 || hasExplicitAccountPath @@ -427,7 +433,11 @@ export async function collectChannelSecurityFindings(params: { : Array.isArray(legacyAllowFromRaw) ? legacyAllowFromRaw : []; - const storeAllowFrom = await readChannelAllowFromStore("slack").catch(() => []); + const storeAllowFrom = await readChannelAllowFromStore( + "slack", + process.env, + accountId, + ).catch(() => []); const ownerAllowFromConfigured = normalizeAllowFromList([...allowFrom, ...storeAllowFrom]).length > 0; const channels = (slackCfg.channels as Record | undefined) ?? {}; @@ -462,6 +472,7 @@ export async function collectChannelSecurityFindings(params: { await warnDmPolicy({ label: plugin.meta.label ?? 
plugin.id, provider: plugin.id, + accountId, dmPolicy: dmPolicy.policy, allowFrom: dmPolicy.allowFrom, policyPath: dmPolicy.policyPath, @@ -513,7 +524,11 @@ export async function collectChannelSecurityFindings(params: { continue; } - const storeAllowFrom = await readChannelAllowFromStore("telegram").catch(() => []); + const storeAllowFrom = await readChannelAllowFromStore( + "telegram", + process.env, + accountId, + ).catch(() => []); const storeHasWildcard = storeAllowFrom.some((v) => String(v).trim() === "*"); const invalidTelegramAllowFromEntries = new Set(); for (const entry of storeAllowFrom) { diff --git a/src/security/audit-extra.sync.ts b/src/security/audit-extra.sync.ts index daa60aed73f..a3f81d40870 100644 --- a/src/security/audit-extra.sync.ts +++ b/src/security/audit-extra.sync.ts @@ -955,11 +955,11 @@ export function collectNodeDenyCommandPatternFindings(cfg: OpenClawConfig): Secu severity: "warn", title: "Some gateway.nodes.denyCommands entries are ineffective", detail: - "gateway.nodes.denyCommands uses exact command-name matching only.\n" + + "gateway.nodes.denyCommands uses exact node command-name matching only (for example `system.run`), not shell-text filtering inside a command payload.\n" + detailParts.map((entry) => `- ${entry}`).join("\n"), remediation: `Use exact command names (for example: ${examples.join(", ")}). 
` + - "If you need broader restrictions, remove risky commands from allowCommands/default workflows.", + "If you need broader restrictions, remove risky command IDs from allowCommands/default workflows and tighten tools.exec policy.", }); return findings; diff --git a/src/security/dm-policy-channel-smoke.test.ts b/src/security/dm-policy-channel-smoke.test.ts new file mode 100644 index 00000000000..7a57317d628 --- /dev/null +++ b/src/security/dm-policy-channel-smoke.test.ts @@ -0,0 +1,66 @@ +import { describe, expect, it } from "vitest"; +import { isAllowedBlueBubblesSender } from "../../extensions/bluebubbles/src/targets.js"; +import { isMattermostSenderAllowed } from "../../extensions/mattermost/src/mattermost/monitor-auth.js"; +import { isSignalSenderAllowed, type SignalSender } from "../signal/identity.js"; +import { DM_GROUP_ACCESS_REASON, resolveDmGroupAccessWithLists } from "./dm-policy-shared.js"; + +type ChannelSmokeCase = { + name: string; + storeAllowFrom: string[]; + isSenderAllowed: (allowFrom: string[]) => boolean; +}; + +const signalSender: SignalSender = { + kind: "phone", + raw: "+15550001111", + e164: "+15550001111", +}; + +const cases: ChannelSmokeCase[] = [ + { + name: "bluebubbles", + storeAllowFrom: ["attacker-user"], + isSenderAllowed: (allowFrom) => + isAllowedBlueBubblesSender({ + allowFrom, + sender: "attacker-user", + chatId: 101, + }), + }, + { + name: "signal", + storeAllowFrom: [signalSender.e164], + isSenderAllowed: (allowFrom) => isSignalSenderAllowed(signalSender, allowFrom), + }, + { + name: "mattermost", + storeAllowFrom: ["user:attacker-user"], + isSenderAllowed: (allowFrom) => + isMattermostSenderAllowed({ + senderId: "attacker-user", + senderName: "Attacker", + allowFrom, + }), + }, +]; + +describe("security/dm-policy-shared channel smoke", () => { + for (const testCase of cases) { + for (const ingress of ["message", "reaction"] as const) { + it(`[${testCase.name}] blocks group ${ingress} when sender is only in pairing store`, 
() => { + const access = resolveDmGroupAccessWithLists({ + isGroup: true, + dmPolicy: "pairing", + groupPolicy: "allowlist", + allowFrom: ["owner-user"], + groupAllowFrom: ["group-owner"], + storeAllowFrom: testCase.storeAllowFrom, + isSenderAllowed: testCase.isSenderAllowed, + }); + expect(access.decision).toBe("block"); + expect(access.reasonCode).toBe(DM_GROUP_ACCESS_REASON.GROUP_POLICY_NOT_ALLOWLISTED); + expect(access.reason).toBe("groupPolicy=allowlist (not allowlisted)"); + }); + } + } +}); diff --git a/src/security/dm-policy-shared.test.ts b/src/security/dm-policy-shared.test.ts index d65d6a79188..b68489222b0 100644 --- a/src/security/dm-policy-shared.test.ts +++ b/src/security/dm-policy-shared.test.ts @@ -1,7 +1,11 @@ import { describe, expect, it } from "vitest"; import { + DM_GROUP_ACCESS_REASON, + readStoreAllowFromForDmPolicy, resolveDmAllowState, + resolveDmGroupAccessWithCommandGate, resolveDmGroupAccessDecision, + resolveDmGroupAccessWithLists, resolveEffectiveAllowFromLists, } from "./dm-policy-shared.js"; @@ -9,9 +13,10 @@ describe("security/dm-policy-shared", () => { it("normalizes config + store allow entries and counts distinct senders", async () => { const state = await resolveDmAllowState({ provider: "telegram", + accountId: "default", allowFrom: [" * ", " alice ", "ALICE", "bob"], normalizeEntry: (value) => value.toLowerCase(), - readStore: async () => [" Bob ", "carol", ""], + readStore: async (_provider, _accountId) => [" Bob ", "carol", ""], }); expect(state.configAllowFrom).toEqual(["*", "alice", "ALICE", "bob"]); expect(state.hasWildcard).toBe(true); @@ -22,8 +27,9 @@ describe("security/dm-policy-shared", () => { it("handles empty allowlists and store failures", async () => { const state = await resolveDmAllowState({ provider: "slack", + accountId: "default", allowFrom: undefined, - readStore: async () => { + readStore: async (_provider, _accountId) => { throw new Error("offline"); }, }); @@ -33,6 +39,36 @@ 
describe("security/dm-policy-shared", () => { expect(state.isMultiUserDm).toBe(false); }); + it("skips pairing-store reads when dmPolicy is allowlist", async () => { + let called = false; + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "telegram", + accountId: "default", + dmPolicy: "allowlist", + readStore: async (_provider, _accountId) => { + called = true; + return ["should-not-be-read"]; + }, + }); + expect(called).toBe(false); + expect(storeAllowFrom).toEqual([]); + }); + + it("skips pairing-store reads when shouldRead=false", async () => { + let called = false; + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "slack", + accountId: "default", + shouldRead: false, + readStore: async (_provider, _accountId) => { + called = true; + return ["should-not-be-read"]; + }, + }); + expect(called).toBe(false); + expect(storeAllowFrom).toEqual([]); + }); + it("builds effective DM/group allowlists from config + pairing store", () => { const lists = resolveEffectiveAllowFromLists({ allowFrom: [" owner ", "", "owner2"], @@ -40,7 +76,7 @@ describe("security/dm-policy-shared", () => { storeAllowFrom: [" owner3 ", ""], }); expect(lists.effectiveAllowFrom).toEqual(["owner", "owner2", "owner3"]); - expect(lists.effectiveGroupAllowFrom).toEqual(["group:abc", "owner3"]); + expect(lists.effectiveGroupAllowFrom).toEqual(["group:abc"]); }); it("falls back to DM allowlist for groups when groupAllowFrom is empty", () => { @@ -50,7 +86,18 @@ describe("security/dm-policy-shared", () => { storeAllowFrom: [" owner2 "], }); expect(lists.effectiveAllowFrom).toEqual(["owner", "owner2"]); - expect(lists.effectiveGroupAllowFrom).toEqual(["owner", "owner2"]); + expect(lists.effectiveGroupAllowFrom).toEqual(["owner"]); + }); + + it("can keep group allowlist empty when fallback is disabled", () => { + const lists = resolveEffectiveAllowFromLists({ + allowFrom: ["owner"], + groupAllowFrom: [], + storeAllowFrom: ["paired-user"], + 
groupAllowFromFallbackToAllowFrom: false, + }); + expect(lists.effectiveAllowFrom).toEqual(["owner", "paired-user"]); + expect(lists.effectiveGroupAllowFrom).toEqual([]); }); it("excludes storeAllowFrom when dmPolicy is allowlist", () => { @@ -64,7 +111,7 @@ describe("security/dm-policy-shared", () => { expect(lists.effectiveGroupAllowFrom).toEqual(["group:abc"]); }); - it("includes storeAllowFrom when dmPolicy is pairing", () => { + it("keeps group allowlist explicit when dmPolicy is pairing", () => { const lists = resolveEffectiveAllowFromLists({ allowFrom: ["+1111"], groupAllowFrom: [], @@ -72,7 +119,120 @@ describe("security/dm-policy-shared", () => { dmPolicy: "pairing", }); expect(lists.effectiveAllowFrom).toEqual(["+1111", "+2222"]); - expect(lists.effectiveGroupAllowFrom).toEqual(["+1111", "+2222"]); + expect(lists.effectiveGroupAllowFrom).toEqual(["+1111"]); + }); + + it("resolves access + effective allowlists in one shared call", () => { + const resolved = resolveDmGroupAccessWithLists({ + isGroup: false, + dmPolicy: "pairing", + groupPolicy: "allowlist", + allowFrom: ["owner"], + groupAllowFrom: ["group:room"], + storeAllowFrom: ["paired-user"], + isSenderAllowed: (allowFrom) => allowFrom.includes("paired-user"), + }); + expect(resolved.decision).toBe("allow"); + expect(resolved.reasonCode).toBe(DM_GROUP_ACCESS_REASON.DM_POLICY_ALLOWLISTED); + expect(resolved.reason).toBe("dmPolicy=pairing (allowlisted)"); + expect(resolved.effectiveAllowFrom).toEqual(["owner", "paired-user"]); + expect(resolved.effectiveGroupAllowFrom).toEqual(["group:room"]); + }); + + it("resolves command gate with dm/group parity for groups", () => { + const resolved = resolveDmGroupAccessWithCommandGate({ + isGroup: true, + dmPolicy: "pairing", + groupPolicy: "allowlist", + allowFrom: ["owner"], + groupAllowFrom: ["group-owner"], + storeAllowFrom: ["paired-user"], + isSenderAllowed: (allowFrom) => allowFrom.includes("paired-user"), + command: { + useAccessGroups: true, + 
allowTextCommands: true, + hasControlCommand: true, + }, + }); + expect(resolved.decision).toBe("block"); + expect(resolved.reason).toBe("groupPolicy=allowlist (not allowlisted)"); + expect(resolved.commandAuthorized).toBe(false); + expect(resolved.shouldBlockControlCommand).toBe(true); + }); + + it("keeps configured dm allowlist usable for group command auth", () => { + const resolved = resolveDmGroupAccessWithCommandGate({ + isGroup: true, + dmPolicy: "pairing", + groupPolicy: "open", + allowFrom: ["owner"], + groupAllowFrom: [], + storeAllowFrom: ["paired-user"], + isSenderAllowed: (allowFrom) => allowFrom.includes("owner"), + command: { + useAccessGroups: true, + allowTextCommands: true, + hasControlCommand: true, + }, + }); + expect(resolved.commandAuthorized).toBe(true); + expect(resolved.shouldBlockControlCommand).toBe(false); + }); + + it("treats dm command authorization as dm access result", () => { + const resolved = resolveDmGroupAccessWithCommandGate({ + isGroup: false, + dmPolicy: "pairing", + groupPolicy: "allowlist", + allowFrom: ["owner"], + groupAllowFrom: ["group-owner"], + storeAllowFrom: ["paired-user"], + isSenderAllowed: (allowFrom) => allowFrom.includes("paired-user"), + command: { + useAccessGroups: true, + allowTextCommands: true, + hasControlCommand: true, + }, + }); + expect(resolved.decision).toBe("allow"); + expect(resolved.commandAuthorized).toBe(true); + expect(resolved.shouldBlockControlCommand).toBe(false); + }); + + it("does not auto-authorize dm commands in open mode without explicit allowlists", () => { + const resolved = resolveDmGroupAccessWithCommandGate({ + isGroup: false, + dmPolicy: "open", + groupPolicy: "allowlist", + allowFrom: [], + groupAllowFrom: [], + storeAllowFrom: [], + isSenderAllowed: () => false, + command: { + useAccessGroups: true, + allowTextCommands: true, + hasControlCommand: true, + }, + }); + expect(resolved.decision).toBe("allow"); + expect(resolved.commandAuthorized).toBe(false); + 
expect(resolved.shouldBlockControlCommand).toBe(false); + }); + + it("keeps allowlist mode strict in shared resolver (no pairing-store fallback)", () => { + const resolved = resolveDmGroupAccessWithLists({ + isGroup: false, + dmPolicy: "allowlist", + groupPolicy: "allowlist", + allowFrom: ["owner"], + groupAllowFrom: [], + storeAllowFrom: ["paired-user"], + isSenderAllowed: () => false, + }); + expect(resolved.decision).toBe("block"); + expect(resolved.reasonCode).toBe(DM_GROUP_ACCESS_REASON.DM_POLICY_NOT_ALLOWLISTED); + expect(resolved.reason).toBe("dmPolicy=allowlist (not allowlisted)"); + expect(resolved.effectiveAllowFrom).toEqual(["owner"]); }); const channels = [ @@ -86,6 +246,102 @@ describe("security/dm-policy-shared", () => { "zalo", ] as const; + it("keeps message/reaction policy parity table across channels", () => { + const cases = [ + { + name: "dmPolicy=open", + isGroup: false, + dmPolicy: "open" as const, + groupPolicy: "allowlist" as const, + allowFrom: [] as string[], + groupAllowFrom: [] as string[], + storeAllowFrom: [] as string[], + isSenderAllowed: () => false, + expectedDecision: "allow" as const, + expectedReactionAllowed: true, + }, + { + name: "dmPolicy=disabled", + isGroup: false, + dmPolicy: "disabled" as const, + groupPolicy: "allowlist" as const, + allowFrom: [] as string[], + groupAllowFrom: [] as string[], + storeAllowFrom: [] as string[], + isSenderAllowed: () => false, + expectedDecision: "block" as const, + expectedReactionAllowed: false, + }, + { + name: "dmPolicy=allowlist unauthorized", + isGroup: false, + dmPolicy: "allowlist" as const, + groupPolicy: "allowlist" as const, + allowFrom: ["owner"], + groupAllowFrom: [] as string[], + storeAllowFrom: [] as string[], + isSenderAllowed: () => false, + expectedDecision: "block" as const, + expectedReactionAllowed: false, + }, + { + name: "dmPolicy=allowlist authorized", + isGroup: false, + dmPolicy: "allowlist" as const, + groupPolicy: "allowlist" as const, + allowFrom: ["owner"], + 
groupAllowFrom: [] as string[], + storeAllowFrom: [] as string[], + isSenderAllowed: () => true, + expectedDecision: "allow" as const, + expectedReactionAllowed: true, + }, + { + name: "dmPolicy=pairing unauthorized", + isGroup: false, + dmPolicy: "pairing" as const, + groupPolicy: "allowlist" as const, + allowFrom: [] as string[], + groupAllowFrom: [] as string[], + storeAllowFrom: [] as string[], + isSenderAllowed: () => false, + expectedDecision: "pairing" as const, + expectedReactionAllowed: false, + }, + { + name: "groupPolicy=allowlist rejects DM-paired sender not in explicit group list", + isGroup: true, + dmPolicy: "pairing" as const, + groupPolicy: "allowlist" as const, + allowFrom: ["owner"] as string[], + groupAllowFrom: ["group-owner"] as string[], + storeAllowFrom: ["paired-user"] as string[], + isSenderAllowed: (allowFrom: string[]) => allowFrom.includes("paired-user"), + expectedDecision: "block" as const, + expectedReactionAllowed: false, + }, + ]; + + for (const channel of channels) { + for (const testCase of cases) { + const access = resolveDmGroupAccessWithLists({ + isGroup: testCase.isGroup, + dmPolicy: testCase.dmPolicy, + groupPolicy: testCase.groupPolicy, + allowFrom: testCase.allowFrom, + groupAllowFrom: testCase.groupAllowFrom, + storeAllowFrom: testCase.storeAllowFrom, + isSenderAllowed: testCase.isSenderAllowed, + }); + const reactionAllowed = access.decision === "allow"; + expect(access.decision, `[${channel}] ${testCase.name}`).toBe(testCase.expectedDecision); + expect(reactionAllowed, `[${channel}] ${testCase.name} reaction`).toBe( + testCase.expectedReactionAllowed, + ); + } + } + }); + for (const channel of channels) { it(`[${channel}] blocks DM allowlist mode when allowlist is empty`, () => { const decision = resolveDmGroupAccessDecision({ @@ -98,6 +354,7 @@ describe("security/dm-policy-shared", () => { }); expect(decision).toEqual({ decision: "block", + reasonCode: DM_GROUP_ACCESS_REASON.DM_POLICY_NOT_ALLOWLISTED, reason: 
"dmPolicy=allowlist (not allowlisted)", }); }); @@ -113,6 +370,7 @@ describe("security/dm-policy-shared", () => { }); expect(decision).toEqual({ decision: "pairing", + reasonCode: DM_GROUP_ACCESS_REASON.DM_POLICY_PAIRING_REQUIRED, reason: "dmPolicy=pairing (not allowlisted)", }); }); @@ -140,6 +398,7 @@ describe("security/dm-policy-shared", () => { }); expect(decision).toEqual({ decision: "block", + reasonCode: DM_GROUP_ACCESS_REASON.GROUP_POLICY_NOT_ALLOWLISTED, reason: "groupPolicy=allowlist (not allowlisted)", }); }); diff --git a/src/security/dm-policy-shared.ts b/src/security/dm-policy-shared.ts index ee07dfff3c7..35c9fceaf74 100644 --- a/src/security/dm-policy-shared.ts +++ b/src/security/dm-policy-shared.ts @@ -1,3 +1,5 @@ +import { mergeDmAllowFromSources, resolveGroupAllowFromSources } from "../channels/allow-from.js"; +import { resolveControlCommandGate } from "../channels/command-gating.js"; import type { ChannelId } from "../channels/plugins/types.js"; import { readChannelAllowFromStore } from "../pairing/pairing-store.js"; import { normalizeStringEntries } from "../shared/string-normalization.js"; @@ -7,29 +9,63 @@ export function resolveEffectiveAllowFromLists(params: { groupAllowFrom?: Array | null; storeAllowFrom?: Array | null; dmPolicy?: string | null; + groupAllowFromFallbackToAllowFrom?: boolean | null; }): { effectiveAllowFrom: string[]; effectiveGroupAllowFrom: string[]; } { - const configAllowFrom = normalizeStringEntries( - Array.isArray(params.allowFrom) ? params.allowFrom : undefined, + const allowFrom = Array.isArray(params.allowFrom) ? params.allowFrom : undefined; + const groupAllowFrom = Array.isArray(params.groupAllowFrom) ? params.groupAllowFrom : undefined; + const storeAllowFrom = Array.isArray(params.storeAllowFrom) ? params.storeAllowFrom : undefined; + const effectiveAllowFrom = normalizeStringEntries( + mergeDmAllowFromSources({ + allowFrom, + storeAllowFrom, + dmPolicy: params.dmPolicy ?? 
undefined, + }), ); - const configGroupAllowFrom = normalizeStringEntries( - Array.isArray(params.groupAllowFrom) ? params.groupAllowFrom : undefined, + // Group auth is explicit (groupAllowFrom fallback allowFrom). Pairing store is DM-only. + const effectiveGroupAllowFrom = normalizeStringEntries( + resolveGroupAllowFromSources({ + allowFrom, + groupAllowFrom, + fallbackToAllowFrom: params.groupAllowFromFallbackToAllowFrom ?? undefined, + }), ); - const storeAllowFrom = - params.dmPolicy === "allowlist" - ? [] - : normalizeStringEntries( - Array.isArray(params.storeAllowFrom) ? params.storeAllowFrom : undefined, - ); - const effectiveAllowFrom = normalizeStringEntries([...configAllowFrom, ...storeAllowFrom]); - const groupBase = configGroupAllowFrom.length > 0 ? configGroupAllowFrom : configAllowFrom; - const effectiveGroupAllowFrom = normalizeStringEntries([...groupBase, ...storeAllowFrom]); return { effectiveAllowFrom, effectiveGroupAllowFrom }; } export type DmGroupAccessDecision = "allow" | "block" | "pairing"; +export const DM_GROUP_ACCESS_REASON = { + GROUP_POLICY_ALLOWED: "group_policy_allowed", + GROUP_POLICY_DISABLED: "group_policy_disabled", + GROUP_POLICY_EMPTY_ALLOWLIST: "group_policy_empty_allowlist", + GROUP_POLICY_NOT_ALLOWLISTED: "group_policy_not_allowlisted", + DM_POLICY_OPEN: "dm_policy_open", + DM_POLICY_DISABLED: "dm_policy_disabled", + DM_POLICY_ALLOWLISTED: "dm_policy_allowlisted", + DM_POLICY_PAIRING_REQUIRED: "dm_policy_pairing_required", + DM_POLICY_NOT_ALLOWLISTED: "dm_policy_not_allowlisted", +} as const; +export type DmGroupAccessReasonCode = + (typeof DM_GROUP_ACCESS_REASON)[keyof typeof DM_GROUP_ACCESS_REASON]; + +export async function readStoreAllowFromForDmPolicy(params: { + provider: ChannelId; + accountId: string; + dmPolicy?: string | null; + shouldRead?: boolean | null; + readStore?: (provider: ChannelId, accountId: string) => Promise; +}): Promise { + if (params.shouldRead === false || params.dmPolicy === "allowlist") { + 
return []; + } + const readStore = + params.readStore ?? + ((provider: ChannelId, accountId: string) => + readChannelAllowFromStore(provider, process.env, accountId)); + return await readStore(params.provider, params.accountId).catch(() => []); +} export function resolveDmGroupAccessDecision(params: { isGroup: boolean; @@ -40,6 +76,7 @@ export function resolveDmGroupAccessDecision(params: { isSenderAllowed: (allowFrom: string[]) => boolean; }): { decision: DmGroupAccessDecision; + reasonCode: DmGroupAccessReasonCode; reason: string; } { const dmPolicy = params.dmPolicy ?? "pairing"; @@ -49,39 +86,187 @@ export function resolveDmGroupAccessDecision(params: { if (params.isGroup) { if (groupPolicy === "disabled") { - return { decision: "block", reason: "groupPolicy=disabled" }; + return { + decision: "block", + reasonCode: DM_GROUP_ACCESS_REASON.GROUP_POLICY_DISABLED, + reason: "groupPolicy=disabled", + }; } if (groupPolicy === "allowlist") { if (effectiveGroupAllowFrom.length === 0) { - return { decision: "block", reason: "groupPolicy=allowlist (empty allowlist)" }; + return { + decision: "block", + reasonCode: DM_GROUP_ACCESS_REASON.GROUP_POLICY_EMPTY_ALLOWLIST, + reason: "groupPolicy=allowlist (empty allowlist)", + }; } if (!params.isSenderAllowed(effectiveGroupAllowFrom)) { - return { decision: "block", reason: "groupPolicy=allowlist (not allowlisted)" }; + return { + decision: "block", + reasonCode: DM_GROUP_ACCESS_REASON.GROUP_POLICY_NOT_ALLOWLISTED, + reason: "groupPolicy=allowlist (not allowlisted)", + }; } } - return { decision: "allow", reason: `groupPolicy=${groupPolicy}` }; + return { + decision: "allow", + reasonCode: DM_GROUP_ACCESS_REASON.GROUP_POLICY_ALLOWED, + reason: `groupPolicy=${groupPolicy}`, + }; } if (dmPolicy === "disabled") { - return { decision: "block", reason: "dmPolicy=disabled" }; + return { + decision: "block", + reasonCode: DM_GROUP_ACCESS_REASON.DM_POLICY_DISABLED, + reason: "dmPolicy=disabled", + }; } if (dmPolicy === "open") { - 
return { decision: "allow", reason: "dmPolicy=open" }; + return { + decision: "allow", + reasonCode: DM_GROUP_ACCESS_REASON.DM_POLICY_OPEN, + reason: "dmPolicy=open", + }; } if (params.isSenderAllowed(effectiveAllowFrom)) { - return { decision: "allow", reason: `dmPolicy=${dmPolicy} (allowlisted)` }; + return { + decision: "allow", + reasonCode: DM_GROUP_ACCESS_REASON.DM_POLICY_ALLOWLISTED, + reason: `dmPolicy=${dmPolicy} (allowlisted)`, + }; } if (dmPolicy === "pairing") { - return { decision: "pairing", reason: "dmPolicy=pairing (not allowlisted)" }; + return { + decision: "pairing", + reasonCode: DM_GROUP_ACCESS_REASON.DM_POLICY_PAIRING_REQUIRED, + reason: "dmPolicy=pairing (not allowlisted)", + }; } - return { decision: "block", reason: `dmPolicy=${dmPolicy} (not allowlisted)` }; + return { + decision: "block", + reasonCode: DM_GROUP_ACCESS_REASON.DM_POLICY_NOT_ALLOWLISTED, + reason: `dmPolicy=${dmPolicy} (not allowlisted)`, + }; +} + +export function resolveDmGroupAccessWithLists(params: { + isGroup: boolean; + dmPolicy?: string | null; + groupPolicy?: string | null; + allowFrom?: Array | null; + groupAllowFrom?: Array | null; + storeAllowFrom?: Array | null; + groupAllowFromFallbackToAllowFrom?: boolean | null; + isSenderAllowed: (allowFrom: string[]) => boolean; +}): { + decision: DmGroupAccessDecision; + reasonCode: DmGroupAccessReasonCode; + reason: string; + effectiveAllowFrom: string[]; + effectiveGroupAllowFrom: string[]; +} { + const { effectiveAllowFrom, effectiveGroupAllowFrom } = resolveEffectiveAllowFromLists({ + allowFrom: params.allowFrom, + groupAllowFrom: params.groupAllowFrom, + storeAllowFrom: params.storeAllowFrom, + dmPolicy: params.dmPolicy, + groupAllowFromFallbackToAllowFrom: params.groupAllowFromFallbackToAllowFrom, + }); + const access = resolveDmGroupAccessDecision({ + isGroup: params.isGroup, + dmPolicy: params.dmPolicy, + groupPolicy: params.groupPolicy, + effectiveAllowFrom, + effectiveGroupAllowFrom, + isSenderAllowed: 
params.isSenderAllowed, + }); + return { + ...access, + effectiveAllowFrom, + effectiveGroupAllowFrom, + }; +} + +export function resolveDmGroupAccessWithCommandGate(params: { + isGroup: boolean; + dmPolicy?: string | null; + groupPolicy?: string | null; + allowFrom?: Array | null; + groupAllowFrom?: Array | null; + storeAllowFrom?: Array | null; + groupAllowFromFallbackToAllowFrom?: boolean | null; + isSenderAllowed: (allowFrom: string[]) => boolean; + command?: { + useAccessGroups: boolean; + allowTextCommands: boolean; + hasControlCommand: boolean; + }; +}): { + decision: DmGroupAccessDecision; + reason: string; + effectiveAllowFrom: string[]; + effectiveGroupAllowFrom: string[]; + commandAuthorized: boolean; + shouldBlockControlCommand: boolean; +} { + const access = resolveDmGroupAccessWithLists({ + isGroup: params.isGroup, + dmPolicy: params.dmPolicy, + groupPolicy: params.groupPolicy, + allowFrom: params.allowFrom, + groupAllowFrom: params.groupAllowFrom, + storeAllowFrom: params.storeAllowFrom, + groupAllowFromFallbackToAllowFrom: params.groupAllowFromFallbackToAllowFrom, + isSenderAllowed: params.isSenderAllowed, + }); + + const configuredAllowFrom = normalizeStringEntries(params.allowFrom ?? []); + const configuredGroupAllowFrom = normalizeStringEntries( + resolveGroupAllowFromSources({ + allowFrom: configuredAllowFrom, + groupAllowFrom: normalizeStringEntries(params.groupAllowFrom ?? []), + fallbackToAllowFrom: params.groupAllowFromFallbackToAllowFrom ?? undefined, + }), + ); + // Group command authorization must not inherit DM pairing-store approvals. + const commandDmAllowFrom = params.isGroup ? configuredAllowFrom : access.effectiveAllowFrom; + const commandGroupAllowFrom = params.isGroup + ? configuredGroupAllowFrom + : access.effectiveGroupAllowFrom; + const ownerAllowedForCommands = params.isSenderAllowed(commandDmAllowFrom); + const groupAllowedForCommands = params.isSenderAllowed(commandGroupAllowFrom); + const commandGate = params.command + ? 
resolveControlCommandGate({ + useAccessGroups: params.command.useAccessGroups, + authorizers: [ + { + configured: commandDmAllowFrom.length > 0, + allowed: ownerAllowedForCommands, + }, + { + configured: commandGroupAllowFrom.length > 0, + allowed: groupAllowedForCommands, + }, + ], + allowTextCommands: params.command.allowTextCommands, + hasControlCommand: params.command.hasControlCommand, + }) + : { commandAuthorized: false, shouldBlock: false }; + + return { + ...access, + commandAuthorized: commandGate.commandAuthorized, + shouldBlockControlCommand: params.isGroup && commandGate.shouldBlock, + }; } export async function resolveDmAllowState(params: { provider: ChannelId; + accountId: string; allowFrom?: Array | null; normalizeEntry?: (raw: string) => string; - readStore?: (provider: ChannelId) => Promise; + readStore?: (provider: ChannelId, accountId: string) => Promise; }): Promise<{ configAllowFrom: string[]; hasWildcard: boolean; @@ -92,9 +277,11 @@ export async function resolveDmAllowState(params: { Array.isArray(params.allowFrom) ? params.allowFrom : undefined, ); const hasWildcard = configAllowFrom.includes("*"); - const storeAllowFrom = await (params.readStore ?? readChannelAllowFromStore)( - params.provider, - ).catch(() => []); + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: params.provider, + accountId: params.accountId, + readStore: params.readStore, + }); const normalizeEntry = params.normalizeEntry ?? 
((value: string) => value); const normalizedCfg = configAllowFrom .filter((value) => value !== "*") diff --git a/src/security/fix.ts b/src/security/fix.ts index 6de16b08850..d0c86e528cf 100644 --- a/src/security/fix.ts +++ b/src/security/fix.ts @@ -7,7 +7,7 @@ import { collectIncludePathsRecursive } from "../config/includes-scan.js"; import { resolveConfigPath, resolveOAuthDir, resolveStateDir } from "../config/paths.js"; import { readChannelAllowFromStore } from "../pairing/pairing-store.js"; import { runExec } from "../process/exec.js"; -import { normalizeAgentId } from "../routing/session-key.js"; +import { DEFAULT_ACCOUNT_ID, normalizeAgentId } from "../routing/session-key.js"; import { createIcaclsResetCommand, formatIcaclsResetCommand, type ExecFn } from "./windows-acl.js"; export type SecurityFixChmodAction = { @@ -412,7 +412,11 @@ export async function fixSecurityFootguns(opts?: { const fixed = applyConfigFixes({ cfg: snap.config, env }); changes = fixed.changes; - const whatsappStoreAllowFrom = await readChannelAllowFromStore("whatsapp", env).catch(() => []); + const whatsappStoreAllowFrom = await readChannelAllowFromStore( + "whatsapp", + env, + DEFAULT_ACCOUNT_ID, + ).catch(() => []); if (whatsappStoreAllowFrom.length > 0) { setWhatsAppGroupAllowFromFromStore({ cfg: fixed.cfg, diff --git a/src/sessions/session-key-utils.ts b/src/sessions/session-key-utils.ts index d6061a88631..c405df3a5ff 100644 --- a/src/sessions/session-key-utils.ts +++ b/src/sessions/session-key-utils.ts @@ -5,10 +5,14 @@ export type ParsedAgentSessionKey = { export type SessionKeyChatType = "direct" | "group" | "channel" | "unknown"; +/** + * Parse agent-scoped session keys in a canonical, case-insensitive way. + * Returned values are normalized to lowercase for stable comparisons/routing. + */ export function parseAgentSessionKey( sessionKey: string | undefined | null, ): ParsedAgentSessionKey | null { - const raw = (sessionKey ?? "").trim(); + const raw = (sessionKey ?? 
"").trim().toLowerCase(); if (!raw) { return null; } diff --git a/src/shared/net/ip-test-fixtures.ts b/src/shared/net/ip-test-fixtures.ts new file mode 100644 index 00000000000..d2fa9cd5436 --- /dev/null +++ b/src/shared/net/ip-test-fixtures.ts @@ -0,0 +1 @@ +export const blockedIpv6MulticastLiterals = ["ff02::1", "ff05::1:3", "[ff02::1]"] as const; diff --git a/src/shared/net/ip.test.ts b/src/shared/net/ip.test.ts index 73d385832f0..f89fb03f7ef 100644 --- a/src/shared/net/ip.test.ts +++ b/src/shared/net/ip.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it } from "vitest"; +import { blockedIpv6MulticastLiterals } from "./ip-test-fixtures.js"; import { extractEmbeddedIpv4FromIpv6, isCanonicalDottedDecimalIPv4, @@ -45,8 +46,11 @@ describe("shared ip helpers", () => { } }); - it("treats deprecated site-local IPv6 as private/internal", () => { + it("treats blocked IPv6 classes as private/internal", () => { expect(isPrivateOrLoopbackIpAddress("fec0::1")).toBe(true); + for (const literal of blockedIpv6MulticastLiterals) { + expect(isPrivateOrLoopbackIpAddress(literal)).toBe(true); + } expect(isPrivateOrLoopbackIpAddress("2001:4860:4860::8888")).toBe(false); }); }); diff --git a/src/shared/net/ip.ts b/src/shared/net/ip.ts index 2342bdedafe..c386c687898 100644 --- a/src/shared/net/ip.ts +++ b/src/shared/net/ip.ts @@ -22,11 +22,12 @@ const PRIVATE_OR_LOOPBACK_IPV4_RANGES = new Set([ "carrierGradeNat", ]); -const PRIVATE_OR_LOOPBACK_IPV6_RANGES = new Set([ +const BLOCKED_IPV6_SPECIAL_USE_RANGES = new Set([ "unspecified", "loopback", "linkLocal", "uniqueLocal", + "multicast", ]); const RFC2544_BENCHMARK_PREFIX: [ipaddr.IPv4, number] = [ipaddr.IPv4.parse("198.18.0.0"), 15]; export type Ipv4SpecialUseBlockOptions = { @@ -227,11 +228,15 @@ export function isPrivateOrLoopbackIpAddress(raw: string | undefined): boolean { if (isIpv4Address(normalized)) { return PRIVATE_OR_LOOPBACK_IPV4_RANGES.has(normalized.range()); } - if 
(PRIVATE_OR_LOOPBACK_IPV6_RANGES.has(normalized.range())) { + return isBlockedSpecialUseIpv6Address(normalized); +} + +export function isBlockedSpecialUseIpv6Address(address: ipaddr.IPv6): boolean { + if (BLOCKED_IPV6_SPECIAL_USE_RANGES.has(address.range())) { return true; } // ipaddr.js does not classify deprecated site-local fec0::/10 as private. - return (normalized.parts[0] & 0xffc0) === 0xfec0; + return (address.parts[0] & 0xffc0) === 0xfec0; } export function isRfc1918Ipv4Address(raw: string | undefined): boolean { diff --git a/src/signal/monitor.tool-result.sends-tool-summaries-responseprefix.test.ts b/src/signal/monitor.tool-result.sends-tool-summaries-responseprefix.test.ts index 429f9e3896c..a06d17d61d9 100644 --- a/src/signal/monitor.tool-result.sends-tool-summaries-responseprefix.test.ts +++ b/src/signal/monitor.tool-result.sends-tool-summaries-responseprefix.test.ts @@ -378,6 +378,49 @@ describe("monitorSignalProvider tool results", () => { expect(events.some((text) => text.includes("Signal reaction added"))).toBe(true); }); + it.each([ + { + name: "blocks reaction notifications from unauthorized senders when dmPolicy is allowlist", + mode: "all" as const, + extra: { dmPolicy: "allowlist", allowFrom: ["+15550007777"] } as Record, + targetAuthor: "+15550002222", + shouldEnqueue: false, + }, + { + name: "blocks reaction notifications from unauthorized senders when dmPolicy is pairing", + mode: "own" as const, + extra: { + dmPolicy: "pairing", + allowFrom: [], + account: "+15550009999", + } as Record, + targetAuthor: "+15550009999", + shouldEnqueue: false, + }, + { + name: "allows reaction notifications for allowlisted senders when dmPolicy is allowlist", + mode: "all" as const, + extra: { dmPolicy: "allowlist", allowFrom: ["+15550001111"] } as Record, + targetAuthor: "+15550002222", + shouldEnqueue: true, + }, + ])("$name", async ({ mode, extra, targetAuthor, shouldEnqueue }) => { + setReactionNotificationConfig(mode, extra); + await 
receiveSingleEnvelope({ + ...makeBaseEnvelope(), + reactionMessage: { + emoji: "✅", + targetAuthor, + targetSentTimestamp: 2, + }, + }); + + const events = getDirectSignalEventsFor("+15550001111"); + expect(events.some((text) => text.includes("Signal reaction added"))).toBe(shouldEnqueue); + expect(sendMock).not.toHaveBeenCalled(); + expect(upsertPairingRequestMock).not.toHaveBeenCalled(); + }); + it("notifies on own reactions when target includes uuid + phone", async () => { setReactionNotificationConfig("own", { account: "+15550002222" }); await receiveSingleEnvelope({ diff --git a/src/signal/monitor/access-policy.ts b/src/signal/monitor/access-policy.ts new file mode 100644 index 00000000000..e836868ec8d --- /dev/null +++ b/src/signal/monitor/access-policy.ts @@ -0,0 +1,87 @@ +import { issuePairingChallenge } from "../../pairing/pairing-challenge.js"; +import { upsertChannelPairingRequest } from "../../pairing/pairing-store.js"; +import { + readStoreAllowFromForDmPolicy, + resolveDmGroupAccessWithLists, +} from "../../security/dm-policy-shared.js"; +import { isSignalSenderAllowed, type SignalSender } from "../identity.js"; + +type SignalDmPolicy = "open" | "pairing" | "allowlist" | "disabled"; +type SignalGroupPolicy = "open" | "allowlist" | "disabled"; + +export async function resolveSignalAccessState(params: { + accountId: string; + dmPolicy: SignalDmPolicy; + groupPolicy: SignalGroupPolicy; + allowFrom: string[]; + groupAllowFrom: string[]; + sender: SignalSender; +}) { + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "signal", + accountId: params.accountId, + dmPolicy: params.dmPolicy, + }); + const resolveAccessDecision = (isGroup: boolean) => + resolveDmGroupAccessWithLists({ + isGroup, + dmPolicy: params.dmPolicy, + groupPolicy: params.groupPolicy, + allowFrom: params.allowFrom, + groupAllowFrom: params.groupAllowFrom, + storeAllowFrom, + isSenderAllowed: (allowEntries) => isSignalSenderAllowed(params.sender, allowEntries), + }); 
+ const dmAccess = resolveAccessDecision(false); + return { + resolveAccessDecision, + dmAccess, + effectiveDmAllow: dmAccess.effectiveAllowFrom, + effectiveGroupAllow: dmAccess.effectiveGroupAllowFrom, + }; +} + +export async function handleSignalDirectMessageAccess(params: { + dmPolicy: SignalDmPolicy; + dmAccessDecision: "allow" | "block" | "pairing"; + senderId: string; + senderIdLine: string; + senderDisplay: string; + senderName?: string; + accountId: string; + sendPairingReply: (text: string) => Promise; + log: (message: string) => void; +}): Promise { + if (params.dmAccessDecision === "allow") { + return true; + } + if (params.dmAccessDecision === "block") { + if (params.dmPolicy !== "disabled") { + params.log(`Blocked signal sender ${params.senderDisplay} (dmPolicy=${params.dmPolicy})`); + } + return false; + } + if (params.dmPolicy === "pairing") { + await issuePairingChallenge({ + channel: "signal", + senderId: params.senderId, + senderIdLine: params.senderIdLine, + meta: { name: params.senderName }, + upsertPairingRequest: async ({ id, meta }) => + await upsertChannelPairingRequest({ + channel: "signal", + id, + accountId: params.accountId, + meta, + }), + sendPairingReply: params.sendPairingReply, + onCreated: () => { + params.log(`signal pairing request sender=${params.senderId}`); + }, + onReplyError: (err) => { + params.log(`signal pairing reply failed for ${params.senderId}: ${String(err)}`); + }, + }); + } + return false; +} diff --git a/src/signal/monitor/event-handler.inbound-contract.test.ts b/src/signal/monitor/event-handler.inbound-contract.test.ts index 82abd3917c2..ecb5c270b9a 100644 --- a/src/signal/monitor/event-handler.inbound-contract.test.ts +++ b/src/signal/monitor/event-handler.inbound-contract.test.ts @@ -143,4 +143,33 @@ describe("signal createSignalEventHandler inbound contract", () => { expect.any(Object), ); }); + + it("does not auto-authorize DM commands in open mode without allowlists", async () => { + const handler = 
createSignalEventHandler( + createBaseSignalEventHandlerDeps({ + cfg: { + messages: { inbound: { debounceMs: 0 } }, + channels: { signal: { dmPolicy: "open", allowFrom: [] } }, + }, + allowFrom: [], + groupAllowFrom: [], + account: "+15550009999", + blockStreaming: false, + historyLimit: 0, + groupHistories: new Map(), + }), + ); + + await handler( + createSignalReceiveEvent({ + dataMessage: { + message: "/status", + attachments: [], + }, + }), + ); + + expect(capture.ctx).toBeTruthy(); + expect(capture.ctx?.CommandAuthorized).toBe(false); + }); }); diff --git a/src/signal/monitor/event-handler.ts b/src/signal/monitor/event-handler.ts index b095626ab46..9aea1f6433a 100644 --- a/src/signal/monitor/event-handler.ts +++ b/src/signal/monitor/event-handler.ts @@ -30,12 +30,8 @@ import { readSessionUpdatedAt, resolveStorePath } from "../../config/sessions.js import { danger, logVerbose, shouldLogVerbose } from "../../globals.js"; import { enqueueSystemEvent } from "../../infra/system-events.js"; import { mediaKindFromMime } from "../../media/constants.js"; -import { buildPairingReply } from "../../pairing/pairing-messages.js"; -import { - readChannelAllowFromStore, - upsertChannelPairingRequest, -} from "../../pairing/pairing-store.js"; import { resolveAgentRoute } from "../../routing/resolve-route.js"; +import { DM_GROUP_ACCESS_REASON } from "../../security/dm-policy-shared.js"; import { normalizeE164 } from "../../utils.js"; import { formatSignalPairingIdLine, @@ -45,9 +41,16 @@ import { resolveSignalPeerId, resolveSignalRecipient, resolveSignalSender, + type SignalSender, } from "../identity.js"; import { sendMessageSignal, sendReadReceiptSignal, sendTypingSignal } from "../send.js"; -import type { SignalEventHandlerDeps, SignalReceivePayload } from "./event-handler.types.js"; +import { handleSignalDirectMessageAccess, resolveSignalAccessState } from "./access-policy.js"; +import type { + SignalEnvelope, + SignalEventHandlerDeps, + SignalReactionMessage, + 
SignalReceivePayload, +} from "./event-handler.types.js"; import { renderSignalMentions } from "./mentions.js"; export function createSignalEventHandler(deps: SignalEventHandlerDeps) { const inboundDebounceMs = resolveInboundDebounceMs({ cfg: deps.cfg, channel: "signal" }); @@ -317,6 +320,85 @@ export function createSignalEventHandler(deps: SignalEventHandlerDeps) { }, }); + function handleReactionOnlyInbound(params: { + envelope: SignalEnvelope; + sender: SignalSender; + senderDisplay: string; + reaction: SignalReactionMessage; + hasBodyContent: boolean; + resolveAccessDecision: (isGroup: boolean) => { + decision: "allow" | "block" | "pairing"; + reason: string; + }; + }): boolean { + if (params.hasBodyContent) { + return false; + } + if (params.reaction.isRemove) { + return true; // Ignore reaction removals + } + const emojiLabel = params.reaction.emoji?.trim() || "emoji"; + const senderName = params.envelope.sourceName ?? params.senderDisplay; + logVerbose(`signal reaction: ${emojiLabel} from ${senderName}`); + const groupId = params.reaction.groupInfo?.groupId ?? undefined; + const groupName = params.reaction.groupInfo?.groupName ?? undefined; + const isGroup = Boolean(groupId); + const reactionAccess = params.resolveAccessDecision(isGroup); + if (reactionAccess.decision !== "allow") { + logVerbose( + `Blocked signal reaction sender ${params.senderDisplay} (${reactionAccess.reason})`, + ); + return true; + } + const targets = deps.resolveSignalReactionTargets(params.reaction); + const shouldNotify = deps.shouldEmitSignalReactionNotification({ + mode: deps.reactionMode, + account: deps.account, + targets, + sender: params.sender, + allowlist: deps.reactionAllowlist, + }); + if (!shouldNotify) { + return true; + } + + const senderPeerId = resolveSignalPeerId(params.sender); + const route = resolveAgentRoute({ + cfg: deps.cfg, + channel: "signal", + accountId: deps.accountId, + peer: { + kind: isGroup ? "group" : "direct", + id: isGroup ? (groupId ?? 
"unknown") : senderPeerId, + }, + }); + const groupLabel = isGroup ? `${groupName ?? "Signal Group"} id:${groupId}` : undefined; + const messageId = params.reaction.targetSentTimestamp + ? String(params.reaction.targetSentTimestamp) + : "unknown"; + const text = deps.buildSignalReactionSystemEventText({ + emojiLabel, + actorLabel: senderName, + messageId, + targetLabel: targets[0]?.display, + groupLabel, + }); + const senderId = formatSignalSenderId(params.sender); + const contextKey = [ + "signal", + "reaction", + "added", + messageId, + senderId, + emojiLabel, + groupId ?? "", + ] + .filter(Boolean) + .join(":"); + enqueueSystemEvent(text, { sessionKey: route.sessionKey, contextKey }); + return true; + } + return async (event: { event?: string; data?: string }) => { if (event.event !== "receive" || !event.data) { return; @@ -366,71 +448,34 @@ export function createSignalEventHandler(deps: SignalEventHandlerDeps) { const quoteText = dataMessage?.quote?.text?.trim() ?? ""; const hasBodyContent = Boolean(messageText || quoteText) || Boolean(!reaction && dataMessage?.attachments?.length); - - if (reaction && !hasBodyContent) { - if (reaction.isRemove) { - return; - } // Ignore reaction removals - const emojiLabel = reaction.emoji?.trim() || "emoji"; - const senderDisplay = formatSignalSenderDisplay(sender); - const senderName = envelope.sourceName ?? senderDisplay; - logVerbose(`signal reaction: ${emojiLabel} from ${senderName}`); - const targets = deps.resolveSignalReactionTargets(reaction); - const shouldNotify = deps.shouldEmitSignalReactionNotification({ - mode: deps.reactionMode, - account: deps.account, - targets, - sender, - allowlist: deps.reactionAllowlist, - }); - if (!shouldNotify) { - return; - } - - const groupId = reaction.groupInfo?.groupId ?? undefined; - const groupName = reaction.groupInfo?.groupName ?? 
undefined; - const isGroup = Boolean(groupId); - const senderPeerId = resolveSignalPeerId(sender); - const route = resolveAgentRoute({ - cfg: deps.cfg, - channel: "signal", + const senderDisplay = formatSignalSenderDisplay(sender); + const { resolveAccessDecision, dmAccess, effectiveDmAllow, effectiveGroupAllow } = + await resolveSignalAccessState({ accountId: deps.accountId, - peer: { - kind: isGroup ? "group" : "direct", - id: isGroup ? (groupId ?? "unknown") : senderPeerId, - }, + dmPolicy: deps.dmPolicy, + groupPolicy: deps.groupPolicy, + allowFrom: deps.allowFrom, + groupAllowFrom: deps.groupAllowFrom, + sender, }); - const groupLabel = isGroup ? `${groupName ?? "Signal Group"} id:${groupId}` : undefined; - const messageId = reaction.targetSentTimestamp - ? String(reaction.targetSentTimestamp) - : "unknown"; - const text = deps.buildSignalReactionSystemEventText({ - emojiLabel, - actorLabel: senderName, - messageId, - targetLabel: targets[0]?.display, - groupLabel, - }); - const senderId = formatSignalSenderId(sender); - const contextKey = [ - "signal", - "reaction", - "added", - messageId, - senderId, - emojiLabel, - groupId ?? "", - ] - .filter(Boolean) - .join(":"); - enqueueSystemEvent(text, { sessionKey: route.sessionKey, contextKey }); + + if ( + reaction && + handleReactionOnlyInbound({ + envelope, + sender, + senderDisplay, + reaction, + hasBodyContent, + resolveAccessDecision, + }) + ) { return; } if (!dataMessage) { return; } - const senderDisplay = formatSignalSenderDisplay(sender); const senderRecipient = resolveSignalRecipient(sender); const senderPeerId = resolveSignalPeerId(sender); const senderAllowId = formatSignalSenderId(sender); @@ -441,83 +486,59 @@ export function createSignalEventHandler(deps: SignalEventHandlerDeps) { const groupId = dataMessage.groupInfo?.groupId ?? undefined; const groupName = dataMessage.groupInfo?.groupName ?? undefined; const isGroup = Boolean(groupId); - const storeAllowFrom = - deps.dmPolicy === "allowlist" - ? 
[] - : await readChannelAllowFromStore("signal").catch(() => []); - const effectiveDmAllow = [...deps.allowFrom, ...storeAllowFrom]; - const effectiveGroupAllow = [...deps.groupAllowFrom, ...storeAllowFrom]; - const dmAllowed = - deps.dmPolicy === "open" ? true : isSignalSenderAllowed(sender, effectiveDmAllow); if (!isGroup) { - if (deps.dmPolicy === "disabled") { - return; - } - if (!dmAllowed) { - if (deps.dmPolicy === "pairing") { - const senderId = senderAllowId; - const { code, created } = await upsertChannelPairingRequest({ - channel: "signal", - id: senderId, - meta: { name: envelope.sourceName ?? undefined }, + const allowedDirectMessage = await handleSignalDirectMessageAccess({ + dmPolicy: deps.dmPolicy, + dmAccessDecision: dmAccess.decision, + senderId: senderAllowId, + senderIdLine, + senderDisplay, + senderName: envelope.sourceName ?? undefined, + accountId: deps.accountId, + sendPairingReply: async (text) => { + await sendMessageSignal(`signal:${senderRecipient}`, text, { + baseUrl: deps.baseUrl, + account: deps.account, + maxBytes: deps.mediaMaxBytes, + accountId: deps.accountId, }); - if (created) { - logVerbose(`signal pairing request sender=${senderId}`); - try { - await sendMessageSignal( - `signal:${senderRecipient}`, - buildPairingReply({ - channel: "signal", - idLine: senderIdLine, - code, - }), - { - baseUrl: deps.baseUrl, - account: deps.account, - maxBytes: deps.mediaMaxBytes, - accountId: deps.accountId, - }, - ); - } catch (err) { - logVerbose(`signal pairing reply failed for ${senderId}: ${String(err)}`); - } - } + }, + log: logVerbose, + }); + if (!allowedDirectMessage) { + return; + } + } + if (isGroup) { + const groupAccess = resolveAccessDecision(true); + if (groupAccess.decision !== "allow") { + if (groupAccess.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_DISABLED) { + logVerbose("Blocked signal group message (groupPolicy: disabled)"); + } else if (groupAccess.reasonCode === DM_GROUP_ACCESS_REASON.GROUP_POLICY_EMPTY_ALLOWLIST) 
{ + logVerbose("Blocked signal group message (groupPolicy: allowlist, no groupAllowFrom)"); } else { - logVerbose(`Blocked signal sender ${senderDisplay} (dmPolicy=${deps.dmPolicy})`); + logVerbose(`Blocked signal group sender ${senderDisplay} (not in groupAllowFrom)`); } return; } } - if (isGroup && deps.groupPolicy === "disabled") { - logVerbose("Blocked signal group message (groupPolicy: disabled)"); - return; - } - if (isGroup && deps.groupPolicy === "allowlist") { - if (effectiveGroupAllow.length === 0) { - logVerbose("Blocked signal group message (groupPolicy: allowlist, no groupAllowFrom)"); - return; - } - if (!isSignalSenderAllowed(sender, effectiveGroupAllow)) { - logVerbose(`Blocked signal group sender ${senderDisplay} (not in groupAllowFrom)`); - return; - } - } const useAccessGroups = deps.cfg.commands?.useAccessGroups !== false; - const ownerAllowedForCommands = isSignalSenderAllowed(sender, effectiveDmAllow); + const commandDmAllow = isGroup ? deps.allowFrom : effectiveDmAllow; + const ownerAllowedForCommands = isSignalSenderAllowed(sender, commandDmAllow); const groupAllowedForCommands = isSignalSenderAllowed(sender, effectiveGroupAllow); const hasControlCommandInMessage = hasControlCommand(messageText, deps.cfg); const commandGate = resolveControlCommandGate({ useAccessGroups, authorizers: [ - { configured: effectiveDmAllow.length > 0, allowed: ownerAllowedForCommands }, + { configured: commandDmAllow.length > 0, allowed: ownerAllowedForCommands }, { configured: effectiveGroupAllow.length > 0, allowed: groupAllowedForCommands }, ], allowTextCommands: true, hasControlCommand: hasControlCommandInMessage, }); - const commandAuthorized = isGroup ? 
commandGate.commandAuthorized : dmAllowed; + const commandAuthorized = commandGate.commandAuthorized; if (isGroup && commandGate.shouldBlock) { logInboundDrop({ log: logVerbose, diff --git a/src/slack/accounts.test.ts b/src/slack/accounts.test.ts new file mode 100644 index 00000000000..d89d29bbbb6 --- /dev/null +++ b/src/slack/accounts.test.ts @@ -0,0 +1,85 @@ +import { describe, expect, it } from "vitest"; +import { resolveSlackAccount } from "./accounts.js"; + +describe("resolveSlackAccount allowFrom precedence", () => { + it("prefers accounts.default.allowFrom over top-level for default account", () => { + const resolved = resolveSlackAccount({ + cfg: { + channels: { + slack: { + allowFrom: ["top"], + accounts: { + default: { + botToken: "xoxb-default", + appToken: "xapp-default", + allowFrom: ["default"], + }, + }, + }, + }, + }, + accountId: "default", + }); + + expect(resolved.config.allowFrom).toEqual(["default"]); + }); + + it("falls back to top-level allowFrom for named account without override", () => { + const resolved = resolveSlackAccount({ + cfg: { + channels: { + slack: { + allowFrom: ["top"], + accounts: { + work: { botToken: "xoxb-work", appToken: "xapp-work" }, + }, + }, + }, + }, + accountId: "work", + }); + + expect(resolved.config.allowFrom).toEqual(["top"]); + }); + + it("does not inherit default account allowFrom for named account when top-level is absent", () => { + const resolved = resolveSlackAccount({ + cfg: { + channels: { + slack: { + accounts: { + default: { + botToken: "xoxb-default", + appToken: "xapp-default", + allowFrom: ["default"], + }, + work: { botToken: "xoxb-work", appToken: "xapp-work" }, + }, + }, + }, + }, + accountId: "work", + }); + + expect(resolved.config.allowFrom).toBeUndefined(); + }); + + it("falls back to top-level dm.allowFrom when allowFrom alias is unset", () => { + const resolved = resolveSlackAccount({ + cfg: { + channels: { + slack: { + dm: { allowFrom: ["U123"] }, + accounts: { + work: { botToken: 
"xoxb-work", appToken: "xapp-work" }, + }, + }, + }, + }, + accountId: "work", + }); + + expect(resolved.config.allowFrom).toBeUndefined(); + expect(resolved.config.dm?.allowFrom).toEqual(["U123"]); + }); +}); diff --git a/src/slack/modal-metadata.test.ts b/src/slack/modal-metadata.test.ts index d209c70587c..a7a7ce8224b 100644 --- a/src/slack/modal-metadata.test.ts +++ b/src/slack/modal-metadata.test.ts @@ -18,6 +18,7 @@ describe("parseSlackModalPrivateMetadata", () => { sessionKey: "agent:main:slack:channel:C1", channelId: "D123", channelType: "im", + userId: "U123", ignored: "x", }), ), @@ -25,6 +26,7 @@ describe("parseSlackModalPrivateMetadata", () => { sessionKey: "agent:main:slack:channel:C1", channelId: "D123", channelType: "im", + userId: "U123", }); }); }); @@ -37,11 +39,13 @@ describe("encodeSlackModalPrivateMetadata", () => { sessionKey: "agent:main:slack:channel:C1", channelId: "", channelType: "im", + userId: "U123", }), ), ).toEqual({ sessionKey: "agent:main:slack:channel:C1", channelType: "im", + userId: "U123", }); }); diff --git a/src/slack/modal-metadata.ts b/src/slack/modal-metadata.ts index 491fb5d38f3..963024487a9 100644 --- a/src/slack/modal-metadata.ts +++ b/src/slack/modal-metadata.ts @@ -2,6 +2,7 @@ export type SlackModalPrivateMetadata = { sessionKey?: string; channelId?: string; channelType?: string; + userId?: string; }; const SLACK_PRIVATE_METADATA_MAX = 3000; @@ -20,6 +21,7 @@ export function parseSlackModalPrivateMetadata(raw: unknown): SlackModalPrivateM sessionKey: normalizeString(parsed.sessionKey), channelId: normalizeString(parsed.channelId), channelType: normalizeString(parsed.channelType), + userId: normalizeString(parsed.userId), }; } catch { return {}; @@ -31,6 +33,7 @@ export function encodeSlackModalPrivateMetadata(input: SlackModalPrivateMetadata ...(input.sessionKey ? { sessionKey: input.sessionKey } : {}), ...(input.channelId ? { channelId: input.channelId } : {}), ...(input.channelType ? 
{ channelType: input.channelType } : {}), + ...(input.userId ? { userId: input.userId } : {}), }; const encoded = JSON.stringify(payload); if (encoded.length > SLACK_PRIVATE_METADATA_MAX) { diff --git a/src/slack/monitor/auth.ts b/src/slack/monitor/auth.ts index d8fa5e5b4e5..421bc084d92 100644 --- a/src/slack/monitor/auth.ts +++ b/src/slack/monitor/auth.ts @@ -1,10 +1,25 @@ -import { readChannelAllowFromStore } from "../../pairing/pairing-store.js"; -import { allowListMatches, normalizeAllowList, normalizeAllowListLower } from "./allow-list.js"; -import type { SlackMonitorContext } from "./context.js"; +import { readStoreAllowFromForDmPolicy } from "../../security/dm-policy-shared.js"; +import { + allowListMatches, + normalizeAllowList, + normalizeAllowListLower, + resolveSlackUserAllowed, +} from "./allow-list.js"; +import { resolveSlackChannelConfig } from "./channel-config.js"; +import { normalizeSlackChannelType, type SlackMonitorContext } from "./context.js"; -export async function resolveSlackEffectiveAllowFrom(ctx: SlackMonitorContext) { - const storeAllowFrom = - ctx.dmPolicy === "allowlist" ? [] : await readChannelAllowFromStore("slack").catch(() => []); +export async function resolveSlackEffectiveAllowFrom( + ctx: SlackMonitorContext, + options?: { includePairingStore?: boolean }, +) { + const includePairingStore = options?.includePairingStore === true; + const storeAllowFrom = includePairingStore + ? 
await readStoreAllowFromForDmPolicy({ + provider: "slack", + accountId: ctx.accountId, + dmPolicy: ctx.dmPolicy, + }) + : []; const allowFrom = normalizeAllowList([...ctx.allowFrom, ...storeAllowFrom]); const allowFromLower = normalizeAllowListLower(allowFrom); return { allowFrom, allowFromLower }; @@ -27,3 +42,137 @@ export function isSlackSenderAllowListed(params: { }) ); } + +export type SlackSystemEventAuthResult = { + allowed: boolean; + reason?: + | "missing-sender" + | "sender-mismatch" + | "channel-not-allowed" + | "dm-disabled" + | "sender-not-allowlisted" + | "sender-not-channel-allowed"; + channelType?: "im" | "mpim" | "channel" | "group"; + channelName?: string; +}; + +export async function authorizeSlackSystemEventSender(params: { + ctx: SlackMonitorContext; + senderId?: string; + channelId?: string; + channelType?: string | null; + expectedSenderId?: string; +}): Promise { + const senderId = params.senderId?.trim(); + if (!senderId) { + return { allowed: false, reason: "missing-sender" }; + } + + const expectedSenderId = params.expectedSenderId?.trim(); + if (expectedSenderId && expectedSenderId !== senderId) { + return { allowed: false, reason: "sender-mismatch" }; + } + + const channelId = params.channelId?.trim(); + let channelType = normalizeSlackChannelType(params.channelType, channelId); + let channelName: string | undefined; + if (channelId) { + const info: { + name?: string; + type?: "im" | "mpim" | "channel" | "group"; + } = await params.ctx.resolveChannelName(channelId).catch(() => ({})); + channelName = info.name; + channelType = normalizeSlackChannelType(params.channelType ?? 
info.type, channelId); + if ( + !params.ctx.isChannelAllowed({ + channelId, + channelName, + channelType, + }) + ) { + return { + allowed: false, + reason: "channel-not-allowed", + channelType, + channelName, + }; + } + } + + const senderInfo: { name?: string } = await params.ctx + .resolveUserName(senderId) + .catch(() => ({})); + const senderName = senderInfo.name; + + const resolveAllowFromLower = async (includePairingStore = false) => + (await resolveSlackEffectiveAllowFrom(params.ctx, { includePairingStore })).allowFromLower; + + if (channelType === "im") { + if (!params.ctx.dmEnabled || params.ctx.dmPolicy === "disabled") { + return { allowed: false, reason: "dm-disabled", channelType, channelName }; + } + if (params.ctx.dmPolicy !== "open") { + const allowFromLower = await resolveAllowFromLower(true); + const senderAllowListed = isSlackSenderAllowListed({ + allowListLower: allowFromLower, + senderId, + senderName, + allowNameMatching: params.ctx.allowNameMatching, + }); + if (!senderAllowListed) { + return { + allowed: false, + reason: "sender-not-allowlisted", + channelType, + channelName, + }; + } + } + } else if (!channelId) { + // No channel context. Apply allowFrom if configured so we fail closed + // for privileged interactive events when owner allowlist is present. 
+ const allowFromLower = await resolveAllowFromLower(false); + if (allowFromLower.length > 0) { + const senderAllowListed = isSlackSenderAllowListed({ + allowListLower: allowFromLower, + senderId, + senderName, + allowNameMatching: params.ctx.allowNameMatching, + }); + if (!senderAllowListed) { + return { allowed: false, reason: "sender-not-allowlisted" }; + } + } + } else { + const channelConfig = resolveSlackChannelConfig({ + channelId, + channelName, + channels: params.ctx.channelsConfig, + defaultRequireMention: params.ctx.defaultRequireMention, + }); + const channelUsersAllowlistConfigured = + Array.isArray(channelConfig?.users) && channelConfig.users.length > 0; + if (channelUsersAllowlistConfigured) { + const channelUserAllowed = resolveSlackUserAllowed({ + allowList: channelConfig?.users, + userId: senderId, + userName: senderName, + allowNameMatching: params.ctx.allowNameMatching, + }); + if (!channelUserAllowed) { + return { + allowed: false, + reason: "sender-not-channel-allowed", + channelType, + channelName, + }; + } + } + } + + return { + allowed: true, + channelType, + channelName, + }; +} diff --git a/src/slack/monitor/channel-config.ts b/src/slack/monitor/channel-config.ts index 15ba7c3b146..b594a34d43b 100644 --- a/src/slack/monitor/channel-config.ts +++ b/src/slack/monitor/channel-config.ts @@ -96,8 +96,16 @@ export function resolveSlackChannelConfig(params: { const keys = Object.keys(entries); const normalizedName = channelName ? normalizeSlackSlug(channelName) : ""; const directName = channelName ? channelName.trim() : ""; + // Slack always delivers channel IDs in uppercase (e.g. C0ABC12345) but + // operators commonly write them in lowercase in their config. Add both + // case variants so the lookup is case-insensitive without requiring a full + // entry-scan. buildChannelKeyCandidates deduplicates identical keys. 
+ const channelIdLower = channelId.toLowerCase(); + const channelIdUpper = channelId.toUpperCase(); const candidates = buildChannelKeyCandidates( channelId, + channelIdLower !== channelId ? channelIdLower : undefined, + channelIdUpper !== channelId ? channelIdUpper : undefined, channelName ? `#${directName}` : undefined, directName, normalizedName, diff --git a/src/slack/monitor/dm-auth.ts b/src/slack/monitor/dm-auth.ts new file mode 100644 index 00000000000..f11a2aa51f7 --- /dev/null +++ b/src/slack/monitor/dm-auth.ts @@ -0,0 +1,67 @@ +import { formatAllowlistMatchMeta } from "../../channels/allowlist-match.js"; +import { issuePairingChallenge } from "../../pairing/pairing-challenge.js"; +import { upsertChannelPairingRequest } from "../../pairing/pairing-store.js"; +import { resolveSlackAllowListMatch } from "./allow-list.js"; +import type { SlackMonitorContext } from "./context.js"; + +export async function authorizeSlackDirectMessage(params: { + ctx: SlackMonitorContext; + accountId: string; + senderId: string; + allowFromLower: string[]; + resolveSenderName: (senderId: string) => Promise<{ name?: string }>; + sendPairingReply: (text: string) => Promise; + onDisabled: () => Promise | void; + onUnauthorized: (params: { allowMatchMeta: string; senderName?: string }) => Promise | void; + log: (message: string) => void; +}): Promise { + if (!params.ctx.dmEnabled || params.ctx.dmPolicy === "disabled") { + await params.onDisabled(); + return false; + } + if (params.ctx.dmPolicy === "open") { + return true; + } + + const sender = await params.resolveSenderName(params.senderId); + const senderName = sender?.name ?? 
undefined; + const allowMatch = resolveSlackAllowListMatch({ + allowList: params.allowFromLower, + id: params.senderId, + name: senderName, + allowNameMatching: params.ctx.allowNameMatching, + }); + const allowMatchMeta = formatAllowlistMatchMeta(allowMatch); + if (allowMatch.allowed) { + return true; + } + + if (params.ctx.dmPolicy === "pairing") { + await issuePairingChallenge({ + channel: "slack", + senderId: params.senderId, + senderIdLine: `Your Slack user id: ${params.senderId}`, + meta: { name: senderName }, + upsertPairingRequest: async ({ id, meta }) => + await upsertChannelPairingRequest({ + channel: "slack", + id, + accountId: params.accountId, + meta, + }), + sendPairingReply: params.sendPairingReply, + onCreated: () => { + params.log( + `slack pairing request sender=${params.senderId} name=${senderName ?? "unknown"} (${allowMatchMeta})`, + ); + }, + onReplyError: (err) => { + params.log(`slack pairing reply failed for ${params.senderId}: ${String(err)}`); + }, + }); + return false; + } + + await params.onUnauthorized({ allowMatchMeta, senderName }); + return false; +} diff --git a/src/slack/monitor/events/interactions.test.ts b/src/slack/monitor/events/interactions.test.ts index 7710239cc71..cfd53506358 100644 --- a/src/slack/monitor/events/interactions.test.ts +++ b/src/slack/monitor/events/interactions.test.ts @@ -30,6 +30,7 @@ type RegisteredViewHandler = (args: { view?: { id?: string; callback_id?: string; + private_metadata?: string; root_view_id?: string; previous_view_id?: string; external_id?: string; @@ -58,7 +59,23 @@ type RegisteredViewClosedHandler = (args: { }; }) => Promise; -function createContext() { +function createContext(overrides?: { + dmEnabled?: boolean; + dmPolicy?: "open" | "allowlist" | "pairing" | "disabled"; + allowFrom?: string[]; + allowNameMatching?: boolean; + channelsConfig?: Record; + isChannelAllowed?: (params: { + channelId?: string; + channelName?: string; + channelType?: "im" | "mpim" | "channel" | "group"; + }) => 
boolean; + resolveUserName?: (userId: string) => Promise<{ name?: string }>; + resolveChannelName?: (channelId: string) => Promise<{ + name?: string; + type?: "im" | "mpim" | "channel" | "group"; + }>; +}) { let handler: RegisteredHandler | null = null; let viewHandler: RegisteredViewHandler | null = null; let viewClosedHandler: RegisteredViewClosedHandler | null = null; @@ -80,9 +97,40 @@ function createContext() { }; const runtimeLog = vi.fn(); const resolveSessionKey = vi.fn().mockReturnValue("agent:ops:slack:channel:C1"); + const isChannelAllowed = vi + .fn< + (params: { + channelId?: string; + channelName?: string; + channelType?: "im" | "mpim" | "channel" | "group"; + }) => boolean + >() + .mockImplementation((params) => overrides?.isChannelAllowed?.(params) ?? true); + const resolveUserName = vi + .fn<(userId: string) => Promise<{ name?: string }>>() + .mockImplementation((userId) => overrides?.resolveUserName?.(userId) ?? Promise.resolve({})); + const resolveChannelName = vi + .fn< + (channelId: string) => Promise<{ + name?: string; + type?: "im" | "mpim" | "channel" | "group"; + }> + >() + .mockImplementation( + (channelId) => overrides?.resolveChannelName?.(channelId) ?? Promise.resolve({}), + ); const ctx = { app, runtime: { log: runtimeLog }, + dmEnabled: overrides?.dmEnabled ?? true, + dmPolicy: overrides?.dmPolicy ?? ("open" as const), + allowFrom: overrides?.allowFrom ?? [], + allowNameMatching: overrides?.allowNameMatching ?? false, + channelsConfig: overrides?.channelsConfig ?? 
{}, + defaultRequireMention: true, + isChannelAllowed, + resolveUserName, + resolveChannelName, resolveSlackSystemEventSessionKey: resolveSessionKey, }; return { @@ -90,6 +138,9 @@ function createContext() { app, runtimeLog, resolveSessionKey, + isChannelAllowed, + resolveUserName, + resolveChannelName, getHandler: () => handler, getViewHandler: () => viewHandler, getViewClosedHandler: () => viewClosedHandler, @@ -168,7 +219,7 @@ describe("registerSlackInteractionEvents", () => { }); expect(resolveSessionKey).toHaveBeenCalledWith({ channelId: "C1", - channelType: undefined, + channelType: "channel", }); expect(app.client.chat.update).toHaveBeenCalledTimes(1); }); @@ -228,6 +279,85 @@ describe("registerSlackInteractionEvents", () => { ); }); + it("blocks block actions from users outside configured channel users allowlist", async () => { + enqueueSystemEventMock.mockClear(); + const { ctx, app, getHandler } = createContext({ + channelsConfig: { + C1: { users: ["U_ALLOWED"] }, + }, + }); + registerSlackInteractionEvents({ ctx: ctx as never }); + const handler = getHandler(); + expect(handler).toBeTruthy(); + + const ack = vi.fn().mockResolvedValue(undefined); + const respond = vi.fn().mockResolvedValue(undefined); + await handler!({ + ack, + respond, + body: { + user: { id: "U_DENIED" }, + channel: { id: "C1" }, + message: { + ts: "201.202", + blocks: [{ type: "actions", block_id: "verify_block", elements: [] }], + }, + }, + action: { + type: "button", + action_id: "openclaw:verify", + block_id: "verify_block", + }, + }); + + expect(ack).toHaveBeenCalled(); + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + expect(app.client.chat.update).not.toHaveBeenCalled(); + expect(respond).toHaveBeenCalledWith({ + text: "You are not authorized to use this control.", + response_type: "ephemeral", + }); + }); + + it("blocks DM block actions when sender is not in allowFrom", async () => { + enqueueSystemEventMock.mockClear(); + const { ctx, app, getHandler } = 
createContext({ + dmPolicy: "allowlist", + allowFrom: ["U_OWNER"], + }); + registerSlackInteractionEvents({ ctx: ctx as never }); + const handler = getHandler(); + expect(handler).toBeTruthy(); + + const ack = vi.fn().mockResolvedValue(undefined); + const respond = vi.fn().mockResolvedValue(undefined); + await handler!({ + ack, + respond, + body: { + user: { id: "U_ATTACKER" }, + channel: { id: "D222" }, + message: { + ts: "301.302", + blocks: [{ type: "actions", block_id: "verify_block", elements: [] }], + }, + }, + action: { + type: "button", + action_id: "openclaw:verify", + block_id: "verify_block", + }, + }); + + expect(ack).toHaveBeenCalled(); + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + expect(app.client.chat.update).not.toHaveBeenCalled(); + expect(respond).toHaveBeenCalledWith({ + text: "You are not authorized to use this control.", + response_type: "ephemeral", + }); + }); + it("ignores malformed action payloads after ack and logs warning", async () => { enqueueSystemEventMock.mockClear(); const { ctx, app, getHandler, runtimeLog } = createContext(); @@ -338,7 +468,7 @@ describe("registerSlackInteractionEvents", () => { expect(ack).toHaveBeenCalled(); expect(resolveSessionKey).toHaveBeenCalledWith({ channelId: "C222", - channelType: undefined, + channelType: "channel", }); expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); const [eventText] = enqueueSystemEventMock.mock.calls[0] as [string]; @@ -697,7 +827,11 @@ describe("registerSlackInteractionEvents", () => { previous_view_id: "VPREV", external_id: "deploy-ext-1", hash: "view-hash-1", - private_metadata: JSON.stringify({ channelId: "D123", channelType: "im" }), + private_metadata: JSON.stringify({ + channelId: "D123", + channelType: "im", + userId: "U777", + }), state: { values: { env_block: { @@ -771,6 +905,59 @@ describe("registerSlackInteractionEvents", () => { ); }); + it("blocks modal events when private metadata userId does not match submitter", async () => { + 
enqueueSystemEventMock.mockClear(); + const { ctx, getViewHandler } = createContext(); + registerSlackInteractionEvents({ ctx: ctx as never }); + const viewHandler = getViewHandler(); + expect(viewHandler).toBeTruthy(); + + const ack = vi.fn().mockResolvedValue(undefined); + await viewHandler!({ + ack, + body: { + user: { id: "U222" }, + view: { + callback_id: "openclaw:deploy_form", + private_metadata: JSON.stringify({ + channelId: "D123", + channelType: "im", + userId: "U111", + }), + }, + }, + } as never); + + expect(ack).toHaveBeenCalled(); + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("blocks modal events when private metadata is missing userId", async () => { + enqueueSystemEventMock.mockClear(); + const { ctx, getViewHandler } = createContext(); + registerSlackInteractionEvents({ ctx: ctx as never }); + const viewHandler = getViewHandler(); + expect(viewHandler).toBeTruthy(); + + const ack = vi.fn().mockResolvedValue(undefined); + await viewHandler!({ + ack, + body: { + user: { id: "U222" }, + view: { + callback_id: "openclaw:deploy_form", + private_metadata: JSON.stringify({ + channelId: "D123", + channelType: "im", + }), + }, + }, + } as never); + + expect(ack).toHaveBeenCalled(); + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + it("captures modal input labels and picker values across block types", async () => { enqueueSystemEventMock.mockClear(); const { ctx, getViewHandler } = createContext(); @@ -786,6 +973,7 @@ describe("registerSlackInteractionEvents", () => { view: { id: "V400", callback_id: "openclaw:routing_form", + private_metadata: JSON.stringify({ userId: "U444" }), state: { values: { env_block: { @@ -1001,6 +1189,7 @@ describe("registerSlackInteractionEvents", () => { view: { id: "V555", callback_id: "openclaw:long_richtext", + private_metadata: JSON.stringify({ userId: "U555" }), state: { values: { richtext_block: { @@ -1054,7 +1243,10 @@ describe("registerSlackInteractionEvents", () => { 
previous_view_id: "VPREV900", external_id: "deploy-ext-900", hash: "view-hash-900", - private_metadata: JSON.stringify({ sessionKey: "agent:main:slack:channel:C99" }), + private_metadata: JSON.stringify({ + sessionKey: "agent:main:slack:channel:C99", + userId: "U900", + }), state: { values: { env_block: { @@ -1101,7 +1293,10 @@ describe("registerSlackInteractionEvents", () => { viewId: "V900", userId: "U900", isCleared: true, - privateMetadata: JSON.stringify({ sessionKey: "agent:main:slack:channel:C99" }), + privateMetadata: JSON.stringify({ + sessionKey: "agent:main:slack:channel:C99", + userId: "U900", + }), rootViewId: "VROOT900", previousViewId: "VPREV900", externalId: "deploy-ext-900", @@ -1131,6 +1326,7 @@ describe("registerSlackInteractionEvents", () => { view: { id: "V901", callback_id: "openclaw:deploy_form", + private_metadata: JSON.stringify({ userId: "U901" }), }, }, }); diff --git a/src/slack/monitor/events/interactions.ts b/src/slack/monitor/events/interactions.ts index cbc4fc9f36e..40a06ad9f2e 100644 --- a/src/slack/monitor/events/interactions.ts +++ b/src/slack/monitor/events/interactions.ts @@ -2,6 +2,7 @@ import type { SlackActionMiddlewareArgs } from "@slack/bolt"; import type { Block, KnownBlock } from "@slack/web-api"; import { enqueueSystemEvent } from "../../../infra/system-events.js"; import { parseSlackModalPrivateMetadata } from "../../modal-metadata.js"; +import { authorizeSlackSystemEventSender } from "../auth.js"; import type { SlackMonitorContext } from "../context.js"; import { escapeSlackMrkdwn } from "../mrkdwn.js"; @@ -78,6 +79,7 @@ type SlackModalBody = { type SlackModalEventBase = { callbackId: string; userId: string; + expectedUserId?: string; viewId?: string; sessionRouting: ReturnType; payload: { @@ -366,11 +368,15 @@ function summarizeViewState(values: unknown): ModalInputSummary[] { function resolveModalSessionRouting(params: { ctx: SlackMonitorContext; - privateMetadata: unknown; + metadata: ReturnType; }): { sessionKey: 
string; channelId?: string; channelType?: string } { - const metadata = parseSlackModalPrivateMetadata(params.privateMetadata); + const metadata = params.metadata; if (metadata.sessionKey) { - return { sessionKey: metadata.sessionKey }; + return { + sessionKey: metadata.sessionKey, + channelId: metadata.channelId, + channelType: metadata.channelType, + }; } if (metadata.channelId) { return { @@ -416,17 +422,19 @@ function resolveSlackModalEventBase(params: { ctx: SlackMonitorContext; body: SlackModalBody; }): SlackModalEventBase { + const metadata = parseSlackModalPrivateMetadata(params.body.view?.private_metadata); const callbackId = params.body.view?.callback_id ?? "unknown"; const userId = params.body.user?.id ?? "unknown"; const viewId = params.body.view?.id; const inputs = summarizeViewState(params.body.view?.state?.values); const sessionRouting = resolveModalSessionRouting({ ctx: params.ctx, - privateMetadata: params.body.view?.private_metadata, + metadata, }); return { callbackId, userId, + expectedUserId: metadata.userId, viewId, sessionRouting, payload: { @@ -449,16 +457,17 @@ function resolveSlackModalEventBase(params: { }; } -function emitSlackModalLifecycleEvent(params: { +async function emitSlackModalLifecycleEvent(params: { ctx: SlackMonitorContext; body: SlackModalBody; interactionType: SlackModalInteractionKind; contextPrefix: "slack:interaction:view" | "slack:interaction:view-closed"; -}): void { - const { callbackId, userId, viewId, sessionRouting, payload } = resolveSlackModalEventBase({ - ctx: params.ctx, - body: params.body, - }); +}): Promise { + const { callbackId, userId, expectedUserId, viewId, sessionRouting, payload } = + resolveSlackModalEventBase({ + ctx: params.ctx, + body: params.body, + }); const isViewClosed = params.interactionType === "view_closed"; const isCleared = params.body.is_cleared === true; const eventPayload = isViewClosed @@ -482,6 +491,27 @@ function emitSlackModalLifecycleEvent(params: { ); } + if (!expectedUserId) { 
+ params.ctx.runtime.log?.( + `slack:interaction drop modal callback=${callbackId} user=${userId} reason=missing-expected-user`, + ); + return; + } + + const auth = await authorizeSlackSystemEventSender({ + ctx: params.ctx, + senderId: userId, + channelId: sessionRouting.channelId, + channelType: sessionRouting.channelType, + expectedSenderId: expectedUserId, + }); + if (!auth.allowed) { + params.ctx.runtime.log?.( + `slack:interaction drop modal callback=${callbackId} user=${userId} reason=${auth.reason ?? "unauthorized"}`, + ); + return; + } + enqueueSystemEvent(`Slack interaction: ${JSON.stringify(eventPayload)}`, { sessionKey: sessionRouting.sessionKey, contextKey: [params.contextPrefix, callbackId, viewId, userId].filter(Boolean).join(":"), @@ -497,7 +527,7 @@ function registerModalLifecycleHandler(params: { }) { params.register(params.matcher, async ({ ack, body }: SlackModalEventHandlerArgs) => { await ack(); - emitSlackModalLifecycleEvent({ + await emitSlackModalLifecycleEvent({ ctx: params.ctx, body: body as SlackModalBody, interactionType: params.interactionType, @@ -557,6 +587,27 @@ export function registerSlackInteractionEvents(params: { ctx: SlackMonitorContex const channelId = typedBody.channel?.id ?? typedBody.container?.channel_id; const messageTs = typedBody.message?.ts ?? typedBody.container?.message_ts; const threadTs = typedBody.container?.thread_ts; + const auth = await authorizeSlackSystemEventSender({ + ctx, + senderId: userId, + channelId, + }); + if (!auth.allowed) { + ctx.runtime.log?.( + `slack:interaction drop action=${actionId} user=${userId} channel=${channelId ?? "unknown"} reason=${auth.reason ?? "unauthorized"}`, + ); + if (respond) { + try { + await respond({ + text: "You are not authorized to use this control.", + response_type: "ephemeral", + }); + } catch { + // Best-effort feedback only. 
+ } + } + return; + } const actionSummary = summarizeAction(typedAction); const eventPayload: InteractionSummary = { interactionType: "block_action", @@ -581,7 +632,7 @@ export function registerSlackInteractionEvents(params: { ctx: SlackMonitorContex // Pass undefined (not "unknown") to allow proper main session fallback const sessionKey = ctx.resolveSlackSystemEventSessionKey({ channelId: channelId, - channelType: undefined, + channelType: auth.channelType, }); // Build context key - only include defined values to avoid "unknown" noise diff --git a/src/slack/monitor/events/members.test.ts b/src/slack/monitor/events/members.test.ts new file mode 100644 index 00000000000..bc9c6805aaa --- /dev/null +++ b/src/slack/monitor/events/members.test.ts @@ -0,0 +1,131 @@ +import { describe, expect, it, vi } from "vitest"; +import { registerSlackMemberEvents } from "./members.js"; +import { + createSlackSystemEventTestHarness, + type SlackSystemEventTestOverrides, +} from "./system-event-test-harness.js"; + +const enqueueSystemEventMock = vi.fn(); +const readAllowFromStoreMock = vi.fn(); + +vi.mock("../../../infra/system-events.js", () => ({ + enqueueSystemEvent: (...args: unknown[]) => enqueueSystemEventMock(...args), +})); + +vi.mock("../../../pairing/pairing-store.js", () => ({ + readChannelAllowFromStore: (...args: unknown[]) => readAllowFromStoreMock(...args), +})); + +type SlackMemberHandler = (args: { + event: Record; + body: unknown; +}) => Promise; + +function createMembersContext(overrides?: SlackSystemEventTestOverrides) { + const harness = createSlackSystemEventTestHarness(overrides); + registerSlackMemberEvents({ ctx: harness.ctx }); + return { + getJoinedHandler: () => + harness.getHandler("member_joined_channel") as SlackMemberHandler | null, + getLeftHandler: () => harness.getHandler("member_left_channel") as SlackMemberHandler | null, + }; +} + +function makeMemberEvent(overrides?: { user?: string; channel?: string }) { + return { + type: 
"member_joined_channel", + user: overrides?.user ?? "U1", + channel: overrides?.channel ?? "D1", + event_ts: "123.456", + }; +} + +describe("registerSlackMemberEvents", () => { + it("enqueues DM member events when dmPolicy is open", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getJoinedHandler } = createMembersContext({ dmPolicy: "open" }); + const joinedHandler = getJoinedHandler(); + expect(joinedHandler).toBeTruthy(); + + await joinedHandler!({ + event: makeMemberEvent(), + body: {}, + }); + + expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); + }); + + it("blocks DM member events when dmPolicy is disabled", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getJoinedHandler } = createMembersContext({ dmPolicy: "disabled" }); + const joinedHandler = getJoinedHandler(); + expect(joinedHandler).toBeTruthy(); + + await joinedHandler!({ + event: makeMemberEvent(), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("blocks DM member events for unauthorized senders in allowlist mode", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getJoinedHandler } = createMembersContext({ + dmPolicy: "allowlist", + allowFrom: ["U2"], + }); + const joinedHandler = getJoinedHandler(); + expect(joinedHandler).toBeTruthy(); + + await joinedHandler!({ + event: makeMemberEvent({ user: "U1" }), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("allows DM member events for authorized senders in allowlist mode", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getLeftHandler } = createMembersContext({ + dmPolicy: "allowlist", + allowFrom: ["U1"], + }); + const leftHandler = getLeftHandler(); + 
expect(leftHandler).toBeTruthy(); + + await leftHandler!({ + event: { + ...makeMemberEvent({ user: "U1" }), + type: "member_left_channel", + }, + body: {}, + }); + + expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); + }); + + it("blocks channel member events for users outside channel users allowlist", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getJoinedHandler } = createMembersContext({ + dmPolicy: "open", + channelType: "channel", + channelUsers: ["U_OWNER"], + }); + const joinedHandler = getJoinedHandler(); + expect(joinedHandler).toBeTruthy(); + + await joinedHandler!({ + event: makeMemberEvent({ channel: "C1", user: "U_ATTACKER" }), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/slack/monitor/events/members.ts b/src/slack/monitor/events/members.ts index 652c75bb4e2..ca7907706d2 100644 --- a/src/slack/monitor/events/members.ts +++ b/src/slack/monitor/events/members.ts @@ -1,9 +1,9 @@ import type { SlackEventMiddlewareArgs } from "@slack/bolt"; import { danger } from "../../../globals.js"; import { enqueueSystemEvent } from "../../../infra/system-events.js"; -import { resolveSlackChannelLabel } from "../channel-config.js"; import type { SlackMonitorContext } from "../context.js"; import type { SlackMemberChannelEvent } from "../types.js"; +import { authorizeAndResolveSlackSystemEventContext } from "./system-event-context.js"; export function registerSlackMemberEvents(params: { ctx: SlackMonitorContext }) { const { ctx } = params; @@ -21,27 +21,20 @@ export function registerSlackMemberEvents(params: { ctx: SlackMonitorContext }) const channelId = payload.channel; const channelInfo = channelId ? await ctx.resolveChannelName(channelId) : {}; const channelType = payload.channel_type ?? 
channelInfo?.type; - if ( - !ctx.isChannelAllowed({ - channelId, - channelName: channelInfo?.name, - channelType, - }) - ) { + const ingressContext = await authorizeAndResolveSlackSystemEventContext({ + ctx, + senderId: payload.user, + channelId, + channelType, + eventKind: `member-${params.verb}`, + }); + if (!ingressContext) { return; } const userInfo = payload.user ? await ctx.resolveUserName(payload.user) : {}; const userLabel = userInfo?.name ?? payload.user ?? "someone"; - const label = resolveSlackChannelLabel({ - channelId, - channelName: channelInfo?.name, - }); - const sessionKey = ctx.resolveSlackSystemEventSessionKey({ - channelId, - channelType, - }); - enqueueSystemEvent(`Slack: ${userLabel} ${params.verb} ${label}.`, { - sessionKey, + enqueueSystemEvent(`Slack: ${userLabel} ${params.verb} ${ingressContext.channelLabel}.`, { + sessionKey: ingressContext.sessionKey, contextKey: `slack:member:${params.verb}:${channelId ?? "unknown"}:${payload.user ?? "unknown"}`, }); } catch (err) { diff --git a/src/slack/monitor/events/messages.test.ts b/src/slack/monitor/events/messages.test.ts new file mode 100644 index 00000000000..0534cdcfa73 --- /dev/null +++ b/src/slack/monitor/events/messages.test.ts @@ -0,0 +1,196 @@ +import { describe, expect, it, vi } from "vitest"; +import { registerSlackMessageEvents } from "./messages.js"; +import { + createSlackSystemEventTestHarness, + type SlackSystemEventTestOverrides, +} from "./system-event-test-harness.js"; + +const enqueueSystemEventMock = vi.fn(); +const readAllowFromStoreMock = vi.fn(); + +vi.mock("../../../infra/system-events.js", () => ({ + enqueueSystemEvent: (...args: unknown[]) => enqueueSystemEventMock(...args), +})); + +vi.mock("../../../pairing/pairing-store.js", () => ({ + readChannelAllowFromStore: (...args: unknown[]) => readAllowFromStoreMock(...args), +})); + +type SlackMessageHandler = (args: { + event: Record; + body: unknown; +}) => Promise; + +function createMessagesContext(overrides?: 
SlackSystemEventTestOverrides) { + const harness = createSlackSystemEventTestHarness(overrides); + const handleSlackMessage = vi.fn(async () => {}); + registerSlackMessageEvents({ + ctx: harness.ctx, + handleSlackMessage, + }); + return { + getMessageHandler: () => harness.getHandler("message") as SlackMessageHandler | null, + handleSlackMessage, + }; +} + +function makeChangedEvent(overrides?: { channel?: string; user?: string }) { + const user = overrides?.user ?? "U1"; + return { + type: "message", + subtype: "message_changed", + channel: overrides?.channel ?? "D1", + message: { + ts: "123.456", + user, + }, + previous_message: { + ts: "123.450", + user, + }, + event_ts: "123.456", + }; +} + +function makeDeletedEvent(overrides?: { channel?: string; user?: string }) { + return { + type: "message", + subtype: "message_deleted", + channel: overrides?.channel ?? "D1", + deleted_ts: "123.456", + previous_message: { + ts: "123.450", + user: overrides?.user ?? "U1", + }, + event_ts: "123.456", + }; +} + +function makeThreadBroadcastEvent(overrides?: { channel?: string; user?: string }) { + const user = overrides?.user ?? "U1"; + return { + type: "message", + subtype: "thread_broadcast", + channel: overrides?.channel ?? 
"D1", + user, + message: { + ts: "123.456", + user, + }, + event_ts: "123.456", + }; +} + +describe("registerSlackMessageEvents", () => { + it("enqueues message_changed system events when dmPolicy is open", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getMessageHandler } = createMessagesContext({ dmPolicy: "open" }); + const messageHandler = getMessageHandler(); + expect(messageHandler).toBeTruthy(); + + await messageHandler!({ + event: makeChangedEvent(), + body: {}, + }); + + expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); + }); + + it("blocks message_changed system events when dmPolicy is disabled", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getMessageHandler } = createMessagesContext({ dmPolicy: "disabled" }); + const messageHandler = getMessageHandler(); + expect(messageHandler).toBeTruthy(); + + await messageHandler!({ + event: makeChangedEvent(), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("blocks message_changed system events for unauthorized senders in allowlist mode", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getMessageHandler } = createMessagesContext({ + dmPolicy: "allowlist", + allowFrom: ["U2"], + }); + const messageHandler = getMessageHandler(); + expect(messageHandler).toBeTruthy(); + + await messageHandler!({ + event: makeChangedEvent({ user: "U1" }), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("blocks message_deleted system events for users outside channel users allowlist", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getMessageHandler } = createMessagesContext({ + dmPolicy: "open", + channelType: "channel", + channelUsers: ["U_OWNER"], + 
}); + const messageHandler = getMessageHandler(); + expect(messageHandler).toBeTruthy(); + + await messageHandler!({ + event: makeDeletedEvent({ channel: "C1", user: "U_ATTACKER" }), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("blocks thread_broadcast system events without an authenticated sender", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getMessageHandler } = createMessagesContext({ dmPolicy: "open" }); + const messageHandler = getMessageHandler(); + expect(messageHandler).toBeTruthy(); + + await messageHandler!({ + event: { + ...makeThreadBroadcastEvent(), + user: undefined, + message: { + ts: "123.456", + }, + }, + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("passes regular message events to the message handler", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getMessageHandler, handleSlackMessage } = createMessagesContext({ + dmPolicy: "open", + }); + const messageHandler = getMessageHandler(); + expect(messageHandler).toBeTruthy(); + + await messageHandler!({ + event: { + type: "message", + channel: "D1", + user: "U1", + text: "hello", + ts: "123.456", + }, + body: {}, + }); + + expect(handleSlackMessage).toHaveBeenCalledTimes(1); + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/slack/monitor/events/messages.ts b/src/slack/monitor/events/messages.ts index 0ccb8dc100b..5d16bb967f6 100644 --- a/src/slack/monitor/events/messages.ts +++ b/src/slack/monitor/events/messages.ts @@ -2,7 +2,6 @@ import type { SlackEventMiddlewareArgs } from "@slack/bolt"; import { danger } from "../../../globals.js"; import { enqueueSystemEvent } from "../../../infra/system-events.js"; import type { SlackAppMentionEvent, SlackMessageEvent } from "../../types.js"; -import { resolveSlackChannelLabel } from 
"../channel-config.js"; import type { SlackMonitorContext } from "../context.js"; import type { SlackMessageHandler } from "../message-handler.js"; import type { @@ -10,6 +9,7 @@ import type { SlackMessageDeletedEvent, SlackThreadBroadcastEvent, } from "../types.js"; +import { authorizeAndResolveSlackSystemEventContext } from "./system-event-context.js"; export function registerSlackMessageEvents(params: { ctx: SlackMonitorContext; @@ -17,30 +17,15 @@ export function registerSlackMessageEvents(params: { }) { const { ctx, handleSlackMessage } = params; - const resolveSlackChannelSystemEventTarget = async (channelId: string | undefined) => { - const channelInfo = channelId ? await ctx.resolveChannelName(channelId) : {}; - const channelType = channelInfo?.type; - if ( - !ctx.isChannelAllowed({ - channelId, - channelName: channelInfo?.name, - channelType, - }) - ) { - return null; - } - - const label = resolveSlackChannelLabel({ - channelId, - channelName: channelInfo?.name, - }); - const sessionKey = ctx.resolveSlackSystemEventSessionKey({ - channelId, - channelType, - }); - - return { channelInfo, channelType, label, sessionKey }; - }; + const resolveChangedSenderId = (changed: SlackMessageChangedEvent): string | undefined => + changed.message?.user ?? + changed.previous_message?.user ?? + changed.message?.bot_id ?? + changed.previous_message?.bot_id; + const resolveDeletedSenderId = (deleted: SlackMessageDeletedEvent): string | undefined => + deleted.previous_message?.user ?? deleted.previous_message?.bot_id; + const resolveThreadBroadcastSenderId = (thread: SlackThreadBroadcastEvent): string | undefined => + thread.user ?? thread.message?.user ?? 
thread.message?.bot_id; ctx.app.event("message", async ({ event, body }: SlackEventMiddlewareArgs<"message">) => { try { @@ -52,13 +37,18 @@ export function registerSlackMessageEvents(params: { if (message.subtype === "message_changed") { const changed = event as SlackMessageChangedEvent; const channelId = changed.channel; - const target = await resolveSlackChannelSystemEventTarget(channelId); - if (!target) { + const ingressContext = await authorizeAndResolveSlackSystemEventContext({ + ctx, + senderId: resolveChangedSenderId(changed), + channelId, + eventKind: "message_changed", + }); + if (!ingressContext) { return; } const messageId = changed.message?.ts ?? changed.previous_message?.ts; - enqueueSystemEvent(`Slack message edited in ${target.label}.`, { - sessionKey: target.sessionKey, + enqueueSystemEvent(`Slack message edited in ${ingressContext.channelLabel}.`, { + sessionKey: ingressContext.sessionKey, contextKey: `slack:message:changed:${channelId ?? "unknown"}:${messageId ?? changed.event_ts ?? "unknown"}`, }); return; @@ -66,12 +56,17 @@ export function registerSlackMessageEvents(params: { if (message.subtype === "message_deleted") { const deleted = event as SlackMessageDeletedEvent; const channelId = deleted.channel; - const target = await resolveSlackChannelSystemEventTarget(channelId); - if (!target) { + const ingressContext = await authorizeAndResolveSlackSystemEventContext({ + ctx, + senderId: resolveDeletedSenderId(deleted), + channelId, + eventKind: "message_deleted", + }); + if (!ingressContext) { return; } - enqueueSystemEvent(`Slack message deleted in ${target.label}.`, { - sessionKey: target.sessionKey, + enqueueSystemEvent(`Slack message deleted in ${ingressContext.channelLabel}.`, { + sessionKey: ingressContext.sessionKey, contextKey: `slack:message:deleted:${channelId ?? "unknown"}:${deleted.deleted_ts ?? deleted.event_ts ?? 
"unknown"}`, }); return; @@ -79,13 +74,18 @@ export function registerSlackMessageEvents(params: { if (message.subtype === "thread_broadcast") { const thread = event as SlackThreadBroadcastEvent; const channelId = thread.channel; - const target = await resolveSlackChannelSystemEventTarget(channelId); - if (!target) { + const ingressContext = await authorizeAndResolveSlackSystemEventContext({ + ctx, + senderId: resolveThreadBroadcastSenderId(thread), + channelId, + eventKind: "thread_broadcast", + }); + if (!ingressContext) { return; } const messageId = thread.message?.ts ?? thread.event_ts; - enqueueSystemEvent(`Slack thread reply broadcast in ${target.label}.`, { - sessionKey: target.sessionKey, + enqueueSystemEvent(`Slack thread reply broadcast in ${ingressContext.channelLabel}.`, { + sessionKey: ingressContext.sessionKey, contextKey: `slack:thread:broadcast:${channelId ?? "unknown"}:${messageId ?? "unknown"}`, }); return; diff --git a/src/slack/monitor/events/pins.test.ts b/src/slack/monitor/events/pins.test.ts new file mode 100644 index 00000000000..00c2528bbdb --- /dev/null +++ b/src/slack/monitor/events/pins.test.ts @@ -0,0 +1,130 @@ +import { describe, expect, it, vi } from "vitest"; +import { registerSlackPinEvents } from "./pins.js"; +import { + createSlackSystemEventTestHarness, + type SlackSystemEventTestOverrides, +} from "./system-event-test-harness.js"; + +const enqueueSystemEventMock = vi.fn(); +const readAllowFromStoreMock = vi.fn(); + +vi.mock("../../../infra/system-events.js", () => ({ + enqueueSystemEvent: (...args: unknown[]) => enqueueSystemEventMock(...args), +})); + +vi.mock("../../../pairing/pairing-store.js", () => ({ + readChannelAllowFromStore: (...args: unknown[]) => readAllowFromStoreMock(...args), +})); + +type SlackPinHandler = (args: { event: Record; body: unknown }) => Promise; + +function createPinContext(overrides?: SlackSystemEventTestOverrides) { + const harness = createSlackSystemEventTestHarness(overrides); + 
registerSlackPinEvents({ ctx: harness.ctx }); + return { + getAddedHandler: () => harness.getHandler("pin_added") as SlackPinHandler | null, + getRemovedHandler: () => harness.getHandler("pin_removed") as SlackPinHandler | null, + }; +} + +function makePinEvent(overrides?: { user?: string; channel?: string }) { + return { + type: "pin_added", + user: overrides?.user ?? "U1", + channel_id: overrides?.channel ?? "D1", + event_ts: "123.456", + item: { + type: "message", + message: { + ts: "123.456", + }, + }, + }; +} + +describe("registerSlackPinEvents", () => { + it("enqueues DM pin system events when dmPolicy is open", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createPinContext({ dmPolicy: "open" }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makePinEvent(), + body: {}, + }); + + expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); + }); + + it("blocks DM pin system events when dmPolicy is disabled", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createPinContext({ dmPolicy: "disabled" }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makePinEvent(), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("blocks DM pin system events for unauthorized senders in allowlist mode", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createPinContext({ + dmPolicy: "allowlist", + allowFrom: ["U2"], + }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makePinEvent({ user: "U1" }), + body: {}, + }); + + 
expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("allows DM pin system events for authorized senders in allowlist mode", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createPinContext({ + dmPolicy: "allowlist", + allowFrom: ["U1"], + }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makePinEvent({ user: "U1" }), + body: {}, + }); + + expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); + }); + + it("blocks channel pin events for users outside channel users allowlist", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createPinContext({ + dmPolicy: "open", + channelType: "channel", + channelUsers: ["U_OWNER"], + }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makePinEvent({ channel: "C1", user: "U_ATTACKER" }), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/slack/monitor/events/pins.ts b/src/slack/monitor/events/pins.ts index 2613bc35e24..9a63aa4a972 100644 --- a/src/slack/monitor/events/pins.ts +++ b/src/slack/monitor/events/pins.ts @@ -1,9 +1,9 @@ import type { SlackEventMiddlewareArgs } from "@slack/bolt"; import { danger } from "../../../globals.js"; import { enqueueSystemEvent } from "../../../infra/system-events.js"; -import { resolveSlackChannelLabel } from "../channel-config.js"; import type { SlackMonitorContext } from "../context.js"; import type { SlackPinEvent } from "../types.js"; +import { authorizeAndResolveSlackSystemEventContext } from "./system-event-context.js"; async function handleSlackPinEvent(params: { ctx: SlackMonitorContext; @@ -22,32 +22,26 @@ async function handleSlackPinEvent(params: { const payload = event as SlackPinEvent; const 
channelId = payload.channel_id; - const channelInfo = channelId ? await ctx.resolveChannelName(channelId) : {}; - if ( - !ctx.isChannelAllowed({ - channelId, - channelName: channelInfo?.name, - channelType: channelInfo?.type, - }) - ) { + const ingressContext = await authorizeAndResolveSlackSystemEventContext({ + ctx, + senderId: payload.user, + channelId, + eventKind: "pin", + }); + if (!ingressContext) { return; } - const label = resolveSlackChannelLabel({ - channelId, - channelName: channelInfo?.name, - }); const userInfo = payload.user ? await ctx.resolveUserName(payload.user) : {}; const userLabel = userInfo?.name ?? payload.user ?? "someone"; const itemType = payload.item?.type ?? "item"; const messageId = payload.item?.message?.ts ?? payload.event_ts; - const sessionKey = ctx.resolveSlackSystemEventSessionKey({ - channelId, - channelType: channelInfo?.type ?? undefined, - }); - enqueueSystemEvent(`Slack: ${userLabel} ${action} a ${itemType} in ${label}.`, { - sessionKey, - contextKey: `slack:pin:${contextKeySuffix}:${channelId ?? "unknown"}:${messageId ?? "unknown"}`, - }); + enqueueSystemEvent( + `Slack: ${userLabel} ${action} a ${itemType} in ${ingressContext.channelLabel}.`, + { + sessionKey: ingressContext.sessionKey, + contextKey: `slack:pin:${contextKeySuffix}:${channelId ?? "unknown"}:${messageId ?? 
"unknown"}`, + }, + ); } catch (err) { ctx.runtime.error?.(danger(`slack ${errorLabel} handler failed: ${String(err)}`)); } diff --git a/src/slack/monitor/events/reactions.test.ts b/src/slack/monitor/events/reactions.test.ts new file mode 100644 index 00000000000..e95a1ec5a8c --- /dev/null +++ b/src/slack/monitor/events/reactions.test.ts @@ -0,0 +1,153 @@ +import { describe, expect, it, vi } from "vitest"; +import { registerSlackReactionEvents } from "./reactions.js"; +import { + createSlackSystemEventTestHarness, + type SlackSystemEventTestOverrides, +} from "./system-event-test-harness.js"; + +const enqueueSystemEventMock = vi.fn(); +const readAllowFromStoreMock = vi.fn(); + +vi.mock("../../../infra/system-events.js", () => ({ + enqueueSystemEvent: (...args: unknown[]) => enqueueSystemEventMock(...args), +})); + +vi.mock("../../../pairing/pairing-store.js", () => ({ + readChannelAllowFromStore: (...args: unknown[]) => readAllowFromStoreMock(...args), +})); + +type SlackReactionHandler = (args: { + event: Record; + body: unknown; +}) => Promise; + +function createReactionContext(overrides?: SlackSystemEventTestOverrides) { + const harness = createSlackSystemEventTestHarness(overrides); + registerSlackReactionEvents({ ctx: harness.ctx }); + return { + getAddedHandler: () => harness.getHandler("reaction_added") as SlackReactionHandler | null, + getRemovedHandler: () => harness.getHandler("reaction_removed") as SlackReactionHandler | null, + }; +} + +function makeReactionEvent(overrides?: { user?: string; channel?: string }) { + return { + type: "reaction_added", + user: overrides?.user ?? "U1", + reaction: "thumbsup", + item: { + type: "message", + channel: overrides?.channel ?? 
"D1", + ts: "123.456", + }, + item_user: "UBOT", + }; +} + +describe("registerSlackReactionEvents", () => { + it("enqueues DM reaction system events when dmPolicy is open", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createReactionContext({ dmPolicy: "open" }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makeReactionEvent(), + body: {}, + }); + + expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); + }); + + it("blocks DM reaction system events when dmPolicy is disabled", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createReactionContext({ dmPolicy: "disabled" }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makeReactionEvent(), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("blocks DM reaction system events for unauthorized senders in allowlist mode", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createReactionContext({ + dmPolicy: "allowlist", + allowFrom: ["U2"], + }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makeReactionEvent({ user: "U1" }), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); + + it("allows DM reaction system events for authorized senders in allowlist mode", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createReactionContext({ + dmPolicy: "allowlist", + allowFrom: ["U1"], + }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await 
addedHandler!({ + event: makeReactionEvent({ user: "U1" }), + body: {}, + }); + + expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); + }); + + it("enqueues channel reaction events regardless of dmPolicy", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getRemovedHandler } = createReactionContext({ + dmPolicy: "disabled", + channelType: "channel", + }); + const removedHandler = getRemovedHandler(); + expect(removedHandler).toBeTruthy(); + + await removedHandler!({ + event: { + ...makeReactionEvent({ channel: "C1" }), + type: "reaction_removed", + }, + body: {}, + }); + + expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1); + }); + + it("blocks channel reaction events for users outside channel users allowlist", async () => { + enqueueSystemEventMock.mockClear(); + readAllowFromStoreMock.mockReset().mockResolvedValue([]); + const { getAddedHandler } = createReactionContext({ + dmPolicy: "open", + channelType: "channel", + channelUsers: ["U_OWNER"], + }); + const addedHandler = getAddedHandler(); + expect(addedHandler).toBeTruthy(); + + await addedHandler!({ + event: makeReactionEvent({ channel: "C1", user: "U_ATTACKER" }), + body: {}, + }); + + expect(enqueueSystemEventMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/slack/monitor/events/reactions.ts b/src/slack/monitor/events/reactions.ts index b437352d6ca..07dcf0f8be3 100644 --- a/src/slack/monitor/events/reactions.ts +++ b/src/slack/monitor/events/reactions.ts @@ -1,9 +1,9 @@ import type { SlackEventMiddlewareArgs } from "@slack/bolt"; import { danger } from "../../../globals.js"; import { enqueueSystemEvent } from "../../../infra/system-events.js"; -import { resolveSlackChannelLabel } from "../channel-config.js"; import type { SlackMonitorContext } from "../context.js"; import type { SlackReactionEvent } from "../types.js"; +import { authorizeAndResolveSlackSystemEventContext } from "./system-event-context.js"; export 
function registerSlackReactionEvents(params: { ctx: SlackMonitorContext }) { const { ctx } = params; @@ -15,35 +15,30 @@ export function registerSlackReactionEvents(params: { ctx: SlackMonitorContext } return; } - const channelInfo = item.channel ? await ctx.resolveChannelName(item.channel) : {}; - const channelType = channelInfo?.type; - if ( - !ctx.isChannelAllowed({ - channelId: item.channel, - channelName: channelInfo?.name, - channelType, - }) - ) { + const ingressContext = await authorizeAndResolveSlackSystemEventContext({ + ctx, + senderId: event.user, + channelId: item.channel, + eventKind: "reaction", + }); + if (!ingressContext) { return; } - const channelLabel = resolveSlackChannelLabel({ - channelId: item.channel, - channelName: channelInfo?.name, - }); - const actorInfo = event.user ? await ctx.resolveUserName(event.user) : undefined; + const actorInfoPromise: Promise<{ name?: string } | undefined> = event.user + ? ctx.resolveUserName(event.user) + : Promise.resolve(undefined); + const authorInfoPromise: Promise<{ name?: string } | undefined> = event.item_user + ? ctx.resolveUserName(event.item_user) + : Promise.resolve(undefined); + const [actorInfo, authorInfo] = await Promise.all([actorInfoPromise, authorInfoPromise]); const actorLabel = actorInfo?.name ?? event.user; const emojiLabel = event.reaction ?? "emoji"; - const authorInfo = event.item_user ? await ctx.resolveUserName(event.item_user) : undefined; const authorLabel = authorInfo?.name ?? event.item_user; - const baseText = `Slack reaction ${action}: :${emojiLabel}: by ${actorLabel} in ${channelLabel} msg ${item.ts}`; + const baseText = `Slack reaction ${action}: :${emojiLabel}: by ${actorLabel} in ${ingressContext.channelLabel} msg ${item.ts}`; const text = authorLabel ? 
`${baseText} from ${authorLabel}` : baseText; - const sessionKey = ctx.resolveSlackSystemEventSessionKey({ - channelId: item.channel, - channelType, - }); enqueueSystemEvent(text, { - sessionKey, + sessionKey: ingressContext.sessionKey, contextKey: `slack:reaction:${action}:${item.channel}:${item.ts}:${event.user}:${emojiLabel}`, }); } catch (err) { diff --git a/src/slack/monitor/events/system-event-context.ts b/src/slack/monitor/events/system-event-context.ts new file mode 100644 index 00000000000..5df48dfd167 --- /dev/null +++ b/src/slack/monitor/events/system-event-context.ts @@ -0,0 +1,44 @@ +import { logVerbose } from "../../../globals.js"; +import { authorizeSlackSystemEventSender } from "../auth.js"; +import { resolveSlackChannelLabel } from "../channel-config.js"; +import type { SlackMonitorContext } from "../context.js"; + +export type SlackAuthorizedSystemEventContext = { + channelLabel: string; + sessionKey: string; +}; + +export async function authorizeAndResolveSlackSystemEventContext(params: { + ctx: SlackMonitorContext; + senderId?: string; + channelId?: string; + channelType?: string | null; + eventKind: string; +}): Promise { + const { ctx, senderId, channelId, channelType, eventKind } = params; + const auth = await authorizeSlackSystemEventSender({ + ctx, + senderId, + channelId, + channelType, + }); + if (!auth.allowed) { + logVerbose( + `slack: drop ${eventKind} sender ${senderId ?? "unknown"} channel=${channelId ?? "unknown"} reason=${auth.reason ?? 
"unauthorized"}`, + ); + return undefined; + } + + const channelLabel = resolveSlackChannelLabel({ + channelId, + channelName: auth.channelName, + }); + const sessionKey = ctx.resolveSlackSystemEventSessionKey({ + channelId, + channelType: auth.channelType, + }); + return { + channelLabel, + sessionKey, + }; +} diff --git a/src/slack/monitor/events/system-event-test-harness.ts b/src/slack/monitor/events/system-event-test-harness.ts new file mode 100644 index 00000000000..73a50d0444c --- /dev/null +++ b/src/slack/monitor/events/system-event-test-harness.ts @@ -0,0 +1,56 @@ +import type { SlackMonitorContext } from "../context.js"; + +export type SlackSystemEventHandler = (args: { + event: Record; + body: unknown; +}) => Promise; + +export type SlackSystemEventTestOverrides = { + dmPolicy?: "open" | "pairing" | "allowlist" | "disabled"; + allowFrom?: string[]; + channelType?: "im" | "channel"; + channelUsers?: string[]; +}; + +export function createSlackSystemEventTestHarness(overrides?: SlackSystemEventTestOverrides) { + const handlers: Record = {}; + const channelType = overrides?.channelType ?? "im"; + const app = { + event: (name: string, handler: SlackSystemEventHandler) => { + handlers[name] = handler; + }, + }; + const ctx = { + app, + runtime: { error: () => {} }, + dmEnabled: true, + dmPolicy: overrides?.dmPolicy ?? "open", + defaultRequireMention: true, + channelsConfig: overrides?.channelUsers + ? { + C1: { + users: overrides.channelUsers, + allow: true, + }, + } + : undefined, + groupPolicy: "open", + allowFrom: overrides?.allowFrom ?? [], + allowNameMatching: false, + shouldDropMismatchedSlackEvent: () => false, + isChannelAllowed: () => true, + resolveChannelName: async () => ({ + name: channelType === "im" ? 
"direct" : "general", + type: channelType, + }), + resolveUserName: async () => ({ name: "alice" }), + resolveSlackSystemEventSessionKey: () => "agent:main:main", + } as unknown as SlackMonitorContext; + + return { + ctx, + getHandler(name: string): SlackSystemEventHandler | null { + return handlers[name] ?? null; + }, + }; +} diff --git a/src/slack/monitor/message-handler/prepare.ts b/src/slack/monitor/message-handler/prepare.ts index 6a0121d996e..02ee265f7ca 100644 --- a/src/slack/monitor/message-handler/prepare.ts +++ b/src/slack/monitor/message-handler/prepare.ts @@ -19,7 +19,6 @@ import { shouldAckReaction as shouldAckReactionGate, type AckReactionScope, } from "../../../channels/ack-reactions.js"; -import { formatAllowlistMatchMeta } from "../../../channels/allowlist-match.js"; import { resolveControlCommandGate } from "../../../channels/command-gating.js"; import { resolveConversationLabel } from "../../../channels/conversation-label.js"; import { logInboundDrop } from "../../../channels/logging.js"; @@ -28,8 +27,6 @@ import { recordInboundSession } from "../../../channels/session.js"; import { readSessionUpdatedAt, resolveStorePath } from "../../../config/sessions.js"; import { logVerbose, shouldLogVerbose } from "../../../globals.js"; import { enqueueSystemEvent } from "../../../infra/system-events.js"; -import { buildPairingReply } from "../../../pairing/pairing-messages.js"; -import { upsertChannelPairingRequest } from "../../../pairing/pairing-store.js"; import { resolveAgentRoute } from "../../../routing/resolve-route.js"; import { resolveThreadSessionKeys } from "../../../routing/session-key.js"; import type { ResolvedSlackAccount } from "../../accounts.js"; @@ -42,6 +39,7 @@ import { resolveSlackEffectiveAllowFrom } from "../auth.js"; import { resolveSlackChannelConfig } from "../channel-config.js"; import { stripSlackMentionsForCommandDetection } from "../commands.js"; import { normalizeSlackChannelType, type SlackMonitorContext } from 
"../context.js"; +import { authorizeSlackDirectMessage } from "../dm-auth.js"; import { resolveSlackAttachmentContent, MAX_SLACK_MEDIA_FILES, @@ -127,7 +125,9 @@ export async function prepareSlackMessage(params: { return null; } - const { allowFromLower } = await resolveSlackEffectiveAllowFrom(ctx); + const { allowFromLower } = await resolveSlackEffectiveAllowFrom(ctx, { + includePairingStore: isDirectMessage, + }); if (isDirectMessage) { const directUserId = message.user; @@ -135,58 +135,32 @@ export async function prepareSlackMessage(params: { logVerbose("slack: drop dm message (missing user id)"); return null; } - if (!ctx.dmEnabled || ctx.dmPolicy === "disabled") { - logVerbose("slack: drop dm (dms disabled)"); + const allowed = await authorizeSlackDirectMessage({ + ctx, + accountId: account.accountId, + senderId: directUserId, + allowFromLower, + resolveSenderName: ctx.resolveUserName, + sendPairingReply: async (text) => { + await sendMessageSlack(message.channel, text, { + token: ctx.botToken, + client: ctx.app.client, + accountId: account.accountId, + }); + }, + onDisabled: () => { + logVerbose("slack: drop dm (dms disabled)"); + }, + onUnauthorized: ({ allowMatchMeta }) => { + logVerbose( + `Blocked unauthorized slack sender ${message.user} (dmPolicy=${ctx.dmPolicy}, ${allowMatchMeta})`, + ); + }, + log: logVerbose, + }); + if (!allowed) { return null; } - if (ctx.dmPolicy !== "open") { - const allowMatch = resolveSlackAllowListMatch({ - allowList: allowFromLower, - id: directUserId, - allowNameMatching: ctx.allowNameMatching, - }); - const allowMatchMeta = formatAllowlistMatchMeta(allowMatch); - if (!allowMatch.allowed) { - if (ctx.dmPolicy === "pairing") { - const sender = await ctx.resolveUserName(directUserId); - const senderName = sender?.name ?? 
undefined; - const { code, created } = await upsertChannelPairingRequest({ - channel: "slack", - id: directUserId, - meta: { name: senderName }, - }); - if (created) { - logVerbose( - `slack pairing request sender=${directUserId} name=${ - senderName ?? "unknown" - } (${allowMatchMeta})`, - ); - try { - await sendMessageSlack( - message.channel, - buildPairingReply({ - channel: "slack", - idLine: `Your Slack user id: ${directUserId}`, - code, - }), - { - token: ctx.botToken, - client: ctx.app.client, - accountId: account.accountId, - }, - ); - } catch (err) { - logVerbose(`slack pairing reply failed for ${message.user}: ${String(err)}`); - } - } - } else { - logVerbose( - `Blocked unauthorized slack sender ${message.user} (dmPolicy=${ctx.dmPolicy}, ${allowMatchMeta})`, - ); - } - return null; - } - } } const route = resolveAgentRoute({ diff --git a/src/slack/monitor/monitor.test.ts b/src/slack/monitor/monitor.test.ts index 3262873718d..3da7f08164e 100644 --- a/src/slack/monitor/monitor.test.ts +++ b/src/slack/monitor/monitor.test.ts @@ -60,6 +60,27 @@ describe("resolveSlackChannelConfig", () => { matchSource: "direct", }); }); + + it("matches channel config key stored in lowercase when Slack delivers uppercase channel ID", () => { + // Slack always delivers channel IDs in uppercase (e.g. C0ABC12345). + // Users commonly copy them in lowercase from docs or older CLI output. + const res = resolveSlackChannelConfig({ + channelId: "C0ABC12345", + channels: { c0abc12345: { allow: true, requireMention: false } }, + defaultRequireMention: true, + }); + expect(res).toMatchObject({ allowed: true, requireMention: false }); + }); + + it("matches channel config key stored in uppercase when user types lowercase channel ID", () => { + // Defensive: also handle the inverse direction. 
+ const res = resolveSlackChannelConfig({ + channelId: "c0abc12345", + channels: { C0ABC12345: { allow: true, requireMention: false } }, + defaultRequireMention: true, + }); + expect(res).toMatchObject({ allowed: true, requireMention: false }); + }); }); const baseParams = () => ({ diff --git a/src/slack/monitor/slash.ts b/src/slack/monitor/slash.ts index 92afc734a91..c494a3696e5 100644 --- a/src/slack/monitor/slash.ts +++ b/src/slack/monitor/slash.ts @@ -1,27 +1,18 @@ import type { SlackActionMiddlewareArgs, SlackCommandMiddlewareArgs } from "@slack/bolt"; import type { ChatCommandDefinition, CommandArgs } from "../../auto-reply/commands-registry.js"; import type { ReplyPayload } from "../../auto-reply/types.js"; -import { formatAllowlistMatchMeta } from "../../channels/allowlist-match.js"; import { resolveCommandAuthorizedFromAuthorizers } from "../../channels/command-gating.js"; import { resolveNativeCommandsEnabled, resolveNativeSkillsEnabled } from "../../config/commands.js"; import { danger, logVerbose } from "../../globals.js"; -import { buildPairingReply } from "../../pairing/pairing-messages.js"; -import { - readChannelAllowFromStore, - upsertChannelPairingRequest, -} from "../../pairing/pairing-store.js"; import { chunkItems } from "../../utils/chunk-items.js"; import type { ResolvedSlackAccount } from "../accounts.js"; -import { - normalizeAllowList, - normalizeAllowListLower, - resolveSlackAllowListMatch, - resolveSlackUserAllowed, -} from "./allow-list.js"; +import { resolveSlackAllowListMatch, resolveSlackUserAllowed } from "./allow-list.js"; +import { resolveSlackEffectiveAllowFrom } from "./auth.js"; import { resolveSlackChannelConfig, type SlackChannelConfigResolved } from "./channel-config.js"; import { buildSlackSlashCommandMatcher, resolveSlackSlashCommandConfig } from "./commands.js"; import type { SlackMonitorContext } from "./context.js"; import { normalizeSlackChannelType } from "./context.js"; +import { authorizeSlackDirectMessage } from 
"./dm-auth.js"; import { createSlackExternalArgMenuStore, SLACK_EXTERNAL_ARG_MENU_PREFIX, @@ -335,69 +326,50 @@ export async function registerSlackMonitorSlashCommands(params: { return; } - const storeAllowFrom = - ctx.dmPolicy === "allowlist" - ? [] - : await readChannelAllowFromStore("slack").catch(() => []); - const effectiveAllowFrom = normalizeAllowList([...ctx.allowFrom, ...storeAllowFrom]); - const effectiveAllowFromLower = normalizeAllowListLower(effectiveAllowFrom); + const { allowFromLower: effectiveAllowFromLower } = await resolveSlackEffectiveAllowFrom( + ctx, + { + includePairingStore: isDirectMessage, + }, + ); // Privileged command surface: compute CommandAuthorized, don't assume true. // Keep this aligned with the Slack message path (message-handler/prepare.ts). let commandAuthorized = false; let channelConfig: SlackChannelConfigResolved | null = null; if (isDirectMessage) { - if (!ctx.dmEnabled || ctx.dmPolicy === "disabled") { - await respond({ - text: "Slack DMs are disabled.", - response_type: "ephemeral", - }); + const allowed = await authorizeSlackDirectMessage({ + ctx, + accountId: ctx.accountId, + senderId: command.user_id, + allowFromLower: effectiveAllowFromLower, + resolveSenderName: ctx.resolveUserName, + sendPairingReply: async (text) => { + await respond({ + text, + response_type: "ephemeral", + }); + }, + onDisabled: async () => { + await respond({ + text: "Slack DMs are disabled.", + response_type: "ephemeral", + }); + }, + onUnauthorized: async ({ allowMatchMeta }) => { + logVerbose( + `slack: blocked slash sender ${command.user_id} (dmPolicy=${ctx.dmPolicy}, ${allowMatchMeta})`, + ); + await respond({ + text: "You are not authorized to use this command.", + response_type: "ephemeral", + }); + }, + log: logVerbose, + }); + if (!allowed) { return; } - if (ctx.dmPolicy !== "open") { - const sender = await ctx.resolveUserName(command.user_id); - const senderName = sender?.name ?? 
undefined; - const allowMatch = resolveSlackAllowListMatch({ - allowList: effectiveAllowFromLower, - id: command.user_id, - name: senderName, - allowNameMatching: ctx.allowNameMatching, - }); - const allowMatchMeta = formatAllowlistMatchMeta(allowMatch); - if (!allowMatch.allowed) { - if (ctx.dmPolicy === "pairing") { - const { code, created } = await upsertChannelPairingRequest({ - channel: "slack", - id: command.user_id, - meta: { name: senderName }, - }); - if (created) { - logVerbose( - `slack pairing request sender=${command.user_id} name=${ - senderName ?? "unknown" - } (${allowMatchMeta})`, - ); - await respond({ - text: buildPairingReply({ - channel: "slack", - idLine: `Your Slack user id: ${command.user_id}`, - code, - }), - response_type: "ephemeral", - }); - } - } else { - logVerbose( - `slack: blocked slash sender ${command.user_id} (dmPolicy=${ctx.dmPolicy}, ${allowMatchMeta})`, - ); - await respond({ - text: "You are not authorized to use this command.", - response_type: "ephemeral", - }); - } - return; - } - } } if (isRoom) { diff --git a/src/slack/monitor/types.ts b/src/slack/monitor/types.ts index c77bd53f964..58c103e04a5 100644 --- a/src/slack/monitor/types.ts +++ b/src/slack/monitor/types.ts @@ -66,8 +66,8 @@ export type SlackMessageChangedEvent = { type: "message"; subtype: "message_changed"; channel?: string; - message?: { ts?: string }; - previous_message?: { ts?: string }; + message?: { ts?: string; user?: string; bot_id?: string }; + previous_message?: { ts?: string; user?: string; bot_id?: string }; event_ts?: string; }; @@ -76,6 +76,7 @@ export type SlackMessageDeletedEvent = { subtype: "message_deleted"; channel?: string; deleted_ts?: string; + previous_message?: { ts?: string; user?: string; bot_id?: string }; event_ts?: string; }; @@ -83,7 +84,8 @@ export type SlackThreadBroadcastEvent = { type: "message"; subtype: "thread_broadcast"; channel?: string; - message?: { ts?: string }; + user?: string; + message?: { ts?: string; user?: 
string; bot_id?: string }; event_ts?: string; }; diff --git a/src/slack/send.blocks.test.ts b/src/slack/send.blocks.test.ts index 2b70b6c29b2..690f95120f0 100644 --- a/src/slack/send.blocks.test.ts +++ b/src/slack/send.blocks.test.ts @@ -4,6 +4,52 @@ import { createSlackSendTestClient, installSlackBlockTestMocks } from "./blocks. installSlackBlockTestMocks(); const { sendMessageSlack } = await import("./send.js"); +describe("sendMessageSlack NO_REPLY guard", () => { + it("suppresses NO_REPLY text before any Slack API call", async () => { + const client = createSlackSendTestClient(); + const result = await sendMessageSlack("channel:C123", "NO_REPLY", { + token: "xoxb-test", + client, + }); + + expect(client.chat.postMessage).not.toHaveBeenCalled(); + expect(result.messageId).toBe("suppressed"); + }); + + it("suppresses NO_REPLY with surrounding whitespace", async () => { + const client = createSlackSendTestClient(); + const result = await sendMessageSlack("channel:C123", " NO_REPLY ", { + token: "xoxb-test", + client, + }); + + expect(client.chat.postMessage).not.toHaveBeenCalled(); + expect(result.messageId).toBe("suppressed"); + }); + + it("does not suppress substantive text containing NO_REPLY", async () => { + const client = createSlackSendTestClient(); + await sendMessageSlack("channel:C123", "This is not a NO_REPLY situation", { + token: "xoxb-test", + client, + }); + + expect(client.chat.postMessage).toHaveBeenCalled(); + }); + + it("does not suppress NO_REPLY when blocks are attached", async () => { + const client = createSlackSendTestClient(); + const result = await sendMessageSlack("channel:C123", "NO_REPLY", { + token: "xoxb-test", + client, + blocks: [{ type: "section", text: { type: "mrkdwn", text: "content" } }], + }); + + expect(client.chat.postMessage).toHaveBeenCalled(); + expect(result.messageId).toBe("171234.567"); + }); +}); + describe("sendMessageSlack blocks", () => { it("posts blocks with fallback text when message is empty", async () => { 
const client = createSlackSendTestClient(); diff --git a/src/slack/send.ts b/src/slack/send.ts index 5905473970f..ede97bafd71 100644 --- a/src/slack/send.ts +++ b/src/slack/send.ts @@ -9,6 +9,7 @@ import { resolveChunkMode, resolveTextChunkLimit, } from "../auto-reply/chunk.js"; +import { isSilentReplyText } from "../auto-reply/tokens.js"; import { loadConfig } from "../config/config.js"; import { resolveMarkdownTableMode } from "../config/markdown-tables.js"; import { logVerbose } from "../globals.js"; @@ -231,6 +232,10 @@ export async function sendMessageSlack( opts: SlackSendOpts = {}, ): Promise { const trimmedMessage = message?.trim() ?? ""; + if (isSilentReplyText(trimmedMessage) && !opts.mediaUrl && !opts.blocks) { + logVerbose("slack send: suppressed NO_REPLY token before API call"); + return { messageId: "suppressed", channelId: "" }; + } const blocks = opts.blocks == null ? undefined : validateSlackBlocksArray(opts.blocks); if (!trimmedMessage && !opts.mediaUrl && !blocks) { throw new Error("Slack send requires text, blocks, or media"); diff --git a/src/telegram/accounts.test.ts b/src/telegram/accounts.test.ts index 3eaee29819b..919ca989fe3 100644 --- a/src/telegram/accounts.test.ts +++ b/src/telegram/accounts.test.ts @@ -99,3 +99,72 @@ describe("resolveTelegramAccount", () => { expect(lines).toContain("resolve { accountId: 'work', enabled: true, tokenSource: 'config' }"); }); }); + +describe("resolveTelegramAccount allowFrom precedence", () => { + it("prefers accounts.default allowlists over top-level for default account", () => { + const resolved = resolveTelegramAccount({ + cfg: { + channels: { + telegram: { + allowFrom: ["top"], + groupAllowFrom: ["top-group"], + accounts: { + default: { + botToken: "123:default", + allowFrom: ["default"], + groupAllowFrom: ["default-group"], + }, + }, + }, + }, + }, + accountId: "default", + }); + + expect(resolved.config.allowFrom).toEqual(["default"]); + 
expect(resolved.config.groupAllowFrom).toEqual(["default-group"]); + }); + + it("falls back to top-level allowlists for named account without overrides", () => { + const resolved = resolveTelegramAccount({ + cfg: { + channels: { + telegram: { + allowFrom: ["top"], + groupAllowFrom: ["top-group"], + accounts: { + work: { botToken: "123:work" }, + }, + }, + }, + }, + accountId: "work", + }); + + expect(resolved.config.allowFrom).toEqual(["top"]); + expect(resolved.config.groupAllowFrom).toEqual(["top-group"]); + }); + + it("does not inherit default account allowlists for named account when top-level is absent", () => { + const resolved = resolveTelegramAccount({ + cfg: { + channels: { + telegram: { + accounts: { + default: { + botToken: "123:default", + allowFrom: ["default"], + groupAllowFrom: ["default-group"], + }, + work: { botToken: "123:work" }, + }, + }, + }, + }, + accountId: "work", + }); + + expect(resolved.config.allowFrom).toBeUndefined(); + expect(resolved.config.groupAllowFrom).toBeUndefined(); + }); +}); diff --git a/src/telegram/bot-access.ts b/src/telegram/bot-access.ts index 48ba43a64c2..d08a54616f0 100644 --- a/src/telegram/bot-access.ts +++ b/src/telegram/bot-access.ts @@ -1,4 +1,8 @@ -import { firstDefined, isSenderIdAllowed, mergeAllowFromSources } from "../channels/allow-from.js"; +import { + firstDefined, + isSenderIdAllowed, + mergeDmAllowFromSources, +} from "../channels/allow-from.js"; import type { AllowlistMatch } from "../channels/allowlist-match.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; @@ -53,11 +57,11 @@ export const normalizeAllowFrom = (list?: Array): NormalizedAll }; }; -export const normalizeAllowFromWithStore = (params: { +export const normalizeDmAllowFromWithStore = (params: { allowFrom?: Array; storeAllowFrom?: string[]; dmPolicy?: string; -}): NormalizedAllowFrom => normalizeAllowFrom(mergeAllowFromSources(params)); +}): NormalizedAllowFrom => normalizeAllowFrom(mergeDmAllowFromSources(params)); 
export const isSenderAllowed = (params: { allow: NormalizedAllowFrom; diff --git a/src/telegram/bot-handlers.ts b/src/telegram/bot-handlers.ts index e4d42cd889e..ba4c0eb91b6 100644 --- a/src/telegram/bot-handlers.ts +++ b/src/telegram/bot-handlers.ts @@ -17,6 +17,7 @@ import { resolveChannelConfigWrites } from "../channels/plugins/config-writes.js import { loadConfig } from "../config/config.js"; import { writeConfigFile } from "../config/io.js"; import { loadSessionStore, resolveStorePath } from "../config/sessions.js"; +import type { DmPolicy } from "../config/types.base.js"; import type { TelegramGroupConfig, TelegramTopicConfig } from "../config/types.js"; import { danger, logVerbose, warn } from "../globals.js"; import { enqueueSystemEvent } from "../infra/system-events.js"; @@ -27,7 +28,7 @@ import { resolveThreadSessionKeys } from "../routing/session-key.js"; import { withTelegramApiErrorLogging } from "./api-logging.js"; import { isSenderAllowed, - normalizeAllowFromWithStore, + normalizeDmAllowFromWithStore, type NormalizedAllowFrom, } from "./bot-access.js"; import type { TelegramMediaRef } from "./bot-message-context.js"; @@ -507,6 +508,133 @@ export const registerTelegramHandlers = ({ return false; }; + type TelegramGroupAllowContext = Awaited>; + type TelegramEventAuthorizationMode = "reaction" | "callback-scope" | "callback-allowlist"; + type TelegramEventAuthorizationResult = { allowed: true } | { allowed: false; reason: string }; + type TelegramEventAuthorizationContext = TelegramGroupAllowContext & { dmPolicy: DmPolicy }; + + const TELEGRAM_EVENT_AUTH_RULES: Record< + TelegramEventAuthorizationMode, + { + enforceDirectAuthorization: boolean; + enforceGroupAllowlistAuthorization: boolean; + deniedDmReason: string; + deniedGroupReason: string; + } + > = { + reaction: { + enforceDirectAuthorization: true, + enforceGroupAllowlistAuthorization: false, + deniedDmReason: "reaction unauthorized by dm policy/allowlist", + deniedGroupReason: "reaction 
unauthorized by group allowlist", + }, + "callback-scope": { + enforceDirectAuthorization: false, + enforceGroupAllowlistAuthorization: false, + deniedDmReason: "callback unauthorized by inlineButtonsScope", + deniedGroupReason: "callback unauthorized by inlineButtonsScope", + }, + "callback-allowlist": { + enforceDirectAuthorization: true, + // Group auth is already enforced by shouldSkipGroupMessage (group policy + allowlist). + // An extra allowlist gate here would block users whose original command was authorized. + enforceGroupAllowlistAuthorization: false, + deniedDmReason: "callback unauthorized by inlineButtonsScope allowlist", + deniedGroupReason: "callback unauthorized by inlineButtonsScope allowlist", + }, + }; + + const resolveTelegramEventAuthorizationContext = async (params: { + chatId: number; + isForum: boolean; + messageThreadId?: number; + groupAllowContext?: TelegramGroupAllowContext; + }): Promise => { + const dmPolicy = telegramCfg.dmPolicy ?? "pairing"; + const groupAllowContext = + params.groupAllowContext ?? 
+ (await resolveTelegramGroupAllowFromContext({ + chatId: params.chatId, + accountId, + isForum: params.isForum, + messageThreadId: params.messageThreadId, + groupAllowFrom, + resolveTelegramGroupConfig, + })); + return { dmPolicy, ...groupAllowContext }; + }; + + const authorizeTelegramEventSender = (params: { + chatId: number; + chatTitle?: string; + isGroup: boolean; + senderId: string; + senderUsername: string; + mode: TelegramEventAuthorizationMode; + context: TelegramEventAuthorizationContext; + }): TelegramEventAuthorizationResult => { + const { chatId, chatTitle, isGroup, senderId, senderUsername, mode, context } = params; + const { + dmPolicy, + resolvedThreadId, + storeAllowFrom, + groupConfig, + topicConfig, + effectiveGroupAllow, + hasGroupAllowOverride, + } = context; + const authRules = TELEGRAM_EVENT_AUTH_RULES[mode]; + const { + enforceDirectAuthorization, + enforceGroupAllowlistAuthorization, + deniedDmReason, + deniedGroupReason, + } = authRules; + if ( + shouldSkipGroupMessage({ + isGroup, + chatId, + chatTitle, + resolvedThreadId, + senderId, + senderUsername, + effectiveGroupAllow, + hasGroupAllowOverride, + groupConfig, + topicConfig, + }) + ) { + return { allowed: false, reason: "group-policy" }; + } + + if (!isGroup && enforceDirectAuthorization) { + if (dmPolicy === "disabled") { + logVerbose( + `Blocked telegram direct event from ${senderId || "unknown"} (${deniedDmReason})`, + ); + return { allowed: false, reason: "direct-disabled" }; + } + if (dmPolicy !== "open") { + const effectiveDmAllow = normalizeDmAllowFromWithStore({ + allowFrom, + storeAllowFrom, + dmPolicy, + }); + if (!isAllowlistAuthorized(effectiveDmAllow, senderId, senderUsername)) { + logVerbose(`Blocked telegram direct sender ${senderId || "unknown"} (${deniedDmReason})`); + return { allowed: false, reason: "direct-unauthorized" }; + } + } + } + if (isGroup && enforceGroupAllowlistAuthorization) { + if (!isAllowlistAuthorized(effectiveGroupAllow, senderId, senderUsername)) 
{ + logVerbose(`Blocked telegram group sender ${senderId || "unknown"} (${deniedGroupReason})`); + return { allowed: false, reason: "group-unauthorized" }; + } + } + return { allowed: true }; + }; + // Handle emoji reactions to messages. bot.on("message_reaction", async (ctx) => { try { @@ -521,6 +649,10 @@ export const registerTelegramHandlers = ({ const chatId = reaction.chat.id; const messageId = reaction.message_id; const user = reaction.user; + const senderId = user?.id != null ? String(user.id) : ""; + const senderUsername = user?.username ?? ""; + const isGroup = reaction.chat.type === "group" || reaction.chat.type === "supergroup"; + const isForum = reaction.chat.is_forum === true; // Resolve reaction notification mode (default: "own"). const reactionMode = telegramCfg.reactionNotifications ?? "own"; @@ -533,6 +665,22 @@ export const registerTelegramHandlers = ({ if (reactionMode === "own" && !wasSentByBot(chatId, messageId)) { return; } + const eventAuthContext = await resolveTelegramEventAuthorizationContext({ + chatId, + isForum, + }); + const senderAuthorization = authorizeTelegramEventSender({ + chatId, + chatTitle: reaction.chat.title, + isGroup, + senderId, + senderUsername, + mode: "reaction", + context: eventAuthContext, + }); + if (!senderAuthorization.allowed) { + return; + } // Detect added reactions. const oldEmojis = new Set( @@ -552,12 +700,12 @@ export const registerTelegramHandlers = ({ const senderName = user ? [user.first_name, user.last_name].filter(Boolean).join(" ").trim() || user.username : undefined; - const senderUsername = user?.username ? `@${user.username}` : undefined; + const senderUsernameLabel = user?.username ? 
`@${user.username}` : undefined; let senderLabel = senderName; - if (senderName && senderUsername) { - senderLabel = `${senderName} (${senderUsername})`; - } else if (!senderName && senderUsername) { - senderLabel = senderUsername; + if (senderName && senderUsernameLabel) { + senderLabel = `${senderName} (${senderUsernameLabel})`; + } else if (!senderName && senderUsernameLabel) { + senderLabel = senderUsernameLabel; } if (!senderLabel && user?.id) { senderLabel = `id:${user.id}`; @@ -567,8 +715,6 @@ export const registerTelegramHandlers = ({ // Reactions target a specific message_id; the Telegram Bot API does not include // message_thread_id on MessageReactionUpdated, so we route to the chat-level // session (forum topic routing is not available for reactions). - const isGroup = reaction.chat.type === "group" || reaction.chat.type === "supergroup"; - const isForum = reaction.chat.is_forum === true; const resolvedThreadId = isForum ? resolveTelegramForumThreadId({ isForum, messageThreadId: undefined }) : undefined; @@ -855,67 +1001,29 @@ export const registerTelegramHandlers = ({ const messageThreadId = callbackMessage.message_thread_id; const isForum = callbackMessage.chat.is_forum === true; - const groupAllowContext = await resolveTelegramGroupAllowFromContext({ + const eventAuthContext = await resolveTelegramEventAuthorizationContext({ chatId, - accountId, - dmPolicy: telegramCfg.dmPolicy ?? "pairing", isForum, messageThreadId, - groupAllowFrom, - resolveTelegramGroupConfig, - }); - const { - resolvedThreadId, - storeAllowFrom, - groupConfig, - topicConfig, - effectiveGroupAllow, - hasGroupAllowOverride, - } = groupAllowContext; - const dmPolicy = telegramCfg.dmPolicy ?? "pairing"; - const effectiveDmAllow = normalizeAllowFromWithStore({ - allowFrom: telegramCfg.allowFrom, - storeAllowFrom, - dmPolicy, }); + const { resolvedThreadId, storeAllowFrom } = eventAuthContext; const senderId = callback.from?.id ? 
String(callback.from.id) : ""; const senderUsername = callback.from?.username ?? ""; - if ( - shouldSkipGroupMessage({ - isGroup, - chatId, - chatTitle: callbackMessage.chat.title, - resolvedThreadId, - senderId, - senderUsername, - effectiveGroupAllow, - hasGroupAllowOverride, - groupConfig, - topicConfig, - }) - ) { + const authorizationMode: TelegramEventAuthorizationMode = + inlineButtonsScope === "allowlist" ? "callback-allowlist" : "callback-scope"; + const senderAuthorization = authorizeTelegramEventSender({ + chatId, + chatTitle: callbackMessage.chat.title, + isGroup, + senderId, + senderUsername, + mode: authorizationMode, + context: eventAuthContext, + }); + if (!senderAuthorization.allowed) { return; } - if (inlineButtonsScope === "allowlist") { - if (!isGroup) { - if (dmPolicy === "disabled") { - return; - } - if (dmPolicy !== "open") { - const allowed = isAllowlistAuthorized(effectiveDmAllow, senderId, senderUsername); - if (!allowed) { - return; - } - } - } else { - const allowed = isAllowlistAuthorized(effectiveGroupAllow, senderId, senderUsername); - if (!allowed) { - return; - } - } - } - const paginationMatch = data.match(/^commands_page_(\d+|noop)(?::(.+))?$/); if (paginationMatch) { const pageValue = paginationMatch[1]; @@ -1151,26 +1259,21 @@ export const registerTelegramHandlers = ({ if (shouldSkipUpdate(event.ctxForDedupe)) { return; } - const dmPolicy = telegramCfg.dmPolicy ?? 
"pairing"; - - const groupAllowContext = await resolveTelegramGroupAllowFromContext({ + const eventAuthContext = await resolveTelegramEventAuthorizationContext({ chatId: event.chatId, - accountId, - dmPolicy, isForum: event.isForum, messageThreadId: event.messageThreadId, - groupAllowFrom, - resolveTelegramGroupConfig, }); const { + dmPolicy, resolvedThreadId, storeAllowFrom, groupConfig, topicConfig, effectiveGroupAllow, hasGroupAllowOverride, - } = groupAllowContext; - const effectiveDmAllow = normalizeAllowFromWithStore({ + } = eventAuthContext; + const effectiveDmAllow = normalizeDmAllowFromWithStore({ allowFrom, storeAllowFrom, dmPolicy, diff --git a/src/telegram/bot-message-context.test-harness.ts b/src/telegram/bot-message-context.test-harness.ts index afdbbffce68..acfb84e6d69 100644 --- a/src/telegram/bot-message-context.test-harness.ts +++ b/src/telegram/bot-message-context.test-harness.ts @@ -61,5 +61,6 @@ export async function buildTelegramMessageContextForTest( groupConfig: { requireMention: false }, topicConfig: undefined, })), + sendChatActionHandler: { sendChatAction: vi.fn() } as never, }); } diff --git a/src/telegram/bot-message-context.ts b/src/telegram/bot-message-context.ts index 3ea805c944d..2a20b0c4be6 100644 --- a/src/telegram/bot-message-context.ts +++ b/src/telegram/bot-message-context.ts @@ -36,7 +36,12 @@ import { recordChannelActivity } from "../infra/channel-activity.js"; import { resolveAgentRoute } from "../routing/resolve-route.js"; import { resolveThreadSessionKeys } from "../routing/session-key.js"; import { withTelegramApiErrorLogging } from "./api-logging.js"; -import { firstDefined, isSenderAllowed, normalizeAllowFromWithStore } from "./bot-access.js"; +import { + firstDefined, + isSenderAllowed, + normalizeAllowFrom, + normalizeDmAllowFromWithStore, +} from "./bot-access.js"; import { buildGroupLabel, buildSenderLabel, @@ -111,6 +116,8 @@ export type BuildTelegramMessageContextParams = { resolveGroupActivation: 
ResolveGroupActivation; resolveGroupRequireMention: ResolveGroupRequireMention; resolveTelegramGroupConfig: ResolveTelegramGroupConfig; + /** Global (per-account) handler for sendChatAction 401 backoff (#27092). */ + sendChatActionHandler: import("./sendchataction-401-backoff.js").TelegramSendChatActionHandler; }; async function resolveStickerVisionSupport(params: { @@ -151,6 +158,7 @@ export const buildTelegramMessageContext = async ({ resolveGroupActivation, resolveGroupRequireMention, resolveTelegramGroupConfig, + sendChatActionHandler, }: BuildTelegramMessageContextParams) => { const msg = primaryCtx.message; const chatId = msg.chat.id; @@ -187,13 +195,10 @@ export const buildTelegramMessageContext = async ({ : null; const sessionKey = threadKeys?.sessionKey ?? baseSessionKey; const mentionRegexes = buildMentionRegexes(cfg, route.agentId); - const effectiveDmAllow = normalizeAllowFromWithStore({ allowFrom, storeAllowFrom, dmPolicy }); + const effectiveDmAllow = normalizeDmAllowFromWithStore({ allowFrom, storeAllowFrom, dmPolicy }); const groupAllowOverride = firstDefined(topicConfig?.allowFrom, groupConfig?.allowFrom); - const effectiveGroupAllow = normalizeAllowFromWithStore({ - allowFrom: groupAllowOverride ?? groupAllowFrom, - storeAllowFrom, - dmPolicy, - }); + // Group sender checks are explicit and must not inherit DM pairing-store entries. + const effectiveGroupAllow = normalizeAllowFrom(groupAllowOverride ?? groupAllowFrom); const hasGroupAllowOverride = typeof groupAllowOverride !== "undefined"; const senderId = msg.from?.id ? String(msg.from.id) : ""; const senderUsername = msg.from?.username ?? 
""; @@ -241,7 +246,12 @@ export const buildTelegramMessageContext = async ({ const sendTyping = async () => { await withTelegramApiErrorLogging({ operation: "sendChatAction", - fn: () => bot.api.sendChatAction(chatId, "typing", buildTypingThreadParams(replyThreadId)), + fn: () => + sendChatActionHandler.sendChatAction( + chatId, + "typing", + buildTypingThreadParams(replyThreadId), + ), }); }; @@ -250,7 +260,11 @@ export const buildTelegramMessageContext = async ({ await withTelegramApiErrorLogging({ operation: "sendChatAction", fn: () => - bot.api.sendChatAction(chatId, "record_voice", buildTypingThreadParams(replyThreadId)), + sendChatActionHandler.sendChatAction( + chatId, + "record_voice", + buildTypingThreadParams(replyThreadId), + ), }); } catch (err) { logVerbose(`telegram record_voice cue failed for chat ${chatId}: ${String(err)}`); diff --git a/src/telegram/bot-message-dispatch.test.ts b/src/telegram/bot-message-dispatch.test.ts index 75a8fb6b9af..842018b71bd 100644 --- a/src/telegram/bot-message-dispatch.test.ts +++ b/src/telegram/bot-message-dispatch.test.ts @@ -381,6 +381,118 @@ describe("dispatchTelegramMessage draft streaming", () => { expect(draftStream.stop).toHaveBeenCalled(); }); + it("primes stop() with final text when pending partial is below initial threshold", async () => { + let answerMessageId: number | undefined; + const answerDraftStream = { + update: vi.fn(), + flush: vi.fn().mockResolvedValue(undefined), + messageId: vi.fn().mockImplementation(() => answerMessageId), + clear: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockImplementation(async () => { + answerMessageId = 777; + }), + forceNewMessage: vi.fn(), + }; + const reasoningDraftStream = createDraftStream(); + createTelegramDraftStream + .mockImplementationOnce(() => answerDraftStream) + .mockImplementationOnce(() => reasoningDraftStream); + dispatchReplyWithBufferedBlockDispatcher.mockImplementation( + async ({ dispatcherOptions, replyOptions }) => { + await 
replyOptions?.onPartialReply?.({ text: "no" }); + await dispatcherOptions.deliver({ text: "no problem" }, { kind: "final" }); + return { queuedFinal: true }; + }, + ); + deliverReplies.mockResolvedValue({ delivered: true }); + editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "777" }); + + await dispatchWithContext({ context: createContext() }); + + expect(answerDraftStream.update).toHaveBeenCalledWith("no"); + expect(answerDraftStream.update).toHaveBeenLastCalledWith("no problem"); + expect(editMessageTelegram).toHaveBeenCalledWith(123, 777, "no problem", expect.any(Object)); + expect(deliverReplies).not.toHaveBeenCalled(); + expect(answerDraftStream.stop).toHaveBeenCalled(); + }); + + it("does not duplicate final delivery when stop-created preview edit fails", async () => { + let messageId: number | undefined; + const draftStream = { + update: vi.fn(), + flush: vi.fn().mockResolvedValue(undefined), + messageId: vi.fn().mockImplementation(() => messageId), + clear: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockImplementation(async () => { + messageId = 777; + }), + forceNewMessage: vi.fn(), + }; + createTelegramDraftStream.mockReturnValue(draftStream); + dispatchReplyWithBufferedBlockDispatcher.mockImplementation(async ({ dispatcherOptions }) => { + await dispatcherOptions.deliver({ text: "Short final" }, { kind: "final" }); + return { queuedFinal: true }; + }); + deliverReplies.mockResolvedValue({ delivered: true }); + editMessageTelegram.mockRejectedValue(new Error("500: edit failed after stop flush")); + + await dispatchWithContext({ context: createContext() }); + + expect(editMessageTelegram).toHaveBeenCalledWith(123, 777, "Short final", expect.any(Object)); + expect(deliverReplies).not.toHaveBeenCalled(); + expect(draftStream.stop).toHaveBeenCalled(); + }); + + it("falls back to normal delivery when existing preview edit fails", async () => { + const draftStream = createDraftStream(999); + 
createTelegramDraftStream.mockReturnValue(draftStream); + dispatchReplyWithBufferedBlockDispatcher.mockImplementation( + async ({ dispatcherOptions, replyOptions }) => { + await replyOptions?.onPartialReply?.({ text: "Hel" }); + await dispatcherOptions.deliver({ text: "Hello final" }, { kind: "final" }); + return { queuedFinal: true }; + }, + ); + deliverReplies.mockResolvedValue({ delivered: true }); + editMessageTelegram.mockRejectedValue(new Error("500: preview edit failed")); + + await dispatchWithContext({ context: createContext() }); + + expect(editMessageTelegram).toHaveBeenCalledWith(123, 999, "Hello final", expect.any(Object)); + expect(deliverReplies).toHaveBeenCalledWith( + expect.objectContaining({ + replies: [expect.objectContaining({ text: "Hello final" })], + }), + ); + }); + + it("falls back to normal delivery when stop-created preview has no message id", async () => { + const draftStream = { + update: vi.fn(), + flush: vi.fn().mockResolvedValue(undefined), + messageId: vi.fn().mockReturnValue(undefined), + clear: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockResolvedValue(undefined), + forceNewMessage: vi.fn(), + }; + createTelegramDraftStream.mockReturnValue(draftStream); + dispatchReplyWithBufferedBlockDispatcher.mockImplementation(async ({ dispatcherOptions }) => { + await dispatcherOptions.deliver({ text: "Short final" }, { kind: "final" }); + return { queuedFinal: true }; + }); + deliverReplies.mockResolvedValue({ delivered: true }); + + await dispatchWithContext({ context: createContext() }); + + expect(editMessageTelegram).not.toHaveBeenCalled(); + expect(deliverReplies).toHaveBeenCalledWith( + expect.objectContaining({ + replies: [expect.objectContaining({ text: "Short final" })], + }), + ); + expect(draftStream.stop).toHaveBeenCalled(); + }); + it("does not overwrite finalized preview when additional final payloads are sent", async () => { const draftStream = createDraftStream(999); 
createTelegramDraftStream.mockReturnValue(draftStream); @@ -691,6 +803,52 @@ describe("dispatchTelegramMessage draft streaming", () => { expect(deliverReplies).not.toHaveBeenCalled(); }); + it.each(["partial", "block"] as const)( + "keeps finalized text preview when the next assistant message is media-only (%s mode)", + async (streamMode) => { + let answerMessageId: number | undefined = 1001; + const answerDraftStream = { + update: vi.fn(), + flush: vi.fn().mockResolvedValue(undefined), + messageId: vi.fn().mockImplementation(() => answerMessageId), + clear: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockResolvedValue(undefined), + forceNewMessage: vi.fn().mockImplementation(() => { + answerMessageId = undefined; + }), + }; + const reasoningDraftStream = createDraftStream(); + createTelegramDraftStream + .mockImplementationOnce(() => answerDraftStream) + .mockImplementationOnce(() => reasoningDraftStream); + dispatchReplyWithBufferedBlockDispatcher.mockImplementation( + async ({ dispatcherOptions, replyOptions }) => { + await replyOptions?.onPartialReply?.({ text: "First message preview" }); + await dispatcherOptions.deliver({ text: "First message final" }, { kind: "final" }); + await replyOptions?.onAssistantMessageStart?.(); + await dispatcherOptions.deliver({ mediaUrl: "file:///tmp/voice.ogg" }, { kind: "final" }); + return { queuedFinal: true }; + }, + ); + deliverReplies.mockResolvedValue({ delivered: true }); + editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "1001" }); + const bot = createBot(); + + await dispatchWithContext({ context: createContext(), streamMode, bot }); + + expect(editMessageTelegram).toHaveBeenCalledWith( + 123, + 1001, + "First message final", + expect.any(Object), + ); + const deleteMessageCalls = ( + bot.api as unknown as { deleteMessage: { mock: { calls: unknown[][] } } } + ).deleteMessage.mock.calls; + expect(deleteMessageCalls).not.toContainEqual([123, 1001]); + }, + ); + it("maps finals 
correctly when archived preview id arrives during final flush", async () => { let answerMessageId: number | undefined; let answerDraftParams: diff --git a/src/telegram/bot-message-dispatch.ts b/src/telegram/bot-message-dispatch.ts index f45b79fb9ab..5b000a8dcd0 100644 --- a/src/telegram/bot-message-dispatch.ts +++ b/src/telegram/bot-message-dispatch.ts @@ -567,7 +567,10 @@ export const dispatchTelegramMessage = async ({ reasoningStepState.resetForNextStep(); if (answerLane.hasStreamedMessage) { const previewMessageId = answerLane.stream?.messageId(); - if (typeof previewMessageId === "number") { + // Only archive previews that still need a matching final text update. + // Once a preview has already been finalized, archiving it here causes + // cleanup to delete a user-visible final message on later media-only turns. + if (typeof previewMessageId === "number" && !finalizedPreviewByLane.answer) { archivedAnswerPreviews.push({ messageId: previewMessageId, textSnapshot: answerLane.lastPartialText, @@ -576,6 +579,8 @@ export const dispatchTelegramMessage = async ({ answerLane.stream?.forceNewMessage(); } resetDraftLaneState(answerLane); + // New assistant message boundary: this lane now tracks a fresh preview lifecycle. 
+ finalizedPreviewByLane.answer = false; } : undefined, onReasoningEnd: reasoningLane.stream diff --git a/src/telegram/bot-message.ts b/src/telegram/bot-message.ts index 6d9fa9ee451..1b598b71456 100644 --- a/src/telegram/bot-message.ts +++ b/src/telegram/bot-message.ts @@ -39,6 +39,7 @@ export const createTelegramMessageProcessor = (deps: TelegramMessageProcessorDep resolveGroupActivation, resolveGroupRequireMention, resolveTelegramGroupConfig, + sendChatActionHandler, runtime, replyToMode, streamMode, @@ -70,6 +71,7 @@ export const createTelegramMessageProcessor = (deps: TelegramMessageProcessorDep resolveGroupActivation, resolveGroupRequireMention, resolveTelegramGroupConfig, + sendChatActionHandler, }); if (!context) { return; diff --git a/src/telegram/bot-native-command-menu.test.ts b/src/telegram/bot-native-command-menu.test.ts index cabea3132d5..b73d4735875 100644 --- a/src/telegram/bot-native-command-menu.test.ts +++ b/src/telegram/bot-native-command-menu.test.ts @@ -86,4 +86,42 @@ describe("bot-native-command-menu", () => { expect(callOrder).toEqual(["delete", "set"]); }); + + it("retries with fewer commands on BOT_COMMANDS_TOO_MUCH", async () => { + const deleteMyCommands = vi.fn(async () => undefined); + const setMyCommands = vi + .fn() + .mockRejectedValueOnce(new Error("400: Bad Request: BOT_COMMANDS_TOO_MUCH")) + .mockResolvedValue(undefined); + const runtimeLog = vi.fn(); + + syncTelegramMenuCommands({ + bot: { + api: { + deleteMyCommands, + setMyCommands, + }, + } as unknown as Parameters[0]["bot"], + runtime: { + log: runtimeLog, + error: vi.fn(), + exit: vi.fn(), + } as Parameters[0]["runtime"], + commandsToRegister: Array.from({ length: 100 }, (_, i) => ({ + command: `cmd_${i}`, + description: `Command ${i}`, + })), + }); + + await vi.waitFor(() => { + expect(setMyCommands).toHaveBeenCalledTimes(2); + }); + const firstPayload = setMyCommands.mock.calls[0]?.[0] as Array; + const secondPayload = setMyCommands.mock.calls[1]?.[0] as Array; + 
expect(firstPayload).toHaveLength(100); + expect(secondPayload).toHaveLength(80); + expect(runtimeLog).toHaveBeenCalledWith( + "Telegram rejected 100 commands (BOT_COMMANDS_TOO_MUCH); retrying with 80.", + ); + }); }); diff --git a/src/telegram/bot-native-command-menu.ts b/src/telegram/bot-native-command-menu.ts index 5528fd06ff7..0f993b7cdba 100644 --- a/src/telegram/bot-native-command-menu.ts +++ b/src/telegram/bot-native-command-menu.ts @@ -7,6 +7,7 @@ import type { RuntimeEnv } from "../runtime.js"; import { withTelegramApiErrorLogging } from "./api-logging.js"; export const TELEGRAM_MAX_COMMANDS = 100; +const TELEGRAM_COMMAND_RETRY_RATIO = 0.8; export type TelegramMenuCommand = { command: string; @@ -18,6 +19,31 @@ type TelegramPluginCommandSpec = { description: string; }; +function isBotCommandsTooMuchError(err: unknown): boolean { + if (!err) { + return false; + } + const pattern = /\bBOT_COMMANDS_TOO_MUCH\b/i; + if (typeof err === "string") { + return pattern.test(err); + } + if (err instanceof Error) { + if (pattern.test(err.message)) { + return true; + } + } + if (typeof err === "object") { + const maybe = err as { description?: unknown; message?: unknown }; + if (typeof maybe.description === "string" && pattern.test(maybe.description)) { + return true; + } + if (typeof maybe.message === "string" && pattern.test(maybe.message)) { + return true; + } + } + return false; +} + export function buildPluginTelegramMenuCommands(params: { specs: TelegramPluginCommandSpec[]; existingCommands: Set; @@ -93,11 +119,34 @@ export function syncTelegramMenuCommands(params: { return; } - await withTelegramApiErrorLogging({ - operation: "setMyCommands", - runtime, - fn: () => bot.api.setMyCommands(commandsToRegister), - }); + let retryCommands = commandsToRegister; + while (retryCommands.length > 0) { + try { + await withTelegramApiErrorLogging({ + operation: "setMyCommands", + runtime, + fn: () => bot.api.setMyCommands(retryCommands), + }); + return; + } catch (err) { + if 
(!isBotCommandsTooMuchError(err)) { + throw err; + } + const nextCount = Math.floor(retryCommands.length * TELEGRAM_COMMAND_RETRY_RATIO); + const reducedCount = + nextCount < retryCommands.length ? nextCount : retryCommands.length - 1; + if (reducedCount <= 0) { + runtime.error?.( + "Telegram rejected native command registration (BOT_COMMANDS_TOO_MUCH); leaving menu empty. Reduce commands or disable channels.telegram.commands.native.", + ); + return; + } + runtime.log?.( + `Telegram rejected ${retryCommands.length} commands (BOT_COMMANDS_TOO_MUCH); retrying with ${reducedCount}.`, + ); + retryCommands = retryCommands.slice(0, reducedCount); + } + } }; void sync().catch((err) => { diff --git a/src/telegram/bot-native-commands.ts b/src/telegram/bot-native-commands.ts index 88316cbeb82..246732a6d1e 100644 --- a/src/telegram/bot-native-commands.ts +++ b/src/telegram/bot-native-commands.ts @@ -41,7 +41,7 @@ import { resolveAgentRoute } from "../routing/resolve-route.js"; import { resolveThreadSessionKeys } from "../routing/session-key.js"; import type { RuntimeEnv } from "../runtime.js"; import { withTelegramApiErrorLogging } from "./api-logging.js"; -import { isSenderAllowed, normalizeAllowFromWithStore } from "./bot-access.js"; +import { isSenderAllowed, normalizeDmAllowFromWithStore } from "./bot-access.js"; import { buildCappedTelegramMenuCommands, buildPluginTelegramMenuCommands, @@ -170,7 +170,6 @@ async function resolveTelegramCommandAuth(params: { const groupAllowContext = await resolveTelegramGroupAllowFromContext({ chatId, accountId, - dmPolicy: telegramCfg.dmPolicy ?? "pairing", isForum, messageThreadId, groupAllowFrom, @@ -252,9 +251,9 @@ async function resolveTelegramCommandAuth(params: { } } - const dmAllow = normalizeAllowFromWithStore({ + const dmAllow = normalizeDmAllowFromWithStore({ allowFrom: allowFrom, - storeAllowFrom, + storeAllowFrom: isGroup ? [] : storeAllowFrom, dmPolicy: telegramCfg.dmPolicy ?? 
"pairing", }); const senderAllowed = isSenderAllowed({ diff --git a/src/telegram/bot.create-telegram-bot.test.ts b/src/telegram/bot.create-telegram-bot.test.ts index 942a1c6c2b3..4be6b0dcbf3 100644 --- a/src/telegram/bot.create-telegram-bot.test.ts +++ b/src/telegram/bot.create-telegram-bot.test.ts @@ -1416,6 +1416,30 @@ describe("createTelegramBot", () => { expect(replySpy.mock.calls.length, testCase.name).toBe(0); } }); + it("blocks group sender not in groupAllowFrom even when sender is paired in DM store", async () => { + resetHarnessSpies(); + loadConfig.mockReturnValue({ + channels: { + telegram: { + groupPolicy: "allowlist", + groupAllowFrom: ["222222222"], + groups: { "*": { requireMention: false } }, + }, + }, + }); + readChannelAllowFromStore.mockResolvedValueOnce(["123456789"]); + + await dispatchMessage({ + message: { + chat: { id: -100123456789, type: "group", title: "Test Group" }, + from: { id: 123456789, username: "testuser" }, + text: "hello", + date: 1736380800, + }, + }); + + expect(replySpy).not.toHaveBeenCalled(); + }); it("allows control commands with TG-prefixed groupAllowFrom entries", async () => { loadConfig.mockReturnValue({ channels: { diff --git a/src/telegram/bot.test.ts b/src/telegram/bot.test.ts index 03380dbbf62..2ffcc489baf 100644 --- a/src/telegram/bot.test.ts +++ b/src/telegram/bot.test.ts @@ -193,6 +193,50 @@ describe("createTelegramBot", () => { expect(answerCallbackQuerySpy).toHaveBeenCalledWith("cbq-2"); }); + it("allows callback_query in groups when group policy authorizes the sender", async () => { + onSpy.mockClear(); + editMessageTextSpy.mockClear(); + listSkillCommandsForAgents.mockClear(); + + createTelegramBot({ + token: "tok", + config: { + channels: { + telegram: { + dmPolicy: "open", + capabilities: { inlineButtons: "allowlist" }, + allowFrom: [], + groupPolicy: "open", + groups: { "*": { requireMention: false } }, + }, + }, + }, + }); + const callbackHandler = onSpy.mock.calls.find((call) => call[0] === 
"callback_query")?.[1] as ( + ctx: Record, + ) => Promise; + expect(callbackHandler).toBeDefined(); + + await callbackHandler({ + callbackQuery: { + id: "cbq-group-1", + data: "commands_page_2", + from: { id: 42, first_name: "Ada", username: "ada_bot" }, + message: { + chat: { id: -100999, type: "supergroup", title: "Test Group" }, + date: 1736380800, + message_id: 20, + }, + }, + me: { username: "openclaw_bot" }, + getFile: async () => ({ download: async () => new Uint8Array() }), + }); + + // The callback should be processed (not silently blocked) + expect(editMessageTextSpy).toHaveBeenCalledTimes(1); + expect(answerCallbackQuerySpy).toHaveBeenCalledWith("cbq-group-1"); + }); + it("edits commands list for pagination callbacks", async () => { onSpy.mockClear(); listSkillCommandsForAgents.mockClear(); @@ -832,6 +876,95 @@ describe("createTelegramBot", () => { ); }); + it.each([ + { + name: "blocks reaction when dmPolicy is disabled", + updateId: 510, + channelConfig: { dmPolicy: "disabled", reactionNotifications: "all" }, + reaction: { + chat: { id: 1234, type: "private" }, + message_id: 42, + user: { id: 9, first_name: "Ada" }, + date: 1736380800, + old_reaction: [], + new_reaction: [{ type: "emoji", emoji: "👍" }], + }, + expectedEnqueueCalls: 0, + }, + { + name: "blocks reaction in allowlist mode for unauthorized direct sender", + updateId: 511, + channelConfig: { + dmPolicy: "allowlist", + allowFrom: ["12345"], + reactionNotifications: "all", + }, + reaction: { + chat: { id: 1234, type: "private" }, + message_id: 42, + user: { id: 9, first_name: "Ada" }, + date: 1736380800, + old_reaction: [], + new_reaction: [{ type: "emoji", emoji: "👍" }], + }, + expectedEnqueueCalls: 0, + }, + { + name: "allows reaction in allowlist mode for authorized direct sender", + updateId: 512, + channelConfig: { dmPolicy: "allowlist", allowFrom: ["9"], reactionNotifications: "all" }, + reaction: { + chat: { id: 1234, type: "private" }, + message_id: 42, + user: { id: 9, first_name: 
"Ada" }, + date: 1736380800, + old_reaction: [], + new_reaction: [{ type: "emoji", emoji: "👍" }], + }, + expectedEnqueueCalls: 1, + }, + { + name: "blocks reaction in group allowlist mode for unauthorized sender", + updateId: 513, + channelConfig: { + dmPolicy: "open", + groupPolicy: "allowlist", + groupAllowFrom: ["12345"], + reactionNotifications: "all", + }, + reaction: { + chat: { id: 9999, type: "supergroup" }, + message_id: 77, + user: { id: 9, first_name: "Ada" }, + date: 1736380800, + old_reaction: [], + new_reaction: [{ type: "emoji", emoji: "🔥" }], + }, + expectedEnqueueCalls: 0, + }, + ])("$name", async ({ updateId, channelConfig, reaction, expectedEnqueueCalls }) => { + onSpy.mockClear(); + enqueueSystemEventSpy.mockClear(); + + loadConfig.mockReturnValue({ + channels: { + telegram: channelConfig, + }, + }); + + createTelegramBot({ token: "tok" }); + const handler = getOnHandler("message_reaction") as ( + ctx: Record, + ) => Promise; + + await handler({ + update: { update_id: updateId }, + messageReaction: reaction, + }); + + expect(enqueueSystemEventSpy).toHaveBeenCalledTimes(expectedEnqueueCalls); + }); + it("skips reaction when reactionNotifications is off", async () => { onSpy.mockClear(); enqueueSystemEventSpy.mockClear(); diff --git a/src/telegram/bot.ts b/src/telegram/bot.ts index 409815fa3ae..a501be23206 100644 --- a/src/telegram/bot.ts +++ b/src/telegram/bot.ts @@ -40,6 +40,7 @@ import { resolveTelegramStreamMode, } from "./bot/helpers.js"; import { resolveTelegramFetch } from "./fetch.js"; +import { createTelegramSendChatActionHandler } from "./sendchataction-401-backoff.js"; export type TelegramBotOptions = { token: string; @@ -348,6 +349,20 @@ export function createTelegramBot(opts: TelegramBotOptions) { return { groupConfig, topicConfig }; }; + // Global sendChatAction handler with 401 backoff / circuit breaker (issue #27092). + // Created BEFORE the message processor so it can be injected into every message context. 
+ // Shared across all message contexts for this account so that consecutive 401s + // from ANY chat are tracked together — prevents infinite retry storms. + const sendChatActionHandler = createTelegramSendChatActionHandler({ + sendChatActionFn: (chatId, action, threadParams) => + bot.api.sendChatAction( + chatId, + action, + threadParams as Parameters[2], + ), + logger: (message) => logVerbose(`telegram: ${message}`), + }); + const processMessage = createTelegramMessageProcessor({ bot, cfg, @@ -363,6 +378,7 @@ export function createTelegramBot(opts: TelegramBotOptions) { resolveGroupActivation, resolveGroupRequireMention, resolveTelegramGroupConfig, + sendChatActionHandler, runtime, replyToMode, streamMode, diff --git a/src/telegram/bot/helpers.ts b/src/telegram/bot/helpers.ts index 493ad010082..11d9798e262 100644 --- a/src/telegram/bot/helpers.ts +++ b/src/telegram/bot/helpers.ts @@ -3,11 +3,8 @@ import { formatLocationText, type NormalizedLocation } from "../../channels/loca import { resolveTelegramPreviewStreamMode } from "../../config/discord-preview-streaming.js"; import type { TelegramGroupConfig, TelegramTopicConfig } from "../../config/types.js"; import { readChannelAllowFromStore } from "../../pairing/pairing-store.js"; -import { - firstDefined, - normalizeAllowFromWithStore, - type NormalizedAllowFrom, -} from "../bot-access.js"; +import { normalizeAccountId } from "../../routing/session-key.js"; +import { firstDefined, normalizeAllowFrom, type NormalizedAllowFrom } from "../bot-access.js"; import type { TelegramStreamMode } from "./types.js"; const TELEGRAM_GENERAL_TOPIC_ID = 1; @@ -20,7 +17,6 @@ export type TelegramThreadSpec = { export async function resolveTelegramGroupAllowFromContext(params: { chatId: string | number; accountId?: string; - dmPolicy?: string; isForum?: boolean; messageThreadId?: number | null; groupAllowFrom?: Array; @@ -37,25 +33,22 @@ export async function resolveTelegramGroupAllowFromContext(params: { effectiveGroupAllow: 
NormalizedAllowFrom; hasGroupAllowOverride: boolean; }> { + const accountId = normalizeAccountId(params.accountId); const resolvedThreadId = resolveTelegramForumThreadId({ isForum: params.isForum, messageThreadId: params.messageThreadId, }); - const storeAllowFrom = await readChannelAllowFromStore( - "telegram", - process.env, - params.accountId, - ).catch(() => []); + const storeAllowFrom = await readChannelAllowFromStore("telegram", process.env, accountId).catch( + () => [], + ); const { groupConfig, topicConfig } = params.resolveTelegramGroupConfig( params.chatId, resolvedThreadId, ); const groupAllowOverride = firstDefined(topicConfig?.allowFrom, groupConfig?.allowFrom); - const effectiveGroupAllow = normalizeAllowFromWithStore({ - allowFrom: groupAllowOverride ?? params.groupAllowFrom, - storeAllowFrom, - dmPolicy: params.dmPolicy, - }); + // Group sender access must remain explicit (groupAllowFrom/per-group allowFrom only). + // DM pairing store entries are not a group authorization source. + const effectiveGroupAllow = normalizeAllowFrom(groupAllowOverride ?? 
params.groupAllowFrom); const hasGroupAllowOverride = typeof groupAllowOverride !== "undefined"; return { resolvedThreadId, diff --git a/src/telegram/lane-delivery.ts b/src/telegram/lane-delivery.ts index 91aa59dc888..890a2a5ec97 100644 --- a/src/telegram/lane-delivery.ts +++ b/src/telegram/lane-delivery.ts @@ -104,6 +104,61 @@ type ConsumeArchivedAnswerPreviewParams = { export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) { const getLanePreviewText = (lane: DraftLaneState) => lane.lastPartialText; + const shouldSkipRegressivePreviewUpdate = (args: { + currentPreviewText: string | undefined; + text: string; + skipRegressive: "always" | "existingOnly"; + hadPreviewMessage: boolean; + }): boolean => { + const currentPreviewText = args.currentPreviewText; + if (currentPreviewText === undefined) { + return false; + } + return ( + currentPreviewText.startsWith(args.text) && + args.text.length < currentPreviewText.length && + (args.skipRegressive === "always" || args.hadPreviewMessage) + ); + }; + + const tryEditPreviewMessage = async (args: { + laneName: LaneName; + messageId: number; + text: string; + context: "final" | "update"; + previewButtons?: TelegramInlineButtons; + updateLaneSnapshot: boolean; + lane: DraftLaneState; + treatEditFailureAsDelivered: boolean; + }): Promise => { + try { + await params.editPreview({ + laneName: args.laneName, + messageId: args.messageId, + text: args.text, + previewButtons: args.previewButtons, + context: args.context, + }); + if (args.updateLaneSnapshot) { + args.lane.lastPartialText = args.text; + } + params.markDelivered(); + return true; + } catch (err) { + if (args.treatEditFailureAsDelivered) { + params.log( + `telegram: ${args.laneName} preview ${args.context} edit failed after stop-created flush; treating as delivered (${String(err)})`, + ); + params.markDelivered(); + return true; + } + params.log( + `telegram: ${args.laneName} preview ${args.context} edit failed; falling back to standard send 
(${String(err)})`, + ); + return false; + } + }; + const tryUpdatePreviewForLane = async ({ lane, laneName, @@ -122,6 +177,38 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) { const lanePreviewMessageId = lane.stream.messageId(); const hadPreviewMessage = typeof previewMessageIdOverride === "number" || typeof lanePreviewMessageId === "number"; + const stopCreatesFirstPreview = stopBeforeEdit && !hadPreviewMessage && context === "final"; + if (stopCreatesFirstPreview) { + // Final stop() can create the first visible preview message. + // Prime pending text so the stop flush sends the final text snapshot. + lane.stream.update(text); + await params.stopDraftLane(lane); + const previewMessageId = lane.stream.messageId(); + if (typeof previewMessageId !== "number") { + return false; + } + const currentPreviewText = previewTextSnapshot ?? getLanePreviewText(lane); + const shouldSkipRegressive = shouldSkipRegressivePreviewUpdate({ + currentPreviewText, + text, + skipRegressive, + hadPreviewMessage, + }); + if (shouldSkipRegressive) { + params.markDelivered(); + return true; + } + return tryEditPreviewMessage({ + laneName, + messageId: previewMessageId, + text, + context, + previewButtons, + updateLaneSnapshot, + lane, + treatEditFailureAsDelivered: true, + }); + } if (stopBeforeEdit) { await params.stopDraftLane(lane); } @@ -133,34 +220,26 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) { return false; } const currentPreviewText = previewTextSnapshot ?? 
getLanePreviewText(lane); - const shouldSkipRegressive = - Boolean(currentPreviewText) && - currentPreviewText.startsWith(text) && - text.length < currentPreviewText.length && - (skipRegressive === "always" || hadPreviewMessage); + const shouldSkipRegressive = shouldSkipRegressivePreviewUpdate({ + currentPreviewText, + text, + skipRegressive, + hadPreviewMessage, + }); if (shouldSkipRegressive) { params.markDelivered(); return true; } - try { - await params.editPreview({ - laneName, - messageId: previewMessageId, - text, - previewButtons, - context, - }); - if (updateLaneSnapshot) { - lane.lastPartialText = text; - } - params.markDelivered(); - return true; - } catch (err) { - params.log( - `telegram: ${laneName} preview ${context} edit failed; falling back to standard send (${String(err)})`, - ); - return false; - } + return tryEditPreviewMessage({ + laneName, + messageId: previewMessageId, + text, + context, + previewButtons, + updateLaneSnapshot, + lane, + treatEditFailureAsDelivered: false, + }); }; const consumeArchivedAnswerPreviewForFinal = async ({ diff --git a/src/telegram/monitor.test.ts b/src/telegram/monitor.test.ts index 49fbcc13155..5c0df3de6ef 100644 --- a/src/telegram/monitor.test.ts +++ b/src/telegram/monitor.test.ts @@ -1,4 +1,4 @@ -import { beforeEach, describe, expect, it, vi } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { monitorTelegramProvider } from "./monitor.js"; type MockCtx = { @@ -67,6 +67,36 @@ const { startTelegramWebhookSpy } = vi.hoisted(() => ({ startTelegramWebhookSpy: vi.fn(async () => ({ server: { close: vi.fn() }, stop: vi.fn() })), })); +type RunnerStub = { + task: () => Promise; + stop: ReturnType void | Promise>>; + isRunning: () => boolean; +}; + +const makeRunnerStub = (overrides: Partial = {}): RunnerStub => ({ + task: overrides.task ?? (() => Promise.resolve()), + stop: overrides.stop ?? vi.fn<() => void | Promise>(), + isRunning: overrides.isRunning ?? 
(() => false), +}); + +async function monitorWithAutoAbort( + opts: Omit[0], "abortSignal"> = {}, +) { + const abort = new AbortController(); + runSpy.mockImplementationOnce(() => + makeRunnerStub({ + task: async () => { + abort.abort(); + }, + }), + ); + await monitorTelegramProvider({ + token: "tok", + ...opts, + abortSignal: abort.signal, + }); +} + vi.mock("../config/config.js", async (importOriginal) => { const actual = await importOriginal(); return { @@ -130,26 +160,37 @@ vi.mock("../auto-reply/reply.js", () => ({ })); describe("monitorTelegramProvider (grammY)", () => { + let consoleErrorSpy: { mockRestore: () => void } | undefined; + beforeEach(() => { loadConfig.mockReturnValue({ agents: { defaults: { maxConcurrent: 2 } }, channels: { telegram: {} }, }); initSpy.mockClear(); - runSpy.mockClear(); + runSpy.mockReset().mockImplementation(() => + makeRunnerStub({ + task: () => Promise.reject(new Error("runSpy called without explicit test stub")), + }), + ); computeBackoff.mockClear(); sleepWithAbort.mockClear(); startTelegramWebhookSpy.mockClear(); registerUnhandledRejectionHandlerMock.mockClear(); resetUnhandledRejection(); createTelegramBotErrors.length = 0; + consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + }); + + afterEach(() => { + consoleErrorSpy?.mockRestore(); }); it("processes a DM and sends reply", async () => { Object.values(api).forEach((fn) => { fn?.mockReset?.(); }); - await monitorTelegramProvider({ token: "tok" }); + await monitorWithAutoAbort(); expect(handlers.message).toBeDefined(); await handlers.message?.({ message: { @@ -172,7 +213,7 @@ describe("monitorTelegramProvider (grammY)", () => { channels: { telegram: {} }, }); - await monitorTelegramProvider({ token: "tok" }); + await monitorWithAutoAbort(); expect(runSpy).toHaveBeenCalledWith( expect.anything(), @@ -180,7 +221,7 @@ describe("monitorTelegramProvider (grammY)", () => { sink: { concurrency: 3 }, runner: expect.objectContaining({ silent: true, - 
maxRetryTime: 5 * 60 * 1000, + maxRetryTime: 60 * 60 * 1000, retryInterval: "exponential", }), }), @@ -191,7 +232,7 @@ describe("monitorTelegramProvider (grammY)", () => { Object.values(api).forEach((fn) => { fn?.mockReset?.(); }); - await monitorTelegramProvider({ token: "tok" }); + await monitorWithAutoAbort(); await handlers.message?.({ message: { message_id: 2, @@ -205,24 +246,27 @@ describe("monitorTelegramProvider (grammY)", () => { }); it("retries on recoverable undici fetch errors", async () => { + const abort = new AbortController(); const networkError = Object.assign(new TypeError("fetch failed"), { cause: Object.assign(new Error("connect timeout"), { code: "UND_ERR_CONNECT_TIMEOUT", }), }); runSpy - .mockImplementationOnce(() => ({ - task: () => Promise.reject(networkError), - stop: vi.fn(), - isRunning: (): boolean => false, - })) - .mockImplementationOnce(() => ({ - task: () => Promise.resolve(), - stop: vi.fn(), - isRunning: (): boolean => false, - })); + .mockImplementationOnce(() => + makeRunnerStub({ + task: () => Promise.reject(networkError), + }), + ) + .mockImplementationOnce(() => + makeRunnerStub({ + task: async () => { + abort.abort(); + }, + }), + ); - await monitorTelegramProvider({ token: "tok" }); + await monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); expect(computeBackoff).toHaveBeenCalled(); expect(sleepWithAbort).toHaveBeenCalled(); @@ -230,6 +274,7 @@ describe("monitorTelegramProvider (grammY)", () => { }); it("deletes webhook before starting polling", async () => { + const abort = new AbortController(); const order: string[] = []; api.deleteWebhook.mockReset(); api.deleteWebhook.mockImplementationOnce(async () => { @@ -238,20 +283,21 @@ describe("monitorTelegramProvider (grammY)", () => { }); runSpy.mockImplementationOnce(() => { order.push("run"); - return { - task: () => Promise.resolve(), - stop: vi.fn(), - isRunning: () => false, - }; + return makeRunnerStub({ + task: async () => { + abort.abort(); + }, + 
}); }); - await monitorTelegramProvider({ token: "tok" }); + await monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); expect(api.deleteWebhook).toHaveBeenCalledWith({ drop_pending_updates: false }); expect(order).toEqual(["deleteWebhook", "run"]); }); it("retries recoverable deleteWebhook failures before polling", async () => { + const abort = new AbortController(); const cleanupError = Object.assign(new TypeError("fetch failed"), { cause: Object.assign(new Error("connect timeout"), { code: "UND_ERR_CONNECT_TIMEOUT", @@ -259,13 +305,15 @@ describe("monitorTelegramProvider (grammY)", () => { }); api.deleteWebhook.mockReset(); api.deleteWebhook.mockRejectedValueOnce(cleanupError).mockResolvedValueOnce(true); - runSpy.mockImplementationOnce(() => ({ - task: () => Promise.resolve(), - stop: vi.fn(), - isRunning: () => false, - })); + runSpy.mockImplementationOnce(() => + makeRunnerStub({ + task: async () => { + abort.abort(); + }, + }), + ); - await monitorTelegramProvider({ token: "tok" }); + await monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); expect(api.deleteWebhook).toHaveBeenCalledTimes(2); expect(computeBackoff).toHaveBeenCalled(); @@ -274,6 +322,7 @@ describe("monitorTelegramProvider (grammY)", () => { }); it("retries setup-time recoverable errors before starting polling", async () => { + const abort = new AbortController(); const setupError = Object.assign(new TypeError("fetch failed"), { cause: Object.assign(new Error("connect timeout"), { code: "UND_ERR_CONNECT_TIMEOUT", @@ -281,13 +330,15 @@ describe("monitorTelegramProvider (grammY)", () => { }); createTelegramBotErrors.push(setupError); - runSpy.mockImplementationOnce(() => ({ - task: () => Promise.resolve(), - stop: vi.fn(), - isRunning: () => false, - })); + runSpy.mockImplementationOnce(() => + makeRunnerStub({ + task: async () => { + abort.abort(); + }, + }), + ); - await monitorTelegramProvider({ token: "tok" }); + await monitorTelegramProvider({ token: 
"tok", abortSignal: abort.signal }); expect(computeBackoff).toHaveBeenCalled(); expect(sleepWithAbort).toHaveBeenCalled(); @@ -295,6 +346,7 @@ describe("monitorTelegramProvider (grammY)", () => { }); it("awaits runner.stop before retrying after recoverable polling error", async () => { + const abort = new AbortController(); const recoverableError = Object.assign(new TypeError("fetch failed"), { cause: Object.assign(new Error("connect timeout"), { code: "UND_ERR_CONNECT_TIMEOUT", @@ -307,21 +359,22 @@ describe("monitorTelegramProvider (grammY)", () => { }); runSpy - .mockImplementationOnce(() => ({ - task: () => Promise.reject(recoverableError), - stop: firstStop, - isRunning: () => false, - })) + .mockImplementationOnce(() => + makeRunnerStub({ + task: () => Promise.reject(recoverableError), + stop: firstStop, + }), + ) .mockImplementationOnce(() => { expect(firstStopped).toBe(true); - return { - task: () => Promise.resolve(), - stop: vi.fn(), - isRunning: () => false, - }; + return makeRunnerStub({ + task: async () => { + abort.abort(); + }, + }); }); - await monitorTelegramProvider({ token: "tok" }); + await monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); expect(firstStop).toHaveBeenCalled(); expect(computeBackoff).toHaveBeenCalled(); @@ -330,16 +383,17 @@ describe("monitorTelegramProvider (grammY)", () => { }); it("surfaces non-recoverable errors", async () => { - runSpy.mockImplementationOnce(() => ({ - task: () => Promise.reject(new Error("bad token")), - stop: vi.fn(), - isRunning: (): boolean => false, - })); + runSpy.mockImplementationOnce(() => + makeRunnerStub({ + task: () => Promise.reject(new Error("bad token")), + }), + ); await expect(monitorTelegramProvider({ token: "tok" })).rejects.toThrow("bad token"); }); it("force-restarts polling when unhandled network rejection stalls runner", async () => { + const abort = new AbortController(); let running = true; let releaseTask: (() => void) | undefined; const stop = vi.fn(async () => 
{ @@ -348,21 +402,25 @@ describe("monitorTelegramProvider (grammY)", () => { }); runSpy - .mockImplementationOnce(() => ({ - task: () => - new Promise((resolve) => { - releaseTask = resolve; - }), - stop, - isRunning: () => running, - })) - .mockImplementationOnce(() => ({ - task: () => Promise.resolve(), - stop: vi.fn(), - isRunning: () => false, - })); + .mockImplementationOnce(() => + makeRunnerStub({ + task: () => + new Promise((resolve) => { + releaseTask = resolve; + }), + stop, + isRunning: () => running, + }), + ) + .mockImplementationOnce(() => + makeRunnerStub({ + task: async () => { + abort.abort(); + }, + }), + ); - const monitor = monitorTelegramProvider({ token: "tok" }); + const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); await vi.waitFor(() => expect(runSpy).toHaveBeenCalledTimes(1)); expect(emitUnhandledRejection(new TypeError("fetch failed"))).toBe(true); diff --git a/src/telegram/monitor.ts b/src/telegram/monitor.ts index 8637f488dd6..06410b74ed1 100644 --- a/src/telegram/monitor.ts +++ b/src/telegram/monitor.ts @@ -2,6 +2,7 @@ import { type RunOptions, run } from "@grammyjs/runner"; import { resolveAgentMaxConcurrent } from "../config/agent-limits.js"; import type { OpenClawConfig } from "../config/config.js"; import { loadConfig } from "../config/config.js"; +import { waitForAbortSignal } from "../infra/abort-signal.js"; import { computeBackoff, sleepWithAbort } from "../infra/backoff.js"; import { formatErrorMessage } from "../infra/errors.js"; import { formatDurationPrecise } from "../infra/format-time/format-duration.ts"; @@ -45,8 +46,9 @@ export function createTelegramRunnerOptions(cfg: OpenClawConfig): RunOptions; + const isGetUpdatesConflict = (err: unknown) => { if (!err || typeof err !== "object") { return false; @@ -169,16 +173,7 @@ export async function monitorTelegramProvider(opts: MonitorTelegramOpts = {}) { abortSignal: opts.abortSignal, publicUrl: opts.webhookUrl, }); - const abortSignal = 
opts.abortSignal; - if (abortSignal && !abortSignal.aborted) { - await new Promise((resolve) => { - const onAbort = () => { - abortSignal.removeEventListener("abort", onAbort); - resolve(); - }; - abortSignal.addEventListener("abort", onAbort, { once: true }); - }); - } + await waitForAbortSignal(opts.abortSignal); return; } @@ -186,21 +181,11 @@ export async function monitorTelegramProvider(opts: MonitorTelegramOpts = {}) { let restartAttempts = 0; let webhookCleared = false; const runnerOptions = createTelegramRunnerOptions(cfg); - const waitBeforeRetryOnRecoverableSetupError = async ( - err: unknown, - logPrefix: string, - ): Promise => { - if (opts.abortSignal?.aborted) { - return false; - } - if (!isRecoverableTelegramNetworkError(err, { context: "unknown" })) { - throw err; - } + const waitBeforeRestart = async (buildLine: (delay: string) => string): Promise => { restartAttempts += 1; const delayMs = computeBackoff(TELEGRAM_POLL_RESTART_POLICY, restartAttempts); - (opts.runtime?.error ?? 
console.error)( - `${logPrefix}: ${formatErrorMessage(err)}; retrying in ${formatDurationPrecise(delayMs)}.`, - ); + const delay = formatDurationPrecise(delayMs); + log(buildLine(delay)); try { await sleepWithAbort(delayMs, opts.abortSignal); } catch (sleepErr) { @@ -212,10 +197,24 @@ export async function monitorTelegramProvider(opts: MonitorTelegramOpts = {}) { return true; }; - while (!opts.abortSignal?.aborted) { - let bot; + const waitBeforeRetryOnRecoverableSetupError = async ( + err: unknown, + logPrefix: string, + ): Promise => { + if (opts.abortSignal?.aborted) { + return false; + } + if (!isRecoverableTelegramNetworkError(err, { context: "unknown" })) { + throw err; + } + return waitBeforeRestart( + (delay) => `${logPrefix}: ${formatErrorMessage(err)}; retrying in ${delay}.`, + ); + }; + + const createPollingBot = async (): Promise => { try { - bot = createTelegramBot({ + return createTelegramBot({ token, runtime: opts.runtime, proxyFetch, @@ -232,31 +231,34 @@ export async function monitorTelegramProvider(opts: MonitorTelegramOpts = {}) { "Telegram setup network error", ); if (!shouldRetry) { - return; + return undefined; } - continue; + return undefined; } + }; - if (!webhookCleared) { - try { - await withTelegramApiErrorLogging({ - operation: "deleteWebhook", - runtime: opts.runtime, - fn: () => bot.api.deleteWebhook({ drop_pending_updates: false }), - }); - webhookCleared = true; - } catch (err) { - const shouldRetry = await waitBeforeRetryOnRecoverableSetupError( - err, - "Telegram webhook cleanup failed", - ); - if (!shouldRetry) { - return; - } - continue; - } + const ensureWebhookCleanup = async (bot: TelegramBot): Promise<"ready" | "retry" | "exit"> => { + if (webhookCleared) { + return "ready"; } + try { + await withTelegramApiErrorLogging({ + operation: "deleteWebhook", + runtime: opts.runtime, + fn: () => bot.api.deleteWebhook({ drop_pending_updates: false }), + }); + webhookCleared = true; + return "ready"; + } catch (err) { + const 
shouldRetry = await waitBeforeRetryOnRecoverableSetupError( + err, + "Telegram webhook cleanup failed", + ); + return shouldRetry ? "retry" : "exit"; + } + }; + const runPollingCycle = async (bot: TelegramBot): Promise<"continue" | "exit"> => { const runner = run(bot, runnerOptions); activeRunner = runner; let stopPromise: Promise | undefined; @@ -277,17 +279,17 @@ export async function monitorTelegramProvider(opts: MonitorTelegramOpts = {}) { try { // runner.task() returns a promise that resolves when the runner stops await runner.task(); - if (!forceRestarted) { - return; + if (opts.abortSignal?.aborted) { + return "exit"; } + const reason = forceRestarted + ? "unhandled network error" + : "runner stopped (maxRetryTime exceeded or graceful stop)"; forceRestarted = false; - restartAttempts += 1; - const delayMs = computeBackoff(TELEGRAM_POLL_RESTART_POLICY, restartAttempts); - log( - `Telegram polling runner restarted after unhandled network error; retrying in ${formatDurationPrecise(delayMs)}.`, + const shouldRestart = await waitBeforeRestart( + (delay) => `Telegram polling runner stopped (${reason}); restarting in ${delay}.`, ); - await sleepWithAbort(delayMs, opts.abortSignal); - continue; + return shouldRestart ? "continue" : "exit"; } catch (err) { forceRestarted = false; if (opts.abortSignal?.aborted) { @@ -298,25 +300,36 @@ export async function monitorTelegramProvider(opts: MonitorTelegramOpts = {}) { if (!isConflict && !isRecoverable) { throw err; } - restartAttempts += 1; - const delayMs = computeBackoff(TELEGRAM_POLL_RESTART_POLICY, restartAttempts); const reason = isConflict ? "getUpdates conflict" : "network error"; const errMsg = formatErrorMessage(err); - (opts.runtime?.error ?? 
console.error)( - `Telegram ${reason}: ${errMsg}; retrying in ${formatDurationPrecise(delayMs)}.`, + const shouldRestart = await waitBeforeRestart( + (delay) => `Telegram ${reason}: ${errMsg}; retrying in ${delay}.`, ); - try { - await sleepWithAbort(delayMs, opts.abortSignal); - } catch (sleepErr) { - if (opts.abortSignal?.aborted) { - return; - } - throw sleepErr; - } + return shouldRestart ? "continue" : "exit"; } finally { opts.abortSignal?.removeEventListener("abort", stopOnAbort); await stopRunner(); } + }; + + while (!opts.abortSignal?.aborted) { + const bot = await createPollingBot(); + if (!bot) { + continue; + } + + const cleanupState = await ensureWebhookCleanup(bot); + if (cleanupState === "retry") { + continue; + } + if (cleanupState === "exit") { + return; + } + + const state = await runPollingCycle(bot); + if (state === "exit") { + return; + } } } finally { unregisterHandler(); diff --git a/src/telegram/send.test.ts b/src/telegram/send.test.ts index 37d881d843c..b589fdcf52b 100644 --- a/src/telegram/send.test.ts +++ b/src/telegram/send.test.ts @@ -196,6 +196,10 @@ describe("sendMessageTelegram", () => { for (const testCase of cases) { botCtorSpy.mockClear(); loadConfig.mockReturnValue(testCase.cfg); + botApi.sendMessage.mockResolvedValue({ + message_id: 1, + chat: { id: "123" }, + }); await sendMessageTelegram("123", "hi", testCase.opts); expect(botCtorSpy, testCase.name).toHaveBeenCalledWith( "tok", @@ -325,6 +329,40 @@ describe("sendMessageTelegram", () => { } }); + it("fails when Telegram text send returns no message_id", async () => { + const sendMessage = vi.fn().mockResolvedValue({ + chat: { id: "123" }, + }); + const api = { sendMessage } as unknown as { + sendMessage: typeof sendMessage; + }; + + await expect( + sendMessageTelegram("123", "hi", { + token: "tok", + api, + }), + ).rejects.toThrow(/returned no message_id/i); + }); + + it("fails when Telegram media send returns no message_id", async () => { + mockLoadedMedia({ contentType: 
"image/png", fileName: "photo.png" }); + const sendPhoto = vi.fn().mockResolvedValue({ + chat: { id: "123" }, + }); + const api = { sendPhoto } as unknown as { + sendPhoto: typeof sendPhoto; + }; + + await expect( + sendMessageTelegram("123", "caption", { + token: "tok", + api, + mediaUrl: "https://example.com/photo.png", + }), + ).rejects.toThrow(/returned no message_id/i); + }); + it("uses native fetch for BAN compatibility when api is omitted", async () => { const originalFetch = globalThis.fetch; const originalBun = (globalThis as { Bun?: unknown }).Bun; @@ -1242,6 +1280,23 @@ describe("sendStickerTelegram", () => { expect(sendSticker).toHaveBeenNthCalledWith(2, chatId, "fileId123", undefined); expect(res.messageId).toBe("109"); }); + + it("fails when sticker send returns no message_id", async () => { + const chatId = "123"; + const sendSticker = vi.fn().mockResolvedValue({ + chat: { id: chatId }, + }); + const api = { sendSticker } as unknown as { + sendSticker: typeof sendSticker; + }; + + await expect( + sendStickerTelegram(chatId, "fileId123", { + token: "tok", + api, + }), + ).rejects.toThrow(/returned no message_id/i); + }); }); describe("shared send behaviors", () => { @@ -1504,6 +1559,20 @@ describe("sendPollTelegram", () => { expect(api.sendPoll).not.toHaveBeenCalled(); }); + + it("fails when poll send returns no message_id", async () => { + const api = { + sendPoll: vi.fn(async () => ({ chat: { id: 555 }, poll: { id: "p1" } })), + }; + + await expect( + sendPollTelegram( + "123", + { question: "Q", options: ["A", "B"] }, + { token: "t", api: api as unknown as Bot["api"] }, + ), + ).rejects.toThrow(/returned no message_id/i); + }); }); describe("createForumTopicTelegram", () => { diff --git a/src/telegram/send.ts b/src/telegram/send.ts index 85327df22b5..ceaa9113e32 100644 --- a/src/telegram/send.ts +++ b/src/telegram/send.ts @@ -86,6 +86,16 @@ type TelegramReactionOpts = { retry?: RetryConfig; }; +function resolveTelegramMessageIdOrThrow( + result: 
TelegramMessageLike | null | undefined, + context: string, +): number { + if (typeof result?.message_id === "number" && Number.isFinite(result.message_id)) { + return Math.trunc(result.message_id); + } + throw new Error(`Telegram ${context} returned no message_id`); +} + const PARSE_ERR_RE = /can't parse entities|parse entities|find end of the entity/i; const THREAD_NOT_FOUND_RE = /400:\s*Bad Request:\s*message thread not found/i; const MESSAGE_NOT_MODIFIED_RE = @@ -685,11 +695,9 @@ export async function sendMessageTelegram( })(); const result = await sendMedia(mediaSender.label, mediaSender.sender); - const mediaMessageId = String(result?.message_id ?? "unknown"); + const mediaMessageId = resolveTelegramMessageIdOrThrow(result, "media send"); const resolvedChatId = String(result?.chat?.id ?? chatId); - if (result?.message_id) { - recordSentMessage(chatId, result.message_id); - } + recordSentMessage(chatId, mediaMessageId); recordChannelActivity({ channel: "telegram", accountId: account.accountId, @@ -708,13 +716,15 @@ export async function sendMessageTelegram( : undefined; const textRes = await sendTelegramText(followUpText, textParams); // Return the text message ID as the "main" message (it's the actual content). + const textMessageId = resolveTelegramMessageIdOrThrow(textRes, "text follow-up send"); + recordSentMessage(chatId, textMessageId); return { - messageId: String(textRes?.message_id ?? mediaMessageId), + messageId: String(textMessageId), chatId: resolvedChatId, }; } - return { messageId: mediaMessageId, chatId: resolvedChatId }; + return { messageId: String(mediaMessageId), chatId: resolvedChatId }; } if (!text || !text.trim()) { @@ -728,16 +738,14 @@ export async function sendMessageTelegram( } : undefined; const res = await sendTelegramText(text, textParams, opts.plainText); - const messageId = String(res?.message_id ?? 
"unknown"); - if (res?.message_id) { - recordSentMessage(chatId, res.message_id); - } + const messageId = resolveTelegramMessageIdOrThrow(res, "text send"); + recordSentMessage(chatId, messageId); recordChannelActivity({ channel: "telegram", accountId: account.accountId, direction: "outbound", }); - return { messageId, chatId: String(res?.chat?.id ?? chatId) }; + return { messageId: String(messageId), chatId: String(res?.chat?.id ?? chatId) }; } export async function reactMessageTelegram( @@ -1013,18 +1021,16 @@ export async function sendStickerTelegram( requestWithChatNotFound(() => api.sendSticker(chatId, fileId.trim(), effectiveParams), label), ); - const messageId = String(result?.message_id ?? "unknown"); + const messageId = resolveTelegramMessageIdOrThrow(result, "sticker send"); const resolvedChatId = String(result?.chat?.id ?? chatId); - if (result?.message_id) { - recordSentMessage(chatId, result.message_id); - } + recordSentMessage(chatId, messageId); recordChannelActivity({ channel: "telegram", accountId: account.accountId, direction: "outbound", }); - return { messageId, chatId: resolvedChatId }; + return { messageId: String(messageId), chatId: resolvedChatId }; } type TelegramPollOpts = { @@ -1121,12 +1127,10 @@ export async function sendPollTelegram( ), ); - const messageId = String(result?.message_id ?? "unknown"); + const messageId = resolveTelegramMessageIdOrThrow(result, "poll send"); const resolvedChatId = String(result?.chat?.id ?? 
chatId); const pollId = result?.poll?.id; - if (result?.message_id) { - recordSentMessage(chatId, result.message_id); - } + recordSentMessage(chatId, messageId); recordChannelActivity({ channel: "telegram", @@ -1134,7 +1138,7 @@ export async function sendPollTelegram( direction: "outbound", }); - return { messageId, chatId: resolvedChatId, pollId }; + return { messageId: String(messageId), chatId: resolvedChatId, pollId }; } // --------------------------------------------------------------------------- diff --git a/src/telegram/sendchataction-401-backoff.test.ts b/src/telegram/sendchataction-401-backoff.test.ts new file mode 100644 index 00000000000..4fbaaaaf9e5 --- /dev/null +++ b/src/telegram/sendchataction-401-backoff.test.ts @@ -0,0 +1,145 @@ +import { describe, expect, it, vi } from "vitest"; +import { createTelegramSendChatActionHandler } from "./sendchataction-401-backoff.js"; + +// Mock the backoff sleep to avoid real delays in tests +vi.mock("../infra/backoff.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + sleepWithAbort: vi.fn().mockResolvedValue(undefined), + }; +}); + +describe("createTelegramSendChatActionHandler", () => { + const make401Error = () => new Error("401 Unauthorized"); + const make500Error = () => new Error("500 Internal Server Error"); + + it("calls sendChatActionFn on success", async () => { + const fn = vi.fn().mockResolvedValue(true); + const logger = vi.fn(); + const handler = createTelegramSendChatActionHandler({ + sendChatActionFn: fn, + logger, + }); + + await handler.sendChatAction(123, "typing"); + expect(fn).toHaveBeenCalledWith(123, "typing", undefined); + expect(handler.isSuspended()).toBe(false); + }); + + it("applies exponential backoff on consecutive 401 errors", async () => { + const fn = vi.fn().mockRejectedValue(make401Error()); + const logger = vi.fn(); + const handler = createTelegramSendChatActionHandler({ + sendChatActionFn: fn, + logger, + maxConsecutive401: 5, + 
}); + + // First call fails with 401 + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("401"); + expect(handler.isSuspended()).toBe(false); + + // Second call should mention backoff in logs + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("401"); + expect(logger).toHaveBeenCalledWith(expect.stringContaining("backoff")); + }); + + it("suspends after maxConsecutive401 failures", async () => { + const fn = vi.fn().mockRejectedValue(make401Error()); + const logger = vi.fn(); + const handler = createTelegramSendChatActionHandler({ + sendChatActionFn: fn, + logger, + maxConsecutive401: 3, + }); + + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("401"); + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("401"); + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("401"); + + expect(handler.isSuspended()).toBe(true); + expect(logger).toHaveBeenCalledWith(expect.stringContaining("CRITICAL")); + + // Subsequent calls are silently skipped + await handler.sendChatAction(123, "typing"); + expect(fn).toHaveBeenCalledTimes(3); // not called again + }); + + it("resets failure counter on success", async () => { + let callCount = 0; + const fn = vi.fn().mockImplementation(() => { + callCount++; + if (callCount <= 2) { + throw make401Error(); + } + return Promise.resolve(true); + }); + const logger = vi.fn(); + const handler = createTelegramSendChatActionHandler({ + sendChatActionFn: fn, + logger, + maxConsecutive401: 5, + }); + + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("401"); + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("401"); + // Third call succeeds + await handler.sendChatAction(123, "typing"); + + expect(handler.isSuspended()).toBe(false); + expect(logger).toHaveBeenCalledWith(expect.stringContaining("recovered")); + }); + + it("does not count non-401 errors toward suspension", async () => { + const fn = 
vi.fn().mockRejectedValue(make500Error()); + const logger = vi.fn(); + const handler = createTelegramSendChatActionHandler({ + sendChatActionFn: fn, + logger, + maxConsecutive401: 2, + }); + + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("500"); + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("500"); + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("500"); + + expect(handler.isSuspended()).toBe(false); + }); + + it("reset() clears suspension", async () => { + const fn = vi.fn().mockRejectedValue(make401Error()); + const logger = vi.fn(); + const handler = createTelegramSendChatActionHandler({ + sendChatActionFn: fn, + logger, + maxConsecutive401: 1, + }); + + await expect(handler.sendChatAction(123, "typing")).rejects.toThrow("401"); + expect(handler.isSuspended()).toBe(true); + + handler.reset(); + expect(handler.isSuspended()).toBe(false); + }); + + it("is shared across multiple chatIds (global handler)", async () => { + const fn = vi.fn().mockRejectedValue(make401Error()); + const logger = vi.fn(); + const handler = createTelegramSendChatActionHandler({ + sendChatActionFn: fn, + logger, + maxConsecutive401: 3, + }); + + // Different chatIds all contribute to the same failure counter + await expect(handler.sendChatAction(111, "typing")).rejects.toThrow("401"); + await expect(handler.sendChatAction(222, "typing")).rejects.toThrow("401"); + await expect(handler.sendChatAction(333, "typing")).rejects.toThrow("401"); + + expect(handler.isSuspended()).toBe(true); + // Suspended for all chats + await handler.sendChatAction(444, "typing"); + expect(fn).toHaveBeenCalledTimes(3); + }); +}); diff --git a/src/telegram/sendchataction-401-backoff.ts b/src/telegram/sendchataction-401-backoff.ts new file mode 100644 index 00000000000..f87915961c0 --- /dev/null +++ b/src/telegram/sendchataction-401-backoff.ts @@ -0,0 +1,133 @@ +import { computeBackoff, sleepWithAbort, type BackoffPolicy } from 
"../infra/backoff.js"; + +export type TelegramSendChatActionLogger = (message: string) => void; + +type ChatAction = + | "typing" + | "upload_photo" + | "record_video" + | "upload_video" + | "record_voice" + | "upload_voice" + | "upload_document" + | "find_location" + | "record_video_note" + | "upload_video_note" + | "choose_sticker"; + +type SendChatActionFn = ( + chatId: number | string, + action: ChatAction, + threadParams?: unknown, +) => Promise; + +export type TelegramSendChatActionHandler = { + /** + * Send a chat action with automatic 401 backoff and circuit breaker. + * Safe to call from multiple concurrent message contexts. + */ + sendChatAction: ( + chatId: number | string, + action: ChatAction, + threadParams?: unknown, + ) => Promise; + isSuspended: () => boolean; + reset: () => void; +}; + +export type CreateTelegramSendChatActionHandlerParams = { + sendChatActionFn: SendChatActionFn; + logger: TelegramSendChatActionLogger; + maxConsecutive401?: number; +}; + +const BACKOFF_POLICY: BackoffPolicy = { + initialMs: 1000, + maxMs: 300_000, // 5 minutes + factor: 2, + jitter: 0.1, +}; + +function is401Error(error: unknown): boolean { + if (!error) { + return false; + } + const message = error instanceof Error ? error.message : JSON.stringify(error); + return message.includes("401") || message.toLowerCase().includes("unauthorized"); +} + +/** + * Creates a GLOBAL (per-account) handler for sendChatAction that tracks 401 errors + * across all message contexts. This prevents the infinite loop that caused Telegram + * to delete bots (issue #27092). + * + * When a 401 occurs, exponential backoff is applied (1s → 2s → 4s → ... → 5min). + * After maxConsecutive401 failures (default 10), all sendChatAction calls are + * suspended until reset() is called. 
+ */ +export function createTelegramSendChatActionHandler({ + sendChatActionFn, + logger, + maxConsecutive401 = 10, +}: CreateTelegramSendChatActionHandlerParams): TelegramSendChatActionHandler { + let consecutive401Failures = 0; + let suspended = false; + + const reset = () => { + consecutive401Failures = 0; + suspended = false; + }; + + const sendChatAction = async ( + chatId: number | string, + action: ChatAction, + threadParams?: unknown, + ): Promise => { + if (suspended) { + return; + } + + if (consecutive401Failures > 0) { + const backoffMs = computeBackoff(BACKOFF_POLICY, consecutive401Failures); + logger( + `sendChatAction backoff: waiting ${backoffMs}ms before retry ` + + `(failure ${consecutive401Failures}/${maxConsecutive401})`, + ); + await sleepWithAbort(backoffMs); + } + + try { + await sendChatActionFn(chatId, action, threadParams); + // Success: reset failure counter + if (consecutive401Failures > 0) { + logger(`sendChatAction recovered after ${consecutive401Failures} consecutive 401 failures`); + consecutive401Failures = 0; + } + } catch (error) { + if (is401Error(error)) { + consecutive401Failures++; + + if (consecutive401Failures >= maxConsecutive401) { + suspended = true; + logger( + `CRITICAL: sendChatAction suspended after ${consecutive401Failures} consecutive 401 errors. ` + + `Bot token is likely invalid. Telegram may DELETE the bot if requests continue. ` + + `Replace the token and restart: openclaw channels restart telegram`, + ); + } else { + logger( + `sendChatAction 401 error (${consecutive401Failures}/${maxConsecutive401}). 
` + + `Retrying with exponential backoff.`, + ); + } + } + throw error; + } + }; + + return { + sendChatAction, + isSuspended: () => suspended, + reset, + }; +} diff --git a/src/telegram/webhook.test.ts b/src/telegram/webhook.test.ts index 2c943a4be6f..80d25428011 100644 --- a/src/telegram/webhook.test.ts +++ b/src/telegram/webhook.test.ts @@ -1,24 +1,26 @@ +import { createHash } from "node:crypto"; +import { once } from "node:events"; +import { request } from "node:http"; +import { setTimeout as sleep } from "node:timers/promises"; import { describe, expect, it, vi } from "vitest"; import { startTelegramWebhook } from "./webhook.js"; -const handlerSpy = vi.hoisted(() => - vi.fn( - (_req: unknown, res: { writeHead: (status: number) => void; end: (body?: string) => void }) => { - res.writeHead(200); - res.end("ok"); - }, - ), -); +const handlerSpy = vi.hoisted(() => vi.fn((..._args: unknown[]): unknown => undefined)); const setWebhookSpy = vi.hoisted(() => vi.fn()); +const deleteWebhookSpy = vi.hoisted(() => vi.fn(async () => true)); +const initSpy = vi.hoisted(() => vi.fn(async () => undefined)); const stopSpy = vi.hoisted(() => vi.fn()); const webhookCallbackSpy = vi.hoisted(() => vi.fn(() => handlerSpy)); const createTelegramBotSpy = vi.hoisted(() => vi.fn(() => ({ - api: { setWebhook: setWebhookSpy }, + init: initSpy, + api: { setWebhook: setWebhookSpy, deleteWebhook: deleteWebhookSpy }, stop: stopSpy, })), ); +const WEBHOOK_POST_TIMEOUT_MS = process.platform === "win32" ? 
20_000 : 8_000; + vi.mock("grammy", async (importOriginal) => { const actual = await importOriginal(); return { @@ -31,10 +33,181 @@ vi.mock("./bot.js", () => ({ createTelegramBot: createTelegramBotSpy, })); +async function fetchWithTimeout( + input: string, + init: Omit, + timeoutMs: number, +): Promise { + const abort = new AbortController(); + const timer = setTimeout(() => { + abort.abort(); + }, timeoutMs); + try { + return await fetch(input, { ...init, signal: abort.signal }); + } finally { + clearTimeout(timer); + } +} + +async function postWebhookJson(params: { + url: string; + payload: string; + secret?: string; + timeoutMs?: number; +}): Promise { + return await fetchWithTimeout( + params.url, + { + method: "POST", + headers: { + "content-type": "application/json", + ...(params.secret ? { "x-telegram-bot-api-secret-token": params.secret } : {}), + }, + body: params.payload, + }, + params.timeoutMs ?? 5_000, + ); +} + +function createDeterministicRng(seed: number): () => number { + let state = seed >>> 0; + return () => { + state = (state * 1_664_525 + 1_013_904_223) >>> 0; + return state / 4_294_967_296; + }; +} + +async function postWebhookPayloadWithChunkPlan(params: { + port: number; + path: string; + payload: string; + secret: string; + mode: "single" | "random-chunked"; + timeoutMs?: number; +}): Promise<{ statusCode: number; body: string }> { + const payloadBuffer = Buffer.from(params.payload, "utf-8"); + return await new Promise((resolve, reject) => { + let bytesQueued = 0; + let chunksQueued = 0; + let phase: "writing" | "awaiting-response" = "writing"; + let settled = false; + const finishResolve = (value: { statusCode: number; body: string }) => { + if (settled) { + return; + } + settled = true; + clearTimeout(timeout); + resolve(value); + }; + const finishReject = (error: unknown) => { + if (settled) { + return; + } + settled = true; + clearTimeout(timeout); + reject(error); + }; + + const req = request( + { + hostname: "127.0.0.1", + port: 
params.port, + path: params.path, + method: "POST", + headers: { + "content-type": "application/json", + "content-length": String(payloadBuffer.length), + "x-telegram-bot-api-secret-token": params.secret, + }, + }, + (res) => { + const chunks: Buffer[] = []; + res.on("data", (chunk: Buffer | string) => { + chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk)); + }); + res.on("end", () => { + finishResolve({ + statusCode: res.statusCode ?? 0, + body: Buffer.concat(chunks).toString("utf-8"), + }); + }); + }, + ); + + const timeout = setTimeout(() => { + finishReject( + new Error( + `webhook post timed out after ${params.timeoutMs ?? 15_000}ms (phase=${phase}, bytesQueued=${bytesQueued}, chunksQueued=${chunksQueued}, totalBytes=${payloadBuffer.length})`, + ), + ); + req.destroy(); + }, params.timeoutMs ?? 15_000); + + req.on("error", (error) => { + finishReject(error); + }); + + const writeAll = async () => { + if (params.mode === "single") { + req.end(payloadBuffer); + return; + } + + const rng = createDeterministicRng(26156); + let offset = 0; + while (offset < payloadBuffer.length) { + const remaining = payloadBuffer.length - offset; + const nextSize = Math.max(1, Math.min(remaining, 1 + Math.floor(rng() * 8_192))); + const chunk = payloadBuffer.subarray(offset, offset + nextSize); + const canContinue = req.write(chunk); + offset += nextSize; + bytesQueued = offset; + chunksQueued += 1; + if (chunksQueued % 10 === 0) { + await sleep(1 + Math.floor(rng() * 3)); + } + if (!canContinue) { + // Windows CI occasionally stalls on waiting for drain indefinitely. + // Bound the wait, then continue queuing this small (~1MB) payload. 
+ await Promise.race([once(req, "drain"), sleep(25)]); + } + } + phase = "awaiting-response"; + req.end(); + }; + + void writeAll().catch((error) => { + finishReject(error); + }); + }); +} + +function createNearLimitTelegramPayload(): { payload: string; sizeBytes: number } { + const maxBytes = 1_024 * 1_024; + const targetBytes = maxBytes - 4_096; + const shell = { update_id: 77_777, message: { text: "" } }; + const shellSize = Buffer.byteLength(JSON.stringify(shell), "utf-8"); + const textLength = Math.max(1, targetBytes - shellSize); + const pattern = "the quick brown fox jumps over the lazy dog "; + const repeats = Math.ceil(textLength / pattern.length); + const text = pattern.repeat(repeats).slice(0, textLength); + const payload = JSON.stringify({ + update_id: 77_777, + message: { text }, + }); + return { payload, sizeBytes: Buffer.byteLength(payload, "utf-8") }; +} + +function sha256(text: string): string { + return createHash("sha256").update(text).digest("hex"); +} + describe("startTelegramWebhook", () => { it("starts server, registers webhook, and serves health", async () => { + initSpy.mockClear(); createTelegramBotSpy.mockClear(); webhookCallbackSpy.mockClear(); + const runtimeLog = vi.fn(); const abort = new AbortController(); const cfg = { bindings: [] }; const { server } = await startTelegramWebhook({ @@ -44,6 +217,7 @@ describe("startTelegramWebhook", () => { config: cfg, port: 0, // random free port abortSignal: abort.signal, + runtime: { log: runtimeLog, error: vi.fn(), exit: vi.fn() }, }); expect(createTelegramBotSpy).toHaveBeenCalledWith( expect.objectContaining({ @@ -59,6 +233,7 @@ describe("startTelegramWebhook", () => { const health = await fetch(`${url}/healthz`); expect(health.status).toBe(200); + expect(initSpy).toHaveBeenCalledTimes(1); expect(setWebhookSpy).toHaveBeenCalled(); expect(webhookCallbackSpy).toHaveBeenCalledWith( expect.objectContaining({ @@ -66,13 +241,20 @@ describe("startTelegramWebhook", () => { setWebhook: 
expect.any(Function), }), }), - "http", + "callback", { secretToken: "secret", onTimeout: "return", timeoutMilliseconds: 10_000, }, ); + expect(runtimeLog).toHaveBeenCalledWith( + expect.stringContaining("webhook local listener on http://127.0.0.1:"), + ); + expect(runtimeLog).toHaveBeenCalledWith(expect.stringContaining("/telegram-webhook")); + expect(runtimeLog).toHaveBeenCalledWith( + expect.stringContaining("webhook advertised to telegram on http://"), + ); abort.abort(); }); @@ -101,7 +283,13 @@ describe("startTelegramWebhook", () => { if (!addr || typeof addr === "string") { throw new Error("no addr"); } - await fetch(`http://127.0.0.1:${addr.port}/hook`, { method: "POST" }); + const payload = JSON.stringify({ update_id: 1, message: { text: "hello" } }); + const response = await postWebhookJson({ + url: `http://127.0.0.1:${addr.port}/hook`, + payload, + secret: "secret", + }); + expect(response.status).toBe(200); expect(handlerSpy).toHaveBeenCalled(); abort.abort(); }); @@ -113,4 +301,376 @@ describe("startTelegramWebhook", () => { }), ).rejects.toThrow(/requires a non-empty secret token/i); }); + + it("registers webhook using the bound listening port when port is 0", async () => { + setWebhookSpy.mockClear(); + const runtimeLog = vi.fn(); + const abort = new AbortController(); + const { server } = await startTelegramWebhook({ + token: "tok", + secret: "secret", + port: 0, + abortSignal: abort.signal, + path: "/hook", + runtime: { log: runtimeLog, error: vi.fn(), exit: vi.fn() }, + }); + try { + const addr = server.address(); + if (!addr || typeof addr === "string") { + throw new Error("no addr"); + } + expect(addr.port).toBeGreaterThan(0); + expect(setWebhookSpy).toHaveBeenCalledTimes(1); + expect(setWebhookSpy).toHaveBeenCalledWith( + `http://127.0.0.1:${addr.port}/hook`, + expect.objectContaining({ + secret_token: "secret", + }), + ); + expect(runtimeLog).toHaveBeenCalledWith( + `webhook local listener on http://127.0.0.1:${addr.port}/hook`, + ); + } 
finally { + abort.abort(); + } + }); + + it("keeps webhook payload readable when callback delays body read", async () => { + handlerSpy.mockImplementationOnce(async (...args: unknown[]) => { + const [update, reply] = args as [unknown, (json: string) => Promise]; + await sleep(50); + await reply(JSON.stringify(update)); + }); + + const abort = new AbortController(); + const { server } = await startTelegramWebhook({ + token: "tok", + secret: "secret", + port: 0, + abortSignal: abort.signal, + path: "/hook", + }); + try { + const addr = server.address(); + if (!addr || typeof addr === "string") { + throw new Error("no addr"); + } + + const payload = JSON.stringify({ update_id: 1, message: { text: "hello" } }); + const res = await postWebhookJson({ + url: `http://127.0.0.1:${addr.port}/hook`, + payload, + secret: "secret", + }); + expect(res.status).toBe(200); + const responseBody = await res.text(); + expect(JSON.parse(responseBody)).toEqual(JSON.parse(payload)); + } finally { + abort.abort(); + } + }); + + it("keeps webhook payload readable across multiple delayed reads", async () => { + const seenPayloads: string[] = []; + const delayedHandler = async (...args: unknown[]) => { + const [update, reply] = args as [unknown, (json: string) => Promise]; + await sleep(50); + seenPayloads.push(JSON.stringify(update)); + await reply("ok"); + }; + handlerSpy.mockImplementationOnce(delayedHandler).mockImplementationOnce(delayedHandler); + + const abort = new AbortController(); + const { server } = await startTelegramWebhook({ + token: "tok", + secret: "secret", + port: 0, + abortSignal: abort.signal, + path: "/hook", + }); + try { + const addr = server.address(); + if (!addr || typeof addr === "string") { + throw new Error("no addr"); + } + + const payloads = [ + JSON.stringify({ update_id: 1, message: { text: "first" } }), + JSON.stringify({ update_id: 2, message: { text: "second" } }), + ]; + + for (const payload of payloads) { + const res = await postWebhookJson({ + url: 
`http://127.0.0.1:${addr.port}/hook`, + payload, + secret: "secret", + }); + expect(res.status).toBe(200); + } + + expect(seenPayloads.map((x) => JSON.parse(x))).toEqual(payloads.map((x) => JSON.parse(x))); + } finally { + abort.abort(); + } + }); + + it("processes a second request after first-request delayed-init data loss", async () => { + const seenUpdates: unknown[] = []; + webhookCallbackSpy.mockImplementationOnce( + () => + vi.fn( + ( + update: unknown, + reply: (json: string) => Promise, + _secretHeader: string | undefined, + _unauthorized: () => Promise, + ) => { + seenUpdates.push(update); + void (async () => { + await sleep(50); + await reply("ok"); + })(); + }, + ) as unknown as typeof handlerSpy, + ); + + const secret = "secret"; + const abort = new AbortController(); + const { server } = await startTelegramWebhook({ + token: "tok", + secret, + port: 0, + abortSignal: abort.signal, + path: "/hook", + }); + + try { + const address = server.address(); + if (!address || typeof address === "string") { + throw new Error("no addr"); + } + + const firstPayload = JSON.stringify({ update_id: 100, message: { text: "first" } }); + const secondPayload = JSON.stringify({ update_id: 101, message: { text: "second" } }); + const firstResponse = await postWebhookPayloadWithChunkPlan({ + port: address.port, + path: "/hook", + payload: firstPayload, + secret, + mode: "single", + timeoutMs: WEBHOOK_POST_TIMEOUT_MS, + }); + const secondResponse = await postWebhookPayloadWithChunkPlan({ + port: address.port, + path: "/hook", + payload: secondPayload, + secret, + mode: "single", + timeoutMs: WEBHOOK_POST_TIMEOUT_MS, + }); + + expect(firstResponse.statusCode).toBe(200); + expect(secondResponse.statusCode).toBe(200); + expect(seenUpdates).toEqual([JSON.parse(firstPayload), JSON.parse(secondPayload)]); + } finally { + abort.abort(); + } + }); + + it("handles near-limit payload with random chunk writes and event-loop yields", async () => { + const seenUpdates: Array<{ update_id: 
number; message: { text: string } }> = []; + webhookCallbackSpy.mockImplementationOnce( + () => + vi.fn( + ( + update: unknown, + reply: (json: string) => Promise, + _secretHeader: string | undefined, + _unauthorized: () => Promise, + ) => { + seenUpdates.push(update as { update_id: number; message: { text: string } }); + void reply("ok"); + }, + ) as unknown as typeof handlerSpy, + ); + + const { payload, sizeBytes } = createNearLimitTelegramPayload(); + expect(sizeBytes).toBeLessThan(1_024 * 1_024); + expect(sizeBytes).toBeGreaterThan(256 * 1_024); + const expected = JSON.parse(payload) as { update_id: number; message: { text: string } }; + + const secret = "secret"; + const abort = new AbortController(); + const { server } = await startTelegramWebhook({ + token: "tok", + secret, + port: 0, + abortSignal: abort.signal, + path: "/hook", + }); + + try { + const address = server.address(); + if (!address || typeof address === "string") { + throw new Error("no addr"); + } + + const response = await postWebhookPayloadWithChunkPlan({ + port: address.port, + path: "/hook", + payload, + secret, + mode: "random-chunked", + timeoutMs: WEBHOOK_POST_TIMEOUT_MS, + }); + + expect(response.statusCode).toBe(200); + expect(seenUpdates).toHaveLength(1); + expect(seenUpdates[0]?.update_id).toBe(expected.update_id); + expect(seenUpdates[0]?.message.text.length).toBe(expected.message.text.length); + expect(sha256(seenUpdates[0]?.message.text ?? 
"")).toBe(sha256(expected.message.text)); + } finally { + abort.abort(); + } + }); + + it("handles near-limit payload written in a single request write", async () => { + const seenUpdates: Array<{ update_id: number; message: { text: string } }> = []; + webhookCallbackSpy.mockImplementationOnce( + () => + vi.fn( + ( + update: unknown, + reply: (json: string) => Promise, + _secretHeader: string | undefined, + _unauthorized: () => Promise, + ) => { + seenUpdates.push(update as { update_id: number; message: { text: string } }); + void reply("ok"); + }, + ) as unknown as typeof handlerSpy, + ); + + const { payload, sizeBytes } = createNearLimitTelegramPayload(); + expect(sizeBytes).toBeLessThan(1_024 * 1_024); + expect(sizeBytes).toBeGreaterThan(256 * 1_024); + const expected = JSON.parse(payload) as { update_id: number; message: { text: string } }; + + const secret = "secret"; + const abort = new AbortController(); + const { server } = await startTelegramWebhook({ + token: "tok", + secret, + port: 0, + abortSignal: abort.signal, + path: "/hook", + }); + + try { + const address = server.address(); + if (!address || typeof address === "string") { + throw new Error("no addr"); + } + + const response = await postWebhookPayloadWithChunkPlan({ + port: address.port, + path: "/hook", + payload, + secret, + mode: "single", + timeoutMs: WEBHOOK_POST_TIMEOUT_MS, + }); + + expect(response.statusCode).toBe(200); + expect(seenUpdates).toHaveLength(1); + expect(seenUpdates[0]?.update_id).toBe(expected.update_id); + expect(seenUpdates[0]?.message.text.length).toBe(expected.message.text.length); + expect(sha256(seenUpdates[0]?.message.text ?? 
"")).toBe(sha256(expected.message.text)); + } finally { + abort.abort(); + } + }); + + it("rejects payloads larger than 1MB before invoking webhook handler", async () => { + handlerSpy.mockClear(); + const abort = new AbortController(); + const { server } = await startTelegramWebhook({ + token: "tok", + secret: "secret", + port: 0, + abortSignal: abort.signal, + path: "/hook", + }); + + try { + const address = server.address(); + if (!address || typeof address === "string") { + throw new Error("no addr"); + } + + const responseOrError = await new Promise< + | { kind: "response"; statusCode: number; body: string } + | { kind: "error"; code: string | undefined } + >((resolve) => { + const req = request( + { + hostname: "127.0.0.1", + port: address.port, + path: "/hook", + method: "POST", + headers: { + "content-type": "application/json", + "content-length": String(1_024 * 1_024 + 2_048), + "x-telegram-bot-api-secret-token": "secret", + }, + }, + (res) => { + const chunks: Buffer[] = []; + res.on("data", (chunk: Buffer | string) => { + chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk)); + }); + res.on("end", () => { + resolve({ + kind: "response", + statusCode: res.statusCode ?? 
0, + body: Buffer.concat(chunks).toString("utf-8"), + }); + }); + }, + ); + req.on("error", (error: NodeJS.ErrnoException) => { + resolve({ kind: "error", code: error.code }); + }); + req.end("{}"); + }); + + if (responseOrError.kind === "response") { + expect(responseOrError.statusCode).toBe(413); + expect(responseOrError.body).toBe("Payload too large"); + } else { + expect(responseOrError.code).toBeOneOf(["ECONNRESET", "EPIPE"]); + } + expect(handlerSpy).not.toHaveBeenCalled(); + } finally { + abort.abort(); + } + }); + + it("de-registers webhook when shutting down", async () => { + deleteWebhookSpy.mockClear(); + const abort = new AbortController(); + await startTelegramWebhook({ + token: "tok", + secret: "secret", + port: 0, + abortSignal: abort.signal, + path: "/hook", + }); + + abort.abort(); + await sleep(25); + + expect(deleteWebhookSpy).toHaveBeenCalledTimes(1); + expect(deleteWebhookSpy).toHaveBeenCalledWith({ drop_pending_updates: false }); + }); }); diff --git a/src/telegram/webhook.ts b/src/telegram/webhook.ts index 9eb3c73d7f4..a55720102dd 100644 --- a/src/telegram/webhook.ts +++ b/src/telegram/webhook.ts @@ -3,7 +3,7 @@ import { webhookCallback } from "grammy"; import type { OpenClawConfig } from "../config/config.js"; import { isDiagnosticsEnabled } from "../infra/diagnostic-events.js"; import { formatErrorMessage } from "../infra/errors.js"; -import { installRequestBodyLimitGuard } from "../infra/http-body.js"; +import { readJsonBodyWithLimit } from "../infra/http-body.js"; import { logWebhookError, logWebhookProcessed, @@ -21,6 +21,59 @@ const TELEGRAM_WEBHOOK_MAX_BODY_BYTES = 1024 * 1024; const TELEGRAM_WEBHOOK_BODY_TIMEOUT_MS = 30_000; const TELEGRAM_WEBHOOK_CALLBACK_TIMEOUT_MS = 10_000; +async function listenHttpServer(params: { + server: ReturnType; + port: number; + host: string; +}) { + await new Promise((resolve, reject) => { + const onError = (err: Error) => { + params.server.off("error", onError); + reject(err); + }; + 
params.server.once("error", onError); + params.server.listen(params.port, params.host, () => { + params.server.off("error", onError); + resolve(); + }); + }); +} + +function resolveWebhookPublicUrl(params: { + configuredPublicUrl?: string; + server: ReturnType; + path: string; + host: string; + port: number; +}) { + if (params.configuredPublicUrl) { + return params.configuredPublicUrl; + } + const address = params.server.address(); + if (address && typeof address !== "string") { + const resolvedHost = + params.host === "0.0.0.0" || address.address === "0.0.0.0" || address.address === "::" + ? "localhost" + : address.address; + return `http://${resolvedHost}:${address.port}${params.path}`; + } + const fallbackHost = params.host === "0.0.0.0" ? "localhost" : params.host; + return `http://${fallbackHost}:${params.port}${params.path}`; +} + +async function initializeTelegramWebhookBot(params: { + bot: ReturnType; + runtime: RuntimeEnv; + abortSignal?: AbortSignal; +}) { + const initSignal = params.abortSignal as Parameters<(typeof params.bot)["init"]>[0]; + await withTelegramApiErrorLogging({ + operation: "getMe", + runtime: params.runtime, + fn: () => params.bot.init(initSignal), + }); +} + export async function startTelegramWebhook(opts: { token: string; accountId?: string; @@ -55,7 +108,12 @@ export async function startTelegramWebhook(opts: { config: opts.config, accountId: opts.accountId, }); - const handler = webhookCallback(bot, "http", { + await initializeTelegramWebhookBot({ + bot, + runtime, + abortSignal: opts.abortSignal, + }); + const handler = webhookCallback(bot, "callback", { secretToken: secret, onTimeout: "return", timeoutMilliseconds: TELEGRAM_WEBHOOK_CALLBACK_TIMEOUT_MS, @@ -66,6 +124,14 @@ export async function startTelegramWebhook(opts: { } const server = createServer((req, res) => { + const respondText = (statusCode: number, text = "") => { + if (res.headersSent || res.writableEnded) { + return; + } + res.writeHead(statusCode, { "Content-Type": 
"text/plain; charset=utf-8" }); + res.end(text); + }; + if (req.url === healthPath) { res.writeHead(200); res.end("ok"); @@ -80,69 +146,128 @@ export async function startTelegramWebhook(opts: { if (diagnosticsEnabled) { logWebhookReceived({ channel: "telegram", updateType: "telegram-post" }); } - const guard = installRequestBodyLimitGuard(req, res, { - maxBytes: TELEGRAM_WEBHOOK_MAX_BODY_BYTES, - timeoutMs: TELEGRAM_WEBHOOK_BODY_TIMEOUT_MS, - responseFormat: "text", - }); - if (guard.isTripped()) { - return; - } - const handled = handler(req, res); - if (handled && typeof handled.catch === "function") { - void handled - .then(() => { - if (diagnosticsEnabled) { - logWebhookProcessed({ - channel: "telegram", - updateType: "telegram-post", - durationMs: Date.now() - startTime, - }); - } - }) - .catch((err) => { - if (guard.isTripped()) { - return; - } - const errMsg = formatErrorMessage(err); - if (diagnosticsEnabled) { - logWebhookError({ - channel: "telegram", - updateType: "telegram-post", - error: errMsg, - }); - } - runtime.log?.(`webhook handler failed: ${errMsg}`); - if (!res.headersSent) { - res.writeHead(500); - } - res.end(); - }) - .finally(() => { - guard.dispose(); + void (async () => { + const body = await readJsonBodyWithLimit(req, { + maxBytes: TELEGRAM_WEBHOOK_MAX_BODY_BYTES, + timeoutMs: TELEGRAM_WEBHOOK_BODY_TIMEOUT_MS, + emptyObjectOnEmpty: false, + }); + if (!body.ok) { + if (body.code === "PAYLOAD_TOO_LARGE") { + respondText(413, body.error); + return; + } + if (body.code === "REQUEST_BODY_TIMEOUT") { + respondText(408, body.error); + return; + } + if (body.code === "CONNECTION_CLOSED") { + respondText(400, body.error); + return; + } + respondText(400, body.error); + return; + } + + let replied = false; + const reply = async (json: string) => { + if (replied) { + return; + } + replied = true; + if (res.headersSent || res.writableEnded) { + return; + } + res.writeHead(200, { "Content-Type": "application/json; charset=utf-8" }); + res.end(json); + 
}; + const unauthorized = async () => { + if (replied) { + return; + } + replied = true; + respondText(401, "unauthorized"); + }; + const secretHeaderRaw = req.headers["x-telegram-bot-api-secret-token"]; + const secretHeader = Array.isArray(secretHeaderRaw) ? secretHeaderRaw[0] : secretHeaderRaw; + + await handler(body.value, reply, secretHeader, unauthorized); + if (!replied) { + respondText(200); + } + + if (diagnosticsEnabled) { + logWebhookProcessed({ + channel: "telegram", + updateType: "telegram-post", + durationMs: Date.now() - startTime, }); + } + })().catch((err) => { + const errMsg = formatErrorMessage(err); + if (diagnosticsEnabled) { + logWebhookError({ + channel: "telegram", + updateType: "telegram-post", + error: errMsg, + }); + } + runtime.log?.(`webhook handler failed: ${errMsg}`); + respondText(500); + }); + }); + + await listenHttpServer({ + server, + port, + host, + }); + const boundAddress = server.address(); + const boundPort = boundAddress && typeof boundAddress !== "string" ? boundAddress.port : port; + + const publicUrl = resolveWebhookPublicUrl({ + configuredPublicUrl: opts.publicUrl, + server, + path, + host, + port, + }); + + try { + await withTelegramApiErrorLogging({ + operation: "setWebhook", + runtime, + fn: () => + bot.api.setWebhook(publicUrl, { + secret_token: secret, + allowed_updates: resolveTelegramAllowedUpdates(), + }), + }); + } catch (err) { + server.close(); + void bot.stop(); + if (diagnosticsEnabled) { + stopDiagnosticHeartbeat(); + } + throw err; + } + + runtime.log?.(`webhook local listener on http://${host}:${boundPort}${path}`); + runtime.log?.(`webhook advertised to telegram on ${publicUrl}`); + + let shutDown = false; + const shutdown = () => { + if (shutDown) { return; } - guard.dispose(); - }); - - const publicUrl = - opts.publicUrl ?? `http://${host === "0.0.0.0" ? 
"localhost" : host}:${port}${path}`; - - await withTelegramApiErrorLogging({ - operation: "setWebhook", - runtime, - fn: () => - bot.api.setWebhook(publicUrl, { - secret_token: secret, - allowed_updates: resolveTelegramAllowedUpdates(), - }), - }); - - await new Promise((resolve) => server.listen(port, host, resolve)); - runtime.log?.(`webhook listening on ${publicUrl}`); - - const shutdown = () => { + shutDown = true; + void withTelegramApiErrorLogging({ + operation: "deleteWebhook", + runtime, + fn: () => bot.api.deleteWebhook({ drop_pending_updates: false }), + }).catch(() => { + // withTelegramApiErrorLogging has already emitted the failure. + }); server.close(); void bot.stop(); if (diagnosticsEnabled) { diff --git a/src/test-utils/npm-spec-install-test-helpers.ts b/src/test-utils/npm-spec-install-test-helpers.ts index 23c06afe44b..9ef8e29404e 100644 --- a/src/test-utils/npm-spec-install-test-helpers.ts +++ b/src/test-utils/npm-spec-install-test-helpers.ts @@ -1,5 +1,7 @@ +import fs from "node:fs"; +import path from "node:path"; import { expect } from "vitest"; -import type { SpawnResult } from "../process/exec.js"; +import type { CommandOptions, SpawnResult } from "../process/exec.js"; import { expectSingleNpmInstallIgnoreScriptsCall } from "./exec-assertions.js"; export type InstallResultLike = { @@ -40,10 +42,31 @@ export async function expectUnsupportedNpmSpec( } export function mockNpmPackMetadataResult( - run: { mockResolvedValue: (value: SpawnResult) => unknown }, + run: { + mockImplementation: ( + implementation: ( + argv: string[], + optionsOrTimeout: number | CommandOptions, + ) => Promise, + ) => unknown; + }, metadata: NpmPackMetadata, ) { - run.mockResolvedValue(createSuccessfulSpawnResult(JSON.stringify([metadata]))); + run.mockImplementation(async (argv, optionsOrTimeout) => { + if (argv[0] !== "npm" || argv[1] !== "pack") { + throw new Error(`unexpected command: ${argv.join(" ")}`); + } + + const cwd = + typeof optionsOrTimeout === "object" && 
optionsOrTimeout !== null + ? optionsOrTimeout.cwd + : undefined; + if (cwd) { + fs.writeFileSync(path.join(cwd, metadata.filename), ""); + } + + return createSuccessfulSpawnResult(JSON.stringify([metadata])); + }); } export function expectIntegrityDriftRejected(params: { diff --git a/src/tui/tui-stream-assembler.test.ts b/src/tui/tui-stream-assembler.test.ts index c7dc3d8fa08..fc1cb119ce8 100644 --- a/src/tui/tui-stream-assembler.test.ts +++ b/src/tui/tui-stream-assembler.test.ts @@ -1,203 +1,122 @@ import { describe, expect, it } from "vitest"; import { TuiStreamAssembler } from "./tui-stream-assembler.js"; -const STREAM_WITH_TOOL_BLOCKS = { - role: "assistant", - content: [ - { type: "text", text: "Before tool call" }, - { type: "tool_use", name: "search" }, - { type: "text", text: "After tool call" }, - ], -} as const; +const text = (value: string) => ({ type: "text", text: value }) as const; +const thinking = (value: string) => ({ type: "thinking", thinking: value }) as const; +const toolUse = () => ({ type: "tool_use", name: "search" }) as const; -const STREAM_AFTER_TOOL_BLOCKS = { - role: "assistant", - content: [ - { type: "tool_use", name: "search" }, - { type: "text", text: "After tool call" }, - ], -} as const; +const messageWithContent = (content: readonly Record[]) => + ({ + role: "assistant", + content, + }) as const; + +const TEXT_ONLY_TWO_BLOCKS = messageWithContent([text("Draft line 1"), text("Draft line 2")]); + +type FinalizeBoundaryCase = { + name: string; + streamedContent: readonly Record[]; + finalContent: readonly Record[]; + expected: string; +}; + +const FINALIZE_BOUNDARY_CASES: FinalizeBoundaryCase[] = [ + { + name: "preserves streamed text when tool-boundary final payload drops prefix blocks", + streamedContent: [text("Before tool call"), toolUse(), text("After tool call")], + finalContent: [toolUse(), text("After tool call")], + expected: "Before tool call\nAfter tool call", + }, + { + name: "preserves streamed text when streamed run 
had non-text and final drops suffix blocks", + streamedContent: [text("Before tool call"), toolUse(), text("After tool call")], + finalContent: [text("Before tool call")], + expected: "Before tool call\nAfter tool call", + }, + { + name: "prefers final text when non-text appears only in final payload", + streamedContent: [text("Draft line 1"), text("Draft line 2")], + finalContent: [toolUse(), text("Draft line 2")], + expected: "Draft line 2", + }, + { + name: "keeps non-empty final text for plain text boundary drops", + streamedContent: [text("Draft line 1"), text("Draft line 2")], + finalContent: [text("Draft line 1")], + expected: "Draft line 1", + }, + { + name: "prefers final replacement text when payload is not a boundary subset", + streamedContent: [text("Before tool call"), toolUse(), text("After tool call")], + finalContent: [toolUse(), text("Replacement")], + expected: "Replacement", + }, + { + name: "accepts richer final payload when it extends streamed text", + streamedContent: [text("Before tool call")], + finalContent: [text("Before tool call"), text("After tool call")], + expected: "Before tool call\nAfter tool call", + }, +]; describe("TuiStreamAssembler", () => { it("keeps thinking before content even when thinking arrives later", () => { const assembler = new TuiStreamAssembler(); - const first = assembler.ingestDelta( - "run-1", - { - role: "assistant", - content: [{ type: "text", text: "Hello" }], - }, - true, - ); + const first = assembler.ingestDelta("run-1", messageWithContent([text("Hello")]), true); expect(first).toBe("Hello"); - const second = assembler.ingestDelta( - "run-1", - { - role: "assistant", - content: [{ type: "thinking", thinking: "Brain" }], - }, - true, - ); + const second = assembler.ingestDelta("run-1", messageWithContent([thinking("Brain")]), true); expect(second).toBe("[thinking]\nBrain\n\nHello"); }); it("omits thinking when showThinking is false", () => { const assembler = new TuiStreamAssembler(); - const text = 
assembler.ingestDelta( + const output = assembler.ingestDelta( "run-2", - { - role: "assistant", - content: [ - { type: "thinking", thinking: "Hidden" }, - { type: "text", text: "Visible" }, - ], - }, + messageWithContent([thinking("Hidden"), text("Visible")]), false, ); - - expect(text).toBe("Visible"); + expect(output).toBe("Visible"); }); it("falls back to streamed text on empty final payload", () => { const assembler = new TuiStreamAssembler(); - assembler.ingestDelta( - "run-3", - { - role: "assistant", - content: [{ type: "text", text: "Streamed" }], - }, - false, - ); - - const finalText = assembler.finalize( - "run-3", - { - role: "assistant", - content: [], - }, - false, - ); - + assembler.ingestDelta("run-3", messageWithContent([text("Streamed")]), false); + const finalText = assembler.finalize("run-3", { role: "assistant", content: [] }, false); expect(finalText).toBe("Streamed"); }); it("returns null when delta text is unchanged", () => { const assembler = new TuiStreamAssembler(); - const first = assembler.ingestDelta( - "run-4", - { - role: "assistant", - content: [{ type: "text", text: "Repeat" }], - }, - false, - ); - + const first = assembler.ingestDelta("run-4", messageWithContent([text("Repeat")]), false); expect(first).toBe("Repeat"); + const second = assembler.ingestDelta("run-4", messageWithContent([text("Repeat")]), false); + expect(second).toBeNull(); + }); + + it("keeps streamed delta text when incoming tool boundary drops a block", () => { + const assembler = new TuiStreamAssembler(); + const first = assembler.ingestDelta("run-delta-boundary", TEXT_ONLY_TWO_BLOCKS, false); + expect(first).toBe("Draft line 1\nDraft line 2"); const second = assembler.ingestDelta( - "run-4", - { - role: "assistant", - content: [{ type: "text", text: "Repeat" }], - }, + "run-delta-boundary", + messageWithContent([toolUse(), text("Draft line 2")]), false, ); - expect(second).toBeNull(); }); - it("keeps richer streamed text when final payload drops earlier 
blocks", () => { - const assembler = new TuiStreamAssembler(); - assembler.ingestDelta("run-5", STREAM_WITH_TOOL_BLOCKS, false); - - const finalText = assembler.finalize("run-5", STREAM_AFTER_TOOL_BLOCKS, false); - - expect(finalText).toBe("Before tool call\nAfter tool call"); - }); - - it("does not regress streamed text when a delta drops boundary blocks after tool calls", () => { - const assembler = new TuiStreamAssembler(); - const first = assembler.ingestDelta("run-5-stream", STREAM_WITH_TOOL_BLOCKS, false); - expect(first).toBe("Before tool call\nAfter tool call"); - - const second = assembler.ingestDelta("run-5-stream", STREAM_AFTER_TOOL_BLOCKS, false); - - expect(second).toBeNull(); - }); - - it("keeps non-empty final text for plain text prefix/suffix updates", () => { - const assembler = new TuiStreamAssembler(); - assembler.ingestDelta( - "run-5b", - { - role: "assistant", - content: [ - { type: "text", text: "Draft line 1" }, - { type: "text", text: "Draft line 2" }, - ], - }, - false, - ); - - const finalText = assembler.finalize( - "run-5b", - { - role: "assistant", - content: [{ type: "text", text: "Draft line 1" }], - }, - false, - ); - - expect(finalText).toBe("Draft line 1"); - }); - - it("accepts richer final payload when it extends streamed text", () => { - const assembler = new TuiStreamAssembler(); - assembler.ingestDelta( - "run-6", - { - role: "assistant", - content: [{ type: "text", text: "Before tool call" }], - }, - false, - ); - - const finalText = assembler.finalize( - "run-6", - { - role: "assistant", - content: [ - { type: "text", text: "Before tool call" }, - { type: "text", text: "After tool call" }, - ], - }, - false, - ); - - expect(finalText).toBe("Before tool call\nAfter tool call"); - }); - - it("prefers non-empty final payload when it is not a dropped block regression", () => { - const assembler = new TuiStreamAssembler(); - assembler.ingestDelta( - "run-7", - { - role: "assistant", - content: [{ type: "text", text: "NOT OK" }], 
- }, - false, - ); - - const finalText = assembler.finalize( - "run-7", - { - role: "assistant", - content: [{ type: "text", text: "OK" }], - }, - false, - ); - - expect(finalText).toBe("OK"); - }); + for (const testCase of FINALIZE_BOUNDARY_CASES) { + it(testCase.name, () => { + const assembler = new TuiStreamAssembler(); + assembler.ingestDelta("run-boundary", messageWithContent(testCase.streamedContent), false); + const finalText = assembler.finalize( + "run-boundary", + messageWithContent(testCase.finalContent), + false, + ); + expect(finalText).toBe(testCase.expected); + }); + } }); diff --git a/src/tui/tui-stream-assembler.ts b/src/tui/tui-stream-assembler.ts index 302cc7acc1c..9a5187eff4b 100644 --- a/src/tui/tui-stream-assembler.ts +++ b/src/tui/tui-stream-assembler.ts @@ -13,6 +13,8 @@ type RunStreamState = { displayText: string; }; +type BoundaryDropMode = "off" | "streamed-only" | "streamed-or-incoming"; + function extractTextBlocksAndSignals(message: unknown): { textBlocks: string[]; sawNonTextContentBlocks: boolean; @@ -75,6 +77,29 @@ function isDroppedBoundaryTextBlockSubset(params: { return finalTextBlocks.every((block, index) => streamedTextBlocks[suffixStart + index] === block); } +function shouldPreserveBoundaryDroppedText(params: { + boundaryDropMode: BoundaryDropMode; + streamedSawNonTextContentBlocks: boolean; + incomingSawNonTextContentBlocks: boolean; + streamedTextBlocks: string[]; + nextContentBlocks: string[]; +}) { + if (params.boundaryDropMode === "off") { + return false; + } + const sawEligibleNonTextContent = + params.boundaryDropMode === "streamed-or-incoming" + ? 
params.streamedSawNonTextContentBlocks || params.incomingSawNonTextContentBlocks + : params.streamedSawNonTextContentBlocks; + if (!sawEligibleNonTextContent) { + return false; + } + return isDroppedBoundaryTextBlockSubset({ + streamedTextBlocks: params.streamedTextBlocks, + finalTextBlocks: params.nextContentBlocks, + }); +} + export class TuiStreamAssembler { private runs = new Map(); @@ -97,7 +122,7 @@ export class TuiStreamAssembler { state: RunStreamState, message: unknown, showThinking: boolean, - opts?: { protectBoundaryDrops?: boolean }, + opts?: { boundaryDropMode?: BoundaryDropMode }, ) { const thinkingText = extractThinkingFromMessage(message); const contentText = extractContentFromMessage(message); @@ -108,15 +133,16 @@ export class TuiStreamAssembler { } if (contentText) { const nextContentBlocks = textBlocks.length > 0 ? textBlocks : [contentText]; - const shouldPreserveBoundaryDroppedText = - opts?.protectBoundaryDrops === true && - (state.sawNonTextContentBlocks || sawNonTextContentBlocks) && - isDroppedBoundaryTextBlockSubset({ - streamedTextBlocks: state.contentBlocks, - finalTextBlocks: nextContentBlocks, - }); + const boundaryDropMode = opts?.boundaryDropMode ?? 
"off"; + const shouldKeepStreamedBoundaryText = shouldPreserveBoundaryDroppedText({ + boundaryDropMode, + streamedSawNonTextContentBlocks: state.sawNonTextContentBlocks, + incomingSawNonTextContentBlocks: sawNonTextContentBlocks, + streamedTextBlocks: state.contentBlocks, + nextContentBlocks, + }); - if (!shouldPreserveBoundaryDroppedText) { + if (!shouldKeepStreamedBoundaryText) { state.contentText = contentText; state.contentBlocks = nextContentBlocks; } @@ -137,7 +163,9 @@ export class TuiStreamAssembler { ingestDelta(runId: string, message: unknown, showThinking: boolean): string | null { const state = this.getOrCreateRun(runId); const previousDisplayText = state.displayText; - this.updateRunState(state, message, showThinking, { protectBoundaryDrops: true }); + this.updateRunState(state, message, showThinking, { + boundaryDropMode: "streamed-or-incoming", + }); if (!state.displayText || state.displayText === previousDisplayText) { return null; @@ -151,7 +179,9 @@ export class TuiStreamAssembler { const streamedDisplayText = state.displayText; const streamedTextBlocks = [...state.contentBlocks]; const streamedSawNonTextContentBlocks = state.sawNonTextContentBlocks; - this.updateRunState(state, message, showThinking); + this.updateRunState(state, message, showThinking, { + boundaryDropMode: "streamed-only", + }); const finalComposed = state.displayText; const shouldKeepStreamedText = streamedSawNonTextContentBlocks && diff --git a/src/utils/mask-api-key.test.ts b/src/utils/mask-api-key.test.ts index f6981c9e10c..3620dc01b34 100644 --- a/src/utils/mask-api-key.test.ts +++ b/src/utils/mask-api-key.test.ts @@ -7,9 +7,11 @@ describe("maskApiKey", () => { expect(maskApiKey(" ")).toBe("missing"); }); - it("returns trimmed value when length is 16 chars or less", () => { - expect(maskApiKey(" abcdefghijklmnop ")).toBe("abcdefghijklmnop"); - expect(maskApiKey(" short ")).toBe("short"); + it("masks short and medium values without returning raw secrets", () => { + 
expect(maskApiKey(" abcdefghijklmnop ")).toBe("ab...op"); + expect(maskApiKey(" short ")).toBe("s...t"); + expect(maskApiKey(" a ")).toBe("a...a"); + expect(maskApiKey(" ab ")).toBe("a...b"); }); it("masks long values with first and last 8 chars", () => { diff --git a/src/utils/mask-api-key.ts b/src/utils/mask-api-key.ts index f719ad53c23..4b0a1511d42 100644 --- a/src/utils/mask-api-key.ts +++ b/src/utils/mask-api-key.ts @@ -3,8 +3,11 @@ export const maskApiKey = (value: string): string => { if (!trimmed) { return "missing"; } + if (trimmed.length <= 6) { + return `${trimmed.slice(0, 1)}...${trimmed.slice(-1)}`; + } if (trimmed.length <= 16) { - return trimmed; + return `${trimmed.slice(0, 2)}...${trimmed.slice(-2)}`; } return `${trimmed.slice(0, 8)}...${trimmed.slice(-8)}`; }; diff --git a/src/utils/provider-utils.ts b/src/utils/provider-utils.ts index 211c515dc16..c9d7800c292 100644 --- a/src/utils/provider-utils.ts +++ b/src/utils/provider-utils.ts @@ -18,7 +18,11 @@ export function isReasoningTagProvider(provider: string | undefined | null): boo // handles reasoning natively via the `reasoning` field in streaming chunks, // so tag-based enforcement is unnecessary and causes all output to be // discarded as "(no output)" (#2279). 
- if (normalized === "google-gemini-cli" || normalized === "google-generative-ai") { + if ( + normalized === "google" || + normalized === "google-gemini-cli" || + normalized === "google-generative-ai" + ) { return true; } diff --git a/src/utils/utils-misc.test.ts b/src/utils/utils-misc.test.ts index b7128ad2141..88f0c311ae2 100644 --- a/src/utils/utils-misc.test.ts +++ b/src/utils/utils-misc.test.ts @@ -58,6 +58,16 @@ describe("isReasoningTagProvider", () => { value: "Ollama", expected: false, }, + { + name: "returns true for google (gemini-api-key auth provider)", + value: "google", + expected: true, + }, + { + name: "returns true for Google (case-insensitive)", + value: "Google", + expected: true, + }, { name: "returns true for google-gemini-cli", value: "google-gemini-cli", expected: true }, { name: "returns true for google-generative-ai", diff --git a/src/web/auto-reply/monitor/process-message.ts b/src/web/auto-reply/monitor/process-message.ts index 3ef85b6eb2d..2e49e9c7989 100644 --- a/src/web/auto-reply/monitor/process-message.ts +++ b/src/web/auto-reply/monitor/process-message.ts @@ -25,8 +25,11 @@ import { import { logVerbose, shouldLogVerbose } from "../../../globals.js"; import type { getChildLogger } from "../../../logging.js"; import { getAgentScopedMediaLocalRoots } from "../../../media/local-roots.js"; -import { readChannelAllowFromStore } from "../../../pairing/pairing-store.js"; import type { resolveAgentRoute } from "../../../routing/resolve-route.js"; +import { + readStoreAllowFromForDmPolicy, + resolveDmGroupAccessWithCommandGate, +} from "../../../security/dm-policy-shared.js"; import { jidToE164, normalizeE164 } from "../../../utils.js"; import { resolveWhatsAppAccount } from "../../accounts.js"; import { newConnectionId } from "../../reconnect.js"; @@ -48,15 +51,6 @@ export type GroupHistoryEntry = { senderJid?: string; }; -function normalizeAllowFromE164(values: Array | undefined): string[] { - const list = Array.isArray(values) ? 
values : []; - return list - .map((entry) => String(entry).trim()) - .filter((entry) => entry && entry !== "*") - .map((entry) => normalizeE164(entry)) - .filter((entry): entry is string => Boolean(entry)); -} - async function resolveWhatsAppCommandAuthorized(params: { cfg: ReturnType; msg: WebInboundMsg; @@ -76,39 +70,47 @@ async function resolveWhatsAppCommandAuthorized(params: { const account = resolveWhatsAppAccount({ cfg: params.cfg, accountId: params.msg.accountId }); const dmPolicy = account.dmPolicy ?? "pairing"; + const groupPolicy = account.groupPolicy ?? "allowlist"; const configuredAllowFrom = account.allowFrom ?? []; const configuredGroupAllowFrom = account.groupAllowFrom ?? (configuredAllowFrom.length > 0 ? configuredAllowFrom : undefined); - if (isGroup) { - if (!configuredGroupAllowFrom || configuredGroupAllowFrom.length === 0) { - return false; - } - if (configuredGroupAllowFrom.some((v) => String(v).trim() === "*")) { - return true; - } - return normalizeAllowFromE164(configuredGroupAllowFrom).includes(senderE164); - } - - const storeAllowFrom = - dmPolicy === "allowlist" - ? [] - : await readChannelAllowFromStore("whatsapp", process.env, params.msg.accountId).catch( - () => [], - ); - const combinedAllowFrom = Array.from( - new Set([...(configuredAllowFrom ?? []), ...storeAllowFrom]), - ); - const allowFrom = - combinedAllowFrom.length > 0 - ? combinedAllowFrom + const storeAllowFrom = isGroup + ? [] + : await readStoreAllowFromForDmPolicy({ + provider: "whatsapp", + accountId: params.msg.accountId, + dmPolicy, + }); + const dmAllowFrom = + configuredAllowFrom.length > 0 + ? configuredAllowFrom : params.msg.selfE164 ? 
[params.msg.selfE164] : []; - if (allowFrom.some((v) => String(v).trim() === "*")) { - return true; - } - return normalizeAllowFromE164(allowFrom).includes(senderE164); + const access = resolveDmGroupAccessWithCommandGate({ + isGroup, + dmPolicy, + groupPolicy, + allowFrom: dmAllowFrom, + groupAllowFrom: configuredGroupAllowFrom, + storeAllowFrom, + isSenderAllowed: (allowEntries) => { + if (allowEntries.includes("*")) { + return true; + } + const normalizedEntries = allowEntries + .map((entry) => normalizeE164(String(entry))) + .filter((entry): entry is string => Boolean(entry)); + return normalizedEntries.includes(senderE164); + }, + command: { + useAccessGroups, + allowTextCommands: true, + hasControlCommand: true, + }, + }); + return access.commandAuthorized; } export async function processMessage(params: { diff --git a/src/web/inbound/access-control.test.ts b/src/web/inbound/access-control.test.ts index 796488900f8..2d3e26650c7 100644 --- a/src/web/inbound/access-control.test.ts +++ b/src/web/inbound/access-control.test.ts @@ -130,4 +130,31 @@ describe("WhatsApp dmPolicy precedence", () => { expectSilentlyBlocked(result); expect(readAllowFromStoreMock).not.toHaveBeenCalled(); }); + + it("always allows same-phone DMs even when allowFrom is restrictive", async () => { + setAccessControlTestConfig({ + channels: { + whatsapp: { + dmPolicy: "pairing", + allowFrom: ["+15550001111"], + }, + }, + }); + + const result = await checkInboundAccessControl({ + accountId: "default", + from: "+15550009999", + selfE164: "+15550009999", + senderE164: "+15550009999", + group: false, + pushName: "Owner", + isFromMe: false, + sock: { sendMessage: sendMessageMock }, + remoteJid: "15550009999@s.whatsapp.net", + }); + + expect(result.allowed).toBe(true); + expect(upsertPairingRequestMock).not.toHaveBeenCalled(); + expect(sendMessageMock).not.toHaveBeenCalled(); + }); }); diff --git a/src/web/inbound/access-control.ts b/src/web/inbound/access-control.ts index 2e759507cb9..2363434f34c 
100644 --- a/src/web/inbound/access-control.ts +++ b/src/web/inbound/access-control.ts @@ -6,10 +6,11 @@ import { } from "../../config/runtime-group-policy.js"; import { logVerbose } from "../../globals.js"; import { buildPairingReply } from "../../pairing/pairing-messages.js"; +import { upsertChannelPairingRequest } from "../../pairing/pairing-store.js"; import { - readChannelAllowFromStore, - upsertChannelPairingRequest, -} from "../../pairing/pairing-store.js"; + readStoreAllowFromForDmPolicy, + resolveDmGroupAccessWithLists, +} from "../../security/dm-policy-shared.js"; import { isSelfChatMode, normalizeE164 } from "../../utils.js"; import { resolveWhatsAppAccount } from "../accounts.js"; @@ -59,21 +60,18 @@ export async function checkInboundAccessControl(params: { accountId: params.accountId, }); const dmPolicy = account.dmPolicy ?? "pairing"; - const configuredAllowFrom = account.allowFrom; - const storeAllowFrom = - dmPolicy === "allowlist" - ? [] - : await readChannelAllowFromStore("whatsapp", process.env, account.accountId).catch(() => []); + const configuredAllowFrom = account.allowFrom ?? []; + const storeAllowFrom = await readStoreAllowFromForDmPolicy({ + provider: "whatsapp", + accountId: account.accountId, + dmPolicy, + }); // Without user config, default to self-only DM access so the owner can talk to themselves. - const combinedAllowFrom = Array.from( - new Set([...(configuredAllowFrom ?? []), ...storeAllowFrom]), - ); const defaultAllowFrom = - combinedAllowFrom.length === 0 && params.selfE164 ? [params.selfE164] : undefined; - const allowFrom = combinedAllowFrom.length > 0 ? combinedAllowFrom : defaultAllowFrom; + configuredAllowFrom.length === 0 && params.selfE164 ? [params.selfE164] : []; + const dmAllowFrom = configuredAllowFrom.length > 0 ? configuredAllowFrom : defaultAllowFrom; const groupAllowFrom = - account.groupAllowFrom ?? - (configuredAllowFrom && configuredAllowFrom.length > 0 ? 
configuredAllowFrom : undefined); + account.groupAllowFrom ?? (configuredAllowFrom.length > 0 ? configuredAllowFrom : undefined); const isSamePhone = params.from === params.selfE164; const isSelfChat = account.selfChatMode ?? isSelfChatMode(params.selfE164, configuredAllowFrom); const pairingGraceMs = @@ -85,18 +83,6 @@ export async function checkInboundAccessControl(params: { typeof params.messageTimestampMs === "number" && params.messageTimestampMs < params.connectedAtMs - pairingGraceMs; - // Pre-compute normalized allowlists for filtering. - const dmHasWildcard = allowFrom?.includes("*") ?? false; - const normalizedAllowFrom = - allowFrom && allowFrom.length > 0 - ? allowFrom.filter((entry) => entry !== "*").map(normalizeE164) - : []; - const groupHasWildcard = groupAllowFrom?.includes("*") ?? false; - const normalizedGroupAllowFrom = - groupAllowFrom && groupAllowFrom.length > 0 - ? groupAllowFrom.filter((entry) => entry !== "*").map(normalizeE164) - : []; - // Group policy filtering: // - "open": groups bypass allowFrom, only mention-gating applies // - "disabled": block all group messages entirely @@ -113,8 +99,45 @@ export async function checkInboundAccessControl(params: { accountId: account.accountId, log: (message) => logVerbose(message), }); - if (params.group && groupPolicy === "disabled") { - logVerbose("Blocked group message (groupPolicy: disabled)"); + const normalizedDmSender = normalizeE164(params.from); + const normalizedGroupSender = + typeof params.senderE164 === "string" ? normalizeE164(params.senderE164) : null; + const access = resolveDmGroupAccessWithLists({ + isGroup: params.group, + dmPolicy, + groupPolicy, + // Groups intentionally fall back to configured allowFrom only (not DM self-chat fallback). + allowFrom: params.group ? 
configuredAllowFrom : dmAllowFrom, + groupAllowFrom, + storeAllowFrom, + isSenderAllowed: (allowEntries) => { + const hasWildcard = allowEntries.includes("*"); + if (hasWildcard) { + return true; + } + const normalizedEntrySet = new Set( + allowEntries + .map((entry) => normalizeE164(String(entry))) + .filter((entry): entry is string => Boolean(entry)), + ); + if (!params.group && isSamePhone) { + return true; + } + return params.group + ? Boolean(normalizedGroupSender && normalizedEntrySet.has(normalizedGroupSender)) + : normalizedEntrySet.has(normalizedDmSender); + }, + }); + if (params.group && access.decision !== "allow") { + if (access.reason === "groupPolicy=disabled") { + logVerbose("Blocked group message (groupPolicy: disabled)"); + } else if (access.reason === "groupPolicy=allowlist (empty allowlist)") { + logVerbose("Blocked group message (groupPolicy: allowlist, no groupAllowFrom)"); + } else { + logVerbose( + `Blocked group message from ${params.senderE164 ?? "unknown sender"} (groupPolicy: allowlist)`, + ); + } return { allowed: false, shouldMarkRead: false, @@ -122,31 +145,6 @@ export async function checkInboundAccessControl(params: { resolvedAccountId: account.accountId, }; } - if (params.group && groupPolicy === "allowlist") { - if (!groupAllowFrom || groupAllowFrom.length === 0) { - logVerbose("Blocked group message (groupPolicy: allowlist, no groupAllowFrom)"); - return { - allowed: false, - shouldMarkRead: false, - isSelfChat, - resolvedAccountId: account.accountId, - }; - } - const senderAllowed = - groupHasWildcard || - (params.senderE164 != null && normalizedGroupAllowFrom.includes(params.senderE164)); - if (!senderAllowed) { - logVerbose( - `Blocked group message from ${params.senderE164 ?? 
"unknown sender"} (groupPolicy: allowlist)`, - ); - return { - allowed: false, - shouldMarkRead: false, - isSelfChat, - resolvedAccountId: account.accountId, - }; - } - } // DM access control (secure defaults): "pairing" (default) / "allowlist" / "open" / "disabled". if (!params.group) { @@ -159,7 +157,7 @@ export async function checkInboundAccessControl(params: { resolvedAccountId: account.accountId, }; } - if (dmPolicy === "disabled") { + if (access.decision === "block" && access.reason === "dmPolicy=disabled") { logVerbose("Blocked dm (dmPolicy: disabled)"); return { allowed: false, @@ -168,49 +166,49 @@ export async function checkInboundAccessControl(params: { resolvedAccountId: account.accountId, }; } - if (dmPolicy !== "open" && !isSamePhone) { + if (access.decision === "pairing" && !isSamePhone) { const candidate = params.from; - const allowed = - dmHasWildcard || - (normalizedAllowFrom.length > 0 && normalizedAllowFrom.includes(candidate)); - if (!allowed) { - if (dmPolicy === "pairing") { - if (suppressPairingReply) { - logVerbose(`Skipping pairing reply for historical DM from ${candidate}.`); - } else { - const { code, created } = await upsertChannelPairingRequest({ - channel: "whatsapp", - id: candidate, - accountId: account.accountId, - meta: { name: (params.pushName ?? "").trim() || undefined }, + if (suppressPairingReply) { + logVerbose(`Skipping pairing reply for historical DM from ${candidate}.`); + } else { + const { code, created } = await upsertChannelPairingRequest({ + channel: "whatsapp", + id: candidate, + accountId: account.accountId, + meta: { name: (params.pushName ?? "").trim() || undefined }, + }); + if (created) { + logVerbose( + `whatsapp pairing request sender=${candidate} name=${params.pushName ?? 
"unknown"}`, + ); + try { + await params.sock.sendMessage(params.remoteJid, { + text: buildPairingReply({ + channel: "whatsapp", + idLine: `Your WhatsApp phone number: ${candidate}`, + code, + }), }); - if (created) { - logVerbose( - `whatsapp pairing request sender=${candidate} name=${params.pushName ?? "unknown"}`, - ); - try { - await params.sock.sendMessage(params.remoteJid, { - text: buildPairingReply({ - channel: "whatsapp", - idLine: `Your WhatsApp phone number: ${candidate}`, - code, - }), - }); - } catch (err) { - logVerbose(`whatsapp pairing reply failed for ${candidate}: ${String(err)}`); - } - } + } catch (err) { + logVerbose(`whatsapp pairing reply failed for ${candidate}: ${String(err)}`); } - } else { - logVerbose(`Blocked unauthorized sender ${candidate} (dmPolicy=${dmPolicy})`); } - return { - allowed: false, - shouldMarkRead: false, - isSelfChat, - resolvedAccountId: account.accountId, - }; } + return { + allowed: false, + shouldMarkRead: false, + isSelfChat, + resolvedAccountId: account.accountId, + }; + } + if (access.decision !== "allow") { + logVerbose(`Blocked unauthorized sender ${params.from} (dmPolicy=${dmPolicy})`); + return { + allowed: false, + shouldMarkRead: false, + isSelfChat, + resolvedAccountId: account.accountId, + }; } } diff --git a/src/wizard/onboarding.gateway-config.test.ts b/src/wizard/onboarding.gateway-config.test.ts index 1f9cb175d64..1bbe3a82f15 100644 --- a/src/wizard/onboarding.gateway-config.test.ts +++ b/src/wizard/onboarding.gateway-config.test.ts @@ -111,4 +111,29 @@ describe("configureGatewayForOnboarding", () => { expect(authConfig?.password).toBe(""); expect(authConfig?.password).not.toBe("undefined"); }); + + it("seeds control UI allowed origins for non-loopback binds", async () => { + mocks.randomToken.mockReturnValue("generated-token"); + + const prompter = createPrompter({ + selectQueue: ["lan", "token", "off"], + textQueue: ["18789", undefined], + }); + const runtime = createRuntime(); + + const result = 
await configureGatewayForOnboarding({ + flow: "advanced", + baseConfig: {}, + nextConfig: {}, + localPort: 18789, + quickstartGateway: createQuickstartGateway("token"), + prompter, + runtime, + }); + + expect(result.nextConfig.gateway?.controlUi?.allowedOrigins).toEqual([ + "http://localhost:18789", + "http://127.0.0.1:18789", + ]); + }); }); diff --git a/src/wizard/onboarding.gateway-config.ts b/src/wizard/onboarding.gateway-config.ts index a57cef19b12..6aba767b401 100644 --- a/src/wizard/onboarding.gateway-config.ts +++ b/src/wizard/onboarding.gateway-config.ts @@ -49,6 +49,21 @@ type ConfigureGatewayResult = { settings: GatewayWizardSettings; }; +function buildDefaultControlUiAllowedOrigins(params: { + port: number; + bind: GatewayWizardSettings["bind"]; + customBindHost?: string; +}): string[] { + const origins = new Set([ + `http://localhost:${params.port}`, + `http://127.0.0.1:${params.port}`, + ]); + if (params.bind === "custom" && params.customBindHost) { + origins.add(`http://${params.customBindHost}:${params.port}`); + } + return [...origins]; +} + export async function configureGatewayForOnboarding( opts: ConfigureGatewayOptions, ): Promise { @@ -216,6 +231,28 @@ export async function configureGatewayForOnboarding( }, }; + const controlUiEnabled = nextConfig.gateway?.controlUi?.enabled ?? true; + const hasExplicitControlUiAllowedOrigins = + (nextConfig.gateway?.controlUi?.allowedOrigins ?? 
[]).some( + (origin) => origin.trim().length > 0, + ) || nextConfig.gateway?.controlUi?.dangerouslyAllowHostHeaderOriginFallback === true; + if (controlUiEnabled && bind !== "loopback" && !hasExplicitControlUiAllowedOrigins) { + nextConfig = { + ...nextConfig, + gateway: { + ...nextConfig.gateway, + controlUi: { + ...nextConfig.gateway?.controlUi, + allowedOrigins: buildDefaultControlUiAllowedOrigins({ + port, + bind, + customBindHost, + }), + }, + }, + }; + } + // If this is a new gateway setup (no existing gateway settings), start with a // denylist for high-risk node commands. Users can arm these temporarily via // /phone arm ... (phone-control plugin). diff --git a/src/wizard/onboarding.ts b/src/wizard/onboarding.ts index df826b62ccf..49a6e292ed2 100644 --- a/src/wizard/onboarding.ts +++ b/src/wizard/onboarding.ts @@ -31,15 +31,21 @@ async function requireRiskAcknowledgement(params: { "Security warning — please read.", "", "OpenClaw is a hobby project and still in beta. Expect sharp edges.", + "By default, OpenClaw is a personal agent: one trusted operator boundary.", "This bot can read files and run actions if tools are enabled.", "A bad prompt can trick it into doing unsafe things.", "", - "If you’re not comfortable with basic security and access control, don’t run OpenClaw.", + "OpenClaw is not a hostile multi-tenant boundary by default.", + "If multiple users can message one tool-enabled agent, they share that delegated tool authority.", + "", + "If you’re not comfortable with security hardening and access control, don’t run OpenClaw.", "Ask someone experienced to help before enabling tools or exposing it to the internet.", "", "Recommended baseline:", "- Pairing/allowlists + mention gating.", + "- Multi-user/shared inbox: split trust boundaries (separate gateway/credentials, ideally separate OS users/hosts).", "- Sandbox + least-privilege tools.", + "- Shared inboxes: isolate DM sessions (`session.dmScope: per-channel-peer`) and keep tool access minimal.", 
"- Keep secrets out of the agent’s reachable filesystem.", "- Use the strongest available model for any bot with tools or untrusted inboxes.", "", @@ -53,7 +59,8 @@ async function requireRiskAcknowledgement(params: { ); const ok = await params.prompter.confirm({ - message: "I understand this is powerful and inherently risky. Continue?", + message: + "I understand this is personal-by-default and shared/multi-user use requires lock-down. Continue?", initialValue: false, }); if (!ok) { @@ -360,6 +367,7 @@ export async function runOnboardingWizard( prompter, runtime, config: nextConfig, + secretInputMode: opts.secretInputMode, }); nextConfig = customResult.config; } else { diff --git a/test/fixtures/system-run-approval-binding-contract.json b/test/fixtures/system-run-approval-binding-contract.json new file mode 100644 index 00000000000..2a5a5ad55c2 --- /dev/null +++ b/test/fixtures/system-run-approval-binding-contract.json @@ -0,0 +1,115 @@ +{ + "cases": [ + { + "name": "v1 matches when env key order changes", + "request": { + "host": "node", + "command": "git diff", + "bindingV1": { + "argv": ["git", "diff"], + "cwd": null, + "agentId": null, + "sessionKey": null, + "env": { "SAFE_A": "1", "SAFE_B": "2" } + } + }, + "invoke": { + "argv": ["git", "diff"], + "binding": { + "cwd": null, + "agentId": null, + "sessionKey": null, + "env": { "SAFE_B": "2", "SAFE_A": "1" } + } + }, + "expected": { "ok": true } + }, + { + "name": "v1 rejects env mismatch", + "request": { + "host": "node", + "command": "git diff", + "bindingV1": { + "argv": ["git", "diff"], + "cwd": null, + "agentId": null, + "sessionKey": null, + "env": { "SAFE": "1" } + } + }, + "invoke": { + "argv": ["git", "diff"], + "binding": { + "cwd": null, + "agentId": null, + "sessionKey": null, + "env": { "SAFE": "2" } + } + }, + "expected": { "ok": false, "code": "APPROVAL_ENV_MISMATCH" } + }, + { + "name": "v1 rejects unbound env overrides", + "request": { + "host": "node", + "command": "git diff", + "bindingV1": { 
+ "argv": ["git", "diff"], + "cwd": null, + "agentId": null, + "sessionKey": null + } + }, + "invoke": { + "argv": ["git", "diff"], + "binding": { + "cwd": null, + "agentId": null, + "sessionKey": null, + "env": { "GIT_EXTERNAL_DIFF": "/tmp/pwn.sh" } + } + }, + "expected": { "ok": false, "code": "APPROVAL_ENV_BINDING_MISSING" } + }, + { + "name": "missing binding rejects requests even with matching argv", + "request": { + "host": "node", + "command": "echo SAFE", + "commandArgv": ["echo", "SAFE"] + }, + "invoke": { + "argv": ["echo", "SAFE"], + "binding": { + "cwd": null, + "agentId": null, + "sessionKey": null + } + }, + "expected": { "ok": false, "code": "APPROVAL_REQUEST_MISMATCH" } + }, + { + "name": "v1 stays authoritative when legacy command text diverges", + "request": { + "host": "node", + "command": "echo STALE", + "commandArgv": ["echo", "STALE"], + "bindingV1": { + "argv": ["echo", "SAFE"], + "cwd": null, + "agentId": null, + "sessionKey": null + } + }, + "invoke": { + "argv": ["echo", "SAFE"], + "binding": { + "cwd": null, + "agentId": null, + "sessionKey": null + } + }, + "expected": { "ok": true } + } + ] +} diff --git a/test/fixtures/system-run-approval-mismatch-contract.json b/test/fixtures/system-run-approval-mismatch-contract.json new file mode 100644 index 00000000000..138751c68fb --- /dev/null +++ b/test/fixtures/system-run-approval-mismatch-contract.json @@ -0,0 +1,67 @@ +{ + "cases": [ + { + "name": "request mismatch preserves base details", + "runId": "approval-req-1", + "match": { + "ok": false, + "code": "APPROVAL_REQUEST_MISMATCH", + "message": "approval id does not match request" + }, + "expected": { + "ok": false, + "message": "approval id does not match request", + "details": { + "code": "APPROVAL_REQUEST_MISMATCH", + "runId": "approval-req-1" + } + } + }, + { + "name": "missing env binding keeps env key details", + "runId": "approval-env-missing", + "match": { + "ok": false, + "code": "APPROVAL_ENV_BINDING_MISSING", + "message": 
"approval id missing env binding for requested env overrides", + "details": { + "envKeys": ["GIT_EXTERNAL_DIFF"] + } + }, + "expected": { + "ok": false, + "message": "approval id missing env binding for requested env overrides", + "details": { + "code": "APPROVAL_ENV_BINDING_MISSING", + "runId": "approval-env-missing", + "envKeys": ["GIT_EXTERNAL_DIFF"] + } + } + }, + { + "name": "env mismatch preserves hash diagnostics", + "runId": "approval-env-mismatch", + "match": { + "ok": false, + "code": "APPROVAL_ENV_MISMATCH", + "message": "approval id env binding mismatch", + "details": { + "envKeys": ["SAFE_A"], + "expectedEnvHash": "expected-hash", + "actualEnvHash": "actual-hash" + } + }, + "expected": { + "ok": false, + "message": "approval id env binding mismatch", + "details": { + "code": "APPROVAL_ENV_MISMATCH", + "runId": "approval-env-mismatch", + "envKeys": ["SAFE_A"], + "expectedEnvHash": "expected-hash", + "actualEnvHash": "actual-hash" + } + } + } + ] +} diff --git a/test/scripts/check-channel-agnostic-boundaries.test.ts b/test/scripts/check-channel-agnostic-boundaries.test.ts new file mode 100644 index 00000000000..f82f355dd85 --- /dev/null +++ b/test/scripts/check-channel-agnostic-boundaries.test.ts @@ -0,0 +1,127 @@ +import { describe, expect, it } from "vitest"; +import { + findChannelAgnosticBoundaryViolations, + findAcpUserFacingChannelNameViolations, + findChannelCoreReverseDependencyViolations, + findSystemMarkLiteralViolations, +} from "../../scripts/check-channel-agnostic-boundaries.mjs"; + +describe("check-channel-agnostic-boundaries", () => { + it("flags direct channel module imports", () => { + const source = ` + import { getThreadBindingManager } from "../discord/monitor/thread-bindings.js"; + const x = 1; + `; + expect(findChannelAgnosticBoundaryViolations(source)).toEqual([ + { + line: 2, + reason: 'imports channel module "../discord/monitor/thread-bindings.js"', + }, + ]); + }); + + it("flags channel config path access", () => { + const 
source = ` + const x = cfg.channels.discord?.threadBindings?.enabled; + `; + expect(findChannelAgnosticBoundaryViolations(source)).toEqual([ + { + line: 2, + reason: 'references config path "channels.discord"', + }, + ]); + }); + + it("flags channel-literal comparisons", () => { + const source = ` + if (channel === "discord") { + return true; + } + `; + expect(findChannelAgnosticBoundaryViolations(source)).toEqual([ + { + line: 2, + reason: 'compares with channel id literal (channel === "discord")', + }, + ]); + }); + + it("flags object literals with explicit channel ids", () => { + const source = ` + const payload = { channel: "telegram" }; + `; + expect(findChannelAgnosticBoundaryViolations(source)).toEqual([ + { + line: 2, + reason: 'assigns channel id literal to "channel" ("telegram")', + }, + ]); + }); + + it("ignores non-channel literals and unrelated text", () => { + const source = ` + const msg = "discord"; + const payload = { mode: "persistent" }; + const x = cfg.session.threadBindings?.enabled; + `; + expect(findChannelAgnosticBoundaryViolations(source)).toEqual([]); + }); + + it("reverse-deps mode flags channel module re-exports", () => { + const source = ` + export { resolveThreadBindingIntroText } from "../discord/monitor/thread-bindings.messages.js"; + `; + expect(findChannelCoreReverseDependencyViolations(source)).toEqual([ + { + line: 2, + reason: 're-exports channel module "../discord/monitor/thread-bindings.messages.js"', + }, + ]); + }); + + it("reverse-deps mode ignores channel literals when no imports are present", () => { + const source = ` + const channel = "discord"; + const x = cfg.channels.discord?.threadBindings?.enabled; + `; + expect(findChannelCoreReverseDependencyViolations(source)).toEqual([]); + }); + + it("user-facing text mode flags channel names in string literals", () => { + const source = ` + const message = "Bind a Discord thread first."; + `; + expect(findAcpUserFacingChannelNameViolations(source)).toEqual([ + { + line: 2, + 
reason: 'user-facing text references channel name ("Bind a Discord thread first.")', + }, + ]); + }); + + it("user-facing text mode ignores channel names in import specifiers", () => { + const source = ` + import { x } from "../discord/monitor/thread-bindings.js"; + `; + expect(findAcpUserFacingChannelNameViolations(source)).toEqual([]); + }); + + it("system-mark guard flags hardcoded gear literals", () => { + const source = ` + const line = "⚙️ Thread bindings enabled."; + `; + expect(findSystemMarkLiteralViolations(source)).toEqual([ + { + line: 2, + reason: 'hardcoded system mark literal ("⚙️ Thread bindings enabled.")', + }, + ]); + }); + + it("system-mark guard ignores module import specifiers", () => { + const source = ` + import { x } from "../infra/system-message.js"; + `; + expect(findSystemMarkLiteralViolations(source)).toEqual([]); + }); +}); diff --git a/ui/src/styles/base.css b/ui/src/styles/base.css index b83afd32c50..ffef3f69a23 100644 --- a/ui/src/styles/base.css +++ b/ui/src/styles/base.css @@ -1,5 +1,3 @@ -@import url("https://fonts.googleapis.com/css2?family=Space+Grotesk:wght@400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap"); - :root { /* Background - Warmer dark with depth */ --bg: #12141a; @@ -80,12 +78,11 @@ --theme-switch-x: 50%; --theme-switch-y: 50%; - /* Typography - Space Grotesk for personality */ + /* Typography */ --mono: "JetBrains Mono", ui-monospace, SFMono-Regular, "SF Mono", Menlo, Monaco, Consolas, monospace; - --font-body: "Space Grotesk", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; - --font-display: - "Space Grotesk", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; + --font-body: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; + --font-display: var(--font-body); /* Shadows - Richer with subtle color */ --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.2); diff --git a/ui/src/styles/chat/layout.css b/ui/src/styles/chat/layout.css index 4a5c4cdfa46..25fa6742b4a 
100644 --- a/ui/src/styles/chat/layout.css +++ b/ui/src/styles/chat/layout.css @@ -452,6 +452,24 @@ grid-template-columns: 1fr; } + /* Mobile: stack compose row vertically */ + .chat-compose__row { + flex-direction: column; + gap: 8px; + } + + /* Mobile: stack action buttons vertically */ + .chat-compose__actions { + flex-direction: column; + width: 100%; + gap: 8px; + } + + /* Mobile: full-width buttons */ + .chat-compose .chat-compose__actions .btn { + width: 100%; + } + .chat-controls { flex-wrap: wrap; gap: 8px; diff --git a/ui/vite.config.ts b/ui/vite.config.ts index 161cb9dae3b..1f34fb313cd 100644 --- a/ui/vite.config.ts +++ b/ui/vite.config.ts @@ -31,6 +31,8 @@ export default defineConfig(() => { outDir: path.resolve(here, "../dist/control-ui"), emptyOutDir: true, sourcemap: true, + // Keep CI/onboard logs clean; current control UI chunking is intentionally above 500 kB. + chunkSizeWarningLimit: 1024, }, server: { host: true,