diff --git a/.secrets.baseline b/.secrets.baseline index 5a0c639b9e3..056b2dd8778 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -12991,7 +12991,7 @@ "filename": "ui/src/i18n/locales/en.ts", "hashed_secret": "de0ff6b974d6910aca8d6b830e1b761f076d8fe6", "is_verified": false, - "line_number": 61 + "line_number": 74 } ], "ui/src/i18n/locales/pt-BR.ts": [ @@ -13000,7 +13000,7 @@ "filename": "ui/src/i18n/locales/pt-BR.ts", "hashed_secret": "ef7b6f95faca2d7d3a5aa5a6434c89530c6dd243", "is_verified": false, - "line_number": 61 + "line_number": 73 } ], "vendor/a2ui/README.md": [ diff --git a/CHANGELOG.md b/CHANGELOG.md index ab831a6d5ac..56374c47b20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,20 +4,56 @@ Docs: https://docs.openclaw.ai ## Unreleased +### Security + +- Security/exec approvals: escape invisible Unicode format characters in approval prompts so zero-width command text renders as visible `\u{...}` escapes instead of spoofing the reviewed command. (`GHSA-pcqg-f7rg-xfvv`)(#43687) Thanks @EkiXu and @vincentkoc. +- Security/device pairing: cap issued and verified device-token scopes to each paired device's approved scope baseline so stale or overbroad tokens cannot exceed approved access. (`GHSA-2pwv-x786-56f8`)(#43686) Thanks @tdjackey and @vincentkoc. +- Security/proxy attachments: restore the shared media-store size cap for persisted browser proxy files so oversized payloads are rejected instead of overriding the intended 5 MB limit. (`GHSA-6rph-mmhp-h7h9`)(#43684) Thanks @tdjackey and @vincentkoc. +- Security/host env: block inherited `GIT_EXEC_PATH` from sanitized host exec environments so Git helper resolution cannot be steered by host environment state. (`GHSA-jf5v-pqgw-gm5m`)(#43685) Thanks @zpbrent and @vincentkoc. 
+- Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via `session_status`. (`GHSA-wcxr-59v9-rxr8`)(#43754) Thanks @tdjackey and @vincentkoc. +- Models/secrets: enforce source-managed SecretRef markers in generated `models.json` so runtime-resolved provider secrets are not persisted when runtime projection is skipped. (#43759) Thanks @joshavant. +- Security/browser.request: block persistent browser profile create/delete routes from write-scoped `browser.request` so callers can no longer persist admin-only browser profile changes through the browser control surface. (`GHSA-vmhq-cqm9-6p7q`)(#43800) Thanks @tdjackey and @vincentkoc. +- Security/agent: reject public spawned-run lineage fields and keep workspace inheritance on the internal spawned-session path so external `agent` callers can no longer override the gateway workspace boundary. (`GHSA-2rqg-gjgv-84jm`)(#43801) Thanks @tdjackey and @vincentkoc. +- Security/exec allowlist: preserve POSIX case sensitivity and keep `?` within a single path segment so exact-looking allowlist patterns no longer overmatch executables across case or directory boundaries. (`GHSA-f8r2-vg7x-gh8m`)(#43798) Thanks @zpbrent and @vincentkoc. + ### Changes -- Gateway/node pending work: add narrow in-memory pending-work queue primitives (`node.pending.enqueue` / `node.pending.drain`) and wake-helper reuse as a foundation for dormant-node work delivery. (#41409) Thanks @mbelinky. -- Git/runtime state: ignore the gateway-generated `.dev-state` file so local runtime state does not show up as untracked repo noise. (#41848) Thanks @smysle. -- ACP/sessions_spawn: add optional `resumeSessionId` for `runtime: "acp"` so spawned ACP sessions can resume an existing ACPX/Codex conversation instead of always starting fresh. (#41847) Thanks @pejmanjohn. 
-- Exec/child commands: mark child command environments with `OPENCLAW_CLI` so subprocesses can detect when they were launched from the OpenClaw CLI. (#41411) Thanks @vincentkoc. +### Fixes + +- Cron/proactive delivery: keep isolated direct cron sends out of the write-ahead resend queue so transient-send retries do not replay duplicate proactive messages after restart. (#40646) Thanks @openperf and @vincentkoc. +- TUI/chat log: reuse the active assistant message component for the same streaming run so `openclaw tui` no longer renders duplicate assistant replies. (#35364) Thanks @lisitan. +- macOS/Reminders: add the missing `NSRemindersUsageDescription` to the bundled app so `apple-reminders` can trigger the system permission prompt from OpenClaw.app. (#8559) Thanks @dinakars777. +- iMessage/self-chat echo dedupe: drop reflected duplicate copies only when a matching `is_from_me` event was just seen for the same chat, text, and `created_at`, preventing self-chat loops without broad text-only suppression. Related to #32166. (#38440) Thanks @vincentkoc. +- Mattermost/block streaming: fix duplicate message delivery (one threaded, one top-level) when block streaming is active by excluding `replyToId` from the block reply dedup key and adding an explicit `threading` dock to the Mattermost plugin. (#41362) Thanks @mathiasnagler and @vincentkoc. +- BlueBubbles/self-chat echo dedupe: drop reflected duplicate webhook copies only when a matching `fromMe` event was just seen for the same chat, body, and timestamp, preventing self-chat loops without broad webhook suppression. Related to #32166. (#38442) Thanks @vincentkoc. +- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz. 
+- Sandbox/write: preserve pinned mutation-helper payload stdin so sandboxed `write` no longer reports success while creating empty files. (#43876) Thanks @glitch418x. +- Gateway/main-session routing: keep TUI and other `mode:UI` main-session sends on the internal surface when `deliver` is enabled, so replies no longer inherit the session's persisted Telegram/WhatsApp route. (#43918) Thanks @obviyus. + +## 2026.3.11 + +### Security + +- Gateway/WebSocket: enforce browser origin validation for all browser-originated connections regardless of whether proxy headers are present, closing a cross-site WebSocket hijacking path in `trusted-proxy` mode that could grant untrusted origins `operator.admin` access. (`GHSA-5wcw-8jjv-m286`) + +### Changes + +- OpenRouter/models: add temporary Hunter Alpha and Healer Alpha entries to the built-in catalog so OpenRouter users can try the new free stealth models during their roughly one-week availability window. (#43642) Thanks @ping-Toven. - iOS/Home canvas: add a bundled welcome screen with a live agent overview that refreshes on connect, reconnect, and foreground return, and move the compact connection pill off the top-left canvas overlay. (#42456) Thanks @ngutman. - iOS/Home canvas: replace floating controls with a docked toolbar, make the bundled home scaffold adapt to smaller phones, and open chat in the resolved main session instead of a synthetic `ios` session. (#42456) Thanks @ngutman. -- Discord/auto threads: add `autoArchiveDuration` channel config for auto-created threads so Discord thread archiving can stay at 1 hour, 1 day, 3 days, or 1 week instead of always using the 1-hour default. (#35065) Thanks @davidguttman. -- OpenCode/onboarding: add new OpenCode Go provider, treat Zen and Go as one OpenCode setup in the wizard/docs while keeping the runtime providers split, store one shared OpenCode key for both profiles, and stop overriding the built-in `opencode-go` catalog routing. (#42313) Thanks @ImLukeF and @vincentkoc. 
- macOS/chat UI: add a chat model picker, persist explicit thinking-level selections across relaunch, and harden provider-aware session model sync for the shared chat composer. (#42314) Thanks @ImLukeF. -- iOS/TestFlight: add a local beta release flow with Fastlane prepare/archive/upload support, canonical beta bundle IDs, and watch-app archive fixes. (#42991) Thanks @ngutman. -- macOS/onboarding: detect when remote gateways need a shared auth token, explain where to find it on the gateway host, and clarify when a successful check used paired-device auth instead. (#43100) Thanks @ngutman. - Onboarding/Ollama: add first-class Ollama setup with Local or Cloud + Local modes, browser-based cloud sign-in, curated model suggestions, and cloud-model handling that skips unnecessary local pulls. (#41529) Thanks @BruceMacD. +- OpenCode/onboarding: add new OpenCode Go provider, treat Zen and Go as one OpenCode setup in the wizard/docs while keeping the runtime providers split, store one shared OpenCode key for both profiles, and stop overriding the built-in `opencode-go` catalog routing. (#42313) Thanks @ImLukeF and @vincentkoc. +- Memory: add opt-in multimodal image and audio indexing for `memorySearch.extraPaths` with Gemini `gemini-embedding-2-preview`, strict fallback gating, and scope-based reindexing. (#43460) Thanks @gumadeiras. +- Memory/Gemini: add `gemini-embedding-2-preview` memory-search support with configurable output dimensions and automatic reindexing when the configured dimensions change. (#42501) Thanks @BillChirico and @gumadeiras. +- macOS/onboarding: detect when remote gateways need a shared auth token, explain where to find it on the gateway host, and clarify when a successful check used paired-device auth instead. (#43100) Thanks @ngutman. +- Discord/auto threads: add `autoArchiveDuration` channel config for auto-created threads so Discord thread archiving can stay at 1 hour, 1 day, 3 days, or 1 week instead of always using the 1-hour default. 
(#35065) Thanks @davidguttman. +- iOS/TestFlight: add a local beta release flow with Fastlane prepare/archive/upload support, canonical beta bundle IDs, and watch-app archive fixes. (#42991) Thanks @ngutman. +- ACP/sessions_spawn: add optional `resumeSessionId` for `runtime: "acp"` so spawned ACP sessions can resume an existing ACPX/Codex conversation instead of always starting fresh. (#41847) Thanks @pejmanjohn. +- Gateway/node pending work: add narrow in-memory pending-work queue primitives (`node.pending.enqueue` / `node.pending.drain`) and wake-helper reuse as a foundation for dormant-node work delivery. (#41409) Thanks @mbelinky. +- Git/runtime state: ignore the gateway-generated `.dev-state` file so local runtime state does not show up as untracked repo noise. (#41848) Thanks @smysle. +- Exec/child commands: mark child command environments with `OPENCLAW_CLI` so subprocesses can detect when they were launched from the OpenClaw CLI. (#41411) Thanks @vincentkoc. +- LLM Task/Lobster: add an optional `thinking` override so workflow calls can explicitly set embedded reasoning level with shared validation for invalid values and unsupported `xhigh` modes. (#15606) Thanks @xadenryan and @ImLukeF. ### Breaking @@ -26,81 +62,97 @@ Docs: https://docs.openclaw.ai ### Fixes - Agents/text sanitization: strip leaked model control tokens (`<|...|>` and full-width `<|...|>` variants) from user-facing assistant text, preventing GLM-5 and DeepSeek internal delimiters from reaching end users. (#42173) Thanks @imwyvern. -- Resolve web tool SecretRefs atomically at runtime. (#41599) Thanks @joshavant. -- Feishu/local image auto-convert: pass `mediaLocalRoots` through the `sendText` local-image shim so allowed local image paths upload as Feishu images again instead of falling back to raw path text. (#40623) Thanks @ayanesakura. -- ACP/ACPX plugin: bump the bundled `acpx` pin to `0.1.16` so plugin-local installs and strict version checks match the latest published CLI. 
(#41975) Thanks @dutifulbob. -- macOS/LaunchAgent install: tighten LaunchAgent directory and plist permissions during install so launchd bootstrap does not fail when the target home path or generated plist inherited group/world-writable modes. +- iOS/gateway foreground recovery: reconnect immediately on foreground return after stale background sockets are torn down, so the app no longer stays disconnected until a later wake path happens. (#41384) Thanks @mbelinky. - Gateway/Control UI: keep dashboard auth tokens in session-scoped browser storage so same-tab refreshes preserve remote token auth without restoring long-lived localStorage token persistence, while scoping tokens to the selected gateway URL and fragment-only bootstrap flow. (#40892) thanks @velvet-shark. +- Gateway/macOS launchd restarts: keep the LaunchAgent registered during explicit restarts, hand off self-restarts through a detached launchd helper, and recover config/hot reload restart paths without unloading the service. Fixes #43311, #43406, #43035, and #43049. +- macOS/LaunchAgent install: tighten LaunchAgent directory and plist permissions during install so launchd bootstrap does not fail when the target home path or generated plist inherited group/world-writable modes. +- Discord/reply chunking: resolve the effective `maxLinesPerMessage` config across live reply paths and preserve `chunkMode` in the fast send path so long Discord replies no longer split unexpectedly at the default 17-line limit. (#40133) thanks @rbutera. +- Feishu/local image auto-convert: pass `mediaLocalRoots` through the `sendText` local-image shim so allowed local image paths upload as Feishu images again instead of falling back to raw path text. (#40623) Thanks @ayanesakura. +- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz. 
+- Telegram/outbound HTML sends: chunk long HTML-mode messages, preserve plain-text fallback and silent-delivery params across retries, and cut over to plain text when HTML chunk planning cannot safely preserve the full message. (#42240) thanks @obviyus. +- Telegram/final preview delivery: split active preview lifecycle from cleanup retention so missing archived preview edits avoid duplicate fallback sends without clearing the live preview or blocking later in-place finalization. (#41662) thanks @hougangdev. +- Telegram/final preview delivery followup: keep ambiguous missing-`message_id` finals only when a preview was already visible, while first-preview/no-id cases still fall back so Telegram users do not lose the final reply. (#41932) thanks @hougangdev. +- Telegram/final preview cleanup follow-up: clear stale cleanup-retain state only for transient preview finals so archived-preview retains no longer leave a stale partial bubble beside a later fallback-sent final. (#41763) Thanks @obviyus. +- Telegram/poll restarts: scope process-level polling restarts to real Telegram `getUpdates` failures so unrelated network errors, such as Slack DNS misses, no longer bounce Telegram polling. (#43799) Thanks @obviyus. +- Gateway/auth: allow one trusted device-token retry on shared-token mismatch with recovery hints to prevent reconnect churn during token drift. (#42507) Thanks @joshavant. +- Gateway/config errors: surface up to three validation issues in top-level `config.set`, `config.patch`, and `config.apply` error messages while preserving structured issue details. (#42664) Thanks @huntharo. +- Agents/Azure OpenAI Responses: include the `azure-openai` provider in the Responses API store override so Azure OpenAI multi-turn cron jobs and embedded agent runs no longer fail with HTTP 400 "store is set to false". (#42934, fixes #42800) Thanks @ademczuk. 
+- Agents/error rendering: ignore stale assistant `errorMessage` fields on successful turns so background/tool-side failures no longer prepend synthetic billing errors over valid replies. (#40616) Thanks @ingyukoh. +- Agents/billing recovery: probe single-provider billing cooldowns on the existing throttle so topping up credits can recover without a manual gateway restart. (#41422) thanks @altaywtf. +- Agents/fallback: treat HTTP 499 responses as transient in both raw-text and structured failover paths so Anthropic-style client-closed overload responses trigger model fallback reliably. (#41468) thanks @zeroasterisk. +- Agents/fallback: recognize Venice `402 Insufficient USD or Diem balance` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#43205) Thanks @Squabble9. +- Agents/fallback: recognize Poe `402 You've used up your points!` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#42278) Thanks @CryUshio. +- Agents/failover: treat Gemini `MALFORMED_RESPONSE` stop reasons as retryable timeouts so preview-model enum drift falls back cleanly instead of crashing the run, without also reclassifying malformed function-call errors. (#42292) Thanks @jnMetaCode. +- Agents/cooldowns: default cooldown windows with no recorded failure history to `unknown` instead of `rate_limit`, avoiding false API rate-limit warnings while preserving cooldown recovery probes. (#42911) Thanks @VibhorGautam. +- Auth/cooldowns: reset expired auth-profile cooldown error counters before computing the next backoff so stale on-disk counters do not re-escalate into long cooldown loops after expiry. (#41028) thanks @zerone0x. +- Agents/memory flush: forward `memoryFlushWritePath` through `runEmbeddedPiAgent` so memory-triggered flush turns keep the append-only write guard without aborting before tool setup. Follows up on #38574. (#41761) Thanks @frankekn. 
+- Agents/context pruning: prune image-only tool results during soft-trim, align context-pruning coverage with the new tool-result contract, and extend historical image cleanup to the same screenshot-heavy session path. (#43045) Thanks @MoerAI. +- Sessions/reset model recompute: clear stale runtime model, context-token, and system-prompt metadata before session resets recompute the replacement session, so resets pick up current defaults and explicit overrides instead of reusing old runtime model state. (#41173) thanks @PonyX-lab. +- Channels/allowlists: remove stale matcher caching so same-array allowlist edits and wildcard replacements take effect immediately, with regression coverage for in-place mutation cases. +- Discord/Telegram outbound runtime config: thread runtime-resolved config through Discord and Telegram send paths so SecretRef-based credentials stay resolved during message delivery. (#42352) Thanks @joshavant. +- Tools/web search: treat Brave `llm-context` grounding snippets as plain strings so `web_search` no longer returns empty snippet arrays in LLM Context mode. (#41387) thanks @zheliu2. +- Tools/web search: recover OpenRouter Perplexity citation extraction from `message.annotations` when chat-completions responses omit top-level citations. (#40881) Thanks @laurieluo. +- CLI/skills JSON: strip ANSI and C1 control bytes from `skills list --json`, `skills info --json`, and `skills check --json` so machine-readable output stays valid for terminals and skill metadata with embedded control characters. Fixes #27530. Related #27557. Thanks @Jimmy-xuzimo and @vincentkoc. +- CLI/tables: default shared tables to ASCII borders on legacy Windows consoles while keeping Unicode borders on modern Windows terminals, so commands like `openclaw skills` stop rendering mojibake under GBK/936 consoles. Fixes #40853. Related #41015. Thanks @ApacheBin and @vincentkoc. 
+- CLI/memory teardown: close cached memory search/index managers in the one-shot CLI shutdown path so watcher-backed memory caches no longer keep completed CLI runs alive after output finishes. (#40389) thanks @Julbarth. +- Control UI/Sessions: restore single-column session table collapse on narrow viewport or container widths by moving the responsive table override next to the base grid rule and enabling inline-size container queries. (#12175) Thanks @benjipeng. +- Telegram/network env-proxy: apply configured transport policy to proxied HTTPS dispatchers as well as direct `NO_PROXY` bypasses, so resolver-scoped IPv4 fallback and network settings work consistently for env-proxied Telegram traffic. (#40740) Thanks @sircrumpet. +- Mattermost/Markdown formatting: preserve first-line indentation when stripping bot mentions so nested list items and indented code blocks keep their structure, and render Mattermost tables natively by default instead of fenced-code fallback. (#18655) thanks @echo931. +- Mattermost/plugin send actions: normalize direct `replyTo` fallback handling so threaded plugin sends trim blank IDs and reuse the correct reply target again. (#41176) Thanks @hnykda. +- MS Teams/allowlist resolution: use the General channel conversation ID as the resolved team key (with Graph GUID fallback) so Bot Framework runtime `channelData.team.id` matching works for team and team/channel allowlist entries. (#41838) Thanks @BradGroux. +- Signal/config schema: accept `channels.signal.accountUuid` in strict config validation so loop-protection configs no longer fail with an unrecognized-key error. (#35578) Thanks @ingyukoh. +- Telegram/config schema: accept `channels.telegram.actions.editMessage` and `createForumTopic` in strict config validation so existing Telegram action toggles no longer fail as unrecognized keys. (#35498) Thanks @ingyukoh. 
+- Telegram/docs: clarify that `channels.telegram.groups` allowlists chats while `groupAllowFrom` allowlists users inside those chats, and point invalid negative chat IDs at the right config key. (#42451) Thanks @altaywtf. +- Discord/config typing: expose channel-level `autoThread` on the canonical guild-channel config type so strict config loading matches the existing Discord schema and runtime behavior. (#35608) Thanks @ingyukoh. +- Models: guard optional `model.input` capability checks. (#42096) Thanks @andyliu. +- Models/Alibaba Cloud Model Studio: wire `MODELSTUDIO_API_KEY` through shared env auth, implicit provider discovery, and shell-env fallback so onboarding works outside the wizard too. (#40634) Thanks @pomelo-nwu. +- Resolve web tool SecretRefs atomically at runtime. (#41599) Thanks @joshavant. - Secret files: harden CLI and channel credential file reads against path-swap races by requiring direct regular files for `*File` secret inputs and rejecting symlink-backed secret files. - Archive extraction: harden TAR and external `tar.bz2` installs against destination symlink and pre-existing child-symlink escapes by extracting into staging first and merging into the canonical destination with safe file opens. -- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz. -- Context engine/tests: add bundled-registry regression coverage for cross-chunk resolution, plugin-sdk re-exports, and concurrent chunk registration. (#40460) thanks @dsantoreis. -- Agents/embedded runner: bound compaction retry waiting and drain embedded runs during SIGUSR1 restart so session lanes recover instead of staying blocked behind compaction. (#40324) thanks @cgdusek. +- Secrets/SecretRef: reject exec SecretRef traversal ids across schema, runtime, and gateway. (#42370) Thanks @joshavant. 
+- Sandbox/fs bridge: pin staged writes to verified parent directories so temporary write files cannot materialize outside the allowed mount before atomic replace. Thanks @tdjackey. +- Gateway/auth: fail closed when local `gateway.auth.*` SecretRefs are configured but unavailable, instead of silently falling back to `gateway.remote.*` credentials in local mode. (#42672) Thanks @joshavant. +- Commands/config writes: enforce `configWrites` against both the originating account and the targeted account scope for `/config` and config-backed `/allowlist` edits, blocking sibling-account mutations while preserving gateway `operator.admin` flows. Thanks @tdjackey for reporting. +- Security/system.run: fail closed for approval-backed interpreter/runtime commands when OpenClaw cannot bind exactly one concrete local file operand, while extending best-effort direct-file binding to additional runtime forms. Thanks @tdjackey for reporting. +- Gateway/session reset auth: split conversation `/new` and `/reset` handling away from the admin-only `sessions.reset` control-plane RPC so write-scoped gateway callers can no longer reach the privileged reset path through `agent`. Thanks @tdjackey for reporting. +- Security/plugin runtime: stop unauthenticated plugin HTTP routes from inheriting synthetic admin gateway scopes when they call `runtime.subagent.*`, so admin-only methods like `sessions.delete` stay blocked without gateway auth. +- Security/nodes: treat the `nodes` agent tool as owner-only fallback policy so non-owner senders cannot reach paired-node approval or invoke paths through the shared tool set. +- Security/external content: treat whitespace-delimited `EXTERNAL UNTRUSTED CONTENT` boundary markers like underscore-delimited variants so prompt wrappers cannot bypass marker sanitization. (#35983) Thanks @urianpaul94. 
+- Telegram/exec approvals: reject `/approve` commands aimed at other bots, keep deterministic approval prompts visible when tool-result delivery fails, and stop resolved exact IDs from matching other pending approvals by prefix. (#37233) Thanks @huntharo. +- Subagents/authority: persist leaf vs orchestrator control scope at spawn time and route tool plus slash-command control through shared ownership checks, so leaf sessions cannot regain orchestration privileges after restore or flat-key lookups. Thanks @tdjackey. +- ACP/ACPX plugin: bump the bundled `acpx` pin to `0.1.16` so plugin-local installs and strict version checks match the latest published CLI. (#41975) Thanks @dutifulbob. - ACP/sessions.patch: allow `spawnedBy` and `spawnDepth` lineage fields on ACP session keys so `sessions_spawn` with `runtime: "acp"` no longer fails during child-session setup. Fixes #40971. (#40995) thanks @xaeon2026. - ACP/stop reason mapping: resolve gateway chat `state: "error"` completions as ACP `end_turn` instead of `refusal` so transient backend failures are not surfaced as deliberate refusals. (#41187) thanks @pejmanjohn. - ACP/setSessionMode: propagate gateway `sessions.patch` failures back to ACP clients so rejected mode changes no longer return silent success. (#41185) thanks @pejmanjohn. -- Agents/embedded logs: add structured, sanitized lifecycle and failover observation events so overload and provider failures are easier to tail and filter. (#41336) thanks @altaywtf. -- iOS/gateway foreground recovery: reconnect immediately on foreground return after stale background sockets are torn down, so the app no longer stays disconnected until a later wake path happens. (#41384) Thanks @mbelinky. -- Cron/subagent followup: do not misclassify empty or `NO_REPLY` cron responses as interim acknowledgements that need a rerun, so deliberately silent cron jobs are no longer retried. (#41383) thanks @jackal092927. 
-- Auth/cooldowns: reset expired auth-profile cooldown error counters before computing the next backoff so stale on-disk counters do not re-escalate into long cooldown loops after expiry. (#41028) thanks @zerone0x. -- Gateway/node pending drain followup: keep `hasMore` true when the deferred baseline status item still needs delivery, and avoid allocating empty pending-work state for drain-only nodes with no queued work. (#41429) Thanks @mbelinky. - ACP/bridge mode: reject unsupported per-session MCP server setup and propagate rejected session-mode changes so IDE clients see explicit bridge limitations instead of silent success. (#41424) Thanks @mbelinky. - ACP/session UX: replay stored user and assistant text on `loadSession`, expose Gateway-backed session controls and metadata, and emit approximate session usage updates so IDE clients restore context more faithfully. (#41425) Thanks @mbelinky. - ACP/tool streaming: enrich `tool_call` and `tool_call_update` events with best-effort text content and file-location hints so IDE clients can follow bridge tool activity more naturally. (#41442) Thanks @mbelinky. - ACP/runtime attachments: forward normalized inbound image attachments into ACP runtime turns so ACPX sessions can preserve image prompt content on the runtime path. (#41427) Thanks @mbelinky. - ACP/regressions: add gateway RPC coverage for ACP lineage patching, ACPX runtime coverage for image prompt serialization, and an operator smoke-test procedure for live ACP spawn verification. (#41456) Thanks @mbelinky. -- Agents/billing recovery: probe single-provider billing cooldowns on the existing throttle so topping up credits can recover without a manual gateway restart. (#41422) thanks @altaywtf. - ACP/follow-up hardening: make session restore and prompt completion degrade gracefully on transcript/update failures, enforce bounded tool-location traversal, and skip non-image ACPX turns the runtime cannot serialize. (#41464) Thanks @mbelinky. 
-- Agents/fallback observability: add structured, sanitized model-fallback decision and auth-profile failure-state events with correlated run IDs so cooldown probes and failover paths are easier to trace in logs. (#41337) thanks @altaywtf. -- Protocol/Swift model sync: regenerate pending node work Swift bindings after the landed `node.pending.*` schema additions so generated protocol artifacts are consistent again. (#41477) Thanks @mbelinky. -- Discord/reply chunking: resolve the effective `maxLinesPerMessage` config across live reply paths and preserve `chunkMode` in the fast send path so long Discord replies no longer split unexpectedly at the default 17-line limit. (#40133) thanks @rbutera. -- Logging/probe observations: suppress structured embedded and model-fallback probe warnings on the console without hiding error or fatal output. (#41338) thanks @altaywtf. -- Agents/fallback: treat HTTP 499 responses as transient in both raw-text and structured failover paths so Anthropic-style client-closed overload responses trigger model fallback reliably. (#41468) thanks @zeroasterisk. +- ACP/sessions_spawn: implicitly stream `mode="run"` ACP spawns to parent only for eligible subagent orchestrator sessions (heartbeat `target: "last"` with a usable session-local route), restoring parent progress relays without thread binding. (#42404) Thanks @davidguttman. +- ACP/main session aliases: canonicalize `main` before ACP session lookup so restarted ACP main sessions rehydrate instead of failing closed with `Session is not ACP-enabled: main`. (#43285, fixes #25692) - Plugins/context-engine model auth: expose `runtime.modelAuth` and plugin-sdk auth helpers so plugins can resolve provider/model API keys through the normal auth pipeline. (#41090) thanks @xinhuagu. -- CLI/memory teardown: close cached memory search/index managers in the one-shot CLI shutdown path so watcher-backed memory caches no longer keep completed CLI runs alive after output finishes. 
(#40389) thanks @Julbarth. -- Tools/web search: treat Brave `llm-context` grounding snippets as plain strings so `web_search` no longer returns empty snippet arrays in LLM Context mode. (#41387) thanks @zheliu2. -- Telegram/exec approvals: reject `/approve` commands aimed at other bots, keep deterministic approval prompts visible when tool-result delivery fails, and stop resolved exact IDs from matching other pending approvals by prefix. (#37233) Thanks @huntharo. -- Control UI/Sessions: restore single-column session table collapse on narrow viewport or container widths by moving the responsive table override next to the base grid rule and enabling inline-size container queries. (#12175) Thanks @benjipeng. -- Telegram/final preview delivery: split active preview lifecycle from cleanup retention so missing archived preview edits avoid duplicate fallback sends without clearing the live preview or blocking later in-place finalization. (#41662) thanks @hougangdev. +- Hooks/plugin context parity followup: pass `trigger` and `channelId` through embedded `llm_input`, `agent_end`, and `llm_output` hook contexts so plugins receive the same agent metadata across hook phases. (#42362) Thanks @zhoulf1006. +- Plugins/global hook runner: harden singleton state handling so shared global hook runner reuse does not leak or corrupt runner state across executions. (#40184) Thanks @vincentkoc. +- Context engine/tests: add bundled-registry regression coverage for cross-chunk resolution, plugin-sdk re-exports, and concurrent chunk registration. (#40460) thanks @dsantoreis. +- Agents/embedded runner: bound compaction retry waiting and drain embedded runs during SIGUSR1 restart so session lanes recover instead of staying blocked behind compaction. (#40324) thanks @cgdusek. +- Agents/embedded logs: add structured, sanitized lifecycle and failover observation events so overload and provider failures are easier to tail and filter. (#41336) thanks @altaywtf. 
+- Agents/embedded overload logs: include the failing model and provider in error-path console output, with lifecycle regression coverage for the rendered and sanitized `consoleMessage`. (#41236) thanks @jiarung. +- Agents/fallback observability: add structured, sanitized model-fallback decision and auth-profile failure-state events with correlated run IDs so cooldown probes and failover paths are easier to trace in logs. (#41337) thanks @altaywtf. +- Logging/probe observations: suppress structured embedded and model-fallback probe warnings on the console without hiding error or fatal output. (#41338) thanks @altaywtf. +- Agents/context-engine compaction: guard thrown engine-owned overflow compaction attempts and fire compaction hooks for `ownsCompaction` engines so overflow recovery no longer crashes and plugin subscribers still observe compact runs. (#41361) thanks @davidrudduck. +- Gateway/node pending drain followup: keep `hasMore` true when the deferred baseline status item still needs delivery, and avoid allocating empty pending-work state for drain-only nodes with no queued work. (#41429) Thanks @mbelinky. +- Protocol/Swift model sync: regenerate pending node work Swift bindings after the landed `node.pending.*` schema additions so generated protocol artifacts are consistent again. (#41477) Thanks @mbelinky. +- Cron/subagent followup: do not misclassify empty or `NO_REPLY` cron responses as interim acknowledgements that need a rerun, so deliberately silent cron jobs are no longer retried. (#41383) thanks @jackal092927. - Cron/state errors: record `lastErrorReason` in cron job state and keep the gateway schema aligned with the full failover-reason set, including regression coverage for protocol conformance. (#14382) thanks @futuremind2026. -- Tools/web search: recover OpenRouter Perplexity citation extraction from `message.annotations` when chat-completions responses omit top-level citations. (#40881) Thanks @laurieluo. 
-- Security/external content: treat whitespace-delimited `EXTERNAL UNTRUSTED CONTENT` boundary markers like underscore-delimited variants so prompt wrappers cannot bypass marker sanitization. (#35983) Thanks @urianpaul94. -- Telegram/network env-proxy: apply configured transport policy to proxied HTTPS dispatchers as well as direct `NO_PROXY` bypasses, so resolver-scoped IPv4 fallback and network settings work consistently for env-proxied Telegram traffic. (#40740) Thanks @sircrumpet. -- Agents/memory flush: forward `memoryFlushWritePath` through `runEmbeddedPiAgent` so memory-triggered flush turns keep the append-only write guard without aborting before tool setup. Follows up on #38574. (#41761) Thanks @frankekn. +- Browser/Browserbase 429 handling: surface stable no-retry rate-limit guidance without buffering discarded HTTP 429 response bodies from remote browser services. (#40491) thanks @mvanhorn. - CI/CodeQL Swift toolchain: select Xcode 26.1 before installing Swift build tools so the CodeQL Swift job uses Swift tools 6.2 on `macos-latest`. (#41787) thanks @BunsDev. - Sandbox/subagents: pass the real configured workspace through `sessions_spawn` inheritance when a parent agent runs in a copied-workspace sandbox, so child `/agent` mounts point at the configured workspace instead of the parent sandbox copy. (#40757) Thanks @dsantoreis. -- Mattermost/plugin send actions: normalize direct `replyTo` fallback handling so threaded plugin sends trim blank IDs and reuse the correct reply target again. (#41176) Thanks @hnykda. -- MS Teams/allowlist resolution: use the General channel conversation ID as the resolved team key (with Graph GUID fallback) so Bot Framework runtime `channelData.team.id` matching works for team and team/channel allowlist entries. (#41838) Thanks @BradGroux. 
-- Mattermost/Markdown formatting: preserve first-line indentation when stripping bot mentions so nested list items and indented code blocks keep their structure, and render Mattermost tables natively by default instead of fenced-code fallback. (#18655) thanks @echo931. - Agents/fallback cooldown probing: cap cooldown-bypass probing to one attempt per provider per fallback run so multi-model same-provider cooldown chains can continue to cross-provider fallbacks instead of repeatedly stalling on duplicate cooldown probes. (#41711) Thanks @cgdusek. - Telegram/direct delivery: bridge direct delivery sends to internal `message:sent` hooks so internal hook listeners observe successful Telegram deliveries. (#40185) Thanks @vincentkoc. -- Plugins/global hook runner: harden singleton state handling so shared global hook runner reuse does not leak or corrupt runner state across executions. (#40184) Thanks @vincentkoc. -- Agents/fallback: recognize Poe `402 You've used up your points!` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#42278) Thanks @CryUshio. -- Telegram/outbound HTML sends: chunk long HTML-mode messages, preserve plain-text fallback and silent-delivery params across retries, and cut over to plain text when HTML chunk planning cannot safely preserve the full message. (#42240) thanks @obviyus. -- Agents/embedded overload logs: include the failing model and provider in error-path console output, with lifecycle regression coverage for the rendered and sanitized `consoleMessage`. (#41236) thanks @jiarung. -- Agents/failover: treat Gemini `MALFORMED_RESPONSE` stop reasons as retryable timeouts so preview-model enum drift falls back cleanly instead of crashing the run, without also reclassifying malformed function-call errors. (#42292) Thanks @jnMetaCode. 
-- Discord/Telegram outbound runtime config: thread runtime-resolved config through Discord and Telegram send paths so SecretRef-based credentials stay resolved during message delivery. (#42352) Thanks @joshavant. -- Secrets/SecretRef: reject exec SecretRef traversal ids across schema, runtime, and gateway. (#42370) Thanks @joshavant. -- Telegram/docs: clarify that `channels.telegram.groups` allowlists chats while `groupAllowFrom` allowlists users inside those chats, and point invalid negative chat IDs at the right config key. (#42451) Thanks @altaywtf. -- Models/Alibaba Cloud Model Studio: wire `MODELSTUDIO_API_KEY` through shared env auth, implicit provider discovery, and shell-env fallback so onboarding works outside the wizard too. (#40634) Thanks @pomelo-nwu. -- Subagents/authority: persist leaf vs orchestrator control scope at spawn time and route tool plus slash-command control through shared ownership checks, so leaf sessions cannot regain orchestration privileges after restore or flat-key lookups. Thanks @tdjackey. -- ACP/sessions_spawn: implicitly stream `mode="run"` ACP spawns to parent only for eligible subagent orchestrator sessions (heartbeat `target: "last"` with a usable session-local route), restoring parent progress relays without thread binding. (#42404) Thanks @davidguttman. -- Sessions/reset model recompute: clear stale runtime model, context-token, and system-prompt metadata before session resets recompute the replacement session, so resets pick up current defaults and explicit overrides instead of reusing old runtime model state. (#41173) thanks @PonyX-lab. -- Browser/Browserbase 429 handling: surface stable no-retry rate-limit guidance without buffering discarded HTTP 429 response bodies from remote browser services. (#40491) thanks @mvanhorn. -- Gateway/auth: allow one trusted device-token retry on shared-token mismatch with recovery hints to prevent reconnect churn during token drift. (#42507) Thanks @joshavant. 
-- Channels/allowlists: remove stale matcher caching so same-array allowlist edits and wildcard replacements take effect immediately, with regression coverage for in-place mutation cases. -- Gateway/auth: fail closed when local `gateway.auth.*` SecretRefs are configured but unavailable, instead of silently falling back to `gateway.remote.*` credentials in local mode. (#42672) Thanks @joshavant. -- Sandbox/fs bridge: pin staged writes to verified parent directories so temporary write files cannot materialize outside the allowed mount before atomic replace. Thanks @tdjackey. -- Commands/config writes: enforce `configWrites` against both the originating account and the targeted account scope for `/config` and config-backed `/allowlist` edits, blocking sibling-account mutations while preserving gateway `operator.admin` flows. Thanks @tdjackey for reporting. -- Security/system.run: fail closed for approval-backed interpreter/runtime commands when OpenClaw cannot bind exactly one concrete local file operand, while extending best-effort direct-file binding to additional runtime forms. Thanks @tdjackey for reporting. -- Gateway/session reset auth: split conversation `/new` and `/reset` handling away from the admin-only `sessions.reset` control-plane RPC so write-scoped gateway callers can no longer reach the privileged reset path through `agent`. Thanks @tdjackey for reporting. -- Telegram/final preview delivery followup: keep ambiguous missing-`message_id` finals only when a preview was already visible, while first-preview/no-id cases still fall back so Telegram users do not lose the final reply. (#41932) thanks @hougangdev. -- Agents/Azure OpenAI Responses: include the `azure-openai` provider in the Responses API store override so Azure OpenAI multi-turn cron jobs and embedded agent runs no longer fail with HTTP 400 "store is set to false". (#42934, fixes #42800) Thanks @ademczuk. 
-- Agents/context pruning: prune image-only tool results during soft-trim, align context-pruning coverage with the new tool-result contract, and extend historical image cleanup to the same screenshot-heavy session path. (#43045) Thanks @MoerAI. -- fix(models): guard optional model.input capability checks (#42096) thanks @andyliu -- Security/plugin runtime: stop unauthenticated plugin HTTP routes from inheriting synthetic admin gateway scopes when they call `runtime.subagent.*`, so admin-only methods like `sessions.delete` stay blocked without gateway auth. -- Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via `session_status`. -- Security/nodes: treat the `nodes` agent tool as owner-only fallback policy so non-owner senders cannot reach paired-node approval or invoke paths through the shared tool set. +- Dependencies: refresh workspace dependencies except the pinned Carbon package, and harden ACP session-config writes against non-string SDK values so newer ACP clients fail fast instead of tripping type/runtime mismatches. +- Telegram/polling restarts: clear bounded cleanup timeout handles after `runner.stop()` and `bot.stop()` settle so stall recovery no longer leaves stray 15-second timers behind on clean shutdown. (#43188) thanks @kyohwang. ## 2026.3.8 @@ -174,6 +226,8 @@ Docs: https://docs.openclaw.ai - SecretRef/models: harden custom/provider secret persistence and reuse across models.json snapshots, merge behavior, runtime headers, and secret audits. (#42554) Thanks @joshavant. - macOS/browser proxy: serialize non-GET browser proxy request bodies through `AnyCodable.foundationValue` so nested JSON bodies no longer crash the macOS app with `Invalid type in JSON write (__SwiftValue)`. (#43069) Thanks @Effet. 
- CLI/skills tables: keep terminal table borders aligned for wide graphemes, use full reported terminal width, and switch a few ambiguous skill icons to Terminal-safe emoji so `openclaw skills` renders more consistently in Terminal.app and iTerm. Thanks @vincentkoc. +- Memory/Gemini: normalize returned Gemini embeddings across direct query, direct batch, and async batch paths so memory search uses consistent vector handling for Gemini too. (#43409) Thanks @gumadeiras. +- Agents/failover: recognize additional serialized network errno strings plus `EHOSTDOWN` and `EPIPE` structured codes so transient transport failures trigger timeout failover more reliably. (#42830) Thanks @jnMetaCode. ## 2026.3.7 @@ -534,6 +588,7 @@ Docs: https://docs.openclaw.ai - Browser/config schema: accept `browser.profiles.*.driver: "openclaw"` while preserving legacy `"clawd"` compatibility in validated config. (#39374; based on #35621) Thanks @gambletan and @ingyukoh. - Memory flush/bootstrap file protection: restrict memory-flush runs to append-only `read`/`write` tools and route host-side memory appends through root-enforced safe file handles so flush turns cannot overwrite bootstrap files via `exec` or unsafe raw rewrites. (#38574) Thanks @frankekn. - Mattermost/DM media uploads: resolve bare 26-character Mattermost IDs user-first for direct messages so media sends no longer fail with `403 Forbidden` when targets are configured as unprefixed user IDs. (#29925) Thanks @teconomix. +- Voice-call/OpenAI TTS config parity: add missing `speed`, `instructions`, and `baseUrl` fields to the OpenAI TTS config schema and gate `instructions` to supported models so voice-call overrides validate and route cleanly through core TTS. (#39226) Thanks @ademczuk. ## 2026.3.2 @@ -1041,6 +1096,7 @@ Docs: https://docs.openclaw.ai - Browser/Navigate: resolve the correct `targetId` in navigate responses after renderer swaps. (#25326) Thanks @stone-jin and @vincentkoc. 
- FS/Sandbox workspace boundaries: add a dedicated `outside-workspace` safe-open error code for root-escape checks, and propagate specific outside-workspace messages across edit/browser/media consumers instead of generic not-found/invalid-path fallbacks. (#29715) Thanks @YuzuruS. - Diagnostics/Stuck session signal: add configurable stuck-session warning threshold via `diagnostics.stuckSessionWarnMs` (default 120000ms) to reduce false-positive warnings on long multi-tool turns. (#31032) +- Agents/error classification: check billing errors before context overflow heuristics in the agent runner catch block so spend-limit and quota errors show the billing-specific message instead of being misclassified as "Context overflow: prompt too large". (#40409) Thanks @ademczuk. ## 2026.2.26 @@ -4013,6 +4069,7 @@ Thanks @AlexMikhalev, @CoreyH, @John-Rood, @KrauseFx, @MaudeBot, @Nachx639, @Nic - Gateway/Daemon/Doctor: atomic config writes; repair gateway service entrypoint + install switches; non-interactive legacy migrations; systemd unit alignment + KillMode=process; node bridge keepalive/pings; Launch at Login persistence; bundle MoltbotKit resources + Swift 6.2 compat dylib; relay version check + remove smoke test; regen Swift GatewayModels + keep agent provider string; cron jobId alias + channel alias migration + main session key normalization; heartbeat Telegram accountId resolution; avoid WhatsApp fallback for internal runs; gateway listener error wording; serveBaseUrl param; honor gateway --dev; fix wide-area discovery updates; align agents.defaults schema; provider account metadata in daemon status; refresh Carbon patch for gateway fixes; restore doctor prompter initialValue handling. 
- Control UI/TUI: persist per-session verbose off + hide tool cards; logs tab opens at bottom; relative asset paths + landing cleanup; session labels lookup/persistence; stop pinning main session in recents; start logs at bottom; TUI status bar refresh + timeout handling + hide reasoning label when off. - Onboarding/Configure: QuickStart single-select provider picker; avoid Codex CLI false-expiry warnings; clarify WhatsApp owner prompt; fix Minimax hosted onboarding (agents.defaults + msteams heartbeat target); remove configure Control UI prompt; honor gateway --dev flag. +- Agent loop: guard overflow compaction throws and restore compaction hooks for engine-owned context engines. (#41361) Thanks @davidrudduck. ### Maintenance diff --git a/apps/android/app/build.gradle.kts b/apps/android/app/build.gradle.kts index 3b52bcf50de..32306780c72 100644 --- a/apps/android/app/build.gradle.kts +++ b/apps/android/app/build.gradle.kts @@ -63,8 +63,8 @@ android { applicationId = "ai.openclaw.app" minSdk = 31 targetSdk = 36 - versionCode = 202603090 - versionName = "2026.3.9" + versionCode = 202603110 + versionName = "2026.3.11" ndk { // Support all major ABIs — native libs are tiny (~47 KB per ABI) abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64") diff --git a/apps/ios/README.md b/apps/ios/README.md index 42c5a51dec2..6eb35a1d639 100644 --- a/apps/ios/README.md +++ b/apps/ios/README.md @@ -64,9 +64,9 @@ Release behavior: - Beta release uses canonical `ai.openclaw.client*` bundle IDs through a temporary generated xcconfig in `apps/ios/build/BetaRelease.xcconfig`. - The beta flow does not modify `apps/ios/.local-signing.xcconfig` or `apps/ios/LocalSigning.xcconfig`. - Root `package.json.version` is the only version source for iOS. 
-- A root version like `2026.3.9-beta.1` becomes: - - `CFBundleShortVersionString = 2026.3.9` - - `CFBundleVersion = next TestFlight build number for 2026.3.9` +- A root version like `2026.3.11-beta.1` becomes: + - `CFBundleShortVersionString = 2026.3.11` + - `CFBundleVersion = next TestFlight build number for 2026.3.11` Archive without upload: diff --git a/apps/ios/fastlane/Fastfile b/apps/ios/fastlane/Fastfile index 62d79f9995c..e7b286b4dd5 100644 --- a/apps/ios/fastlane/Fastfile +++ b/apps/ios/fastlane/Fastfile @@ -99,7 +99,7 @@ def normalize_release_version(raw_value) version = raw_value.to_s.strip.sub(/\Av/, "") UI.user_error!("Missing root package.json version.") unless env_present?(version) unless version.match?(/\A\d+\.\d+\.\d+(?:[.-]?beta[.-]\d+)?\z/i) - UI.user_error!("Invalid package.json version '#{raw_value}'. Expected 2026.3.9 or 2026.3.9-beta.1.") + UI.user_error!("Invalid package.json version '#{raw_value}'. Expected 2026.3.11 or 2026.3.11-beta.1.") end version diff --git a/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift b/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift index 2981a60bbf7..932c9fc5e61 100644 --- a/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift +++ b/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift @@ -17,6 +17,7 @@ enum HostEnvSecurityPolicy { "BASH_ENV", "ENV", "GIT_EXTERNAL_DIFF", + "GIT_EXEC_PATH", "SHELL", "SHELLOPTS", "PS4", diff --git a/apps/macos/Sources/OpenClaw/Resources/Info.plist b/apps/macos/Sources/OpenClaw/Resources/Info.plist index 706fe7029c4..0bfd45cc97b 100644 --- a/apps/macos/Sources/OpenClaw/Resources/Info.plist +++ b/apps/macos/Sources/OpenClaw/Resources/Info.plist @@ -15,9 +15,9 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.3.9 + 2026.3.11 CFBundleVersion - 202603080 + 202603110 CFBundleIconFile OpenClaw CFBundleURLTypes @@ -59,6 +59,8 @@ OpenClaw uses speech recognition to detect your Voice Wake trigger phrase. 
NSAppleEventsUsageDescription OpenClaw needs Automation (AppleScript) permission to drive Terminal and other apps for agent actions. + NSRemindersUsageDescription + OpenClaw can access Reminders when requested by the agent for the apple-reminders skill. NSAppTransportSecurity diff --git a/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift b/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift index ea85e6c1511..b743060f6c0 100644 --- a/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift +++ b/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift @@ -538,8 +538,6 @@ public struct AgentParams: Codable, Sendable { public let inputprovenance: [String: AnyCodable]? public let idempotencykey: String public let label: String? - public let spawnedby: String? - public let workspacedir: String? public init( message: String, @@ -566,9 +564,7 @@ public struct AgentParams: Codable, Sendable { internalevents: [[String: AnyCodable]]?, inputprovenance: [String: AnyCodable]?, idempotencykey: String, - label: String?, - spawnedby: String?, - workspacedir: String?) + label: String?) { self.message = message self.agentid = agentid @@ -595,8 +591,6 @@ public struct AgentParams: Codable, Sendable { self.inputprovenance = inputprovenance self.idempotencykey = idempotencykey self.label = label - self.spawnedby = spawnedby - self.workspacedir = workspacedir } private enum CodingKeys: String, CodingKey { @@ -625,8 +619,6 @@ public struct AgentParams: Codable, Sendable { case inputprovenance = "inputProvenance" case idempotencykey = "idempotencyKey" case label - case spawnedby = "spawnedBy" - case workspacedir = "workspaceDir" } } @@ -1336,6 +1328,7 @@ public struct SessionsPatchParams: Codable, Sendable { public let execnode: AnyCodable? public let model: AnyCodable? public let spawnedby: AnyCodable? + public let spawnedworkspacedir: AnyCodable? public let spawndepth: AnyCodable? public let subagentrole: AnyCodable? public let subagentcontrolscope: AnyCodable? 
@@ -1356,6 +1349,7 @@ public struct SessionsPatchParams: Codable, Sendable { execnode: AnyCodable?, model: AnyCodable?, spawnedby: AnyCodable?, + spawnedworkspacedir: AnyCodable?, spawndepth: AnyCodable?, subagentrole: AnyCodable?, subagentcontrolscope: AnyCodable?, @@ -1375,6 +1369,7 @@ public struct SessionsPatchParams: Codable, Sendable { self.execnode = execnode self.model = model self.spawnedby = spawnedby + self.spawnedworkspacedir = spawnedworkspacedir self.spawndepth = spawndepth self.subagentrole = subagentrole self.subagentcontrolscope = subagentcontrolscope @@ -1396,6 +1391,7 @@ public struct SessionsPatchParams: Codable, Sendable { case execnode = "execNode" case model case spawnedby = "spawnedBy" + case spawnedworkspacedir = "spawnedWorkspaceDir" case spawndepth = "spawnDepth" case subagentrole = "subagentRole" case subagentcontrolscope = "subagentControlScope" diff --git a/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift b/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift index ea85e6c1511..b743060f6c0 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift @@ -538,8 +538,6 @@ public struct AgentParams: Codable, Sendable { public let inputprovenance: [String: AnyCodable]? public let idempotencykey: String public let label: String? - public let spawnedby: String? - public let workspacedir: String? public init( message: String, @@ -566,9 +564,7 @@ public struct AgentParams: Codable, Sendable { internalevents: [[String: AnyCodable]]?, inputprovenance: [String: AnyCodable]?, idempotencykey: String, - label: String?, - spawnedby: String?, - workspacedir: String?) + label: String?) 
{ self.message = message self.agentid = agentid @@ -595,8 +591,6 @@ public struct AgentParams: Codable, Sendable { self.inputprovenance = inputprovenance self.idempotencykey = idempotencykey self.label = label - self.spawnedby = spawnedby - self.workspacedir = workspacedir } private enum CodingKeys: String, CodingKey { @@ -625,8 +619,6 @@ public struct AgentParams: Codable, Sendable { case inputprovenance = "inputProvenance" case idempotencykey = "idempotencyKey" case label - case spawnedby = "spawnedBy" - case workspacedir = "workspaceDir" } } @@ -1336,6 +1328,7 @@ public struct SessionsPatchParams: Codable, Sendable { public let execnode: AnyCodable? public let model: AnyCodable? public let spawnedby: AnyCodable? + public let spawnedworkspacedir: AnyCodable? public let spawndepth: AnyCodable? public let subagentrole: AnyCodable? public let subagentcontrolscope: AnyCodable? @@ -1356,6 +1349,7 @@ public struct SessionsPatchParams: Codable, Sendable { execnode: AnyCodable?, model: AnyCodable?, spawnedby: AnyCodable?, + spawnedworkspacedir: AnyCodable?, spawndepth: AnyCodable?, subagentrole: AnyCodable?, subagentcontrolscope: AnyCodable?, @@ -1375,6 +1369,7 @@ public struct SessionsPatchParams: Codable, Sendable { self.execnode = execnode self.model = model self.spawnedby = spawnedby + self.spawnedworkspacedir = spawnedworkspacedir self.spawndepth = spawndepth self.subagentrole = subagentrole self.subagentcontrolscope = subagentcontrolscope @@ -1396,6 +1391,7 @@ public struct SessionsPatchParams: Codable, Sendable { case execnode = "execNode" case model case spawnedby = "spawnedBy" + case spawnedworkspacedir = "spawnedWorkspaceDir" case spawndepth = "spawnDepth" case subagentrole = "subagentRole" case subagentcontrolscope = "subagentControlScope" diff --git a/docs/cli/agent.md b/docs/cli/agent.md index 93c8d04b41a..430bdf50743 100644 --- a/docs/cli/agent.md +++ b/docs/cli/agent.md @@ -25,4 +25,5 @@ openclaw agent --agent ops --message "Generate report" --deliver 
--reply-channel ## Notes -- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names or `secretref-managed`), not resolved secret plaintext. +- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names, `secretref-env:ENV_VAR_NAME`, or `secretref-managed`), not resolved secret plaintext. +- Marker writes are source-authoritative: OpenClaw persists markers from the active source config snapshot, not from resolved runtime secret values. diff --git a/docs/concepts/memory.md b/docs/concepts/memory.md index b3940945249..8ed755b394c 100644 --- a/docs/concepts/memory.md +++ b/docs/concepts/memory.md @@ -284,9 +284,46 @@ Notes: - Paths can be absolute or workspace-relative. - Directories are scanned recursively for `.md` files. -- Only Markdown files are indexed. +- By default, only Markdown files are indexed. +- If `memorySearch.multimodal.enabled = true`, OpenClaw also indexes supported image/audio files under `extraPaths` only. Default memory roots (`MEMORY.md`, `memory.md`, `memory/**/*.md`) stay Markdown-only. - Symlinks are ignored (files or directories). +### Multimodal memory files (Gemini image + audio) + +OpenClaw can index image and audio files from `memorySearch.extraPaths` when using Gemini embedding 2: + +```json5 +agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: ["assets/reference", "voice-notes"], + multimodal: { + enabled: true, + modalities: ["image", "audio"], // or ["all"] + maxFileBytes: 10000000 + }, + remote: { + apiKey: "YOUR_GEMINI_API_KEY" + } + } + } +} +``` + +Notes: + +- Multimodal memory is currently supported only for `gemini-embedding-2-preview`. +- Multimodal indexing applies only to files discovered through `memorySearch.extraPaths`. 
+- Supported modalities in this phase: image and audio. +- `memorySearch.fallback` must stay `"none"` while multimodal memory is enabled. +- Matching image/audio file bytes are uploaded to the configured Gemini embedding endpoint during indexing. +- Supported image extensions: `.jpg`, `.jpeg`, `.png`, `.webp`, `.gif`, `.heic`, `.heif`. +- Supported audio extensions: `.mp3`, `.wav`, `.ogg`, `.opus`, `.m4a`, `.aac`, `.flac`. +- Search queries remain text, but Gemini can compare those text queries against indexed image/audio embeddings. +- `memory_get` still reads Markdown only; binary files are searchable but not returned as raw file contents. + ### Gemini embeddings (native) Set the provider to `gemini` to use the Gemini embeddings API directly: @@ -310,6 +347,29 @@ Notes: - `remote.baseUrl` is optional (defaults to the Gemini API base URL). - `remote.headers` lets you add extra headers if needed. - Default model: `gemini-embedding-001`. +- `gemini-embedding-2-preview` is also supported: 8192 token limit and configurable dimensions (768 / 1536 / 3072, default 3072). + +#### Gemini Embedding 2 (preview) + +```json5 +agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + outputDimensionality: 3072, // optional: 768, 1536, or 3072 (default) + remote: { + apiKey: "YOUR_GEMINI_API_KEY" + } + } + } +} +``` + +> **⚠️ Re-index required:** Switching from `gemini-embedding-001` (768 dimensions) +> to `gemini-embedding-2-preview` (3072 dimensions) changes the vector size. The same is true if you +> change `outputDimensionality` between 768, 1536, and 3072. +> OpenClaw will automatically reindex when it detects a model or dimension change. 
If you want to use a **custom OpenAI-compatible endpoint** (OpenRouter, vLLM, or a proxy), you can use the `remote` configuration with the OpenAI provider: diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md index 4f3d80b2420..549875c77b4 100644 --- a/docs/concepts/model-providers.md +++ b/docs/concepts/model-providers.md @@ -357,7 +357,7 @@ Ollama is a local LLM runtime that provides an OpenAI-compatible API: - Provider: `ollama` - Auth: None required (local server) - Example model: `ollama/llama3.3` -- Installation: [https://ollama.ai](https://ollama.ai) +- Installation: [https://ollama.com/download](https://ollama.com/download) ```bash # Install Ollama, then pull a model: @@ -372,7 +372,7 @@ ollama pull llama3.3 } ``` -Ollama is automatically detected when running locally at `http://127.0.0.1:11434/v1`. See [/providers/ollama](/providers/ollama) for model recommendations and custom configuration. +Ollama is detected locally at `http://127.0.0.1:11434` when you opt in with `OLLAMA_API_KEY`, and `openclaw onboard` can configure it directly as a first-class provider. See [/providers/ollama](/providers/ollama) for onboarding, cloud/local mode, and custom configuration. ### vLLM diff --git a/docs/concepts/models.md b/docs/concepts/models.md index f87eead821c..6323feef04e 100644 --- a/docs/concepts/models.md +++ b/docs/concepts/models.md @@ -207,7 +207,7 @@ mode, pass `--yes` to accept defaults. ## Models registry (`models.json`) Custom providers in `models.providers` are written into `models.json` under the -agent directory (default `~/.openclaw/agents//models.json`). This file +agent directory (default `~/.openclaw/agents//agent/models.json`). This file is merged by default unless `models.mode` is set to `replace`. Merge mode precedence for matching provider IDs: @@ -215,7 +215,9 @@ Merge mode precedence for matching provider IDs: - Non-empty `baseUrl` already present in the agent `models.json` wins. 
- Non-empty `apiKey` in the agent `models.json` wins only when that provider is not SecretRef-managed in current config/auth-profile context. - SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets. +- SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs). - Empty or missing agent `apiKey`/`baseUrl` fall back to config `models.providers`. - Other provider fields are refreshed from config and normalized catalog data. -This marker-based persistence applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`. +Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values. +This applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`. diff --git a/docs/gateway/configuration-reference.md b/docs/gateway/configuration-reference.md index 1e48f69d6f8..db5077aebcf 100644 --- a/docs/gateway/configuration-reference.md +++ b/docs/gateway/configuration-reference.md @@ -2014,9 +2014,11 @@ OpenClaw uses the pi-coding-agent model catalog. Add custom providers via `model - Non-empty agent `models.json` `baseUrl` values win. - Non-empty agent `apiKey` values win only when that provider is not SecretRef-managed in current config/auth-profile context. - SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets. + - SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs). 
- Empty or missing agent `apiKey`/`baseUrl` fall back to `models.providers` in config. - Matching model `contextWindow`/`maxTokens` use the higher value between explicit config and implicit catalog values. - Use `models.mode: "replace"` when you want config to fully rewrite `models.json`. + - Marker persistence is source-authoritative: markers are written from the active source config snapshot (pre-resolution), not from resolved runtime secret values. ### Provider field details diff --git a/docs/gateway/local-models.md b/docs/gateway/local-models.md index 8a07a827467..4059f988776 100644 --- a/docs/gateway/local-models.md +++ b/docs/gateway/local-models.md @@ -11,6 +11,8 @@ title: "Local Models" Local is doable, but OpenClaw expects large context + strong defenses against prompt injection. Small cards truncate context and leak safety. Aim high: **≥2 maxed-out Mac Studios or equivalent GPU rig (~$30k+)**. A single **24 GB** GPU works only for lighter prompts with higher latency. Use the **largest / full-size model variant you can run**; aggressively quantized or “small” checkpoints raise prompt-injection risk (see [Security](/gateway/security)). +If you want the lowest-friction local setup, start with [Ollama](/providers/ollama) and `openclaw onboard`. This page is the opinionated guide for higher-end local stacks and custom OpenAI-compatible local servers. + ## Recommended: LM Studio + MiniMax M2.5 (Responses API, full-size) Best current local stack. Load MiniMax M2.5 in LM Studio, enable the local server (default `http://127.0.0.1:1234`), and use Responses API to keep reasoning separate from final text. diff --git a/docs/help/faq.md b/docs/help/faq.md index 8b738b60fc2..453688c1c5f 100644 --- a/docs/help/faq.md +++ b/docs/help/faq.md @@ -2084,8 +2084,21 @@ More context: [Models](/concepts/models). ### Can I use selfhosted models llamacpp vLLM Ollama -Yes. If your local server exposes an OpenAI-compatible API, you can point a -custom provider at it. 
Ollama is supported directly and is the easiest path. +Yes. Ollama is the easiest path for local models. + +Quickest setup: + +1. Install Ollama from `https://ollama.com/download` +2. Pull a local model such as `ollama pull glm-4.7-flash` +3. If you want Ollama Cloud too, run `ollama signin` +4. Run `openclaw onboard` and choose `Ollama` +5. Pick `Local` or `Cloud + Local` + +Notes: + +- `Cloud + Local` gives you Ollama Cloud models plus your local Ollama models +- cloud models such as `kimi-k2.5:cloud` do not need a local pull +- for manual switching, use `openclaw models list` and `openclaw models set ollama/` Security note: smaller or heavily quantized models are more vulnerable to prompt injection. We strongly recommend **large models** for any bot that can use tools. diff --git a/docs/platforms/mac/release.md b/docs/platforms/mac/release.md index 180a52075ed..cd4052ac9dc 100644 --- a/docs/platforms/mac/release.md +++ b/docs/platforms/mac/release.md @@ -39,7 +39,7 @@ Notes: # Default is auto-derived from APP_VERSION when omitted. SKIP_NOTARIZE=1 \ BUNDLE_ID=ai.openclaw.mac \ -APP_VERSION=2026.3.9 \ +APP_VERSION=2026.3.11 \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-dist.sh @@ -47,10 +47,10 @@ scripts/package-mac-dist.sh # `package-mac-dist.sh` already creates the zip + DMG. # If you used `package-mac-app.sh` directly instead, create them manually: # If you want notarization/stapling in this step, use the NOTARIZE command below. 
-ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.3.9.zip +ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.3.11.zip # Optional: build a styled DMG for humans (drag to /Applications) -scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.9.dmg +scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.11.dmg # Recommended: build + notarize/staple zip + DMG # First, create a keychain profile once: @@ -58,13 +58,13 @@ scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.9.dmg # --apple-id "" --team-id "" --password "" NOTARIZE=1 NOTARYTOOL_PROFILE=openclaw-notary \ BUNDLE_ID=ai.openclaw.mac \ -APP_VERSION=2026.3.9 \ +APP_VERSION=2026.3.11 \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-dist.sh # Optional: ship dSYM alongside the release -ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.3.9.dSYM.zip +ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.3.11.dSYM.zip ``` ## Appcast entry @@ -72,7 +72,7 @@ ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenCl Use the release note generator so Sparkle renders formatted HTML notes: ```bash -SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.3.9.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml +SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.3.11.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml ``` Generates HTML release notes from `CHANGELOG.md` (via [`scripts/changelog-to-html.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/changelog-to-html.sh)) and embeds them in the appcast entry. 
@@ -80,7 +80,7 @@ Commit the updated `appcast.xml` alongside the release assets (zip + dSYM) when ## Publish & verify -- Upload `OpenClaw-2026.3.9.zip` (and `OpenClaw-2026.3.9.dSYM.zip`) to the GitHub release for tag `v2026.3.9`. +- Upload `OpenClaw-2026.3.11.zip` (and `OpenClaw-2026.3.11.dSYM.zip`) to the GitHub release for tag `v2026.3.11`. - Ensure the raw appcast URL matches the baked feed: `https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml`. - Sanity checks: - `curl -I https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml` returns 200. diff --git a/docs/platforms/raspberry-pi.md b/docs/platforms/raspberry-pi.md index e46076e869d..247bf757b91 100644 --- a/docs/platforms/raspberry-pi.md +++ b/docs/platforms/raspberry-pi.md @@ -153,30 +153,33 @@ sudo systemctl status openclaw journalctl -u openclaw -f ``` -## 9) Access the Dashboard +## 9) Access the OpenClaw Dashboard -Since the Pi is headless, use an SSH tunnel: +Replace `user@gateway-host` with your Pi username and hostname or IP address. + +On your computer, ask the Pi to print a fresh dashboard URL: ```bash -# From your laptop/desktop -ssh -L 18789:localhost:18789 user@gateway-host - -# Then open in browser -open http://localhost:18789 +ssh user@gateway-host 'openclaw dashboard --no-open' ``` -Or use Tailscale for always-on access: +The command prints `Dashboard URL:`. Depending on how `gateway.auth.token` +is configured, the URL may be a plain `http://127.0.0.1:18789/` link or one +that includes `#token=...`. + +In another terminal on your computer, create the SSH tunnel: ```bash -# On the Pi -curl -fsSL https://tailscale.com/install.sh | sh -sudo tailscale up - -# Update config -openclaw config set gateway.bind tailnet -sudo systemctl restart openclaw +ssh -N -L 18789:127.0.0.1:18789 user@gateway-host ``` +Then open the printed Dashboard URL in your local browser. 
+ +If the UI asks for auth, paste the token from `gateway.auth.token` +(or `OPENCLAW_GATEWAY_TOKEN`) into Control UI settings. + +For always-on remote access, see [Tailscale](/gateway/tailscale). + --- ## Performance Optimizations diff --git a/docs/providers/ollama.md b/docs/providers/ollama.md index b82f6411b68..abc41361ed0 100644 --- a/docs/providers/ollama.md +++ b/docs/providers/ollama.md @@ -8,7 +8,7 @@ title: "Ollama" # Ollama -Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. OpenClaw integrates with Ollama's native API (`/api/chat`), supporting streaming and tool calling, and can **auto-discover tool-capable models** when you opt in with `OLLAMA_API_KEY` (or an auth profile) and do not define an explicit `models.providers.ollama` entry. +Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. OpenClaw integrates with Ollama's native API (`/api/chat`), supports streaming and tool calling, and can auto-discover local Ollama models when you opt in with `OLLAMA_API_KEY` (or an auth profile) and do not define an explicit `models.providers.ollama` entry. **Remote Ollama users**: Do not use the `/v1` OpenAI-compatible URL (`http://host:11434/v1`) with OpenClaw. This breaks tool calling and models may output raw tool JSON as plain text. Use the native Ollama API URL instead: `baseUrl: "http://host:11434"` (no `/v1`). @@ -16,21 +16,40 @@ Ollama is a local LLM runtime that makes it easy to run open-source models on yo ## Quick start -1. Install Ollama: [https://ollama.ai](https://ollama.ai) +1. Install Ollama: [https://ollama.com/download](https://ollama.com/download) -2. Pull a model: +2. Pull a local model if you want local inference: ```bash +ollama pull glm-4.7-flash +# or ollama pull gpt-oss:20b # or ollama pull llama3.3 -# or -ollama pull qwen2.5-coder:32b -# or -ollama pull deepseek-r1:32b ``` -3. Enable Ollama for OpenClaw (any value works; Ollama doesn't require a real key): +3. 
If you want Ollama Cloud models too, sign in: + +```bash +ollama signin +``` + +4. Run onboarding and choose `Ollama`: + +```bash +openclaw onboard +``` + +- `Local`: local models only +- `Cloud + Local`: local models plus Ollama Cloud models +- Cloud models such as `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, and `glm-5:cloud` do **not** require a local `ollama pull` + +OpenClaw currently suggests: + +- local default: `glm-4.7-flash` +- cloud defaults: `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, `glm-5:cloud` + +5. If you prefer manual setup, enable Ollama for OpenClaw directly (any value works; Ollama doesn't require a real key): ```bash # Set environment variable @@ -40,13 +59,20 @@ export OLLAMA_API_KEY="ollama-local" openclaw config set models.providers.ollama.apiKey "ollama-local" ``` -4. Use Ollama models: +6. Inspect or switch models: + +```bash +openclaw models list +openclaw models set ollama/glm-4.7-flash +``` + +7. Or set the default in config: ```json5 { agents: { defaults: { - model: { primary: "ollama/gpt-oss:20b" }, + model: { primary: "ollama/glm-4.7-flash" }, }, }, } @@ -56,14 +82,13 @@ openclaw config set models.providers.ollama.apiKey "ollama-local" When you set `OLLAMA_API_KEY` (or an auth profile) and **do not** define `models.providers.ollama`, OpenClaw discovers models from the local Ollama instance at `http://127.0.0.1:11434`: -- Queries `/api/tags` and `/api/show` -- Keeps only models that report `tools` capability -- Marks `reasoning` when the model reports `thinking` -- Reads `contextWindow` from `model_info[".context_length"]` when available -- Sets `maxTokens` to 10× the context window +- Queries `/api/tags` +- Uses best-effort `/api/show` lookups to read `contextWindow` when available +- Marks `reasoning` with a model-name heuristic (`r1`, `reasoning`, `think`) +- Sets `maxTokens` to the default Ollama max-token cap used by OpenClaw - Sets all costs to `0` -This avoids manual model entries while keeping the catalog aligned with Ollama's 
capabilities. +This avoids manual model entries while keeping the catalog aligned with the local Ollama instance. To see what models are available: @@ -98,7 +123,7 @@ Use explicit config when: - Ollama runs on another host/port. - You want to force specific context windows or model lists. -- You want to include models that do not report tool support. +- You want fully manual model definitions. ```json5 { @@ -170,7 +195,7 @@ Once configured, all your Ollama models are available: ### Reasoning models -OpenClaw marks models as reasoning-capable when Ollama reports `thinking` in `/api/show`: +OpenClaw treats models with names such as `deepseek-r1`, `reasoning`, or `think` as reasoning-capable by default: ```bash ollama pull deepseek-r1:32b @@ -230,7 +255,7 @@ When `api: "openai-completions"` is used with Ollama, OpenClaw injects `options. ### Context windows -For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, otherwise it defaults to `8192`. You can override `contextWindow` and `maxTokens` in explicit provider config. +For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, otherwise it falls back to the default Ollama context window used by OpenClaw. You can override `contextWindow` and `maxTokens` in explicit provider config. ## Troubleshooting @@ -250,16 +275,17 @@ curl http://localhost:11434/api/tags ### No models available -OpenClaw only auto-discovers models that report tool support. If your model isn't listed, either: +If your model is not listed, either: -- Pull a tool-capable model, or +- Pull the model locally, or - Define the model explicitly in `models.providers.ollama`. 
To add models: ```bash ollama list # See what's installed -ollama pull gpt-oss:20b # Pull a tool-capable model +ollama pull glm-4.7-flash +ollama pull gpt-oss:20b ollama pull llama3.3 # Or another model ``` diff --git a/docs/reference/secretref-credential-surface.md b/docs/reference/secretref-credential-surface.md index 2a5fc5a66ac..76eb4ec2ae1 100644 --- a/docs/reference/secretref-credential-surface.md +++ b/docs/reference/secretref-credential-surface.md @@ -101,6 +101,7 @@ Notes: - Plan entries target `profiles.*.key` / `profiles.*.token` and write sibling refs (`keyRef` / `tokenRef`). - Auth-profile refs are included in runtime resolution and audit coverage. - For SecretRef-managed model providers, generated `agents/*/agent/models.json` entries persist non-secret markers (not resolved secret values) for `apiKey`/header surfaces. +- Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values. - For web search: - In explicit provider mode (`tools.web.search.provider` set), only the selected provider key is active. - In auto mode (`tools.web.search.provider` unset), only the first provider key that resolves by precedence is active. diff --git a/docs/tools/llm-task.md b/docs/tools/llm-task.md index e6f574d078e..2626d3237e4 100644 --- a/docs/tools/llm-task.md +++ b/docs/tools/llm-task.md @@ -75,11 +75,14 @@ outside the list is rejected. - `schema` (object, optional JSON Schema) - `provider` (string, optional) - `model` (string, optional) +- `thinking` (string, optional) - `authProfileId` (string, optional) - `temperature` (number, optional) - `maxTokens` (number, optional) - `timeoutMs` (number, optional) +`thinking` accepts the standard OpenClaw reasoning presets, such as `low` or `medium`. 
+ ## Output Returns `details.json` containing the parsed JSON (and validates against @@ -90,6 +93,7 @@ Returns `details.json` containing the parsed JSON (and validates against ```lobster openclaw.invoke --tool llm-task --action json --args-json '{ "prompt": "Given the input email, return intent and draft.", + "thinking": "low", "input": { "subject": "Hello", "body": "Can you help?" diff --git a/docs/tools/lobster.md b/docs/tools/lobster.md index 65ff4f56dfb..5c8a47e4d62 100644 --- a/docs/tools/lobster.md +++ b/docs/tools/lobster.md @@ -106,6 +106,7 @@ Use it in a pipeline: ```lobster openclaw.invoke --tool llm-task --action json --args-json '{ "prompt": "Given the input email, return intent and draft.", + "thinking": "low", "input": { "subject": "Hello", "body": "Can you help?" }, "schema": { "type": "object", diff --git a/extensions/acpx/package.json b/extensions/acpx/package.json index 599d71579b0..ae4f7e695ef 100644 --- a/extensions/acpx/package.json +++ b/extensions/acpx/package.json @@ -1,10 +1,10 @@ { "name": "@openclaw/acpx", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw ACP runtime backend via acpx", "type": "module", "dependencies": { - "acpx": "0.1.16" + "acpx": "0.2.0" }, "openclaw": { "extensions": [ diff --git a/extensions/bluebubbles/package.json b/extensions/bluebubbles/package.json index 3c8605ef312..4918e9d3c02 100644 --- a/extensions/bluebubbles/package.json +++ b/extensions/bluebubbles/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/bluebubbles", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw BlueBubbles channel plugin", "type": "module", "dependencies": { diff --git a/extensions/bluebubbles/src/monitor-normalize.test.ts b/extensions/bluebubbles/src/monitor-normalize.test.ts index 3986909c259..3e06302593c 100644 --- a/extensions/bluebubbles/src/monitor-normalize.test.ts +++ b/extensions/bluebubbles/src/monitor-normalize.test.ts @@ -17,9 +17,28 @@ describe("normalizeWebhookMessage", () 
=> { expect(result).not.toBeNull(); expect(result?.senderId).toBe("+15551234567"); + expect(result?.senderIdExplicit).toBe(false); expect(result?.chatGuid).toBe("iMessage;-;+15551234567"); }); + it("marks explicit sender handles as explicit identity", () => { + const result = normalizeWebhookMessage({ + type: "new-message", + data: { + guid: "msg-explicit-1", + text: "hello", + isGroup: false, + isFromMe: true, + handle: { address: "+15551234567" }, + chatGuid: "iMessage;-;+15551234567", + }, + }); + + expect(result).not.toBeNull(); + expect(result?.senderId).toBe("+15551234567"); + expect(result?.senderIdExplicit).toBe(true); + }); + it("does not infer sender from group chatGuid when sender handle is missing", () => { const result = normalizeWebhookMessage({ type: "new-message", @@ -72,6 +91,7 @@ describe("normalizeWebhookReaction", () => { expect(result).not.toBeNull(); expect(result?.senderId).toBe("+15551234567"); + expect(result?.senderIdExplicit).toBe(false); expect(result?.messageId).toBe("p:0/msg-1"); expect(result?.action).toBe("added"); }); diff --git a/extensions/bluebubbles/src/monitor-normalize.ts b/extensions/bluebubbles/src/monitor-normalize.ts index 173ea9c24a6..83454602d4c 100644 --- a/extensions/bluebubbles/src/monitor-normalize.ts +++ b/extensions/bluebubbles/src/monitor-normalize.ts @@ -191,12 +191,13 @@ function readFirstChatRecord(message: Record): Record): { senderId: string; + senderIdExplicit: boolean; senderName?: string; } { const handleValue = message.handle ?? message.sender; const handle = asRecord(handleValue) ?? (typeof handleValue === "string" ? { address: handleValue } : null); - const senderId = + const senderIdRaw = readString(handle, "address") ?? readString(handle, "handle") ?? readString(handle, "id") ?? @@ -204,13 +205,18 @@ function extractSenderInfo(message: Record): { readString(message, "sender") ?? readString(message, "from") ?? 
""; + const senderId = senderIdRaw.trim(); const senderName = readString(handle, "displayName") ?? readString(handle, "name") ?? readString(message, "senderName") ?? undefined; - return { senderId, senderName }; + return { + senderId, + senderIdExplicit: Boolean(senderId), + senderName, + }; } function extractChatContext(message: Record): { @@ -441,6 +447,7 @@ export type BlueBubblesParticipant = { export type NormalizedWebhookMessage = { text: string; senderId: string; + senderIdExplicit: boolean; senderName?: string; messageId?: string; timestamp?: number; @@ -466,6 +473,7 @@ export type NormalizedWebhookReaction = { action: "added" | "removed"; emoji: string; senderId: string; + senderIdExplicit: boolean; senderName?: string; messageId: string; timestamp?: number; @@ -672,7 +680,7 @@ export function normalizeWebhookMessage( readString(message, "subject") ?? ""; - const { senderId, senderName } = extractSenderInfo(message); + const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message); const { chatGuid, chatIdentifier, chatId, chatName, isGroup, participants } = extractChatContext(message); const normalizedParticipants = normalizeParticipantList(participants); @@ -717,7 +725,7 @@ export function normalizeWebhookMessage( // BlueBubbles may omit `handle` in webhook payloads; for DM chat GUIDs we can still infer sender. const senderFallbackFromChatGuid = - !senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; + !senderIdExplicit && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || ""); if (!normalizedSender) { return null; @@ -727,6 +735,7 @@ export function normalizeWebhookMessage( return { text, senderId: normalizedSender, + senderIdExplicit, senderName, messageId, timestamp, @@ -777,7 +786,7 @@ export function normalizeWebhookReaction( const emoji = (associatedEmoji?.trim() || mapping?.emoji) ?? 
`reaction:${associatedType}`; const action = mapping?.action ?? resolveTapbackActionHint(associatedType) ?? "added"; - const { senderId, senderName } = extractSenderInfo(message); + const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message); const { chatGuid, chatIdentifier, chatId, chatName, isGroup } = extractChatContext(message); const fromMe = readBoolean(message, "isFromMe") ?? readBoolean(message, "is_from_me"); @@ -793,7 +802,7 @@ export function normalizeWebhookReaction( : undefined; const senderFallbackFromChatGuid = - !senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; + !senderIdExplicit && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || ""); if (!normalizedSender) { return null; @@ -803,6 +812,7 @@ export function normalizeWebhookReaction( action, emoji, senderId: normalizedSender, + senderIdExplicit, senderName, messageId: associatedGuid, timestamp, diff --git a/extensions/bluebubbles/src/monitor-processing.ts b/extensions/bluebubbles/src/monitor-processing.ts index 6eb2ab08bc0..9cf72ea1efd 100644 --- a/extensions/bluebubbles/src/monitor-processing.ts +++ b/extensions/bluebubbles/src/monitor-processing.ts @@ -38,6 +38,10 @@ import { resolveBlueBubblesMessageId, resolveReplyContextFromCache, } from "./monitor-reply-cache.js"; +import { + hasBlueBubblesSelfChatCopy, + rememberBlueBubblesSelfChatCopy, +} from "./monitor-self-chat-cache.js"; import type { BlueBubblesCoreRuntime, BlueBubblesRuntimeEnv, @@ -47,7 +51,12 @@ import { isBlueBubblesPrivateApiEnabled } from "./probe.js"; import { normalizeBlueBubblesReactionInput, sendBlueBubblesReaction } from "./reactions.js"; import { normalizeSecretInputString } from "./secret-input.js"; import { resolveChatGuidForTarget, sendMessageBlueBubbles } from "./send.js"; -import { formatBlueBubblesChatTarget, isAllowedBlueBubblesSender } from "./targets.js"; 
+import { + extractHandleFromChatGuid, + formatBlueBubblesChatTarget, + isAllowedBlueBubblesSender, + normalizeBlueBubblesHandle, +} from "./targets.js"; const DEFAULT_TEXT_LIMIT = 4000; const invalidAckReactions = new Set(); @@ -80,6 +89,19 @@ function normalizeSnippet(value: string): string { return stripMarkdown(value).replace(/\s+/g, " ").trim().toLowerCase(); } +function isBlueBubblesSelfChatMessage( + message: NormalizedWebhookMessage, + isGroup: boolean, +): boolean { + if (isGroup || !message.senderIdExplicit) { + return false; + } + const chatHandle = + (message.chatGuid ? extractHandleFromChatGuid(message.chatGuid) : null) ?? + normalizeBlueBubblesHandle(message.chatIdentifier ?? ""); + return Boolean(chatHandle) && chatHandle === message.senderId; +} + function prunePendingOutboundMessageIds(now = Date.now()): void { const cutoff = now - PENDING_OUTBOUND_MESSAGE_ID_TTL_MS; for (let i = pendingOutboundMessageIds.length - 1; i >= 0; i--) { @@ -453,8 +475,27 @@ export async function processMessage( ? `removed ${tapbackParsed.emoji} reaction` : `reacted with ${tapbackParsed.emoji}` : text || placeholder; + const isSelfChatMessage = isBlueBubblesSelfChatMessage(message, isGroup); + const selfChatLookup = { + accountId: account.accountId, + chatGuid: message.chatGuid, + chatIdentifier: message.chatIdentifier, + chatId: message.chatId, + senderId: message.senderId, + body: rawBody, + timestamp: message.timestamp, + }; const cacheMessageId = message.messageId?.trim(); + const confirmedOutboundCacheEntry = cacheMessageId + ? 
resolveReplyContextFromCache({ + accountId: account.accountId, + replyToId: cacheMessageId, + chatGuid: message.chatGuid, + chatIdentifier: message.chatIdentifier, + chatId: message.chatId, + }) + : null; let messageShortId: string | undefined; const cacheInboundMessage = () => { if (!cacheMessageId) { @@ -476,6 +517,12 @@ export async function processMessage( if (message.fromMe) { // Cache from-me messages so reply context can resolve sender/body. cacheInboundMessage(); + const confirmedAssistantOutbound = + confirmedOutboundCacheEntry?.senderLabel === "me" && + normalizeSnippet(confirmedOutboundCacheEntry.body ?? "") === normalizeSnippet(rawBody); + if (isSelfChatMessage && confirmedAssistantOutbound) { + rememberBlueBubblesSelfChatCopy(selfChatLookup); + } if (cacheMessageId) { const pending = consumePendingOutboundMessageId({ accountId: account.accountId, @@ -499,6 +546,11 @@ export async function processMessage( return; } + if (isSelfChatMessage && hasBlueBubblesSelfChatCopy(selfChatLookup)) { + logVerbose(core, runtime, `drop: reflected self-chat duplicate sender=${message.senderId}`); + return; + } + if (!rawBody) { logVerbose(core, runtime, `drop: empty text sender=${message.senderId}`); return; diff --git a/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts b/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts new file mode 100644 index 00000000000..3e843f6943d --- /dev/null +++ b/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts @@ -0,0 +1,190 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + hasBlueBubblesSelfChatCopy, + rememberBlueBubblesSelfChatCopy, + resetBlueBubblesSelfChatCache, +} from "./monitor-self-chat-cache.js"; + +describe("BlueBubbles self-chat cache", () => { + const directLookup = { + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + } as const; + + afterEach(() => { + resetBlueBubblesSelfChatCache(); + vi.useRealTimers(); + }); + + it("matches 
repeated lookups for the same scope, timestamp, and text", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: " hello\r\nworld ", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello\nworld", + timestamp: 123, + }), + ).toBe(true); + }); + + it("canonicalizes DM scope across chatIdentifier and chatGuid", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + accountId: "default", + chatIdentifier: "+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }), + ).toBe(true); + + resetBlueBubblesSelfChatCache(); + + rememberBlueBubblesSelfChatCopy({ + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + accountId: "default", + chatIdentifier: "+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }), + ).toBe(true); + }); + + it("expires entries after the ttl window", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello", + timestamp: 123, + }); + + vi.advanceTimersByTime(11_001); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello", + timestamp: 123, + }), + ).toBe(false); + }); + + it("evicts older entries when the cache exceeds its cap", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + for (let i = 0; i < 513; i += 1) { + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: `message-${i}`, + timestamp: i, + }); + 
vi.advanceTimersByTime(1_001); + } + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "message-0", + timestamp: 0, + }), + ).toBe(false); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "message-512", + timestamp: 512, + }), + ).toBe(true); + }); + + it("enforces the cache cap even when cleanup is throttled", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + for (let i = 0; i < 513; i += 1) { + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: `burst-${i}`, + timestamp: i, + }); + } + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "burst-0", + timestamp: 0, + }), + ).toBe(false); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "burst-512", + timestamp: 512, + }), + ).toBe(true); + }); + + it("does not collide long texts that differ only in the middle", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const prefix = "a".repeat(256); + const suffix = "b".repeat(256); + const longBodyA = `${prefix}${"x".repeat(300)}${suffix}`; + const longBodyB = `${prefix}${"y".repeat(300)}${suffix}`; + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyA, + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyA, + timestamp: 123, + }), + ).toBe(true); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyB, + timestamp: 123, + }), + ).toBe(false); + }); +}); diff --git a/extensions/bluebubbles/src/monitor-self-chat-cache.ts b/extensions/bluebubbles/src/monitor-self-chat-cache.ts new file mode 100644 index 00000000000..09d7167d769 --- /dev/null +++ b/extensions/bluebubbles/src/monitor-self-chat-cache.ts @@ -0,0 +1,127 @@ +import { createHash } from "node:crypto"; +import { extractHandleFromChatGuid, normalizeBlueBubblesHandle } from "./targets.js"; + +type SelfChatCacheKeyParts = { + accountId: string; + chatGuid?: string; + 
chatIdentifier?: string; + chatId?: number; + senderId: string; +}; + +type SelfChatLookup = SelfChatCacheKeyParts & { + body?: string; + timestamp?: number; +}; + +const SELF_CHAT_TTL_MS = 10_000; +const MAX_SELF_CHAT_CACHE_ENTRIES = 512; +const CLEANUP_MIN_INTERVAL_MS = 1_000; +const MAX_SELF_CHAT_BODY_CHARS = 32_768; +const cache = new Map(); +let lastCleanupAt = 0; + +function normalizeBody(body: string | undefined): string | null { + if (!body) { + return null; + } + const bounded = + body.length > MAX_SELF_CHAT_BODY_CHARS ? body.slice(0, MAX_SELF_CHAT_BODY_CHARS) : body; + const normalized = bounded.replace(/\r\n?/g, "\n").trim(); + return normalized ? normalized : null; +} + +function isUsableTimestamp(timestamp: number | undefined): timestamp is number { + return typeof timestamp === "number" && Number.isFinite(timestamp); +} + +function digestText(text: string): string { + return createHash("sha256").update(text).digest("base64url"); +} + +function trimOrUndefined(value?: string | null): string | undefined { + const trimmed = value?.trim(); + return trimmed ? trimmed : undefined; +} + +function resolveCanonicalChatTarget(parts: SelfChatCacheKeyParts): string | null { + const handleFromGuid = parts.chatGuid ? extractHandleFromChatGuid(parts.chatGuid) : null; + if (handleFromGuid) { + return handleFromGuid; + } + + const normalizedIdentifier = normalizeBlueBubblesHandle(parts.chatIdentifier ?? ""); + if (normalizedIdentifier) { + return normalizedIdentifier; + } + + return ( + trimOrUndefined(parts.chatGuid) ?? + trimOrUndefined(parts.chatIdentifier) ?? + (typeof parts.chatId === "number" ? String(parts.chatId) : null) + ); +} + +function buildScope(parts: SelfChatCacheKeyParts): string { + const target = resolveCanonicalChatTarget(parts) ?? 
parts.senderId; + return `${parts.accountId}:${target}`; +} + +function cleanupExpired(now = Date.now()): void { + if ( + lastCleanupAt !== 0 && + now >= lastCleanupAt && + now - lastCleanupAt < CLEANUP_MIN_INTERVAL_MS + ) { + return; + } + lastCleanupAt = now; + for (const [key, seenAt] of cache.entries()) { + if (now - seenAt > SELF_CHAT_TTL_MS) { + cache.delete(key); + } + } +} + +function enforceSizeCap(): void { + while (cache.size > MAX_SELF_CHAT_CACHE_ENTRIES) { + const oldestKey = cache.keys().next().value; + if (typeof oldestKey !== "string") { + break; + } + cache.delete(oldestKey); + } +} + +function buildKey(lookup: SelfChatLookup): string | null { + const body = normalizeBody(lookup.body); + if (!body || !isUsableTimestamp(lookup.timestamp)) { + return null; + } + return `${buildScope(lookup)}:${lookup.timestamp}:${digestText(body)}`; +} + +export function rememberBlueBubblesSelfChatCopy(lookup: SelfChatLookup): void { + cleanupExpired(); + const key = buildKey(lookup); + if (!key) { + return; + } + cache.set(key, Date.now()); + enforceSizeCap(); +} + +export function hasBlueBubblesSelfChatCopy(lookup: SelfChatLookup): boolean { + cleanupExpired(); + const key = buildKey(lookup); + if (!key) { + return false; + } + const seenAt = cache.get(key); + return typeof seenAt === "number" && Date.now() - seenAt <= SELF_CHAT_TTL_MS; +} + +export function resetBlueBubblesSelfChatCache(): void { + cache.clear(); + lastCleanupAt = 0; +} diff --git a/extensions/bluebubbles/src/monitor.test.ts b/extensions/bluebubbles/src/monitor.test.ts index b02019058b8..1ba2e27f0b6 100644 --- a/extensions/bluebubbles/src/monitor.test.ts +++ b/extensions/bluebubbles/src/monitor.test.ts @@ -5,6 +5,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { createPluginRuntimeMock } from "../../test-utils/plugin-runtime-mock.js"; import type { ResolvedBlueBubblesAccount } from "./accounts.js"; import { fetchBlueBubblesHistory } from "./history.js"; 
+import { resetBlueBubblesSelfChatCache } from "./monitor-self-chat-cache.js"; import { handleBlueBubblesWebhookRequest, registerBlueBubblesWebhookTarget, @@ -246,6 +247,7 @@ describe("BlueBubbles webhook monitor", () => { vi.clearAllMocks(); // Reset short ID state between tests for predictable behavior _resetBlueBubblesShortIdState(); + resetBlueBubblesSelfChatCache(); mockFetchBlueBubblesHistory.mockResolvedValue({ entries: [], resolved: true }); mockReadAllowFromStore.mockResolvedValue([]); mockUpsertPairingRequest.mockResolvedValue({ code: "TESTCODE", created: true }); @@ -259,6 +261,7 @@ describe("BlueBubbles webhook monitor", () => { afterEach(() => { unregister?.(); + vi.useRealTimers(); }); describe("DM pairing behavior vs allowFrom", () => { @@ -2676,5 +2679,449 @@ describe("BlueBubbles webhook monitor", () => { expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); }); + + it("drops reflected self-chat duplicates after a confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + const { sendMessageBlueBubbles } = await import("./send.js"); + vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "msg-self-1" }); + + mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => { + await params.dispatcherOptions.deliver({ text: "replying now" }, { kind: "final" }); + return EMPTY_DISPATCH_RESULT; + }); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const inboundPayload = { + type: "new-message", + data: { + text: "hello", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-0", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await 
handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const fromMePayload = { + type: "new-message", + data: { + text: "replying now", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "replying now", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); + }); + + it("does not drop inbound messages when no fromMe self-chat copy was seen", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const inboundPayload = { + type: "new-message", + data: { + text: "genuinely new message", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-inbound-1", + chatGuid: "iMessage;-;+15551234567", + date: Date.now(), + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", 
inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not drop reflected copies after the self-chat cache TTL expires", async () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "ttl me", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-ttl-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await vi.runAllTimersAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + vi.advanceTimersByTime(10_001); + + const reflectedPayload = { + type: "new-message", + data: { + text: "ttl me", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-ttl-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await vi.runAllTimersAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not cache regular fromMe DMs as self-chat reflections", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = 
registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "shared text", + handle: { address: "+15557654321" }, + isGroup: false, + isFromMe: true, + guid: "msg-normal-fromme", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const inboundPayload = { + type: "new-message", + data: { + text: "shared text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-normal-inbound", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not drop user-authored self-chat prompts without a confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "user-authored self prompt", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-user-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", 
fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "user-authored self prompt", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-user-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not treat a pending text-only match as confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + const { sendMessageBlueBubbles } = await import("./send.js"); + vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "ok" }); + + mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => { + await params.dispatcherOptions.deliver({ text: "same text" }, { kind: "final" }); + return EMPTY_DISPATCH_RESULT; + }); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const inboundPayload = { + type: "new-message", + data: { + text: "hello", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-race-0", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + 
mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const fromMePayload = { + type: "new-message", + data: { + text: "same text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-race-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "same text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-race-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not treat chatGuid-inferred sender ids as self-chat evidence", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "shared inferred text", + handle: null, + isGroup: false, + isFromMe: true, + guid: "msg-inferred-fromme", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const inboundPayload = { + type: "new-message", + data: { + text: "shared inferred text", + handle: { 
address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-inferred-inbound", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); }); }); diff --git a/extensions/copilot-proxy/package.json b/extensions/copilot-proxy/package.json index e060ddd67f1..56f6c1085ee 100644 --- a/extensions/copilot-proxy/package.json +++ b/extensions/copilot-proxy/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/copilot-proxy", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Copilot Proxy provider plugin", "type": "module", diff --git a/extensions/diagnostics-otel/package.json b/extensions/diagnostics-otel/package.json index 29c9b0ac79b..91aea1e9256 100644 --- a/extensions/diagnostics-otel/package.json +++ b/extensions/diagnostics-otel/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/diagnostics-otel", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw diagnostics OpenTelemetry exporter", "type": "module", "dependencies": { diff --git a/extensions/diffs/package.json b/extensions/diffs/package.json index b685f985108..c9e30cee333 100644 --- a/extensions/diffs/package.json +++ b/extensions/diffs/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/diffs", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw diff viewer plugin", "type": "module", diff --git a/extensions/discord/package.json b/extensions/discord/package.json index f30f10ade51..7f291bd1c7a 100644 --- a/extensions/discord/package.json +++ b/extensions/discord/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/discord", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Discord channel plugin", "type": "module", "openclaw": { 
diff --git a/extensions/feishu/package.json b/extensions/feishu/package.json index fc38816e1bd..116f15f08d2 100644 --- a/extensions/feishu/package.json +++ b/extensions/feishu/package.json @@ -1,12 +1,12 @@ { "name": "@openclaw/feishu", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Feishu/Lark channel plugin (community maintained by @m1heng)", "type": "module", "dependencies": { "@larksuiteoapi/node-sdk": "^1.59.0", "@sinclair/typebox": "0.34.48", - "https-proxy-agent": "^7.0.6", + "https-proxy-agent": "^8.0.0", "zod": "^4.3.6" }, "openclaw": { diff --git a/extensions/google-gemini-cli-auth/package.json b/extensions/google-gemini-cli-auth/package.json index 2ab1c6a6ca8..7a84f58020a 100644 --- a/extensions/google-gemini-cli-auth/package.json +++ b/extensions/google-gemini-cli-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/google-gemini-cli-auth", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Gemini CLI OAuth provider plugin", "type": "module", diff --git a/extensions/googlechat/package.json b/extensions/googlechat/package.json index 61128b78032..2b9eee3932e 100644 --- a/extensions/googlechat/package.json +++ b/extensions/googlechat/package.json @@ -1,15 +1,12 @@ { "name": "@openclaw/googlechat", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Google Chat channel plugin", "type": "module", "dependencies": { "google-auth-library": "^10.6.1" }, - "devDependencies": { - "openclaw": "workspace:*" - }, "peerDependencies": { "openclaw": ">=2026.3.7" }, diff --git a/extensions/imessage/package.json b/extensions/imessage/package.json index 3f38e01efe1..8add26a2fe7 100644 --- a/extensions/imessage/package.json +++ b/extensions/imessage/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/imessage", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw iMessage channel plugin", "type": "module", diff --git 
a/extensions/irc/package.json b/extensions/irc/package.json index 34c7de1dcfb..e6e9bdfe6b4 100644 --- a/extensions/irc/package.json +++ b/extensions/irc/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/irc", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw IRC channel plugin", "type": "module", "dependencies": { diff --git a/extensions/line/package.json b/extensions/line/package.json index 9ec37f833e7..4f98b21c7a2 100644 --- a/extensions/line/package.json +++ b/extensions/line/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/line", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw LINE channel plugin", "type": "module", diff --git a/extensions/llm-task/README.md b/extensions/llm-task/README.md index d8e5dadc6fb..738208f3d60 100644 --- a/extensions/llm-task/README.md +++ b/extensions/llm-task/README.md @@ -69,6 +69,7 @@ outside the list is rejected. - `schema` (object, optional JSON Schema) - `provider` (string, optional) - `model` (string, optional) +- `thinking` (string, optional) - `authProfileId` (string, optional) - `temperature` (number, optional) - `maxTokens` (number, optional) diff --git a/extensions/llm-task/package.json b/extensions/llm-task/package.json index 8a74b2ead7e..bf63c9b28fc 100644 --- a/extensions/llm-task/package.json +++ b/extensions/llm-task/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/llm-task", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw JSON-only LLM task plugin", "type": "module", diff --git a/extensions/llm-task/src/llm-task-tool.test.ts b/extensions/llm-task/src/llm-task-tool.test.ts index fea135e8be5..fc9f0e07215 100644 --- a/extensions/llm-task/src/llm-task-tool.test.ts +++ b/extensions/llm-task/src/llm-task-tool.test.ts @@ -109,6 +109,59 @@ describe("llm-task tool (json-only)", () => { expect(call.model).toBe("claude-4-sonnet"); }); + it("passes thinking override to embedded runner", async () => { + // 
oxlint-disable-next-line typescript/no-explicit-any + (runEmbeddedPiAgent as any).mockResolvedValueOnce({ + meta: {}, + payloads: [{ text: JSON.stringify({ ok: true }) }], + }); + const tool = createLlmTaskTool(fakeApi()); + await tool.execute("id", { prompt: "x", thinking: "high" }); + // oxlint-disable-next-line typescript/no-explicit-any + const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; + expect(call.thinkLevel).toBe("high"); + }); + + it("normalizes thinking aliases", async () => { + // oxlint-disable-next-line typescript/no-explicit-any + (runEmbeddedPiAgent as any).mockResolvedValueOnce({ + meta: {}, + payloads: [{ text: JSON.stringify({ ok: true }) }], + }); + const tool = createLlmTaskTool(fakeApi()); + await tool.execute("id", { prompt: "x", thinking: "on" }); + // oxlint-disable-next-line typescript/no-explicit-any + const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; + expect(call.thinkLevel).toBe("low"); + }); + + it("throws on invalid thinking level", async () => { + const tool = createLlmTaskTool(fakeApi()); + await expect(tool.execute("id", { prompt: "x", thinking: "banana" })).rejects.toThrow( + /invalid thinking level/i, + ); + }); + + it("throws on unsupported xhigh thinking level", async () => { + const tool = createLlmTaskTool(fakeApi()); + await expect(tool.execute("id", { prompt: "x", thinking: "xhigh" })).rejects.toThrow( + /only supported/i, + ); + }); + + it("does not pass thinkLevel when thinking is omitted", async () => { + // oxlint-disable-next-line typescript/no-explicit-any + (runEmbeddedPiAgent as any).mockResolvedValueOnce({ + meta: {}, + payloads: [{ text: JSON.stringify({ ok: true }) }], + }); + const tool = createLlmTaskTool(fakeApi()); + await tool.execute("id", { prompt: "x" }); + // oxlint-disable-next-line typescript/no-explicit-any + const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; + expect(call.thinkLevel).toBeUndefined(); + }); + it("enforces allowedModels", async () => { // 
oxlint-disable-next-line typescript/no-explicit-any (runEmbeddedPiAgent as any).mockResolvedValueOnce({ diff --git a/extensions/llm-task/src/llm-task-tool.ts b/extensions/llm-task/src/llm-task-tool.ts index 3a2e42c7223..ff2037e534a 100644 --- a/extensions/llm-task/src/llm-task-tool.ts +++ b/extensions/llm-task/src/llm-task-tool.ts @@ -2,7 +2,13 @@ import fs from "node:fs/promises"; import path from "node:path"; import { Type } from "@sinclair/typebox"; import Ajv from "ajv"; -import { resolvePreferredOpenClawTmpDir } from "openclaw/plugin-sdk/llm-task"; +import { + formatThinkingLevels, + formatXHighModelHint, + normalizeThinkLevel, + resolvePreferredOpenClawTmpDir, + supportsXHighThinking, +} from "openclaw/plugin-sdk/llm-task"; // NOTE: This extension is intended to be bundled with OpenClaw. // When running from source (tests/dev), OpenClaw internals live under src/. // When running from a built install, internals live under dist/ (no src/ tree). @@ -86,6 +92,7 @@ export function createLlmTaskTool(api: OpenClawPluginApi) { Type.String({ description: "Provider override (e.g. openai-codex, anthropic)." }), ), model: Type.Optional(Type.String({ description: "Model id override." })), + thinking: Type.Optional(Type.String({ description: "Thinking level override." })), authProfileId: Type.Optional(Type.String({ description: "Auth profile override." })), temperature: Type.Optional(Type.Number({ description: "Best-effort temperature override." })), maxTokens: Type.Optional(Type.Number({ description: "Best-effort maxTokens override." })), @@ -144,6 +151,18 @@ export function createLlmTaskTool(api: OpenClawPluginApi) { ); } + const thinkingRaw = + typeof params.thinking === "string" && params.thinking.trim() ? params.thinking : undefined; + const thinkLevel = thinkingRaw ? normalizeThinkLevel(thinkingRaw) : undefined; + if (thinkingRaw && !thinkLevel) { + throw new Error( + `Invalid thinking level "${thinkingRaw}". 
Use one of: ${formatThinkingLevels(provider, model)}.`, + ); + } + if (thinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) { + throw new Error(`Thinking level "xhigh" is only supported for ${formatXHighModelHint()}.`); + } + const timeoutMs = (typeof params.timeoutMs === "number" && params.timeoutMs > 0 ? params.timeoutMs @@ -204,6 +223,7 @@ export function createLlmTaskTool(api: OpenClawPluginApi) { model, authProfileId, authProfileIdSource: authProfileId ? "user" : "auto", + thinkLevel, streamParams, disableTools: true, }); diff --git a/extensions/lobster/package.json b/extensions/lobster/package.json index 4c137401fbb..c0c243b28c0 100644 --- a/extensions/lobster/package.json +++ b/extensions/lobster/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/lobster", - "version": "2026.3.9", + "version": "2026.3.11", "description": "Lobster workflow tool plugin (typed pipelines + resumable approvals)", "type": "module", "dependencies": { diff --git a/extensions/matrix/CHANGELOG.md b/extensions/matrix/CHANGELOG.md index a3b32a18c85..65f31b8445e 100644 --- a/extensions/matrix/CHANGELOG.md +++ b/extensions/matrix/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/matrix/package.json b/extensions/matrix/package.json index c1b5859b43e..8a132a9edf5 100644 --- a/extensions/matrix/package.json +++ b/extensions/matrix/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/matrix", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Matrix channel plugin", "type": "module", "dependencies": { diff --git a/extensions/mattermost/package.json b/extensions/mattermost/package.json index d532764db87..e16e158545e 100644 --- a/extensions/mattermost/package.json +++ b/extensions/mattermost/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/mattermost", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Mattermost channel plugin", "type": "module", "dependencies": { diff --git a/extensions/mattermost/src/channel.ts b/extensions/mattermost/src/channel.ts index 2dffaa6f3cf..42d167948a0 100644 --- a/extensions/mattermost/src/channel.ts +++ b/extensions/mattermost/src/channel.ts @@ -270,6 +270,16 @@ export const mattermostPlugin: ChannelPlugin = { streaming: { blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 }, }, + threading: { + resolveReplyToMode: ({ cfg, accountId }) => { + const account = resolveMattermostAccount({ cfg, accountId: accountId ?? 
"default" }); + const mode = account.config.replyToMode; + if (mode === "off" || mode === "first") { + return mode; + } + return "all"; + }, + }, reload: { configPrefixes: ["channels.mattermost"] }, configSchema: buildChannelConfigSchema(MattermostConfigSchema), config: { diff --git a/extensions/mattermost/src/config-schema.ts b/extensions/mattermost/src/config-schema.ts index 51d9bdbe33a..43dd7ede8d2 100644 --- a/extensions/mattermost/src/config-schema.ts +++ b/extensions/mattermost/src/config-schema.ts @@ -43,6 +43,7 @@ const MattermostAccountSchemaBase = z chunkMode: z.enum(["length", "newline"]).optional(), blockStreaming: z.boolean().optional(), blockStreamingCoalesce: BlockStreamingCoalesceSchema.optional(), + replyToMode: z.enum(["off", "first", "all"]).optional(), responsePrefix: z.string().optional(), actions: z .object({ diff --git a/extensions/mattermost/src/mattermost/monitor.test.ts b/extensions/mattermost/src/mattermost/monitor.test.ts index 1bd871714c4..d479909ac05 100644 --- a/extensions/mattermost/src/mattermost/monitor.test.ts +++ b/extensions/mattermost/src/mattermost/monitor.test.ts @@ -109,6 +109,29 @@ describe("mattermost mention gating", () => { }); }); +describe("resolveMattermostReplyRootId with block streaming payloads", () => { + it("uses threadRootId for block-streamed payloads with replyToId", () => { + // When block streaming sends a payload with replyToId from the threading + // mode, the deliver callback should still use the existing threadRootId. + expect( + resolveMattermostReplyRootId({ + threadRootId: "thread-root-1", + replyToId: "streamed-reply-id", + }), + ).toBe("thread-root-1"); + }); + + it("falls back to payload replyToId when no threadRootId in block streaming", () => { + // Top-level channel message: no threadRootId, payload carries the + // inbound post id as replyToId from the "all" threading mode. 
+ expect( + resolveMattermostReplyRootId({ + replyToId: "inbound-post-for-threading", + }), + ).toBe("inbound-post-for-threading"); + }); +}); + describe("resolveMattermostReplyRootId", () => { it("uses replyToId for top-level replies", () => { expect( diff --git a/extensions/mattermost/src/types.ts b/extensions/mattermost/src/types.ts index ba664baa894..86de9c1a714 100644 --- a/extensions/mattermost/src/types.ts +++ b/extensions/mattermost/src/types.ts @@ -52,6 +52,8 @@ export type MattermostAccountConfig = { blockStreaming?: boolean; /** Merge streamed block replies before sending. */ blockStreamingCoalesce?: BlockStreamingCoalesceConfig; + /** Control reply threading (off|first|all). Default: "all". */ + replyToMode?: "off" | "first" | "all"; /** Outbound response prefix override for this channel/account. */ responsePrefix?: string; /** Action toggles for this account. */ diff --git a/extensions/memory-core/package.json b/extensions/memory-core/package.json index 0af3fc45281..640c81e1539 100644 --- a/extensions/memory-core/package.json +++ b/extensions/memory-core/package.json @@ -1,12 +1,9 @@ { "name": "@openclaw/memory-core", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw core memory search plugin", "type": "module", - "devDependencies": { - "openclaw": "workspace:*" - }, "peerDependencies": { "openclaw": ">=2026.3.7" }, diff --git a/extensions/memory-lancedb/package.json b/extensions/memory-lancedb/package.json index abd920833ca..2a1b2a9994b 100644 --- a/extensions/memory-lancedb/package.json +++ b/extensions/memory-lancedb/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/memory-lancedb", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw LanceDB-backed long-term memory plugin with auto-recall/capture", "type": "module", diff --git a/extensions/minimax-portal-auth/package.json b/extensions/minimax-portal-auth/package.json index 9443f37d524..6e11b99212f 100644 --- 
a/extensions/minimax-portal-auth/package.json +++ b/extensions/minimax-portal-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/minimax-portal-auth", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw MiniMax Portal OAuth provider plugin", "type": "module", diff --git a/extensions/msteams/CHANGELOG.md b/extensions/msteams/CHANGELOG.md index 38d5614305c..bf82200cf59 100644 --- a/extensions/msteams/CHANGELOG.md +++ b/extensions/msteams/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/msteams/package.json b/extensions/msteams/package.json index c4453f82f6e..c159d091977 100644 --- a/extensions/msteams/package.json +++ b/extensions/msteams/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/msteams", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Microsoft Teams channel plugin", "type": "module", "dependencies": { diff --git a/extensions/nextcloud-talk/package.json b/extensions/nextcloud-talk/package.json index 96797d4b76e..9ef0a1daf09 100644 --- a/extensions/nextcloud-talk/package.json +++ b/extensions/nextcloud-talk/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nextcloud-talk", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Nextcloud Talk channel plugin", "type": "module", "dependencies": { diff --git a/extensions/nostr/CHANGELOG.md b/extensions/nostr/CHANGELOG.md index 3088efcc2bb..dcb4c18fdfa 100644 --- a/extensions/nostr/CHANGELOG.md +++ b/extensions/nostr/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/nostr/package.json b/extensions/nostr/package.json index dbee4bc09d7..f02b67b6837 100644 --- a/extensions/nostr/package.json +++ b/extensions/nostr/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nostr", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Nostr channel plugin for NIP-04 encrypted DMs", "type": "module", "dependencies": { diff --git a/extensions/open-prose/package.json b/extensions/open-prose/package.json index 240a2bbcb41..de86909f961 100644 --- a/extensions/open-prose/package.json +++ b/extensions/open-prose/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/open-prose", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenProse VM skill pack plugin (slash command + telemetry).", "type": "module", diff --git a/extensions/signal/package.json b/extensions/signal/package.json index 743c8212d31..6fd516cfd42 100644 --- a/extensions/signal/package.json +++ b/extensions/signal/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/signal", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Signal channel plugin", "type": "module", diff --git a/extensions/slack/package.json b/extensions/slack/package.json index 539541bdc6d..dbc4a4483c4 100644 --- a/extensions/slack/package.json +++ b/extensions/slack/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/slack", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Slack channel plugin", "type": "module", diff --git a/extensions/synology-chat/package.json b/extensions/synology-chat/package.json index 00503898817..0e7b4847494 100644 --- a/extensions/synology-chat/package.json +++ b/extensions/synology-chat/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/synology-chat", - "version": "2026.3.9", + "version": "2026.3.11", "description": "Synology Chat channel plugin for OpenClaw", "type": "module", "dependencies": { diff --git 
a/extensions/telegram/package.json b/extensions/telegram/package.json index 6602b46f2c8..8ffa3acf603 100644 --- a/extensions/telegram/package.json +++ b/extensions/telegram/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/telegram", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Telegram channel plugin", "type": "module", diff --git a/extensions/tlon/package.json b/extensions/tlon/package.json index 0cb79328d89..154e1dd6dbd 100644 --- a/extensions/tlon/package.json +++ b/extensions/tlon/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/tlon", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Tlon/Urbit channel plugin", "type": "module", "dependencies": { diff --git a/extensions/twitch/CHANGELOG.md b/extensions/twitch/CHANGELOG.md index 48160f427e8..844ef13dc6c 100644 --- a/extensions/twitch/CHANGELOG.md +++ b/extensions/twitch/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/twitch/package.json b/extensions/twitch/package.json index 5fbf49cc971..3bcdf9fe847 100644 --- a/extensions/twitch/package.json +++ b/extensions/twitch/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/twitch", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Twitch channel plugin", "type": "module", "dependencies": { diff --git a/extensions/voice-call/CHANGELOG.md b/extensions/voice-call/CHANGELOG.md index a8a4586116c..93aba26c868 100644 --- a/extensions/voice-call/CHANGELOG.md +++ b/extensions/voice-call/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/voice-call/openclaw.plugin.json b/extensions/voice-call/openclaw.plugin.json index d9a904c73eb..fef3ccc6ad9 100644 --- a/extensions/voice-call/openclaw.plugin.json +++ b/extensions/voice-call/openclaw.plugin.json @@ -522,11 +522,22 @@ "apiKey": { "type": "string" }, + "baseUrl": { + "type": "string" + }, "model": { "type": "string" }, "voice": { "type": "string" + }, + "speed": { + "type": "number", + "minimum": 0.25, + "maximum": 4.0 + }, + "instructions": { + "type": "string" } } }, diff --git a/extensions/voice-call/package.json b/extensions/voice-call/package.json index 420f8b41560..9bdadd3b226 100644 --- a/extensions/voice-call/package.json +++ b/extensions/voice-call/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/voice-call", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw voice-call plugin", "type": "module", "dependencies": { diff --git a/extensions/voice-call/src/providers/tts-openai.ts b/extensions/voice-call/src/providers/tts-openai.ts index a27030b4578..0a7c74d90ac 100644 --- a/extensions/voice-call/src/providers/tts-openai.ts +++ b/extensions/voice-call/src/providers/tts-openai.ts @@ -1,3 +1,4 @@ +import { resolveOpenAITtsInstructions } from "openclaw/plugin-sdk/voice-call"; import { pcmToMulaw } from "../telephony-audio.js"; /** @@ -110,9 +111,11 @@ export class OpenAITTSProvider { speed: this.speed, }; - // Add instructions if using gpt-4o-mini-tts model - const effectiveInstructions = trimToUndefined(instructions) ?? this.instructions; - if (effectiveInstructions && this.model.includes("gpt-4o-mini-tts")) { + const effectiveInstructions = resolveOpenAITtsInstructions( + this.model, + trimToUndefined(instructions) ?? 
this.instructions, + ); + if (effectiveInstructions) { body.instructions = effectiveInstructions; } diff --git a/extensions/whatsapp/package.json b/extensions/whatsapp/package.json index c87a5f26c2b..1a21be8eba9 100644 --- a/extensions/whatsapp/package.json +++ b/extensions/whatsapp/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/whatsapp", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw WhatsApp channel plugin", "type": "module", diff --git a/extensions/zalo/CHANGELOG.md b/extensions/zalo/CHANGELOG.md index 5ae5323034f..178f993e825 100644 --- a/extensions/zalo/CHANGELOG.md +++ b/extensions/zalo/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/zalo/package.json b/extensions/zalo/package.json index 6de5909736f..463887c68fe 100644 --- a/extensions/zalo/package.json +++ b/extensions/zalo/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/zalo", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Zalo channel plugin", "type": "module", "dependencies": { diff --git a/extensions/zalouser/CHANGELOG.md b/extensions/zalouser/CHANGELOG.md index 10c22ce4029..b5a0fbb6f57 100644 --- a/extensions/zalouser/CHANGELOG.md +++ b/extensions/zalouser/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/zalouser/package.json b/extensions/zalouser/package.json index 79bf5723d48..2b803b0b150 100644 --- a/extensions/zalouser/package.json +++ b/extensions/zalouser/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/zalouser", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Zalo Personal Account plugin via native zca-js integration", "type": "module", "dependencies": { diff --git a/extensions/zalouser/src/channel.sendpayload.test.ts b/extensions/zalouser/src/channel.sendpayload.test.ts index 0cef65f8c05..d388773e2e6 100644 --- a/extensions/zalouser/src/channel.sendpayload.test.ts +++ b/extensions/zalouser/src/channel.sendpayload.test.ts @@ -5,6 +5,7 @@ import { primeSendMock, } from "../../../src/test-utils/send-payload-contract.js"; import { zalouserPlugin } from "./channel.js"; +import { setZalouserRuntime } from "./runtime.js"; vi.mock("./send.js", () => ({ sendMessageZalouser: vi.fn().mockResolvedValue({ ok: true, messageId: "zlu-1" }), @@ -38,6 +39,14 @@ describe("zalouserPlugin outbound sendPayload", () => { let mockedSend: ReturnType>; beforeEach(async () => { + setZalouserRuntime({ + channel: { + text: { + resolveChunkMode: vi.fn(() => "length"), + resolveTextChunkLimit: vi.fn(() => 1200), + }, + }, + } as never); const mod = await import("./send.js"); mockedSend = vi.mocked(mod.sendMessageZalouser); mockedSend.mockClear(); @@ -55,7 +64,7 @@ describe("zalouserPlugin outbound sendPayload", () => { expect(mockedSend).toHaveBeenCalledWith( "1471383327500481391", "hello group", - expect.objectContaining({ isGroup: true }), + expect.objectContaining({ isGroup: true, textMode: "markdown" }), ); expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-g1" }); }); @@ -71,7 +80,7 @@ describe("zalouserPlugin outbound sendPayload", () => { expect(mockedSend).toHaveBeenCalledWith( "987654321", "hello", - expect.objectContaining({ isGroup: false }), + expect.objectContaining({ isGroup: 
false, textMode: "markdown" }), ); expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-d1" }); }); @@ -87,14 +96,37 @@ describe("zalouserPlugin outbound sendPayload", () => { expect(mockedSend).toHaveBeenCalledWith( "g-1471383327500481391", "hello native group", - expect.objectContaining({ isGroup: true }), + expect.objectContaining({ isGroup: true, textMode: "markdown" }), ); expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-g-native" }); }); + it("passes long markdown through once so formatting happens before chunking", async () => { + const text = `**${"a".repeat(2501)}**`; + mockedSend.mockResolvedValue({ ok: true, messageId: "zlu-code" }); + + const result = await zalouserPlugin.outbound!.sendPayload!({ + ...baseCtx({ text }), + to: "987654321", + }); + + expect(mockedSend).toHaveBeenCalledTimes(1); + expect(mockedSend).toHaveBeenCalledWith( + "987654321", + text, + expect.objectContaining({ + isGroup: false, + textMode: "markdown", + textChunkMode: "length", + textChunkLimit: 1200, + }), + ); + expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-code" }); + }); + installSendPayloadContractSuite({ channel: "zalouser", - chunking: { mode: "split", longTextLength: 3000, maxChunkLength: 2000 }, + chunking: { mode: "passthrough", longTextLength: 3000 }, createHarness: ({ payload, sendResults }) => { primeSendMock(mockedSend, { ok: true, messageId: "zlu-1" }, sendResults); return { diff --git a/extensions/zalouser/src/channel.test.ts b/extensions/zalouser/src/channel.test.ts index 231bcc8b2d3..5580ddfb2e1 100644 --- a/extensions/zalouser/src/channel.test.ts +++ b/extensions/zalouser/src/channel.test.ts @@ -1,5 +1,7 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; +import { chunkMarkdownText } from "../../../src/auto-reply/chunk.js"; import { zalouserPlugin } from "./channel.js"; +import { setZalouserRuntime } from "./runtime.js"; import { sendReactionZalouser } from "./send.js"; vi.mock("./send.js", 
async (importOriginal) => { @@ -13,6 +15,16 @@ vi.mock("./send.js", async (importOriginal) => { const mockSendReaction = vi.mocked(sendReactionZalouser); describe("zalouser outbound chunker", () => { + beforeEach(() => { + setZalouserRuntime({ + channel: { + text: { + chunkMarkdownText, + }, + }, + } as never); + }); + it("chunks without empty strings and respects limit", () => { const chunker = zalouserPlugin.outbound?.chunker; expect(chunker).toBeTypeOf("function"); diff --git a/extensions/zalouser/src/channel.ts b/extensions/zalouser/src/channel.ts index 2091124be6e..79e3ae7477b 100644 --- a/extensions/zalouser/src/channel.ts +++ b/extensions/zalouser/src/channel.ts @@ -20,7 +20,6 @@ import { buildBaseAccountStatusSnapshot, buildChannelConfigSchema, DEFAULT_ACCOUNT_ID, - chunkTextForOutbound, deleteAccountFromConfigSection, formatAllowFromLowercase, isNumericTargetId, @@ -43,6 +42,7 @@ import { resolveZalouserReactionMessageIds } from "./message-sid.js"; import { zalouserOnboardingAdapter } from "./onboarding.js"; import { probeZalouser } from "./probe.js"; import { writeQrDataUrlToTempFile } from "./qr-temp-file.js"; +import { getZalouserRuntime } from "./runtime.js"; import { sendMessageZalouser, sendReactionZalouser } from "./send.js"; import { collectZalouserStatusIssues } from "./status-issues.js"; import { @@ -166,6 +166,16 @@ function resolveZalouserQrProfile(accountId?: string | null): string { return normalized; } +function resolveZalouserOutboundChunkMode(cfg: OpenClawConfig, accountId?: string) { + return getZalouserRuntime().channel.text.resolveChunkMode(cfg, "zalouser", accountId); +} + +function resolveZalouserOutboundTextChunkLimit(cfg: OpenClawConfig, accountId?: string) { + return getZalouserRuntime().channel.text.resolveTextChunkLimit(cfg, "zalouser", accountId, { + fallbackLimit: zalouserDock.outbound?.textChunkLimit ?? 
2000, + }); +} + function mapUser(params: { id: string; name?: string | null; @@ -595,14 +605,11 @@ export const zalouserPlugin: ChannelPlugin = { }, outbound: { deliveryMode: "direct", - chunker: chunkTextForOutbound, - chunkerMode: "text", - textChunkLimit: 2000, + chunker: (text, limit) => getZalouserRuntime().channel.text.chunkMarkdownText(text, limit), + chunkerMode: "markdown", sendPayload: async (ctx) => await sendPayloadWithChunkedTextAndMedia({ ctx, - textChunkLimit: zalouserPlugin.outbound!.textChunkLimit, - chunker: zalouserPlugin.outbound!.chunker, sendText: (nextCtx) => zalouserPlugin.outbound!.sendText!(nextCtx), sendMedia: (nextCtx) => zalouserPlugin.outbound!.sendMedia!(nextCtx), emptyResult: { channel: "zalouser", messageId: "" }, @@ -613,6 +620,9 @@ export const zalouserPlugin: ChannelPlugin = { const result = await sendMessageZalouser(target.threadId, text, { profile: account.profile, isGroup: target.isGroup, + textMode: "markdown", + textChunkMode: resolveZalouserOutboundChunkMode(cfg, account.accountId), + textChunkLimit: resolveZalouserOutboundTextChunkLimit(cfg, account.accountId), }); return buildChannelSendResult("zalouser", result); }, @@ -624,6 +634,9 @@ export const zalouserPlugin: ChannelPlugin = { isGroup: target.isGroup, mediaUrl, mediaLocalRoots, + textMode: "markdown", + textChunkMode: resolveZalouserOutboundChunkMode(cfg, account.accountId), + textChunkLimit: resolveZalouserOutboundTextChunkLimit(cfg, account.accountId), }); return buildChannelSendResult("zalouser", result); }, diff --git a/extensions/zalouser/src/monitor.group-gating.test.ts b/extensions/zalouser/src/monitor.group-gating.test.ts index b3e38efecd6..49593f07072 100644 --- a/extensions/zalouser/src/monitor.group-gating.test.ts +++ b/extensions/zalouser/src/monitor.group-gating.test.ts @@ -51,6 +51,7 @@ function createRuntimeEnv(): RuntimeEnv { function installRuntime(params: { commandAuthorized?: boolean; + replyPayload?: { text?: string; mediaUrl?: string; 
mediaUrls?: string[] }; resolveCommandAuthorizedFromAuthorizers?: (params: { useAccessGroups: boolean; authorizers: Array<{ configured: boolean; allowed: boolean }>; @@ -58,6 +59,9 @@ function installRuntime(params: { }) { const dispatchReplyWithBufferedBlockDispatcher = vi.fn(async ({ dispatcherOptions, ctx }) => { await dispatcherOptions.typingCallbacks?.onReplyStart?.(); + if (params.replyPayload) { + await dispatcherOptions.deliver(params.replyPayload); + } return { queuedFinal: false, counts: { tool: 0, block: 0, final: 0 }, ctx }; }); const resolveCommandAuthorizedFromAuthorizers = vi.fn( @@ -166,7 +170,8 @@ function installRuntime(params: { text: { resolveMarkdownTableMode: vi.fn(() => "code"), convertMarkdownTables: vi.fn((text: string) => text), - resolveChunkMode: vi.fn(() => "line"), + resolveChunkMode: vi.fn(() => "length"), + resolveTextChunkLimit: vi.fn(() => 1200), chunkMarkdownTextWithMode: vi.fn((text: string) => [text]), }, }, @@ -304,6 +309,42 @@ describe("zalouser monitor group mention gating", () => { expect(callArg?.ctx?.WasMentioned).toBe(true); }); + it("passes long markdown replies through once so formatting happens before chunking", async () => { + const replyText = `**${"a".repeat(2501)}**`; + installRuntime({ + commandAuthorized: false, + replyPayload: { text: replyText }, + }); + + await __testing.processMessage({ + message: createDmMessage({ + content: "hello", + }), + account: { + ...createAccount(), + config: { + ...createAccount().config, + dmPolicy: "open", + }, + }, + config: createConfig(), + runtime: createRuntimeEnv(), + }); + + expect(sendMessageZalouserMock).toHaveBeenCalledTimes(1); + expect(sendMessageZalouserMock).toHaveBeenCalledWith( + "u-1", + replyText, + expect.objectContaining({ + isGroup: false, + profile: "default", + textMode: "markdown", + textChunkMode: "length", + textChunkLimit: 1200, + }), + ); + }); + it("uses commandContent for mention-prefixed control commands", async () => { const { 
dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: true, diff --git a/extensions/zalouser/src/monitor.ts b/extensions/zalouser/src/monitor.ts index 6590082e830..5329b22fa68 100644 --- a/extensions/zalouser/src/monitor.ts +++ b/extensions/zalouser/src/monitor.ts @@ -703,6 +703,10 @@ async function deliverZalouserReply(params: { params; const tableMode = params.tableMode ?? "code"; const text = core.channel.text.convertMarkdownTables(payload.text ?? "", tableMode); + const chunkMode = core.channel.text.resolveChunkMode(config, "zalouser", accountId); + const textChunkLimit = core.channel.text.resolveTextChunkLimit(config, "zalouser", accountId, { + fallbackLimit: ZALOUSER_TEXT_LIMIT, + }); const sentMedia = await sendMediaWithLeadingCaption({ mediaUrls: resolveOutboundMediaUrls(payload), @@ -713,6 +717,9 @@ async function deliverZalouserReply(params: { profile, mediaUrl, isGroup, + textMode: "markdown", + textChunkMode: chunkMode, + textChunkLimit, }); statusSink?.({ lastOutboundAt: Date.now() }); }, @@ -725,20 +732,17 @@ async function deliverZalouserReply(params: { } if (text) { - const chunkMode = core.channel.text.resolveChunkMode(config, "zalouser", accountId); - const chunks = core.channel.text.chunkMarkdownTextWithMode( - text, - ZALOUSER_TEXT_LIMIT, - chunkMode, - ); - logVerbose(core, runtime, `Sending ${chunks.length} text chunk(s) to ${chatId}`); - for (const chunk of chunks) { - try { - await sendMessageZalouser(chatId, chunk, { profile, isGroup }); - statusSink?.({ lastOutboundAt: Date.now() }); - } catch (err) { - runtime.error(`Zalouser message send failed: ${String(err)}`); - } + try { + await sendMessageZalouser(chatId, text, { + profile, + isGroup, + textMode: "markdown", + textChunkMode: chunkMode, + textChunkLimit, + }); + statusSink?.({ lastOutboundAt: Date.now() }); + } catch (err) { + runtime.error(`Zalouser message send failed: ${String(err)}`); } } } diff --git a/extensions/zalouser/src/send.test.ts 
b/extensions/zalouser/src/send.test.ts index 92b3cec25f2..cc920e6be7e 100644 --- a/extensions/zalouser/src/send.test.ts +++ b/extensions/zalouser/src/send.test.ts @@ -8,6 +8,7 @@ import { sendSeenZalouser, sendTypingZalouser, } from "./send.js"; +import { parseZalouserTextStyles } from "./text-styles.js"; import { sendZaloDeliveredEvent, sendZaloLink, @@ -16,6 +17,7 @@ import { sendZaloTextMessage, sendZaloTypingEvent, } from "./zalo-js.js"; +import { TextStyle } from "./zca-client.js"; vi.mock("./zalo-js.js", () => ({ sendZaloTextMessage: vi.fn(), @@ -43,36 +45,272 @@ describe("zalouser send helpers", () => { mockSendSeen.mockReset(); }); - it("delegates text send to JS transport", async () => { + it("keeps plain text literal by default", async () => { mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-1" }); - const result = await sendMessageZalouser("thread-1", "hello", { + const result = await sendMessageZalouser("thread-1", "**hello**", { profile: "default", isGroup: true, }); - expect(mockSendText).toHaveBeenCalledWith("thread-1", "hello", { - profile: "default", - isGroup: true, - }); + expect(mockSendText).toHaveBeenCalledWith( + "thread-1", + "**hello**", + expect.objectContaining({ + profile: "default", + isGroup: true, + }), + ); expect(result).toEqual({ ok: true, messageId: "mid-1" }); }); - it("maps image helper to media send", async () => { + it("formats markdown text when markdown mode is enabled", async () => { + mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-1b" }); + + await sendMessageZalouser("thread-1", "**hello**", { + profile: "default", + isGroup: true, + textMode: "markdown", + }); + + expect(mockSendText).toHaveBeenCalledWith( + "thread-1", + "hello", + expect.objectContaining({ + profile: "default", + isGroup: true, + textMode: "markdown", + textStyles: [{ start: 0, len: 5, st: TextStyle.Bold }], + }), + ); + }); + + it("formats image captions in markdown mode", async () => { 
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-2" }); await sendImageZalouser("thread-2", "https://example.com/a.png", { profile: "p2", - caption: "cap", + caption: "_cap_", isGroup: false, + textMode: "markdown", }); - expect(mockSendText).toHaveBeenCalledWith("thread-2", "cap", { + expect(mockSendText).toHaveBeenCalledWith( + "thread-2", + "cap", + expect.objectContaining({ + profile: "p2", + caption: undefined, + isGroup: false, + mediaUrl: "https://example.com/a.png", + textMode: "markdown", + textStyles: [{ start: 0, len: 3, st: TextStyle.Italic }], + }), + ); + }); + + it("does not keep the raw markdown caption as a media fallback after formatting", async () => { + mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-2b" }); + + await sendImageZalouser("thread-2", "https://example.com/a.png", { profile: "p2", - caption: "cap", + caption: "```\n```", isGroup: false, - mediaUrl: "https://example.com/a.png", + textMode: "markdown", }); + + expect(mockSendText).toHaveBeenCalledWith( + "thread-2", + "", + expect.objectContaining({ + profile: "p2", + caption: undefined, + isGroup: false, + mediaUrl: "https://example.com/a.png", + textMode: "markdown", + textStyles: undefined, + }), + ); + }); + + it("rechunks normalized markdown text before sending to avoid transport truncation", async () => { + const text = "\t".repeat(500) + "a".repeat(1500); + const formatted = parseZalouserTextStyles(text); + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2c-1" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2c-2" }); + + const result = await sendMessageZalouser("thread-2c", text, { + profile: "p2c", + isGroup: false, + textMode: "markdown", + }); + + expect(formatted.text.length).toBeGreaterThan(2000); + expect(mockSendText).toHaveBeenCalledTimes(2); + expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text); + expect(mockSendText.mock.calls.every((call) => (call[1] as string).length <= 
2000)).toBe(true); + expect(result).toEqual({ ok: true, messageId: "mid-2c-2" }); + }); + + it("preserves text styles when splitting long formatted markdown", async () => { + const text = `**${"a".repeat(2501)}**`; + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-1" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-2" }); + + const result = await sendMessageZalouser("thread-2d", text, { + profile: "p2d", + isGroup: false, + textMode: "markdown", + }); + + expect(mockSendText).toHaveBeenNthCalledWith( + 1, + "thread-2d", + "a".repeat(2000), + expect.objectContaining({ + profile: "p2d", + isGroup: false, + textMode: "markdown", + textStyles: [{ start: 0, len: 2000, st: TextStyle.Bold }], + }), + ); + expect(mockSendText).toHaveBeenNthCalledWith( + 2, + "thread-2d", + "a".repeat(501), + expect.objectContaining({ + profile: "p2d", + isGroup: false, + textMode: "markdown", + textStyles: [{ start: 0, len: 501, st: TextStyle.Bold }], + }), + ); + expect(result).toEqual({ ok: true, messageId: "mid-2d-2" }); + }); + + it("preserves formatted text and styles when newline chunk mode splits after parsing", async () => { + const text = `**${"a".repeat(1995)}**\n\nsecond paragraph`; + const formatted = parseZalouserTextStyles(text); + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-3" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-4" }); + + const result = await sendMessageZalouser("thread-2d-2", text, { + profile: "p2d-2", + isGroup: false, + textMode: "markdown", + textChunkMode: "newline", + }); + + expect(mockSendText).toHaveBeenCalledTimes(2); + expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text); + expect(mockSendText).toHaveBeenNthCalledWith( + 1, + "thread-2d-2", + `${"a".repeat(1995)}\n\n`, + expect.objectContaining({ + profile: "p2d-2", + isGroup: false, + textMode: "markdown", + textChunkMode: "newline", + textStyles: [{ start: 0, len: 1995, st: TextStyle.Bold }], + }), + 
); + expect(mockSendText).toHaveBeenNthCalledWith( + 2, + "thread-2d-2", + "second paragraph", + expect.objectContaining({ + profile: "p2d-2", + isGroup: false, + textMode: "markdown", + textChunkMode: "newline", + textStyles: undefined, + }), + ); + expect(result).toEqual({ ok: true, messageId: "mid-2d-4" }); + }); + + it("respects an explicit text chunk limit when splitting formatted markdown", async () => { + const text = `**${"a".repeat(1501)}**`; + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-5" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-6" }); + + const result = await sendMessageZalouser("thread-2d-3", text, { + profile: "p2d-3", + isGroup: false, + textMode: "markdown", + textChunkLimit: 1200, + } as never); + + expect(mockSendText).toHaveBeenCalledTimes(2); + expect(mockSendText).toHaveBeenNthCalledWith( + 1, + "thread-2d-3", + "a".repeat(1200), + expect.objectContaining({ + profile: "p2d-3", + isGroup: false, + textMode: "markdown", + textChunkLimit: 1200, + textStyles: [{ start: 0, len: 1200, st: TextStyle.Bold }], + }), + ); + expect(mockSendText).toHaveBeenNthCalledWith( + 2, + "thread-2d-3", + "a".repeat(301), + expect.objectContaining({ + profile: "p2d-3", + isGroup: false, + textMode: "markdown", + textChunkLimit: 1200, + textStyles: [{ start: 0, len: 301, st: TextStyle.Bold }], + }), + ); + expect(result).toEqual({ ok: true, messageId: "mid-2d-6" }); + }); + + it("sends overflow markdown captions as follow-up text after the media message", async () => { + const caption = "\t".repeat(500) + "a".repeat(1500); + const formatted = parseZalouserTextStyles(caption); + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2e-1" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2e-2" }); + + const result = await sendImageZalouser("thread-2e", "https://example.com/long.png", { + profile: "p2e", + caption, + isGroup: false, + textMode: "markdown", + }); + + 
expect(mockSendText).toHaveBeenCalledTimes(2); + expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text); + expect(mockSendText).toHaveBeenNthCalledWith( + 1, + "thread-2e", + expect.any(String), + expect.objectContaining({ + profile: "p2e", + caption: undefined, + isGroup: false, + mediaUrl: "https://example.com/long.png", + textMode: "markdown", + }), + ); + expect(mockSendText).toHaveBeenNthCalledWith( + 2, + "thread-2e", + expect.any(String), + expect.not.objectContaining({ + mediaUrl: "https://example.com/long.png", + }), + ); + expect(result).toEqual({ ok: true, messageId: "mid-2e-2" }); }); it("delegates link helper to JS transport", async () => { diff --git a/extensions/zalouser/src/send.ts b/extensions/zalouser/src/send.ts index 07ae1408bff..55ff17df636 100644 --- a/extensions/zalouser/src/send.ts +++ b/extensions/zalouser/src/send.ts @@ -1,3 +1,4 @@ +import { parseZalouserTextStyles } from "./text-styles.js"; import type { ZaloEventMessage, ZaloSendOptions, ZaloSendResult } from "./types.js"; import { sendZaloDeliveredEvent, @@ -7,16 +8,58 @@ import { sendZaloTextMessage, sendZaloTypingEvent, } from "./zalo-js.js"; +import { TextStyle } from "./zca-client.js"; export type ZalouserSendOptions = ZaloSendOptions; export type ZalouserSendResult = ZaloSendResult; +const ZALO_TEXT_LIMIT = 2000; +const DEFAULT_TEXT_CHUNK_MODE = "length"; + +type StyledTextChunk = { + text: string; + styles?: ZaloSendOptions["textStyles"]; +}; + +type TextChunkMode = NonNullable; + export async function sendMessageZalouser( threadId: string, text: string, options: ZalouserSendOptions = {}, ): Promise { - return await sendZaloTextMessage(threadId, text, options); + const prepared = + options.textMode === "markdown" + ? parseZalouserTextStyles(text) + : { text, styles: options.textStyles }; + const textChunkLimit = options.textChunkLimit ?? ZALO_TEXT_LIMIT; + const chunks = splitStyledText( + prepared.text, + (prepared.styles?.length ?? 0) > 0 ? 
prepared.styles : undefined, + textChunkLimit, + options.textChunkMode, + ); + + let lastResult: ZalouserSendResult | null = null; + for (const [index, chunk] of chunks.entries()) { + const chunkOptions = + index === 0 + ? { ...options, textStyles: chunk.styles } + : { + ...options, + caption: undefined, + mediaLocalRoots: undefined, + mediaUrl: undefined, + textStyles: chunk.styles, + }; + const result = await sendZaloTextMessage(threadId, chunk.text, chunkOptions); + if (!result.ok) { + return result; + } + lastResult = result; + } + + return lastResult ?? { ok: false, error: "No message content provided" }; } export async function sendImageZalouser( @@ -24,8 +67,9 @@ export async function sendImageZalouser( imageUrl: string, options: ZalouserSendOptions = {}, ): Promise { - return await sendZaloTextMessage(threadId, options.caption ?? "", { + return await sendMessageZalouser(threadId, options.caption ?? "", { ...options, + caption: undefined, mediaUrl: imageUrl, }); } @@ -85,3 +129,144 @@ export async function sendSeenZalouser(params: { }): Promise { await sendZaloSeenEvent(params); } + +function splitStyledText( + text: string, + styles: ZaloSendOptions["textStyles"], + limit: number, + mode: ZaloSendOptions["textChunkMode"], +): StyledTextChunk[] { + if (text.length === 0) { + return [{ text, styles: undefined }]; + } + + const chunks: StyledTextChunk[] = []; + for (const range of splitTextRanges(text, limit, mode ?? 
DEFAULT_TEXT_CHUNK_MODE)) { + const { start, end } = range; + chunks.push({ + text: text.slice(start, end), + styles: sliceTextStyles(styles, start, end), + }); + } + return chunks; +} + +function sliceTextStyles( + styles: ZaloSendOptions["textStyles"], + start: number, + end: number, +): ZaloSendOptions["textStyles"] { + if (!styles || styles.length === 0) { + return undefined; + } + + const chunkStyles = styles + .map((style) => { + const overlapStart = Math.max(style.start, start); + const overlapEnd = Math.min(style.start + style.len, end); + if (overlapEnd <= overlapStart) { + return null; + } + + if (style.st === TextStyle.Indent) { + return { + start: overlapStart - start, + len: overlapEnd - overlapStart, + st: style.st, + indentSize: style.indentSize, + }; + } + + return { + start: overlapStart - start, + len: overlapEnd - overlapStart, + st: style.st, + }; + }) + .filter((style): style is NonNullable => style !== null); + + return chunkStyles.length > 0 ? chunkStyles : undefined; +} + +function splitTextRanges( + text: string, + limit: number, + mode: TextChunkMode, +): Array<{ start: number; end: number }> { + if (mode === "newline") { + return splitTextRangesByPreferredBreaks(text, limit); + } + + const ranges: Array<{ start: number; end: number }> = []; + for (let start = 0; start < text.length; start += limit) { + ranges.push({ + start, + end: Math.min(text.length, start + limit), + }); + } + return ranges; +} + +function splitTextRangesByPreferredBreaks( + text: string, + limit: number, +): Array<{ start: number; end: number }> { + const ranges: Array<{ start: number; end: number }> = []; + let start = 0; + + while (start < text.length) { + const maxEnd = Math.min(text.length, start + limit); + let end = maxEnd; + if (maxEnd < text.length) { + end = + findParagraphBreak(text, start, maxEnd) ?? + findLastBreak(text, "\n", start, maxEnd) ?? + findLastWhitespaceBreak(text, start, maxEnd) ?? 
+ maxEnd; + } + + if (end <= start) { + end = maxEnd; + } + + ranges.push({ start, end }); + start = end; + } + + return ranges; +} + +function findParagraphBreak(text: string, start: number, end: number): number | undefined { + const slice = text.slice(start, end); + const matches = slice.matchAll(/\n[\t ]*\n+/g); + let lastMatch: RegExpMatchArray | undefined; + for (const match of matches) { + lastMatch = match; + } + if (!lastMatch || lastMatch.index === undefined) { + return undefined; + } + return start + lastMatch.index + lastMatch[0].length; +} + +function findLastBreak( + text: string, + marker: string, + start: number, + end: number, +): number | undefined { + const index = text.lastIndexOf(marker, end - 1); + if (index < start) { + return undefined; + } + return index + marker.length; +} + +function findLastWhitespaceBreak(text: string, start: number, end: number): number | undefined { + for (let index = end - 1; index > start; index -= 1) { + if (/\s/.test(text[index])) { + return index + 1; + } + } + return undefined; +} diff --git a/extensions/zalouser/src/text-styles.test.ts b/extensions/zalouser/src/text-styles.test.ts new file mode 100644 index 00000000000..01e6c2da86b --- /dev/null +++ b/extensions/zalouser/src/text-styles.test.ts @@ -0,0 +1,203 @@ +import { describe, expect, it } from "vitest"; +import { parseZalouserTextStyles } from "./text-styles.js"; +import { TextStyle } from "./zca-client.js"; + +describe("parseZalouserTextStyles", () => { + it("renders inline markdown emphasis as Zalo style ranges", () => { + expect(parseZalouserTextStyles("**bold** *italic* ~~strike~~")).toEqual({ + text: "bold italic strike", + styles: [ + { start: 0, len: 4, st: TextStyle.Bold }, + { start: 5, len: 6, st: TextStyle.Italic }, + { start: 12, len: 6, st: TextStyle.StrikeThrough }, + ], + }); + }); + + it("keeps inline code and plain math markers literal", () => { + expect(parseZalouserTextStyles("before `inline *code*` after\n2 * 3 * 4")).toEqual({ + text: 
"before `inline *code*` after\n2 * 3 * 4", + styles: [], + }); + }); + + it("preserves backslash escapes inside code spans and fenced code blocks", () => { + expect(parseZalouserTextStyles("before `\\*` after\n```ts\n\\*\\_\\\\\n```")).toEqual({ + text: "before `\\*` after\n\\*\\_\\\\", + styles: [], + }); + }); + + it("closes fenced code blocks when the input uses CRLF newlines", () => { + expect(parseZalouserTextStyles("```\r\n*code*\r\n```\r\n**after**")).toEqual({ + text: "*code*\nafter", + styles: [{ start: 7, len: 5, st: TextStyle.Bold }], + }); + }); + + it("maps headings, block quotes, and lists into line styles", () => { + expect(parseZalouserTextStyles(["# Title", "> quoted", " - nested"].join("\n"))).toEqual({ + text: "Title\nquoted\nnested", + styles: [ + { start: 0, len: 5, st: TextStyle.Bold }, + { start: 0, len: 5, st: TextStyle.Big }, + { start: 6, len: 6, st: TextStyle.Indent, indentSize: 1 }, + { start: 13, len: 6, st: TextStyle.UnorderedList }, + ], + }); + }); + + it("treats 1-3 leading spaces as markdown padding for headings and lists", () => { + expect(parseZalouserTextStyles(" # Title\n 1. 
item\n - bullet")).toEqual({ + text: "Title\nitem\nbullet", + styles: [ + { start: 0, len: 5, st: TextStyle.Bold }, + { start: 0, len: 5, st: TextStyle.Big }, + { start: 6, len: 4, st: TextStyle.OrderedList }, + { start: 11, len: 6, st: TextStyle.UnorderedList }, + ], + }); + }); + + it("strips fenced code markers and preserves leading indentation with nbsp", () => { + expect(parseZalouserTextStyles("```ts\n const x = 1\n\treturn x\n```")).toEqual({ + text: "\u00A0\u00A0const x = 1\n\u00A0\u00A0\u00A0\u00A0return x", + styles: [], + }); + }); + + it("treats tilde fences as literal code blocks", () => { + expect(parseZalouserTextStyles("~~~bash\n*cmd*\n~~~")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("treats fences indented under list items as literal code blocks", () => { + expect(parseZalouserTextStyles(" ```\n*cmd*\n ```")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("treats quoted backtick fences as literal code blocks", () => { + expect(parseZalouserTextStyles("> ```js\n> *cmd*\n> ```")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("treats quoted tilde fences as literal code blocks", () => { + expect(parseZalouserTextStyles("> ~~~\n> *cmd*\n> ~~~")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("preserves quote-prefixed lines inside normal fenced code blocks", () => { + expect(parseZalouserTextStyles("```\n> prompt\n```")).toEqual({ + text: "> prompt", + styles: [], + }); + }); + + it("does not treat quote-prefixed fence text inside code as a closing fence", () => { + expect(parseZalouserTextStyles("```\n> ```\n*still code*\n```")).toEqual({ + text: "> ```\n*still code*", + styles: [], + }); + }); + + it("treats indented blockquotes as quoted lines", () => { + expect(parseZalouserTextStyles(" > quoted")).toEqual({ + text: "quoted", + styles: [{ start: 0, len: 6, st: TextStyle.Indent, indentSize: 1 }], + }); + }); + + it("treats spaced nested blockquotes as deeper quoted lines", () => { + 
expect(parseZalouserTextStyles("> > quoted")).toEqual({ + text: "quoted", + styles: [{ start: 0, len: 6, st: TextStyle.Indent, indentSize: 2 }], + }); + }); + + it("treats indented quoted fences as literal code blocks", () => { + expect(parseZalouserTextStyles(" > ```\n > *cmd*\n > ```")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("treats spaced nested quoted fences as literal code blocks", () => { + expect(parseZalouserTextStyles("> > ```\n> > code\n> > ```")).toEqual({ + text: "code", + styles: [], + }); + }); + + it("preserves inner quote markers inside quoted fenced code blocks", () => { + expect(parseZalouserTextStyles("> ```\n>> prompt\n> ```")).toEqual({ + text: "> prompt", + styles: [], + }); + }); + + it("keeps quote indentation on heading lines", () => { + expect(parseZalouserTextStyles("> # Title")).toEqual({ + text: "Title", + styles: [ + { start: 0, len: 5, st: TextStyle.Bold }, + { start: 0, len: 5, st: TextStyle.Big }, + { start: 0, len: 5, st: TextStyle.Indent, indentSize: 1 }, + ], + }); + }); + + it("keeps unmatched fences literal", () => { + expect(parseZalouserTextStyles("```python")).toEqual({ + text: "```python", + styles: [], + }); + }); + + it("keeps unclosed fenced blocks literal until eof", () => { + expect(parseZalouserTextStyles("```python\n\\*not italic*\n_next_")).toEqual({ + text: "```python\n\\*not italic*\n_next_", + styles: [], + }); + }); + + it("supports nested markdown and tag styles regardless of order", () => { + expect(parseZalouserTextStyles("**{red}x{/red}** {red}**y**{/red}")).toEqual({ + text: "x y", + styles: [ + { start: 0, len: 1, st: TextStyle.Bold }, + { start: 0, len: 1, st: TextStyle.Red }, + { start: 2, len: 1, st: TextStyle.Red }, + { start: 2, len: 1, st: TextStyle.Bold }, + ], + }); + }); + + it("treats small text tags as normal text", () => { + expect(parseZalouserTextStyles("{small}tiny{/small}")).toEqual({ + text: "tiny", + styles: [], + }); + }); + + it("keeps escaped markers literal", () 
=> { + expect(parseZalouserTextStyles("\\*literal\\* \\{underline}tag{/underline}")).toEqual({ + text: "*literal* {underline}tag{/underline}", + styles: [], + }); + }); + + it("keeps indented code blocks literal", () => { + expect(parseZalouserTextStyles(" *cmd*")).toEqual({ + text: "\u00A0\u00A0\u00A0\u00A0*cmd*", + styles: [], + }); + }); +}); diff --git a/extensions/zalouser/src/text-styles.ts b/extensions/zalouser/src/text-styles.ts new file mode 100644 index 00000000000..cdfe8b492b5 --- /dev/null +++ b/extensions/zalouser/src/text-styles.ts @@ -0,0 +1,537 @@ +import { TextStyle, type Style } from "./zca-client.js"; + +type InlineStyle = (typeof TextStyle)[keyof typeof TextStyle]; + +type LineStyle = { + lineIndex: number; + style: InlineStyle; + indentSize?: number; +}; + +type Segment = { + text: string; + styles: InlineStyle[]; +}; + +type InlineMarker = { + pattern: RegExp; + extractText: (match: RegExpExecArray) => string; + resolveStyles?: (match: RegExpExecArray) => InlineStyle[]; + literal?: boolean; +}; + +type ResolvedInlineMatch = { + match: RegExpExecArray; + marker: InlineMarker; + styles: InlineStyle[]; + text: string; + priority: number; +}; + +type FenceMarker = { + char: "`" | "~"; + length: number; + indent: number; +}; + +type ActiveFence = FenceMarker & { + quoteIndent: number; +}; + +const TAG_STYLE_MAP: Record = { + red: TextStyle.Red, + orange: TextStyle.Orange, + yellow: TextStyle.Yellow, + green: TextStyle.Green, + small: null, + big: TextStyle.Big, + underline: TextStyle.Underline, +}; + +const INLINE_MARKERS: InlineMarker[] = [ + { + pattern: /`([^`\n]+)`/g, + extractText: (match) => match[0], + literal: true, + }, + { + pattern: /\\([*_~#\\{}>+\-`])/g, + extractText: (match) => match[1], + literal: true, + }, + { + pattern: new RegExp(`\\{(${Object.keys(TAG_STYLE_MAP).join("|")})\\}(.+?)\\{/\\1\\}`, "g"), + extractText: (match) => match[2], + resolveStyles: (match) => { + const style = TAG_STYLE_MAP[match[1]]; + return style ? 
[style] : []; + }, + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Bold, TextStyle.Italic], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Bold], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Bold], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.StrikeThrough], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Italic], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Italic], + }, +]; + +export function parseZalouserTextStyles(input: string): { text: string; styles: Style[] } { + const allStyles: Style[] = []; + + const escapeMap: string[] = []; + const lines = input.replace(/\r\n?/g, "\n").split("\n"); + const lineStyles: LineStyle[] = []; + const processedLines: string[] = []; + let activeFence: ActiveFence | null = null; + + for (let lineIndex = 0; lineIndex < lines.length; lineIndex += 1) { + const rawLine = lines[lineIndex]; + const { text: unquotedLine, indent: baseIndent } = stripQuotePrefix(rawLine); + + if (activeFence) { + const codeLine = + activeFence.quoteIndent > 0 + ? stripQuotePrefix(rawLine, activeFence.quoteIndent).text + : rawLine; + if (isClosingFence(codeLine, activeFence)) { + activeFence = null; + continue; + } + processedLines.push( + escapeLiteralText( + normalizeCodeBlockLeadingWhitespace(stripCodeFenceIndent(codeLine, activeFence.indent)), + escapeMap, + ), + ); + continue; + } + + let line = unquotedLine; + const openingFence = resolveOpeningFence(rawLine); + if (openingFence) { + const fenceLine = openingFence.quoteIndent > 0 ? 
unquotedLine : rawLine; + if (!hasClosingFence(lines, lineIndex + 1, openingFence)) { + processedLines.push(escapeLiteralText(fenceLine, escapeMap)); + activeFence = openingFence; + continue; + } + activeFence = openingFence; + continue; + } + + const outputLineIndex = processedLines.length; + if (isIndentedCodeBlockLine(line)) { + if (baseIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: baseIndent, + }); + } + processedLines.push(escapeLiteralText(normalizeCodeBlockLeadingWhitespace(line), escapeMap)); + continue; + } + + const { text: markdownLine, size: markdownPadding } = stripOptionalMarkdownPadding(line); + + const headingMatch = markdownLine.match(/^(#{1,4})\s(.*)$/); + if (headingMatch) { + const depth = headingMatch[1].length; + lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.Bold }); + if (depth === 1) { + lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.Big }); + } + if (baseIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: baseIndent, + }); + } + processedLines.push(headingMatch[2]); + continue; + } + + const indentMatch = markdownLine.match(/^(\s+)(.*)$/); + let indentLevel = 0; + let content = markdownLine; + if (indentMatch) { + indentLevel = clampIndent(indentMatch[1].length); + content = indentMatch[2]; + } + const totalIndent = Math.min(5, baseIndent + indentLevel); + + if (/^[-*+]\s\[[ xX]\]\s/.test(content)) { + if (totalIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: totalIndent, + }); + } + processedLines.push(content); + continue; + } + + const orderedListMatch = content.match(/^(\d+)\.\s(.*)$/); + if (orderedListMatch) { + if (totalIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: totalIndent, + }); + } + lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.OrderedList }); + 
processedLines.push(orderedListMatch[2]); + continue; + } + + const unorderedListMatch = content.match(/^[-*+]\s(.*)$/); + if (unorderedListMatch) { + if (totalIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: totalIndent, + }); + } + lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.UnorderedList }); + processedLines.push(unorderedListMatch[1]); + continue; + } + + if (markdownPadding > 0) { + if (baseIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: baseIndent, + }); + } + processedLines.push(line); + continue; + } + + if (totalIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: totalIndent, + }); + processedLines.push(content); + continue; + } + + processedLines.push(line); + } + + const segments = parseInlineSegments(processedLines.join("\n")); + + let plainText = ""; + for (const segment of segments) { + const start = plainText.length; + plainText += segment.text; + for (const style of segment.styles) { + allStyles.push({ start, len: segment.text.length, st: style } as Style); + } + } + + if (escapeMap.length > 0) { + const escapeRegex = /\x01(\d+)\x02/g; + const shifts: Array<{ pos: number; delta: number }> = []; + let cumulativeDelta = 0; + + for (const match of plainText.matchAll(escapeRegex)) { + const escapeIndex = Number.parseInt(match[1], 10); + cumulativeDelta += match[0].length - escapeMap[escapeIndex].length; + shifts.push({ pos: (match.index ?? 
0) + match[0].length, delta: cumulativeDelta }); + } + + for (const style of allStyles) { + let startDelta = 0; + let endDelta = 0; + const end = style.start + style.len; + for (const shift of shifts) { + if (shift.pos <= style.start) { + startDelta = shift.delta; + } + if (shift.pos <= end) { + endDelta = shift.delta; + } + } + style.start -= startDelta; + style.len -= endDelta - startDelta; + } + + plainText = plainText.replace( + escapeRegex, + (_match, index) => escapeMap[Number.parseInt(index, 10)], + ); + } + + const finalLines = plainText.split("\n"); + let offset = 0; + for (let lineIndex = 0; lineIndex < finalLines.length; lineIndex += 1) { + const lineLength = finalLines[lineIndex].length; + if (lineLength > 0) { + for (const lineStyle of lineStyles) { + if (lineStyle.lineIndex !== lineIndex) { + continue; + } + + if (lineStyle.style === TextStyle.Indent) { + allStyles.push({ + start: offset, + len: lineLength, + st: TextStyle.Indent, + indentSize: lineStyle.indentSize, + }); + } else { + allStyles.push({ start: offset, len: lineLength, st: lineStyle.style } as Style); + } + } + } + offset += lineLength + 1; + } + + return { text: plainText, styles: allStyles }; +} + +function clampIndent(spaceCount: number): number { + return Math.min(5, Math.max(1, Math.floor(spaceCount / 2))); +} + +function stripOptionalMarkdownPadding(line: string): { text: string; size: number } { + const match = line.match(/^( {1,3})(?=\S)/); + if (!match) { + return { text: line, size: 0 }; + } + return { + text: line.slice(match[1].length), + size: match[1].length, + }; +} + +function hasClosingFence(lines: string[], startIndex: number, fence: ActiveFence): boolean { + for (let index = startIndex; index < lines.length; index += 1) { + const candidate = + fence.quoteIndent > 0 ? 
stripQuotePrefix(lines[index], fence.quoteIndent).text : lines[index]; + if (isClosingFence(candidate, fence)) { + return true; + } + } + return false; +} + +function resolveOpeningFence(line: string): ActiveFence | null { + const directFence = parseFenceMarker(line); + if (directFence) { + return { ...directFence, quoteIndent: 0 }; + } + + const quoted = stripQuotePrefix(line); + if (quoted.indent === 0) { + return null; + } + + const quotedFence = parseFenceMarker(quoted.text); + if (!quotedFence) { + return null; + } + + return { + ...quotedFence, + quoteIndent: quoted.indent, + }; +} + +function stripQuotePrefix( + line: string, + maxDepth = Number.POSITIVE_INFINITY, +): { text: string; indent: number } { + let cursor = 0; + while (cursor < line.length && cursor < 3 && line[cursor] === " ") { + cursor += 1; + } + + let removedDepth = 0; + let consumedCursor = cursor; + while (removedDepth < maxDepth && consumedCursor < line.length && line[consumedCursor] === ">") { + removedDepth += 1; + consumedCursor += 1; + if (line[consumedCursor] === " ") { + consumedCursor += 1; + } + } + + if (removedDepth === 0) { + return { text: line, indent: 0 }; + } + + return { + text: line.slice(consumedCursor), + indent: Math.min(5, removedDepth), + }; +} + +function parseFenceMarker(line: string): FenceMarker | null { + const match = line.match(/^([ ]{0,3})(`{3,}|~{3,})(.*)$/); + if (!match) { + return null; + } + + const marker = match[2]; + const char = marker[0]; + if (char !== "`" && char !== "~") { + return null; + } + + return { + char, + length: marker.length, + indent: match[1].length, + }; +} + +function isClosingFence(line: string, fence: FenceMarker): boolean { + const match = line.match(/^([ ]{0,3})(`{3,}|~{3,})[ \t]*$/); + if (!match) { + return false; + } + return match[2][0] === fence.char && match[2].length >= fence.length; +} + +function escapeLiteralText(input: string, escapeMap: string[]): string { + return input.replace(/[\\*_~{}`]/g, (ch) => { + const index 
= escapeMap.length; + escapeMap.push(ch); + return `\x01${index}\x02`; + }); +} + +function parseInlineSegments(text: string, inheritedStyles: InlineStyle[] = []): Segment[] { + const segments: Segment[] = []; + let cursor = 0; + + while (cursor < text.length) { + const nextMatch = findNextInlineMatch(text, cursor); + if (!nextMatch) { + pushSegment(segments, text.slice(cursor), inheritedStyles); + break; + } + + if (nextMatch.match.index > cursor) { + pushSegment(segments, text.slice(cursor, nextMatch.match.index), inheritedStyles); + } + + const combinedStyles = [...inheritedStyles, ...nextMatch.styles]; + if (nextMatch.marker.literal) { + pushSegment(segments, nextMatch.text, combinedStyles); + } else { + segments.push(...parseInlineSegments(nextMatch.text, combinedStyles)); + } + + cursor = nextMatch.match.index + nextMatch.match[0].length; + } + + return segments; +} + +function findNextInlineMatch(text: string, startIndex: number): ResolvedInlineMatch | null { + let bestMatch: ResolvedInlineMatch | null = null; + + for (const [priority, marker] of INLINE_MARKERS.entries()) { + const regex = new RegExp(marker.pattern.source, marker.pattern.flags); + regex.lastIndex = startIndex; + const match = regex.exec(text); + if (!match) { + continue; + } + + if ( + bestMatch && + (match.index > bestMatch.match.index || + (match.index === bestMatch.match.index && priority > bestMatch.priority)) + ) { + continue; + } + + bestMatch = { + match, + marker, + text: marker.extractText(match), + styles: marker.resolveStyles?.(match) ?? 
[], + priority, + }; + } + + return bestMatch; +} + +function pushSegment(segments: Segment[], text: string, styles: InlineStyle[]): void { + if (!text) { + return; + } + + const lastSegment = segments.at(-1); + if (lastSegment && sameStyles(lastSegment.styles, styles)) { + lastSegment.text += text; + return; + } + + segments.push({ + text, + styles: [...styles], + }); +} + +function sameStyles(left: InlineStyle[], right: InlineStyle[]): boolean { + return left.length === right.length && left.every((style, index) => style === right[index]); +} + +function normalizeCodeBlockLeadingWhitespace(line: string): string { + return line.replace(/^[ \t]+/, (leadingWhitespace) => + leadingWhitespace.replace(/\t/g, "\u00A0\u00A0\u00A0\u00A0").replace(/ /g, "\u00A0"), + ); +} + +function isIndentedCodeBlockLine(line: string): boolean { + return /^(?: {4,}|\t)/.test(line); +} + +function stripCodeFenceIndent(line: string, indent: number): string { + let consumed = 0; + let cursor = 0; + + while (cursor < line.length && consumed < indent && line[cursor] === " ") { + cursor += 1; + consumed += 1; + } + + return line.slice(cursor); +} diff --git a/extensions/zalouser/src/types.ts b/extensions/zalouser/src/types.ts index d704a1b3f78..e6343b1f6bd 100644 --- a/extensions/zalouser/src/types.ts +++ b/extensions/zalouser/src/types.ts @@ -1,3 +1,5 @@ +import type { Style } from "./zca-client.js"; + export type ZcaFriend = { userId: string; displayName: string; @@ -59,6 +61,10 @@ export type ZaloSendOptions = { caption?: string; isGroup?: boolean; mediaLocalRoots?: readonly string[]; + textMode?: "markdown" | "plain"; + textChunkMode?: "length" | "newline"; + textChunkLimit?: number; + textStyles?: Style[]; }; export type ZaloSendResult = { diff --git a/extensions/zalouser/src/zalo-js.ts b/extensions/zalouser/src/zalo-js.ts index 25d263b7d6a..0e2d744232f 100644 --- a/extensions/zalouser/src/zalo-js.ts +++ b/extensions/zalouser/src/zalo-js.ts @@ -20,6 +20,7 @@ import type { } from 
"./types.js"; import { LoginQRCallbackEventType, + TextStyle, ThreadType, Zalo, type API, @@ -136,6 +137,39 @@ function toErrorMessage(error: unknown): string { return String(error); } +function clampTextStyles( + text: string, + styles?: ZaloSendOptions["textStyles"], +): ZaloSendOptions["textStyles"] { + if (!styles || styles.length === 0) { + return undefined; + } + const maxLength = text.length; + const clamped = styles + .map((style) => { + const start = Math.max(0, Math.min(style.start, maxLength)); + const end = Math.min(style.start + style.len, maxLength); + if (end <= start) { + return null; + } + if (style.st === TextStyle.Indent) { + return { + start, + len: end - start, + st: style.st, + indentSize: style.indentSize, + }; + } + return { + start, + len: end - start, + st: style.st, + }; + }) + .filter((style): style is NonNullable => style !== null); + return clamped.length > 0 ? clamped : undefined; +} + function toNumberId(value: unknown): string { if (typeof value === "number" && Number.isFinite(value)) { return String(Math.trunc(value)); @@ -1018,11 +1052,16 @@ export async function sendZaloTextMessage( kind: media.kind, }); const payloadText = (text || options.caption || "").slice(0, 2000); + const textStyles = clampTextStyles(payloadText, options.textStyles); if (media.kind === "audio") { let textMessageId: string | undefined; if (payloadText) { - const textResponse = await api.sendMessage(payloadText, trimmedThreadId, type); + const textResponse = await api.sendMessage( + textStyles ? { msg: payloadText, styles: textStyles } : payloadText, + trimmedThreadId, + type, + ); textMessageId = extractSendMessageId(textResponse); } @@ -1055,6 +1094,7 @@ export async function sendZaloTextMessage( const response = await api.sendMessage( { msg: payloadText, + ...(textStyles ? 
{ styles: textStyles } : {}), attachments: [ { data: media.buffer, @@ -1071,7 +1111,13 @@ export async function sendZaloTextMessage( return { ok: true, messageId: extractSendMessageId(response) }; } - const response = await api.sendMessage(text.slice(0, 2000), trimmedThreadId, type); + const payloadText = text.slice(0, 2000); + const textStyles = clampTextStyles(payloadText, options.textStyles); + const response = await api.sendMessage( + textStyles ? { msg: payloadText, styles: textStyles } : payloadText, + trimmedThreadId, + type, + ); return { ok: true, messageId: extractSendMessageId(response) }; } catch (error) { return { ok: false, error: toErrorMessage(error) }; diff --git a/extensions/zalouser/src/zca-client.ts b/extensions/zalouser/src/zca-client.ts index 57172eef64d..00a1c8c1be0 100644 --- a/extensions/zalouser/src/zca-client.ts +++ b/extensions/zalouser/src/zca-client.ts @@ -28,6 +28,39 @@ export const Reactions = ReactionsRuntime as Record & { NONE: string; }; +// Mirror zca-js sendMessage style constants locally because the package root +// typing surface does not consistently expose TextStyle/Style to tsgo. 
+export const TextStyle = { + Bold: "b", + Italic: "i", + Underline: "u", + StrikeThrough: "s", + Red: "c_db342e", + Orange: "c_f27806", + Yellow: "c_f7b503", + Green: "c_15a85f", + Small: "f_13", + Big: "f_18", + UnorderedList: "lst_1", + OrderedList: "lst_2", + Indent: "ind_$", +} as const; + +type TextStyleValue = (typeof TextStyle)[keyof typeof TextStyle]; + +export type Style = + | { + start: number; + len: number; + st: Exclude; + } + | { + start: number; + len: number; + st: typeof TextStyle.Indent; + indentSize?: number; + }; + export type Credentials = { imei: string; cookie: unknown; diff --git a/package.json b/package.json index f673633009c..9c1100bc49f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openclaw", - "version": "2026.3.9", + "version": "2026.3.11", "description": "Multi-channel AI gateway with extensible messaging integrations", "keywords": [], "homepage": "https://github.com/openclaw/openclaw#readme", @@ -338,11 +338,11 @@ "ui:install": "node scripts/ui.js install" }, "dependencies": { - "@agentclientprotocol/sdk": "0.15.0", - "@aws-sdk/client-bedrock": "^3.1004.0", + "@agentclientprotocol/sdk": "0.16.1", + "@aws-sdk/client-bedrock": "^3.1007.0", "@buape/carbon": "0.0.0-beta-20260216184201", "@clack/prompts": "^1.1.0", - "@discordjs/voice": "^0.19.0", + "@discordjs/voice": "^0.19.1", "@grammyjs/runner": "^2.0.3", "@grammyjs/transformer-throttler": "^1.2.1", "@homebridge/ciao": "^1.3.5", @@ -364,13 +364,13 @@ "cli-highlight": "^2.1.11", "commander": "^14.0.3", "croner": "^10.0.1", - "discord-api-types": "^0.38.41", + "discord-api-types": "^0.38.42", "dotenv": "^17.3.1", "express": "^5.2.1", "file-type": "^21.3.1", "grammy": "^1.41.1", "hono": "4.12.7", - "https-proxy-agent": "^7.0.6", + "https-proxy-agent": "^8.0.0", "ipaddr.js": "^2.3.0", "jiti": "^2.6.1", "json5": "^2.2.3", @@ -399,18 +399,18 @@ "@lit/context": "^1.1.6", "@types/express": "^5.0.6", "@types/markdown-it": "^14.1.2", - "@types/node": "^25.3.5", + 
"@types/node": "^25.4.0", "@types/qrcode-terminal": "^0.12.2", "@types/ws": "^8.18.1", - "@typescript/native-preview": "7.0.0-dev.20260308.1", + "@typescript/native-preview": "7.0.0-dev.20260311.1", "@vitest/coverage-v8": "^4.0.18", "jscpd": "4.0.8", "lit": "^3.3.2", - "oxfmt": "0.36.0", - "oxlint": "^1.51.0", + "oxfmt": "0.38.0", + "oxlint": "^1.53.0", "oxlint-tsgolint": "^0.16.0", "signal-utils": "0.21.1", - "tsdown": "0.21.0", + "tsdown": "0.21.2", "tsx": "^4.21.0", "typescript": "^5.9.3", "vitest": "^4.0.18" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 72fa7353329..1e26495971c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -26,11 +26,11 @@ importers: .: dependencies: '@agentclientprotocol/sdk': - specifier: 0.15.0 - version: 0.15.0(zod@4.3.6) + specifier: 0.16.1 + version: 0.16.1(zod@4.3.6) '@aws-sdk/client-bedrock': - specifier: ^3.1004.0 - version: 3.1004.0 + specifier: ^3.1007.0 + version: 3.1007.0 '@buape/carbon': specifier: 0.0.0-beta-20260216184201 version: 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) @@ -38,8 +38,8 @@ importers: specifier: ^1.1.0 version: 1.1.0 '@discordjs/voice': - specifier: ^0.19.0 - version: 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1) + specifier: ^0.19.1 + version: 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1) '@grammyjs/runner': specifier: ^2.0.3 version: 2.0.3(grammy@1.41.1) @@ -107,8 +107,8 @@ importers: specifier: ^10.0.1 version: 10.0.1 discord-api-types: - specifier: ^0.38.41 - version: 0.38.41 + specifier: ^0.38.42 + version: 0.38.42 dotenv: specifier: ^17.3.1 version: 17.3.1 @@ -125,8 +125,8 @@ importers: specifier: 4.12.7 version: 4.12.7 https-proxy-agent: - specifier: ^7.0.6 - version: 7.0.6 + specifier: ^8.0.0 + version: 8.0.0 ipaddr.js: specifier: ^2.3.0 version: 2.3.0 @@ -210,8 +210,8 @@ importers: specifier: ^14.1.2 version: 14.1.2 '@types/node': - specifier: ^25.3.5 - version: 25.3.5 + specifier: ^25.4.0 + version: 25.4.0 '@types/qrcode-terminal': specifier: 
^0.12.2 version: 0.12.2 @@ -219,11 +219,11 @@ importers: specifier: ^8.18.1 version: 8.18.1 '@typescript/native-preview': - specifier: 7.0.0-dev.20260308.1 - version: 7.0.0-dev.20260308.1 + specifier: 7.0.0-dev.20260311.1 + version: 7.0.0-dev.20260311.1 '@vitest/coverage-v8': specifier: ^4.0.18 - version: 4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18) + version: 4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18) jscpd: specifier: 4.0.8 version: 4.0.8 @@ -231,11 +231,11 @@ importers: specifier: ^3.3.2 version: 3.3.2 oxfmt: - specifier: 0.36.0 - version: 0.36.0 + specifier: 0.38.0 + version: 0.38.0 oxlint: - specifier: ^1.51.0 - version: 1.51.0(oxlint-tsgolint@0.16.0) + specifier: ^1.53.0 + version: 1.53.0(oxlint-tsgolint@0.16.0) oxlint-tsgolint: specifier: ^0.16.0 version: 0.16.0 @@ -243,8 +243,8 @@ importers: specifier: 0.21.1 version: 0.21.1(signal-polyfill@0.2.2) tsdown: - specifier: 0.21.0 - version: 0.21.0(@typescript/native-preview@7.0.0-dev.20260308.1)(typescript@5.9.3) + specifier: 0.21.2 + version: 0.21.2(@typescript/native-preview@7.0.0-dev.20260311.1)(typescript@5.9.3) tsx: specifier: ^4.21.0 version: 4.21.0 @@ -253,13 +253,13 @@ importers: version: 5.9.3 vitest: specifier: ^4.0.18 - version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) extensions/acpx: dependencies: acpx: - specifier: 0.1.16 - version: 0.1.16(zod@4.3.6) + specifier: 0.2.0 + version: 0.2.0(zod@4.3.6) extensions/bluebubbles: dependencies: @@ -328,8 +328,8 @@ importers: specifier: 0.34.48 version: 0.34.48 https-proxy-agent: - specifier: 
^7.0.6 - version: 7.0.6 + specifier: ^8.0.0 + version: 8.0.0 zod: specifier: ^4.3.6 version: 4.3.6 @@ -341,10 +341,9 @@ importers: google-auth-library: specifier: ^10.6.1 version: 10.6.1 - devDependencies: openclaw: - specifier: workspace:* - version: link:../.. + specifier: '>=2026.3.7' + version: 2026.3.8(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.7)(node-llama-cpp@3.16.2(typescript@5.9.3)) extensions/imessage: {} @@ -402,10 +401,10 @@ importers: version: 4.3.6 extensions/memory-core: - devDependencies: + dependencies: openclaw: - specifier: workspace:* - version: link:../.. + specifier: '>=2026.3.7' + version: 2026.3.8(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.7)(node-llama-cpp@3.16.2(typescript@5.9.3)) extensions/memory-lancedb: dependencies: @@ -551,8 +550,8 @@ importers: specifier: 3.0.0 version: 3.0.0 dompurify: - specifier: ^3.3.2 - version: 3.3.2 + specifier: ^3.3.3 + version: 3.3.3 lit: specifier: ^3.3.2 version: 3.3.2 @@ -567,17 +566,17 @@ importers: version: 0.21.1(signal-polyfill@0.2.2) vite: specifier: 7.3.1 - version: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) devDependencies: '@vitest/browser-playwright': specifier: 4.0.18 - version: 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + version: 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) playwright: specifier: ^1.58.2 version: 1.58.2 vitest: specifier: 4.0.18 - version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 
4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) packages: @@ -586,6 +585,11 @@ packages: peerDependencies: zod: ^3.25.0 || ^4.0.0 + '@agentclientprotocol/sdk@0.16.1': + resolution: {integrity: sha512-1ad+Sc/0sCtZGHthxxvgEUo5Wsbw16I+aF+YwdiLnPwkZG8KAGUEAPK6LM6Pf69lCyJPt1Aomk1d+8oE3C4ZEw==} + peerDependencies: + zod: ^3.25.0 || ^4.0.0 + '@anthropic-ai/sdk@0.73.0': resolution: {integrity: sha512-URURVzhxXGJDGUGFunIOtBlSl7KWvZiAAKY/ttTkZAkXT9bTPqdk2eK0b8qqSxXpikh3QKPnPYpiyX98zf5ebw==} hasBin: true @@ -622,8 +626,8 @@ packages: resolution: {integrity: sha512-t8cl+bPLlHZQD2Sw1a4hSLUybqJZU71+m8znkyeU8CHntFqEp2mMbuLKdHKaAYQ1fAApXMsvzenCAkDzNeeJlw==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock@3.1004.0': - resolution: {integrity: sha512-JbfZSV85IL+43S7rPBmeMbvoOYXs1wmrfbEpHkDBjkvbukRQWtoetiPAXNSKDfFq1qVsoq8sWPdoerDQwlUO8w==} + '@aws-sdk/client-bedrock@3.1007.0': + resolution: {integrity: sha512-49hH8o6ALKkCiBUgg20HkwxNamP1yYA/n8Si73Z438EqhZGpCfScP3FfxVhrfD5o+4bV4Whi9BTzPKCa/PfUww==} engines: {node: '>=20.0.0'} '@aws-sdk/client-s3@3.1000.0': @@ -638,6 +642,10 @@ packages: resolution: {integrity: sha512-GUIlegfcK2LO1J2Y98sCJy63rQSiLiDOgVw7HiHPRqfI2vb3XozTVqemwO0VSGXp54ngCnAQz0Lf0YPCBINNxA==} engines: {node: '>=20.0.0'} + '@aws-sdk/core@3.973.19': + resolution: {integrity: sha512-56KePyOcZnKTWCd89oJS1G6j3HZ9Kc+bh/8+EbvtaCCXdP6T7O7NzCiPuHRhFLWnzXIaXX3CxAz0nI5My9spHQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/crc64-nvme@3.972.3': resolution: {integrity: sha512-UExeK+EFiq5LAcbHm96CQLSia+5pvpUVSAsVApscBzayb7/6dJBJKwV4/onsk4VbWSmqxDMcfuTD+pC4RxgZHg==} engines: {node: '>=20.0.0'} @@ -650,6 +658,10 @@ packages: resolution: {integrity: sha512-HrdtnadvTGAQUr18sPzGlE5El3ICphnH6SU7UQOMOWFgRKbTRNN8msTxM4emzguUso9CzaHU2xy5ctSrmK5YNA==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-env@3.972.17': + resolution: {integrity: 
sha512-MBAMW6YELzE1SdkOniqr51mrjapQUv8JXSGxtwRjQV0mwVDutVsn22OPAUt4RcLRvdiHQmNBDEFP9iTeSVCOlA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-http@3.972.15': resolution: {integrity: sha512-dJuSTreu/T8f24SHDNTjd7eQ4rabr0TzPh2UTCwYexQtzG3nTDKm1e5eIdhiroTMDkPEJeY+WPkA6F9wod/20A==} engines: {node: '>=20.0.0'} @@ -658,6 +670,10 @@ packages: resolution: {integrity: sha512-NyB6smuZAixND5jZumkpkunQ0voc4Mwgkd+SZ6cvAzIB7gK8HV8Zd4rS8Kn5MmoGgusyNfVGG+RLoYc4yFiw+A==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-http@3.972.19': + resolution: {integrity: sha512-9EJROO8LXll5a7eUFqu48k6BChrtokbmgeMWmsH7lBb6lVbtjslUYz/ShLi+SHkYzTomiGBhmzTW7y+H4BxsnA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-ini@3.972.13': resolution: {integrity: sha512-JKSoGb7XeabZLBJptpqoZIFbROUIS65NuQnEHGOpuT9GuuZwag2qciKANiDLFiYk4u8nSrJC9JIOnWKVvPVjeA==} engines: {node: '>=20.0.0'} @@ -666,6 +682,10 @@ packages: resolution: {integrity: sha512-dFqh7nfX43B8dO1aPQHOcjC0SnCJ83H3F+1LoCh3X1P7E7N09I+0/taID0asU6GCddfDExqnEvQtDdkuMe5tKQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-ini@3.972.18': + resolution: {integrity: sha512-vthIAXJISZnj2576HeyLBj4WTeX+I7PwWeRkbOa0mVX39K13SCGxCgOFuKj2ytm9qTlLOmXe4cdEnroteFtJfw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-login@3.972.13': resolution: {integrity: sha512-RtYcrxdnJHKY8MFQGLltCURcjuMjnaQpAxPE6+/QEdDHHItMKZgabRe/KScX737F9vJMQsmJy9EmMOkCnoC1JQ==} engines: {node: '>=20.0.0'} @@ -674,6 +694,10 @@ packages: resolution: {integrity: sha512-gf2E5b7LpKb+JX2oQsRIDxdRZjBFZt2olCGlWCdb3vBERbXIPgm2t1R5mEnwd4j0UEO/Tbg5zN2KJbHXttJqwA==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-login@3.972.18': + resolution: {integrity: sha512-kINzc5BBxdYBkPZ0/i1AMPMOk5b5QaFNbYMElVw5QTX13AKj6jcxnv/YNl9oW9mg+Y08ti19hh01HhyEAxsSJQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-node@3.972.14': resolution: {integrity: 
sha512-WqoC2aliIjQM/L3oFf6j+op/enT2i9Cc4UTxxMEKrJNECkq4/PlKE5BOjSYFcq6G9mz65EFbXJh7zOU4CvjSKQ==} engines: {node: '>=20.0.0'} @@ -682,6 +706,10 @@ packages: resolution: {integrity: sha512-ZDJa2gd1xiPg/nBDGhUlat02O8obaDEnICBAVS8qieZ0+nDfaB0Z3ec6gjZj27OqFTjnB/Q5a0GwQwb7rMVViw==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-node@3.972.19': + resolution: {integrity: sha512-yDWQ9dFTr+IMxwanFe7+tbN5++q8psZBjlUwOiCXn1EzANoBgtqBwcpYcHaMGtn0Wlfj4NuXdf2JaEx1lz5RaQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-process@3.972.13': resolution: {integrity: sha512-rsRG0LQA4VR+jnDyuqtXi2CePYSmfm5GNL9KxiW8DSe25YwJSr06W8TdUfONAC+rjsTI+aIH2rBGG5FjMeANrw==} engines: {node: '>=20.0.0'} @@ -690,6 +718,10 @@ packages: resolution: {integrity: sha512-n89ibATwnLEg0ZdZmUds5bq8AfBAdoYEDpqP3uzPLaRuGelsKlIvCYSNNvfgGLi8NaHPNNhs1HjJZYbqkW9b+g==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-process@3.972.17': + resolution: {integrity: sha512-c8G8wT1axpJDgaP3xzcy+q8Y1fTi9A2eIQJvyhQ9xuXrUZhlCfXbC0vM9bM1CUXiZppFQ1p7g0tuUMvil/gCPg==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-sso@3.972.13': resolution: {integrity: sha512-fr0UU1wx8kNHDhTQBXioc/YviSW8iXuAxHvnH7eQUtn8F8o/FU3uu6EUMvAQgyvn7Ne5QFnC0Cj0BFlwCk+RFw==} engines: {node: '>=20.0.0'} @@ -698,6 +730,10 @@ packages: resolution: {integrity: sha512-wGtte+48xnhnhHMl/MsxzacBPs5A+7JJedjiP452IkHY7vsbYKcvQBqFye8LwdTJVeHtBHv+JFeTscnwepoWGg==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-sso@3.972.18': + resolution: {integrity: sha512-YHYEfj5S2aqInRt5ub8nDOX8vAxgMvd84wm2Y3WVNfFa/53vOv9T7WOAqXI25qjj3uEcV46xxfqdDQk04h5XQA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-web-identity@3.972.13': resolution: {integrity: sha512-a6iFMh1pgUH0TdcouBppLJUfPM7Yd3R9S1xFodPtCRoLqCz2RQFA3qjA8x4112PVYXEd4/pHX2eihapq39w0rA==} engines: {node: '>=20.0.0'} @@ -706,6 +742,10 @@ packages: resolution: {integrity: 
sha512-8aiVJh6fTdl8gcyL+sVNcNwTtWpmoFa1Sh7xlj6Z7L/cZ/tYMEBHq44wTYG8Kt0z/PpGNopD89nbj3FHl9QmTA==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-web-identity@3.972.18': + resolution: {integrity: sha512-OqlEQpJ+J3T5B96qtC1zLLwkBloechP+fezKbCH0sbd2cCc0Ra55XpxWpk/hRj69xAOYtHvoC4orx6eTa4zU7g==} + engines: {node: '>=20.0.0'} + '@aws-sdk/eventstream-handler-node@3.972.10': resolution: {integrity: sha512-g2Z9s6Y4iNh0wICaEqutgYgt/Pmhv5Ev9G3eKGFe2w9VuZDhc76vYdop6I5OocmpHV79d4TuLG+JWg5rQIVDVA==} engines: {node: '>=20.0.0'} @@ -770,6 +810,10 @@ packages: resolution: {integrity: sha512-Km90fcXt3W/iqujHzuM6IaDkYCj73gsYufcuWXApWdzoTy6KGk8fnchAjePMARU0xegIR3K4N3yIo1vy7OVe8A==} engines: {node: '>=20.0.0'} + '@aws-sdk/middleware-user-agent@3.972.20': + resolution: {integrity: sha512-3kNTLtpUdeahxtnJRnj/oIdLAUdzTfr9N40KtxNhtdrq+Q1RPMdCJINRXq37m4t5+r3H70wgC3opW46OzFcZYA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/middleware-websocket@3.972.12': resolution: {integrity: sha512-iyPP6FVDKe/5wy5ojC0akpDFG1vX3FeCUU47JuwN8xfvT66xlEI8qUJZPtN55TJVFzzWZJpWL78eqUE31md08Q==} engines: {node: '>= 14.0.0'} @@ -782,6 +826,10 @@ packages: resolution: {integrity: sha512-MlGWA8uPaOs5AiTZ5JLM4uuWDm9EEAnm9cqwvqQIc6kEgel/8s1BaOWm9QgUcfc9K8qd7KkC3n43yDbeXOA2tg==} engines: {node: '>=20.0.0'} + '@aws-sdk/nested-clients@3.996.8': + resolution: {integrity: sha512-6HlLm8ciMW8VzfB80kfIx16PBA9lOa9Dl+dmCBi78JDhvGlx3I7Rorwi5PpVRkL31RprXnYna3yBf6UKkD/PqA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/region-config-resolver@3.972.6': resolution: {integrity: sha512-Aa5PusHLXAqLTX1UKDvI3pHQJtIsF7Q+3turCHqfz/1F61/zDMWfbTC8evjhrrYVAtz9Vsv3SJ/waSUeu7B6gw==} engines: {node: '>=20.0.0'} @@ -802,6 +850,14 @@ packages: resolution: {integrity: sha512-j9BwZZId9sFp+4GPhf6KrwO8Tben2sXibZA8D1vv2I1zBdvkUHcBA2g4pkqIpTRalMTLC0NPkBPX0gERxfy/iA==} engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.1005.0': + resolution: {integrity: 
sha512-vMxd+ivKqSxU9bHx5vmAlFKDAkjGotFU56IOkDa5DaTu1WWwbcse0yFHEm9I537oVvodaiwMl3VBwgHfzQ2rvw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/token-providers@3.1007.0': + resolution: {integrity: sha512-kKvVyr53vvVc5k6RbvI6jhafxufxO2SkEw8QeEzJqwOXH/IMY7Cm0IyhnBGdqj80iiIIiIM2jGe7Fn3TIdwdrw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.999.0': resolution: {integrity: sha512-cx0hHUlgXULfykx4rdu/ciNAJaa3AL5xz3rieCz7NKJ68MJwlj3664Y8WR5MGgxfyYJBdamnkjNSx5Kekuc0cg==} engines: {node: '>=20.0.0'} @@ -862,6 +918,15 @@ packages: aws-crt: optional: true + '@aws-sdk/util-user-agent-node@3.973.5': + resolution: {integrity: sha512-Dyy38O4GeMk7UQ48RupfHif//gqnOPbq/zlvRssc11E2mClT+aUfc3VS2yD8oLtzqO3RsqQ9I3gOBB4/+HjPOw==} + engines: {node: '>=20.0.0'} + peerDependencies: + aws-crt: '>=1.0.0' + peerDependenciesMeta: + aws-crt: + optional: true + '@aws-sdk/xml-builder@3.972.10': resolution: {integrity: sha512-OnejAIVD+CxzyAUrVic7lG+3QRltyja9LoNqCE/1YVs8ichoTbJlVSaZ9iSMcnHLyzrSNtvaOGjSDRP+d/ouFA==} engines: {node: '>=20.0.0'} @@ -1024,6 +1089,10 @@ packages: resolution: {integrity: sha512-UyX6rGEXzVyPzb1yvjHtPfTlnLvB5jX/stAMdiytHhfoydX+98hfympdOwsnTktzr+IRvphxTbdErgYDJkEsvw==} engines: {node: '>=22.12.0'} + '@discordjs/voice@0.19.1': + resolution: {integrity: sha512-XYbFVyUBB7zhRvrjREfiWDwio24nEp/vFaVe6u9aBIC5UYuT7HvoMt8LgNfZ5hOyaCW0flFr72pkhUGz+gWw4Q==} + engines: {node: '>=22.12.0'} + '@emnapi/core@1.8.1': resolution: {integrity: sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==} @@ -2102,116 +2171,116 @@ packages: '@oxc-project/types@0.115.0': resolution: {integrity: sha512-4n91DKnebUS4yjUHl2g3/b2T+IUdCfmoZGhmwsovZCDaJSs+QkVAM+0AqqTxHSsHfeiMuueT75cZaZcT/m0pSw==} - '@oxfmt/binding-android-arm-eabi@0.36.0': - resolution: {integrity: sha512-Z4yVHJWx/swHHjtr0dXrBZb6LxS+qNz1qdza222mWwPTUK4L790+5i3LTgjx3KYGBzcYpjaiZBw4vOx94dH7MQ==} + '@oxfmt/binding-android-arm-eabi@0.38.0': + resolution: {integrity: 
sha512-lTN4//sgYywK8ulQo7a/EZVzOTGomGQv2IG/7tMYdqTV3xN3QTqWpXcZBGUzaicC4B882N+5zJLYZ37IWfUMcg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [android] - '@oxfmt/binding-android-arm64@0.36.0': - resolution: {integrity: sha512-3ElCJRFNPQl7jexf2CAa9XmAm8eC5JPrIDSjc9jSchkVSFTEqyL0NtZinBB2h1a4i4JgP1oGl/5G5n8YR4FN8Q==} + '@oxfmt/binding-android-arm64@0.38.0': + resolution: {integrity: sha512-XbVgqR1WsIcCkfxwh2tdg3M1MWgR23YOboW2nbB8ab0gInNNLGy7cIAdr78XaoG/bGdaF4488XRhuGWq67xrzA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@oxfmt/binding-darwin-arm64@0.36.0': - resolution: {integrity: sha512-nak4znWCqIExKhYSY/mz/lWsqWIpdsS7o0+SRzXR1Q0m7GrMcG1UrF1pS7TLGZhhkf7nTfEF7q6oZzJiodRDuw==} + '@oxfmt/binding-darwin-arm64@0.38.0': + resolution: {integrity: sha512-AHb6zUzWaSJra7lnPkI+Sqwu33bVWVTwCozcw9QTX8vwHaI1+5d5STqBcsJf63eSuRVRlflwMS4erlAPh3fXZw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@oxfmt/binding-darwin-x64@0.36.0': - resolution: {integrity: sha512-V4GP96thDnpKx6ADnMDnhIXNdtV+Ql9D4HUU+a37VTeVbs5qQSF/s6hhUP1b3xUqU7iRcwh72jUU2Y12rtGHAw==} + '@oxfmt/binding-darwin-x64@0.38.0': + resolution: {integrity: sha512-VmlmTyn7LL7Xi5htjosxGpJJHf3Drx5mgXxKE8+NT10uBXTaG3FHpRYhW3Zg5Qp7omH92Lj1+IHYqQG/HZpLnw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@oxfmt/binding-freebsd-x64@0.36.0': - resolution: {integrity: sha512-/xapWCADfI5wrhxpEUjhI9fnw7MV5BUZizVa8e24n3VSK6A3Y1TB/ClOP1tfxNspykFKXp4NBWl6NtDJP3osqQ==} + '@oxfmt/binding-freebsd-x64@0.38.0': + resolution: {integrity: sha512-LynMLRqaUEAV6n4svTFanFOAnJ9D6aCCfymJ2yhMSh5fYFgCCO4q5LzPV2nATKKoyPocSErFSmYREsOFbkIlCg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@oxfmt/binding-linux-arm-gnueabihf@0.36.0': - resolution: {integrity: sha512-1lOmv61XMFIH5uNm27620kRRzWt/RK6tdn250BRDoG9W7OXGOQ5UyI1HVT+SFkoOoKztBiinWgi68+NA1MjBVQ==} + '@oxfmt/binding-linux-arm-gnueabihf@0.38.0': + resolution: {integrity: 
sha512-HRRZtOXcss5+bGqQcYahILgt14+Iu/Olf6fnoKq5ctOzU21PGHVB+zuocgt+/+ixoMLV1Drvok3ns7QwnLwNTA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxfmt/binding-linux-arm-musleabihf@0.36.0': - resolution: {integrity: sha512-vMH23AskdR1ujUS9sPck2Df9rBVoZUnCVY86jisILzIQ/QQ/yKUTi7tgnIvydPx7TyB/48wsQ5QMr5Knq5p/aw==} + '@oxfmt/binding-linux-arm-musleabihf@0.38.0': + resolution: {integrity: sha512-kScH8XnH7TRUckMOSZ5115Vvr2CQq+iPsuXPEzwUXSxh+gDLzt+GsXuvCsaPxp1KP+dQj88VrIjeQ4V0f9NRKw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxfmt/binding-linux-arm64-gnu@0.36.0': - resolution: {integrity: sha512-Hy1V+zOBHpBiENRx77qrUTt5aPDHeCASRc8K5KwwAHkX2AKP0nV89eL17hsZrE9GmnXFjsNmd80lyf7aRTXsbw==} + '@oxfmt/binding-linux-arm64-gnu@0.38.0': + resolution: {integrity: sha512-PUVn/vGsMs83eLhNXLNjR+Qw/EPiNxU9Tx+p+aZBK0RT9/k6RNgh/O4F1TxS4tdISmf3SSgjdnMOVW3ZfQZ2mA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxfmt/binding-linux-arm64-musl@0.36.0': - resolution: {integrity: sha512-SPGLJkOIHSIC6ABUQ5V8NqJpvYhMJueJv26NYqfCnwi/Mn6A61amkpJJ9Suy0Nmvs+OWESJpcebrBUbXPGZyQQ==} + '@oxfmt/binding-linux-arm64-musl@0.38.0': + resolution: {integrity: sha512-LhtmaLCMGtAIEtaTBAoKLF3QVt+IDKIjdEZvsf0msLeTUFKxyoTNScYBXbkmvqGrm37vV0JjTPvm+OaSh3np5A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxfmt/binding-linux-ppc64-gnu@0.36.0': - resolution: {integrity: sha512-3EuoyB8x9x8ysYJjbEO/M9fkSk72zQKnXCvpZMDHXlnY36/1qMp55Nm0PrCwjGO/1pen5hdOVkz9WmP3nAp2IQ==} + '@oxfmt/binding-linux-ppc64-gnu@0.38.0': + resolution: {integrity: sha512-tO6tPaS21o0MaRqmOi9e3sDotlW4c+1gCx4SwdrfDXm3Y1vmIZWh0qB6t/Xh77bIGVr/4fC95eKOhKLPGwdL+Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@oxfmt/binding-linux-riscv64-gnu@0.36.0': - resolution: {integrity: sha512-MpY3itLwpGh8dnywtrZtaZ604T1m715SydCKy0+qTxetv+IHzuA+aO/AGzrlzUNYZZmtWtmDBrChZGibvZxbRQ==} + '@oxfmt/binding-linux-riscv64-gnu@0.38.0': + resolution: {integrity: 
sha512-djEqwFUHczstFKp5aT43TuRWxyKZSkIZUfGXIEKa0srmIAt1CXQO5O8xLgNG4SGkXTRB1domFfCE68t9SkSmfA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxfmt/binding-linux-riscv64-musl@0.36.0': - resolution: {integrity: sha512-mmDhe4Vtx+XwQPRPn/V25+APnkApYgZ23q+6GVsNYY98pf3aU0aI3Me96pbRs/AfJ1jIiGC+/6q71FEu8dHcHw==} + '@oxfmt/binding-linux-riscv64-musl@0.38.0': + resolution: {integrity: sha512-76EgMMtS6sIE+9Pl9q2GZgZpbZSzqtjQhUUIWl0RVNfHg66tstdJMhY2LXESjDYhc5vFYt9qdQNM0w0zg3onPw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxfmt/binding-linux-s390x-gnu@0.36.0': - resolution: {integrity: sha512-AYXhU+DmNWLSnvVwkHM92fuYhogtVHab7UQrPNaDf1sxadugg9gWVmcgJDlIwxJdpk5CVW/TFvwUKwI432zhhA==} + '@oxfmt/binding-linux-s390x-gnu@0.38.0': + resolution: {integrity: sha512-JYNr3i9z/YguZg088kopjvz49hDxTEL193mYL2/02uq/6BLlQRMaKrePEITTHm/vUu4ZquAKgu4mDib6pGWdyg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@oxfmt/binding-linux-x64-gnu@0.36.0': - resolution: {integrity: sha512-H16QhhQ3usoakMleiAAQ2mg0NsBDAdyE9agUgfC8IHHh3jZEbr0rIKwjEqwbOHK5M0EmfhJmr+aGO/MgZPsneA==} + '@oxfmt/binding-linux-x64-gnu@0.38.0': + resolution: {integrity: sha512-Lf+/Keaw1kBKx0U3HT5PsA7/3VO4ZOmaqo4sWaeAJ6tYeX8h/2IZcEONhjry6T4BETza78z6xI3Qx+18QZix6A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxfmt/binding-linux-x64-musl@0.36.0': - resolution: {integrity: sha512-EFFGkixA39BcmHiCe2ECdrq02D6FCve5ka6ObbvrheXl4V+R0U/E+/uLyVx1X65LW8TA8QQHdnbdDallRekohw==} + '@oxfmt/binding-linux-x64-musl@0.38.0': + resolution: {integrity: sha512-4O6sf6OQuz1flk0TDrrtmXOVO3letA7fYe2IEAiJOQvKhJcMU08NiIVODQjMGZ6IQh1q91B+TlliDfbsYalw8A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxfmt/binding-openharmony-arm64@0.36.0': - resolution: {integrity: sha512-zr/t369wZWFOj1qf06Z5gGNjFymfUNDrxKMmr7FKiDRVI1sNsdKRCuRL4XVjtcptKQ+ao3FfxLN1vrynivmCYg==} + '@oxfmt/binding-openharmony-arm64@0.38.0': + resolution: {integrity: 
sha512-GNocbjYnielmKVBk+r/2Vc4E3oTsAO4+5gRuroUVx86Jv+mpD+hyFkf260/by0YtpF1ipqyxR8chOSgRQvD2zQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@oxfmt/binding-win32-arm64-msvc@0.36.0': - resolution: {integrity: sha512-FxO7UksTv8h4olzACgrqAXNF6BP329+H322323iDrMB5V/+a1kcAw07fsOsUmqNrb9iJBsCQgH/zqcqp5903ag==} + '@oxfmt/binding-win32-arm64-msvc@0.38.0': + resolution: {integrity: sha512-AwgjBHRxPckbazLpECuPOSzYlppYR1CBeUSuzZuClsmTnlZA9O1MexCEP9CROe03Yo1xBGvYtiCjwKZMBChGkg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@oxfmt/binding-win32-ia32-msvc@0.36.0': - resolution: {integrity: sha512-OjoMQ89H01M0oLMfr/CPNH1zi48ZIwxAKObUl57oh7ssUBNDp/2Vjf7E1TQ8M4oj4VFQ/byxl2SmcPNaI2YNDg==} + '@oxfmt/binding-win32-ia32-msvc@0.38.0': + resolution: {integrity: sha512-c3u+ak6Zrh1g6pM2TgNVvOgkm7q1XaIX+5Mgxvu38ozJ5OfM8c7HZk3glMdBzlTD2uK0sSfgBq1kuXwCe1NOGg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ia32] os: [win32] - '@oxfmt/binding-win32-x64-msvc@0.36.0': - resolution: {integrity: sha512-MoyeQ9S36ZTz/4bDhOKJgOBIDROd4dQ5AkT9iezhEaUBxAPdNX9Oq0jD8OSnCj3G4wam/XNxVWKMA52kmzmPtQ==} + '@oxfmt/binding-win32-x64-msvc@0.38.0': + resolution: {integrity: sha512-wud1Hz0D2hYrhk6exxQQndn1htcA28wAcFb1vtP3ZXSzPFtMvc7ag/VNPv6nz6mDzM8X660jUwGEac99QcrVsA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] @@ -2246,116 +2315,116 @@ packages: cpu: [x64] os: [win32] - '@oxlint/binding-android-arm-eabi@1.51.0': - resolution: {integrity: sha512-jJYIqbx4sX+suIxWstc4P7SzhEwb4ArWA2KVrmEuu9vH2i0qM6QIHz/ehmbGE4/2fZbpuMuBzTl7UkfNoqiSgw==} + '@oxlint/binding-android-arm-eabi@1.53.0': + resolution: {integrity: sha512-JC89/jAx4d2zhDIbK8MC4L659FN1WiMXMBkNg7b33KXSkYpUgcbf+0nz7+EPRg+VwWiZVfaoFkNHJ7RXYb5Neg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [android] - '@oxlint/binding-android-arm64@1.51.0': - resolution: {integrity: sha512-GtXyBCcH4ti98YdiMNCrpBNGitx87EjEWxevnyhcBK12k/Vu4EzSB45rzSC4fGFUD6sQgeaxItRCEEWeVwPafw==} + 
'@oxlint/binding-android-arm64@1.53.0': + resolution: {integrity: sha512-CY+pZfi+uyeU7AwFrEnjsNT+VfxYmKLMuk7bVxArd8f+09hQbJb8f7C7EpvTfNqrCK1J8zZlaYI4LltmEctgbQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@oxlint/binding-darwin-arm64@1.51.0': - resolution: {integrity: sha512-3QJbeYaMHn6Bh2XeBXuITSsbnIctyTjvHf5nRjKYrT9pPeErNIpp5VDEeAXC0CZSwSVTsc8WOSDwgrAI24JolQ==} + '@oxlint/binding-darwin-arm64@1.53.0': + resolution: {integrity: sha512-0aqsC4HDQ94oI6kMz64iaOJ1f3bCVArxvaHJGOScBvFz6CcQedXi5b70Xg09CYjKNaHA56dW0QJfoZ/111kz1A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@oxlint/binding-darwin-x64@1.51.0': - resolution: {integrity: sha512-NzErhMaTEN1cY0E8C5APy74lw5VwsNfJfVPBMWPVQLqAbO0k4FFLjvHURvkUL+Y18Wu+8Vs1kbqPh2hjXYA4pg==} + '@oxlint/binding-darwin-x64@1.53.0': + resolution: {integrity: sha512-e+KvuaWtnisyWojO/t5qKDbp2dvVpg+1dl4MGnTb21QpY4+4+9Y1XmZPaztcA2XNvy4BIaXFW+9JH9tMpSBqUg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@oxlint/binding-freebsd-x64@1.51.0': - resolution: {integrity: sha512-msAIh3vPAoKoHlOE/oe6Q5C/n9umypv/k81lED82ibrJotn+3YG2Qp1kiR8o/Dg5iOEU97c6tl0utxcyFenpFw==} + '@oxlint/binding-freebsd-x64@1.53.0': + resolution: {integrity: sha512-hpU0ZHVeblFjmZDfgi9BxhhCpURh0KjoFy5V+Tvp9sg/fRcnMUEfaJrgz+jQfOX4jctlVWrAs1ANs91+5iV+zA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@oxlint/binding-linux-arm-gnueabihf@1.51.0': - resolution: {integrity: sha512-CqQPcvqYyMe9ZBot2stjGogEzk1z8gGAngIX7srSzrzexmXixwVxBdFZyxTVM0CjGfDeV+Ru0w25/WNjlMM2Hw==} + '@oxlint/binding-linux-arm-gnueabihf@1.53.0': + resolution: {integrity: sha512-ccKxOpw+X4xa2pO+qbTOpxQ2x1+Ag3ViRQMnWt3gHp1LcpNgS1xd6GYc3OvehmHtrXqEV3YGczZ0I1qpBB4/2A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxlint/binding-linux-arm-musleabihf@1.51.0': - resolution: {integrity: sha512-dstrlYQgZMnyOssxSbolGCge/sDbko12N/35RBNuqLpoPbft2aeBidBAb0dvQlyBd9RJ6u8D4o4Eh8Un6iTgyQ==} + 
'@oxlint/binding-linux-arm-musleabihf@1.53.0': + resolution: {integrity: sha512-UBkBvmzSmlyH2ZObQMDKW/TuyTmUtP/XClPUyU2YLwj0qLopZTZxnDz4VG5d3wz1HQuZXO0o1QqsnQUW1v4a6Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxlint/binding-linux-arm64-gnu@1.51.0': - resolution: {integrity: sha512-QEjUpXO7d35rP1/raLGGbAsBLLGZIzV3ZbeSjqWlD3oRnxpRIZ6iL4o51XQHkconn3uKssc+1VKdtHJ81BBhDA==} + '@oxlint/binding-linux-arm64-gnu@1.53.0': + resolution: {integrity: sha512-PQJJ1izoH9p61las6rZ0BWOznAhTDMmdUPL2IEBLuXFwhy2mSloYHvRkk39PSYJ1DyG+trqU5Z9ZbtHSGH6plg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxlint/binding-linux-arm64-musl@1.51.0': - resolution: {integrity: sha512-YSJua5irtG4DoMAjUapDTPhkQLHhBIY0G9JqlZS6/SZPzqDkPku/1GdWs0D6h/wyx0Iz31lNCfIaWKBQhzP0wQ==} + '@oxlint/binding-linux-arm64-musl@1.53.0': + resolution: {integrity: sha512-GXI1o4Thn/rtnRIL38BwrDMwVcUbIHKCsOixIWf/CkU3fCG3MXFzFTtDMt+34ik0Qk452d8kcpksL0w/hUkMZA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxlint/binding-linux-ppc64-gnu@1.51.0': - resolution: {integrity: sha512-7L4Wj2IEUNDETKssB9IDYt16T6WlF+X2jgC/hBq3diGHda9vJLpAgb09+D3quFq7TdkFtI7hwz/jmuQmQFPc1Q==} + '@oxlint/binding-linux-ppc64-gnu@1.53.0': + resolution: {integrity: sha512-Uahk7IVs2yBamCgeJ3XKpKT9Vh+de0pDKISFKnjEcI3c/w2CFHk1+W6Q6G3KI56HGwE9PWCp6ayhA9whXWkNIQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@oxlint/binding-linux-riscv64-gnu@1.51.0': - resolution: {integrity: sha512-cBUHqtOXy76G41lOB401qpFoKx1xq17qYkhWrLSM7eEjiHM9sOtYqpr6ZdqCnN9s6ZpzudX4EkeHOFH2E9q0vA==} + '@oxlint/binding-linux-riscv64-gnu@1.53.0': + resolution: {integrity: sha512-sWtcU9UkrKMWsGKdFy8R6jkm9Q0VVG1VCpxVuh0HzRQQi3ENI1Nh5CkpsdfUs2MKRcOoHKbXqTscunuXjhxoxQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxlint/binding-linux-riscv64-musl@1.51.0': - resolution: {integrity: 
sha512-WKbg8CysgZcHfZX0ixQFBRSBvFZUHa3SBnEjHY2FVYt2nbNJEjzTxA3ZR5wMU0NOCNKIAFUFvAh5/XJKPRJuJg==} + '@oxlint/binding-linux-riscv64-musl@1.53.0': + resolution: {integrity: sha512-aXew1+HDvCdExijX/8NBVC854zJwxhKP3l9AHFSHQNo4EanlHtzDMIlIvP3raUkL0vXtFCkTFYezzU5HjstB8A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxlint/binding-linux-s390x-gnu@1.51.0': - resolution: {integrity: sha512-N1QRUvJTxqXNSu35YOufdjsAVmKVx5bkrggOWAhTWBc3J4qjcBwr1IfyLh/6YCg8sYRSR1GraldS9jUgJL/U4A==} + '@oxlint/binding-linux-s390x-gnu@1.53.0': + resolution: {integrity: sha512-rVpyBSqPGou9sITcsoXqUoGBUH74bxYLYOAGUqN599Zu6BQBlBU9hh3bJQ/20D1xrhhrsbiCpVPvXpLPM5nL1w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@oxlint/binding-linux-x64-gnu@1.51.0': - resolution: {integrity: sha512-e0Mz0DizsCoqNIjeOg6OUKe8JKJWZ5zZlwsd05Bmr51Jo3AOL4UJnPvwKumr4BBtBrDZkCmOLhCvDGm95nJM2g==} + '@oxlint/binding-linux-x64-gnu@1.53.0': + resolution: {integrity: sha512-eOyeQ8qFQ2geXmlWJuXAOaek0hFhbMLlYsU457NMLKDRoC43Xf+eDPZ9Yk0n9jDaGJ5zBl/3Dy8wo41cnIXuLA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxlint/binding-linux-x64-musl@1.51.0': - resolution: {integrity: sha512-wD8HGTWhYBKXvRDvoBVB1y+fEYV01samhWQSy1Zkxq2vpezvMnjaFKRuiP6tBNITLGuffbNDEXOwcAhJ3gI5Ug==} + '@oxlint/binding-linux-x64-musl@1.53.0': + resolution: {integrity: sha512-S6rBArW/zD1tob8M9PwKYrRmz+j1ss1+wjbRAJCWKd7TC3JB6noDiA95pIj9zOZVVp04MIzy5qymnYusrEyXzg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxlint/binding-openharmony-arm64@1.51.0': - resolution: {integrity: sha512-5NSwQ2hDEJ0GPXqikjWtwzgAQCsS7P9aLMNenjjKa+gknN3lTCwwwERsT6lKXSirfU3jLjexA2XQvQALh5h27w==} + '@oxlint/binding-openharmony-arm64@1.53.0': + resolution: {integrity: sha512-sd/A0Ny5sN0D/MJtlk7w2jGY4bJQou7gToa9WZF7Sj6HTyVzvlzKJWiOHfr4SulVk4ndiFQ8rKmF9rXP0EcF3A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@oxlint/binding-win32-arm64-msvc@1.51.0': - resolution: {integrity: 
sha512-JEZyah1M0RHMw8d+jjSSJmSmO8sABA1J1RtrHYujGPeCkYg1NeH0TGuClpe2h5QtioRTaF57y/TZfn/2IFV6fA==} + '@oxlint/binding-win32-arm64-msvc@1.53.0': + resolution: {integrity: sha512-QC3q7b51Er/ZurEFcFzc7RpQ/YEoEBLJuCp3WoOzhSHHH/nkUKFy+igOxlj1z3LayhEZPDQQ7sXvv2PM2cdG3Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@oxlint/binding-win32-ia32-msvc@1.51.0': - resolution: {integrity: sha512-q3cEoKH6kwjz/WRyHwSf0nlD2F5Qw536kCXvmlSu+kaShzgrA0ojmh45CA81qL+7udfCaZL2SdKCZlLiGBVFlg==} + '@oxlint/binding-win32-ia32-msvc@1.53.0': + resolution: {integrity: sha512-3OvLgOqwd705hWHV2i8ni80pilvg6BUgpC2+xtVu++e/q28LKVohGh5J5QYJOrRMfWmxK0M/AUu43vUw62LAKQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ia32] os: [win32] - '@oxlint/binding-win32-x64-msvc@1.51.0': - resolution: {integrity: sha512-Q14+fOGb9T28nWF/0EUsYqERiRA7cl1oy4TJrGmLaqhm+aO2cV+JttboHI3CbdeMCAyDI1+NoSlrM7Melhp/cw==} + '@oxlint/binding-win32-x64-msvc@1.53.0': + resolution: {integrity: sha512-xTiOkntexCdJytZ7ArIIgl3vGW5ujMM3sJNM7/+iqGAVJagCqjFFWn68HRWRLeyT66c95uR+CeFmQFI6mLQqDw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] @@ -2461,97 +2530,97 @@ packages: resolution: {integrity: sha512-DmCG8GzysnCZ15bres3N5AHCmwBwYgp0As6xjhQ47rAUTUXxJiK+lLUxaGsX3hd/30qUpVElh05PbGuxRPgJwA==} engines: {node: '>= 10'} - '@rolldown/binding-android-arm64@1.0.0-rc.7': - resolution: {integrity: sha512-/uadfNUaMLFFBGvcIOiq8NnlhvTZTjOyybJaJnhGxD0n9k5vZRJfTaitH5GHnbwmc6T2PC+ZpS1FQH+vXyS/UA==} + '@rolldown/binding-android-arm64@1.0.0-rc.9': + resolution: {integrity: sha512-lcJL0bN5hpgJfSIz/8PIf02irmyL43P+j1pTCfbD1DbLkmGRuFIA4DD3B3ZOvGqG0XiVvRznbKtN0COQVaKUTg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@rolldown/binding-darwin-arm64@1.0.0-rc.7': - resolution: {integrity: sha512-zokYr1KgRn0hRA89dmgtPj/BmKp9DxgrfAJvOEFfXa8nfYWW2nmgiYIBGpSIAJrEg7Qc/Qznovy6xYwmKh0M8g==} + '@rolldown/binding-darwin-arm64@1.0.0-rc.9': + resolution: {integrity: 
sha512-J7Zk3kLYFsLtuH6U+F4pS2sYVzac0qkjcO5QxHS7OS7yZu2LRs+IXo+uvJ/mvpyUljDJ3LROZPoQfgBIpCMhdQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@rolldown/binding-darwin-x64@1.0.0-rc.7': - resolution: {integrity: sha512-eZFjbmrapCBVgMmuLALH3pmQQQStHFuRhsFceJHk6KISW8CkI2e9OPLp9V4qXksrySQcD8XM8fpvGLs5l5C7LQ==} + '@rolldown/binding-darwin-x64@1.0.0-rc.9': + resolution: {integrity: sha512-iwtmmghy8nhfRGeNAIltcNXzD0QMNaaA5U/NyZc1Ia4bxrzFByNMDoppoC+hl7cDiUq5/1CnFthpT9n+UtfFyg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@rolldown/binding-freebsd-x64@1.0.0-rc.7': - resolution: {integrity: sha512-xjMrh8Dmu2DNwdY6DZsrF6YPGeesc3PaTlkh8v9cqmkSCNeTxnhX3ErhVnuv1j3n8t2IuuhQIwM9eZDINNEt5Q==} + '@rolldown/binding-freebsd-x64@1.0.0-rc.9': + resolution: {integrity: sha512-DLFYI78SCiZr5VvdEplsVC2Vx53lnA4/Ga5C65iyldMVaErr86aiqCoNBLl92PXPfDtUYjUh+xFFor40ueNs4Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.7': - resolution: {integrity: sha512-mOvftrHiXg4/xFdxJY3T9Wl1/zDAOSlMN8z9an2bXsCwuvv3RdyhYbSMZDuDO52S04w9z7+cBd90lvQSPTAQtw==} + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.9': + resolution: {integrity: sha512-CsjTmTwd0Hri6iTw/DRMK7kOZ7FwAkrO4h8YWKoX/kcj833e4coqo2wzIFywtch/8Eb5enQ/lwLM7w6JX1W5RQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-TuUkeuEEPRyXMBbJ86NRhAiPNezxHW8merl3Om2HASA9Pl1rI+VZcTtsVQ6v/P0MDIFpSl0k0+tUUze9HIXyEw==} + '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-2x9O2JbSPxpxMDhP9Z74mahAStibTlrBMW0520+epJH5sac7/LwZW5Bmg/E6CXuEF53JJFW509uP+lSedaUNxg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@rolldown/binding-linux-arm64-musl@1.0.0-rc.7': - resolution: {integrity: sha512-G43ZElEvaby+YSOgrXfBgpeQv42LdS0ivFFYQufk2tBDWeBfzE/+ob5DmO8Izbyn4Y8k6GgLF11jFDYNnmU/3w==} + 
'@rolldown/binding-linux-arm64-musl@1.0.0-rc.9': + resolution: {integrity: sha512-JA1QRW31ogheAIRhIg9tjMfsYbglXXYGNPLdPEYrwFxdbkQCAzvpSCSHCDWNl4hTtrol8WeboCSEpjdZK8qrCg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-Y48ShVxGE2zUTt0A0PR3grCLNxW4DWtAfe5lxf6L3uYEQujwo/LGuRogMsAtOJeYLCPTJo2i714LOdnK34cHpw==} + '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-aOKU9dJheda8Kj8Y3w9gnt9QFOO+qKPAl8SWd7JPHP+Cu0EuDAE5wokQubLzIDQWg2myXq2XhTpOVS07qqvT+w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-KU5DUYvX3qI8/TX6D3RA4awXi4Ge/1+M6Jqv7kRiUndpqoVGgD765xhV3Q6QvtABnYjLJenrWDl3S1B5U56ixA==} + '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-OalO94fqj7IWRn3VdXWty75jC5dk4C197AWEuMhIpvVv2lw9fiPhud0+bW2ctCxb3YoBZor71QHbY+9/WToadA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@rolldown/binding-linux-x64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-1THb6FdBkAEL12zvUue2bmK4W1+P+tz8Pgu5uEzq+xrtYa3iBzmmKNlyfUzCFNCqsPd8WJEQrYdLcw4iMW4AVw==} + '@rolldown/binding-linux-x64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-cVEl1vZtBsBZna3YMjGXNvnYYrOJ7RzuWvZU0ffvJUexWkukMaDuGhUXn0rjnV0ptzGVkvc+vW9Yqy6h8YX4pg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@rolldown/binding-linux-x64-musl@1.0.0-rc.7': - resolution: {integrity: sha512-12o73atFNWDgYnLyA52QEUn9AH8pHIe12W28cmqjyHt4bIEYRzMICvYVCPa2IQm6DJBvCBrEhD9K+ct4wr2hwg==} + '@rolldown/binding-linux-x64-musl@1.0.0-rc.9': + resolution: {integrity: sha512-UzYnKCIIc4heAKgI4PZ3dfBGUZefGCJ1TPDuLHoCzgrMYPb5Rv6TLFuYtyM4rWyHM7hymNdsg5ik2C+UD9VDbA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@rolldown/binding-openharmony-arm64@1.0.0-rc.7': - resolution: {integrity: 
sha512-+uUgGwvuUCXl894MTsmTS2J0BnCZccFsmzV7y1jFxW5pTSxkuwL5agyPuDvDOztPeS6RrdqWkn7sT0jRd0ECkg==} + '@rolldown/binding-openharmony-arm64@1.0.0-rc.9': + resolution: {integrity: sha512-+6zoiF+RRyf5cdlFQP7nm58mq7+/2PFaY2DNQeD4B87N36JzfF/l9mdBkkmTvSYcYPE8tMh/o3cRlsx1ldLfog==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@rolldown/binding-wasm32-wasi@1.0.0-rc.7': - resolution: {integrity: sha512-53p2L/NSy21UiFOqUGlC11kJDZS2Nx2GJRz1QvbkXovypA3cOHbsyZHLkV72JsLSbiEQe+kg4tndUhSiC31UEA==} + '@rolldown/binding-wasm32-wasi@1.0.0-rc.9': + resolution: {integrity: sha512-rgFN6sA/dyebil3YTlL2evvi/M+ivhfnyxec7AccTpRPccno/rPoNlqybEZQBkcbZu8Hy+eqNJCqfBR8P7Pg8g==} engines: {node: '>=14.0.0'} cpu: [wasm32] - '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.7': - resolution: {integrity: sha512-K6svNRljO6QrL6VTKxwh4yThhlR9DT/tK0XpaFQMnJwwQKng+NYcVEtUkAM0WsoiZHw+Hnh3DGnn3taf/pNYGg==} + '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.9': + resolution: {integrity: sha512-lHVNUG/8nlF1IQk1C0Ci574qKYyty2goMiPlRqkC5R+3LkXDkL5Dhx8ytbxq35m+pkHVIvIxviD+TWLdfeuadA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@rolldown/binding-win32-x64-msvc@1.0.0-rc.7': - resolution: {integrity: sha512-3ZJBT47VWLKVKIyvHhUSUgVwHzzZW761YAIkM3tOT+8ZTjFVp0acCM0Y2Z2j3jCl+XYi2d9y2uEWQ8H0PvvpPw==} + '@rolldown/binding-win32-x64-msvc@1.0.0-rc.9': + resolution: {integrity: sha512-G0oA4+w1iY5AGi5HcDTxWsoxF509hrFIPB2rduV5aDqS9FtDg1CAfa7V34qImbjfhIcA8C+RekocJZA96EarwQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] - '@rolldown/pluginutils@1.0.0-rc.7': - resolution: {integrity: sha512-qujRfC8sFVInYSPPMLQByRh7zhwkGFS4+tyMQ83srV1qrxL4g8E2tyxVVyxd0+8QeBM1mIk9KbWxkegRr76XzA==} + '@rolldown/pluginutils@1.0.0-rc.9': + resolution: {integrity: sha512-w6oiRWgEBl04QkFZgmW+jnU1EC9b57Oihi2ot3HNWIQRqgHp5PnYDia5iZ5FF7rpa4EQdiqMDXjlqKGXBhsoXw==} '@rollup/rollup-android-arm-eabi@4.59.0': resolution: {integrity: 
sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} @@ -3138,6 +3207,93 @@ packages: resolution: {integrity: sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g==} engines: {node: '>=18.0.0'} + '@snazzah/davey-android-arm-eabi@0.1.10': + resolution: {integrity: sha512-7bwHxSNEI2wVXOT6xnmpnO9SHb2xwAnf9oEdL45dlfVHTgU1Okg5rwGwRvZ2aLVFFbTyecfC8EVZyhpyTkjLSw==} + engines: {node: '>= 10'} + cpu: [arm] + os: [android] + + '@snazzah/davey-android-arm64@0.1.10': + resolution: {integrity: sha512-68WUf2LQwQTP9MgPcCqTWwJztJSIk0keGfF2Y/b+MihSDh29fYJl7C0rbz69aUrVCvCC2lYkB/46P8X1kBz7yg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [android] + + '@snazzah/davey-darwin-arm64@0.1.10': + resolution: {integrity: sha512-nYC+DWCGUC1jUGEenCNQE/jJpL/02m0ebY/NvTCQbul5ktI/ShVzgA3kzssEhZvhf6jbH048Rs39wDhp/b24Jg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@snazzah/davey-darwin-x64@0.1.10': + resolution: {integrity: sha512-0q5Rrcs+O9sSSnPX+A3R3djEQs2nTAtMe5N3lApO6lZas/QNMl6wkEWCvTbDc2cfAYBMSk2jgc1awlRXi4LX3Q==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@snazzah/davey-freebsd-x64@0.1.10': + resolution: {integrity: sha512-/Gq5YDD6Oz8iBqVJLswUnetCv9JCRo1quYX5ujzpAG8zPCNItZo4g4h5p9C+h4Yoay2quWBYhoaVqQKT96bm8g==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@snazzah/davey-linux-arm-gnueabihf@0.1.10': + resolution: {integrity: sha512-0Z7Vrt0WIbgxws9CeHB9qlueYJlvltI44rUuZmysdi70UcHGxlr7nE3MnzYCr9nRWRegohn8EQPWHMKMDJH2GA==} + engines: {node: '>= 10'} + cpu: [arm] + os: [linux] + + '@snazzah/davey-linux-arm64-gnu@0.1.10': + resolution: {integrity: sha512-xhZQycn4QB+qXhqm/QmZ+kb9MHMXcbjjoPfvcIL4WMQXFG/zUWHW8EiBk7ZTEGMOpeab3F9D1+MlgumglYByUQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@snazzah/davey-linux-arm64-musl@0.1.10': + resolution: {integrity: 
sha512-pudzQCP9rZItwW4qHHvciMwtNd9kWH4l73g6Id1LRpe6sc8jiFBV7W+YXITj2PZbI0by6XPfkRP6Dk5IkGOuAw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@snazzah/davey-linux-x64-gnu@0.1.10': + resolution: {integrity: sha512-DC8qRmk+xJEFNqjxKB46cETKeDQqgUqE5p39KXS2k6Vl/XTi8pw8pXOxrPfYte5neoqlWAVQzbxuLnwpyRJVEQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@snazzah/davey-linux-x64-musl@0.1.10': + resolution: {integrity: sha512-wPR5/2QmsF7sR0WUaCwbk4XI3TLcxK9PVK8mhgcAYyuRpbhcVgNGWXs8ulcyMSXve5pFRJAFAuMTGCEb014peg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@snazzah/davey-wasm32-wasi@0.1.10': + resolution: {integrity: sha512-SfQavU+eKTDbRmPeLRodrVSfsWq25PYTmH1nIZW3B27L6IkijzjXZZuxiU1ZG1gdI5fB7mwXrOTtx34t+vAG7Q==} + engines: {node: '>=14.0.0'} + cpu: [wasm32] + + '@snazzah/davey-win32-arm64-msvc@0.1.10': + resolution: {integrity: sha512-Raafk53smYs67wZCY9bQXHXzbaiRMS5QCdjTdin3D9fF5A06T/0Zv1z7/YnaN+O3GSL/Ou3RvynF7SziToYiFQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@snazzah/davey-win32-ia32-msvc@0.1.10': + resolution: {integrity: sha512-pAs43l/DiZ+icqBwxIwNePzuYxFM1ZblVuf7t6vwwSLxvova7vnREnU7qDVjbc5/YTUHOsqYy3S6TpZMzDo2lw==} + engines: {node: '>= 10'} + cpu: [ia32] + os: [win32] + + '@snazzah/davey-win32-x64-msvc@0.1.10': + resolution: {integrity: sha512-kr6148VVBoUT4CtD+5hYshTFRny7R/xQZxXFhFc0fYjtmdMVM8Px9M91olg1JFNxuNzdfMfTufR58Q3wfBocug==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@snazzah/davey@0.1.10': + resolution: {integrity: sha512-J5f7vV5/tnj0xGnqufFRd6qiWn3FcR3iXjpjpEmO2Ok+Io0AASkMaZ3I39TsL45as0Qo5bq9wWuamFQ77PjJ+g==} + engines: {node: '>= 10'} + '@standard-schema/spec@1.1.0': resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} @@ -3304,11 +3460,11 @@ packages: '@types/node@20.19.37': resolution: {integrity: sha512-8kzdPJ3FsNsVIurqBs7oodNnCEVbni9yUEkaHbgptDACOPW04jimGagZ51E6+lXUwJjgnBw+hyko/lkFWCldqw==} - 
'@types/node@24.11.0': - resolution: {integrity: sha512-fPxQqz4VTgPI/IQ+lj9r0h+fDR66bzoeMGHp8ASee+32OSGIkeASsoZuJixsQoVef1QJbeubcPBxKk22QVoWdw==} + '@types/node@24.12.0': + resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==} - '@types/node@25.3.5': - resolution: {integrity: sha512-oX8xrhvpiyRCQkG1MFchB09f+cXftgIXb3a7UUa4Y3wpmZPw5tyZGTLWhlESOLq1Rq6oDlc8npVU2/9xiCuXMA==} + '@types/node@25.4.0': + resolution: {integrity: sha512-9wLpoeWuBlcbBpOY3XmzSTG3oscB6xjBEEtn+pYXTfhyXhIxC5FsBer2KTopBlvKEiW9l13po9fq+SJY/5lkhw==} '@types/qrcode-terminal@0.12.2': resolution: {integrity: sha512-v+RcIEJ+Uhd6ygSQ0u5YYY7ZM+la7GgPbs0V/7l/kFs2uO4S8BcIUEMoP7za4DNIqNnUD5npf0A/7kBhrCKG5Q==} @@ -3355,43 +3511,43 @@ packages: '@types/yauzl@2.10.3': resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-mywkctYr45fUBUYD35poInc9HEjup0zyCO5z3ZU2QC9eCQShpwYSDceoSCwxVKB/b/f/CU6H3LqINFeIz5CvrQ==} + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-k3UqlA40U9m8meAyliJdbTayDSGZRBGNsEDP2rtjOomLUo2IA0eIi4vNAjQKzsXFtyfoQ59MGAqOLSO/CzVrQA==} cpu: [arm64] os: [darwin] - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-iF+Y4USbCiD5BxmXI6xYuy+S6d2BhxKDb3YHjchzqg3AgleDNTd2rqSzlWv4ku26V2iOSfpM9t1H/xluL9pgNw==} + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-8PNUCS1HPeXMK1F+1D3A4MyD+9Nil2mM3mWSwayUZpqT/A+dfEtcoo4Oe7Gz6qvMZbhCjbipwhTC84ilisiE1g==} cpu: [x64] os: [darwin] - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-uEIIbW1JYPGEesVh/P5xA+xox7pQ6toeFPeke2X2H2bs5YkWHVaUQtVZuKNmGelw+2PCG6XRrXvMgMp056ebuQ==} + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260311.1': + resolution: 
{integrity: sha512-WwRJO5ryMEs4Flro6JKNq0T+hR78eYFrItautu9o6EsIpeevk7Cq7T0BBgCrAf+A5aKts21HpiWzfHI0YP/CuQ==} cpu: [arm64] os: [linux] - '@typescript/native-preview-linux-arm@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-vg8hwfwIhT8CmYJI5lG3PP8IoNzKKBGbq1cKjxQabSZTPuQKwVFVity2XKTKZKd+qRGL7xW4UWMJZLFgSx3b2Q==} + '@typescript/native-preview-linux-arm@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-9T8kwNALCWzuNe00ri/f6wwoVD64YZW24cqkycFeptIF+DfNxfHMddWd7fvtHf0OKzPtkL83mkjBtviNeVKOfQ==} cpu: [arm] os: [linux] - '@typescript/native-preview-linux-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-Yd/ht0CGE4NYUAjuHa1u4VbiJbyUgvDh+b2o+Zcb2h5t8B761DIzDm24QqVXh+KhvGUoEodXWg3g3APxLHqj8Q==} + '@typescript/native-preview-linux-x64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-oMm3cb4njzMLBb61TI4EGq5Igxc+hoPHHNpMWqORfiYu/uQZWnter/twamTrZo6boCFtIa59mrGkhR3Qz7kauA==} cpu: [x64] os: [linux] - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-Klk6BoiHegfPmkO0YYrXmbYVdPjOfN25lRkzenqDIwbyzPlABHvICCyo5YRvWD3HU4EeDfLisIFU9wEd/0duCw==} + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-EQ5nz4qrwtzMZ5bjdMVQ2ke5BHQWDBz9IQsdh/8UU819cs5ZBnKmFFe5wOrIngqFvq4EoWKDXf983Vw0q4erkg==} cpu: [arm64] os: [win32] - '@typescript/native-preview-win32-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-4LrXmaMfzedwczANIkD/M9guPD4EWuQnCxOJsJkdYi3ExWQDjIFwfmxTtAmfPBWxVExLfn7UUkz/yCtcv2Wd+w==} + '@typescript/native-preview-win32-x64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-Y/5A7BaRFV1Pro4BqNW3nVDuId7YdPXktl769x1yUjTDQLH6YJEJVeBkFkT0+4e1O5IL92rxxr8rWMLypNKnTw==} cpu: [x64] os: [win32] - '@typescript/native-preview@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-8a3oe5IAfBkEfMouRheNhOXUScBSHIUknPvUdsbxx7s+Ja1lxFNA1X1TTl2T18vu72Q/mM86vxefw5eW8/ps3g==} + '@typescript/native-preview@7.0.0-dev.20260311.1': + resolution: {integrity: 
sha512-BnyOW/mdZVZGevyeJ4RRY60CI4F121QBa++8Rwd+/Ms48OKQ30eMhaIKWGowz/u4WjJZmrzhFxIzN92XeSWMCQ==} hasBin: true '@typespec/ts-http-runtime@0.3.3': @@ -3523,8 +3679,8 @@ packages: engines: {node: '>=0.4.0'} hasBin: true - acpx@0.1.16: - resolution: {integrity: sha512-CxHkUIP9dPSjh+RyoZkQg0AXjSiSus/dF4xKEeG9c+7JboZp5bZuWie/n4V7sBeKTMheMoEYGrMUslrdUadrqg==} + acpx@0.2.0: + resolution: {integrity: sha512-5E38uizINoEpTuHjLvlkWTfFqeLRqnO7vS3z3qmAXZCEZVExE+oYhJ1TClIl8KZZ9gKaoJF+5c0ltDcJDzG67g==} engines: {node: '>=22.12.0'} hasBin: true @@ -3536,6 +3692,10 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} + agent-base@8.0.0: + resolution: {integrity: sha512-QT8i0hCz6C/KQ+KTAbSNwCHDGdmUJl2tp2ZpNlGSWCfhUNVbYG2WLE3MdZGBAgXPV4GAvjGMxo+C1hroyxmZEg==} + engines: {node: '>= 14'} + ajv-formats@3.0.1: resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==} peerDependencies: @@ -3697,6 +3857,36 @@ packages: bare-abort-controller: optional: true + bare-fs@4.5.5: + resolution: {integrity: sha512-XvwYM6VZqKoqDll8BmSww5luA5eflDzY0uEFfBJtFKe4PAAtxBjU3YIxzIBzhyaEQBy1VXEQBto4cpN5RZJw+w==} + engines: {bare: '>=1.16.0'} + peerDependencies: + bare-buffer: '*' + peerDependenciesMeta: + bare-buffer: + optional: true + + bare-os@3.7.1: + resolution: {integrity: sha512-ebvMaS5BgZKmJlvuWh14dg9rbUI84QeV3WlWn6Ph6lFI8jJoh7ADtVTyD2c93euwbe+zgi0DVrl4YmqXeM9aIA==} + engines: {bare: '>=1.14.0'} + + bare-path@3.0.0: + resolution: {integrity: sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==} + + bare-stream@2.8.1: + resolution: {integrity: sha512-bSeR8RfvbRwDpD7HWZvn8M3uYNDrk7m9DQjYOFkENZlXW8Ju/MPaqUPQq5LqJ3kyjEm07siTaAQ7wBKCU59oHg==} + peerDependencies: + bare-buffer: '*' + bare-events: '*' + peerDependenciesMeta: + bare-buffer: + optional: true + bare-events: + optional: true + + 
bare-url@2.3.2: + resolution: {integrity: sha512-ZMq4gd9ngV5aTMa5p9+UfY0b3skwhHELaDkhEHetMdX0LRkW9kzaym4oo/Eh+Ghm0CCDuMTsRIGM/ytUc1ZYmw==} + base64-js@1.5.1: resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} @@ -4059,8 +4249,8 @@ packages: discord-api-types@0.38.37: resolution: {integrity: sha512-Cv47jzY1jkGkh5sv0bfHYqGgKOWO1peOrGMkDFM4UmaGMOTgOW8QSexhvixa9sVOiz8MnVOBryWYyw/CEVhj7w==} - discord-api-types@0.38.41: - resolution: {integrity: sha512-yMECyR8j9c2fVTvCQ+Qc24pweYFIZk/XoxDOmt1UvPeSw5tK6gXBd/2hhP+FEAe9Y6ny8pRMaf618XDK4U53OQ==} + discord-api-types@0.38.42: + resolution: {integrity: sha512-qs1kya7S84r5RR8m9kgttywGrmmoHaRifU1askAoi+wkoSefLpZP6aGXusjNw5b0jD3zOg3LTwUa3Tf2iHIceQ==} doctypes@1.1.0: resolution: {integrity: sha512-LLBi6pEqS6Do3EKQ3J0NqHWV5hhb78Pi8vvESYwyOy2c31ZEZVdtitdzsQsKb7878PEERhzUk0ftqGhG6Mz+pQ==} @@ -4075,9 +4265,8 @@ packages: resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} engines: {node: '>= 4'} - dompurify@3.3.2: - resolution: {integrity: sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==} - engines: {node: '>=20'} + dompurify@3.3.3: + resolution: {integrity: sha512-Oj6pzI2+RqBfFG+qOaOLbFXLQ90ARpcGG6UePL82bJLtdsa6CYJD7nmiU8MW9nQNOtCHV3lZ/Bzq1X0QYbBZCA==} domutils@3.2.2: resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} @@ -4559,6 +4748,10 @@ packages: resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} engines: {node: '>= 14'} + https-proxy-agent@8.0.0: + resolution: {integrity: sha512-YYeW+iCnAS3xhvj2dvVoWgsbca3RfQy/IlaNHHOtDmU0jMqPI9euIq3Y9BJETdxk16h9NHHCKqp/KB9nIMStCQ==} + engines: {node: '>= 14'} + human-signals@1.1.1: resolution: {integrity: 
sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==} engines: {node: '>=8.12.0'} @@ -5332,6 +5525,14 @@ packages: zod: optional: true + openclaw@2026.3.8: + resolution: {integrity: sha512-e5Rk2Aj55sD/5LyX94mdYCQj7zpHXo0xIZsl+k140+nRopePfPAxC7nsu0V/NyypPRtaotP1riFfzK7IhaYkuQ==} + engines: {node: '>=22.12.0'} + hasBin: true + peerDependencies: + '@napi-rs/canvas': ^0.1.89 + node-llama-cpp: 3.16.2 + opus-decoder@0.7.11: resolution: {integrity: sha512-+e+Jz3vGQLxRTBHs8YJQPRPc1Tr+/aC6coV/DlZylriA29BdHQAYXhvNRKtjftof17OFng0+P4wsFIqQu3a48A==} @@ -5346,8 +5547,8 @@ packages: resolution: {integrity: sha512-4/8JfsetakdeEa4vAYV45FW20aY+B/+K8NEXp5Eiar3wR8726whgHrbSg5Ar/ZY1FLJ/AGtUqV7W2IVF+Gvp9A==} engines: {node: '>=20'} - oxfmt@0.36.0: - resolution: {integrity: sha512-/ejJ+KoSW6J9bcNT9a9UtJSJNWhJ3yOLSBLbkoFHJs/8CZjmaZVZAJe4YgO1KMJlKpNQasrn/G9JQUEZI3p0EQ==} + oxfmt@0.38.0: + resolution: {integrity: sha512-RGYfnnxmCz8dMQ1Oo5KrYkNRc9cne2WL2vfE+datWNkgiSAkfUsqpGLR7rnkN6cQFgQkHDZH400eXN6izJ8Lww==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true @@ -5355,8 +5556,8 @@ packages: resolution: {integrity: sha512-4RuJK2jP08XwqtUu+5yhCbxEauCm6tv2MFHKEMsjbosK2+vy5us82oI3VLuHwbNyZG7ekZA26U2LLHnGR4frIA==} hasBin: true - oxlint@1.51.0: - resolution: {integrity: sha512-g6DNPaV9/WI9MoX2XllafxQuxwY1TV++j7hP8fTJByVBuCoVtm3dy9f/2vtH/HU40JztcgWF4G7ua+gkainklQ==} + oxlint@1.53.0: + resolution: {integrity: sha512-TLW0PzGbpO1JxUnuy1pIqVPjQUGh4fNfxu5XJbdFIRFVaJ0UFzTjjk/hSFTMRxN6lZub53xL/IwJNEkrh7VtDg==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true peerDependencies: @@ -5781,8 +5982,8 @@ packages: resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==} hasBin: true - rolldown-plugin-dts@0.22.4: - resolution: {integrity: sha512-pueqTPyN1N6lWYivyDGad+j+GO3DT67pzpct8s8e6KGVIezvnrDjejuw1AXFeyDRas3xTq4Ja6Lj5R5/04C5GQ==} + rolldown-plugin-dts@0.22.5: + resolution: {integrity: 
sha512-M/HXfM4cboo+jONx9Z0X+CUf3B5tCi7ni+kR5fUW50Fp9AlZk0oVLesibGWgCXDKFp5lpgQ9yhKoImUFjl3VZw==} engines: {node: '>=20.19.0'} peerDependencies: '@ts-macro/tsc': ^0.3.6 @@ -5800,8 +6001,8 @@ packages: vue-tsc: optional: true - rolldown@1.0.0-rc.7: - resolution: {integrity: sha512-5X0zEeQFzDpB3MqUWQZyO2TUQqP9VnT7CqXHF2laTFRy487+b6QZyotCazOySAuZLAvplCaOVsg1tVn/Zlmwfg==} + rolldown@1.0.0-rc.9: + resolution: {integrity: sha512-9EbgWge7ZH+yqb4d2EnELAntgPTWbfL8ajiTW+SyhJEC4qhBbkCKbqFV4Ge4zmu5ziQuVbWxb/XwLZ+RIO7E8Q==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true @@ -6115,13 +6316,16 @@ packages: resolution: {integrity: sha512-iK5/YhZxq5GO5z8wb0bY1317uDF3Zjpha0QFFLA8/trAoiLbQD0HUbMesEaxyzUgDxi2QlcbM8IvqOlEjgoXBA==} engines: {node: '>=12.17'} - tar-stream@3.1.7: - resolution: {integrity: sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==} + tar-stream@3.1.8: + resolution: {integrity: sha512-U6QpVRyCGHva435KoNWy9PRoi2IFYCgtEhq9nmrPPpbRacPs9IH4aJ3gbrFC8dPcXvdSZ4XXfXT5Fshbp2MtlQ==} tar@7.5.11: resolution: {integrity: sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ==} engines: {node: '>=18'} + teex@1.0.1: + resolution: {integrity: sha512-eYE6iEI62Ni1H8oIa7KlDU6uQBtqr4Eajni3wX7rpfXD8ysFx8z0+dri+KWEPWpBsxXfxu58x/0jvTVT1ekOSg==} + text-decoder@1.2.7: resolution: {integrity: sha512-vlLytXkeP4xvEq2otHeJfSQIRyWxo/oZGEbXrtEEF9Hnmrdly59sUbzZ/QgyWuLYHctCHxFF4tRQZNQ9k60ExQ==} @@ -6194,14 +6398,14 @@ packages: ts-algebra@2.0.0: resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==} - tsdown@0.21.0: - resolution: {integrity: sha512-Sw/ehzVhjYLD7HVBPybJHDxpcaeyFjPcaDCME23o9O4fyuEl6ibYEdrnB8W8UchYAGoayKqzWQqx/oIp3jn/Vg==} + tsdown@0.21.2: + resolution: {integrity: sha512-pP8eAcd1XAWjl5gjosuJs0BAuVoheUe3V8VDHx31QK7YOgXjcCMsBSyFWO3CMh/CSUkjRUzR96JtGH3WJFTExQ==} engines: {node: '>=20.19.0'} hasBin: true peerDependencies: 
'@arethetypeswrong/core': ^0.18.1 - '@tsdown/css': 0.21.0 - '@tsdown/exe': 0.21.0 + '@tsdown/css': 0.21.2 + '@tsdown/exe': 0.21.2 '@vitejs/devtools': '*' publint: ^0.3.0 typescript: ^5.0.0 @@ -6324,8 +6528,8 @@ packages: resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} engines: {node: '>= 0.8'} - unrun@0.2.30: - resolution: {integrity: sha512-a4W1wDADI0gvDDr14T0ho1FgMhmfjq6M8Iz8q234EnlxgH/9cMHDueUSLwTl1fwSBs5+mHrLFYH+7B8ao36EBA==} + unrun@0.2.32: + resolution: {integrity: sha512-opd3z6791rf281JdByf0RdRQrpcc7WyzqittqIXodM/5meNWdTwrVxeyzbaCp4/Rgls/um14oUaif1gomO8YGg==} engines: {node: '>=20.19.0'} hasBin: true peerDependencies: @@ -6580,6 +6784,10 @@ snapshots: dependencies: zod: 4.3.6 + '@agentclientprotocol/sdk@0.16.1(zod@4.3.6)': + dependencies: + zod: 4.3.6 + '@anthropic-ai/sdk@0.73.0(zod@4.3.6)': dependencies: json-schema-to-ts: 3.1.1 @@ -6685,22 +6893,22 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-bedrock@3.1004.0': + '@aws-sdk/client-bedrock@3.1007.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.18 - '@aws-sdk/credential-provider-node': 3.972.18 + '@aws-sdk/core': 3.973.19 + '@aws-sdk/credential-provider-node': 3.972.19 '@aws-sdk/middleware-host-header': 3.972.7 '@aws-sdk/middleware-logger': 3.972.7 '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.19 + '@aws-sdk/middleware-user-agent': 3.972.20 '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/token-providers': 3.1004.0 + '@aws-sdk/token-providers': 3.1007.0 '@aws-sdk/types': 3.973.5 '@aws-sdk/util-endpoints': 3.996.4 '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.4 + '@aws-sdk/util-user-agent-node': 3.973.5 '@smithy/config-resolver': 4.4.10 '@smithy/core': 3.23.9 '@smithy/fetch-http-handler': 5.3.13 @@ -6822,6 +7030,22 @@ snapshots: '@smithy/util-utf8': 4.2.2 tslib: 
2.8.1 + '@aws-sdk/core@3.973.19': + dependencies: + '@aws-sdk/types': 3.973.5 + '@aws-sdk/xml-builder': 3.972.10 + '@smithy/core': 3.23.9 + '@smithy/node-config-provider': 4.3.11 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@aws-sdk/crc64-nvme@3.972.3': dependencies: '@smithy/types': 4.13.0 @@ -6843,6 +7067,14 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/credential-provider-env@3.972.17': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/credential-provider-http@3.972.15': dependencies: '@aws-sdk/core': 3.973.15 @@ -6869,6 +7101,19 @@ snapshots: '@smithy/util-stream': 4.5.17 tslib: 2.8.1 + '@aws-sdk/credential-provider-http@3.972.19': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/node-http-handler': 4.4.14 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-stream': 4.5.17 + tslib: 2.8.1 + '@aws-sdk/credential-provider-ini@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -6907,6 +7152,25 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-ini@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/credential-provider-env': 3.972.17 + '@aws-sdk/credential-provider-http': 3.972.19 + '@aws-sdk/credential-provider-login': 3.972.18 + '@aws-sdk/credential-provider-process': 3.972.17 + '@aws-sdk/credential-provider-sso': 3.972.18 + '@aws-sdk/credential-provider-web-identity': 3.972.18 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/credential-provider-imds': 4.2.11 + 
'@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/credential-provider-login@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -6933,6 +7197,19 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-login@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/credential-provider-node@3.972.14': dependencies: '@aws-sdk/credential-provider-env': 3.972.13 @@ -6967,6 +7244,23 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-node@3.972.19': + dependencies: + '@aws-sdk/credential-provider-env': 3.972.17 + '@aws-sdk/credential-provider-http': 3.972.19 + '@aws-sdk/credential-provider-ini': 3.972.18 + '@aws-sdk/credential-provider-process': 3.972.17 + '@aws-sdk/credential-provider-sso': 3.972.18 + '@aws-sdk/credential-provider-web-identity': 3.972.18 + '@aws-sdk/types': 3.973.5 + '@smithy/credential-provider-imds': 4.2.11 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/credential-provider-process@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -6985,6 +7279,15 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/credential-provider-process@3.972.17': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/credential-provider-sso@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -7011,6 +7314,19 @@ snapshots: 
transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-sso@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/token-providers': 3.1005.0 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/credential-provider-web-identity@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -7035,6 +7351,18 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-web-identity@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/eventstream-handler-node@3.972.10': dependencies: '@aws-sdk/types': 3.973.5 @@ -7175,6 +7503,17 @@ snapshots: '@smithy/util-retry': 4.2.11 tslib: 2.8.1 + '@aws-sdk/middleware-user-agent@3.972.20': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-endpoints': 3.996.4 + '@smithy/core': 3.23.9 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-retry': 4.2.11 + tslib: 2.8.1 + '@aws-sdk/middleware-websocket@3.972.12': dependencies: '@aws-sdk/types': 3.973.5 @@ -7276,6 +7615,49 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/nested-clients@3.996.8': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.19 + '@aws-sdk/middleware-host-header': 3.972.7 + '@aws-sdk/middleware-logger': 3.972.7 + '@aws-sdk/middleware-recursion-detection': 3.972.7 + '@aws-sdk/middleware-user-agent': 3.972.20 + '@aws-sdk/region-config-resolver': 3.972.7 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-endpoints': 3.996.4 + '@aws-sdk/util-user-agent-browser': 3.972.7 + 
'@aws-sdk/util-user-agent-node': 3.973.5 + '@smithy/config-resolver': 4.4.10 + '@smithy/core': 3.23.9 + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/hash-node': 4.2.11 + '@smithy/invalid-dependency': 4.2.11 + '@smithy/middleware-content-length': 4.2.11 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/middleware-retry': 4.4.40 + '@smithy/middleware-serde': 4.2.12 + '@smithy/middleware-stack': 4.2.11 + '@smithy/node-config-provider': 4.3.11 + '@smithy/node-http-handler': 4.4.14 + '@smithy/protocol-http': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.39 + '@smithy/util-defaults-mode-node': 4.2.42 + '@smithy/util-endpoints': 3.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-retry': 4.2.11 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/region-config-resolver@3.972.6': dependencies: '@aws-sdk/types': 3.973.4 @@ -7324,6 +7706,30 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/token-providers@3.1005.0': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/token-providers@3.1007.0': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/token-providers@3.999.0': dependencies: '@aws-sdk/core': 3.973.15 @@ -7414,6 +7820,14 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/util-user-agent-node@3.973.5': + dependencies: + 
'@aws-sdk/middleware-user-agent': 3.972.20 + '@aws-sdk/types': 3.973.5 + '@smithy/node-config-provider': 4.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/xml-builder@3.972.10': dependencies: '@smithy/types': 4.13.0 @@ -7499,7 +7913,7 @@ snapshots: '@buape/carbon@0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1)': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 discord-api-types: 0.38.37 optionalDependencies: '@cloudflare/workers-types': 4.20260120.0 @@ -7657,7 +8071,24 @@ snapshots: '@discordjs/voice@0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1)': dependencies: '@types/ws': 8.18.1 - discord-api-types: 0.38.41 + discord-api-types: 0.38.42 + prism-media: 1.3.5(@discordjs/opus@0.10.0)(opusscript@0.1.1) + tslib: 2.8.1 + ws: 8.19.0 + transitivePeerDependencies: + - '@discordjs/opus' + - bufferutil + - ffmpeg-static + - node-opus + - opusscript + - utf-8-validate + optional: true + + '@discordjs/voice@0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1)': + dependencies: + '@snazzah/davey': 0.1.10 + '@types/ws': 8.18.1 + discord-api-types: 0.38.42 prism-media: 1.3.5(@discordjs/opus@0.10.0)(opusscript@0.1.1) tslib: 2.8.1 ws: 8.19.0 @@ -8049,7 +8480,7 @@ snapshots: '@line/bot-sdk@10.6.0': dependencies: - '@types/node': 24.11.0 + '@types/node': 24.12.0 optionalDependencies: axios: 1.13.5 transitivePeerDependencies: @@ -8768,61 +9199,61 @@ snapshots: '@oxc-project/types@0.115.0': {} - '@oxfmt/binding-android-arm-eabi@0.36.0': + '@oxfmt/binding-android-arm-eabi@0.38.0': optional: true - '@oxfmt/binding-android-arm64@0.36.0': + '@oxfmt/binding-android-arm64@0.38.0': optional: true - '@oxfmt/binding-darwin-arm64@0.36.0': + '@oxfmt/binding-darwin-arm64@0.38.0': optional: true - '@oxfmt/binding-darwin-x64@0.36.0': + '@oxfmt/binding-darwin-x64@0.38.0': optional: true - '@oxfmt/binding-freebsd-x64@0.36.0': + '@oxfmt/binding-freebsd-x64@0.38.0': optional: true - '@oxfmt/binding-linux-arm-gnueabihf@0.36.0': + 
'@oxfmt/binding-linux-arm-gnueabihf@0.38.0': optional: true - '@oxfmt/binding-linux-arm-musleabihf@0.36.0': + '@oxfmt/binding-linux-arm-musleabihf@0.38.0': optional: true - '@oxfmt/binding-linux-arm64-gnu@0.36.0': + '@oxfmt/binding-linux-arm64-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-arm64-musl@0.36.0': + '@oxfmt/binding-linux-arm64-musl@0.38.0': optional: true - '@oxfmt/binding-linux-ppc64-gnu@0.36.0': + '@oxfmt/binding-linux-ppc64-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-riscv64-gnu@0.36.0': + '@oxfmt/binding-linux-riscv64-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-riscv64-musl@0.36.0': + '@oxfmt/binding-linux-riscv64-musl@0.38.0': optional: true - '@oxfmt/binding-linux-s390x-gnu@0.36.0': + '@oxfmt/binding-linux-s390x-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-x64-gnu@0.36.0': + '@oxfmt/binding-linux-x64-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-x64-musl@0.36.0': + '@oxfmt/binding-linux-x64-musl@0.38.0': optional: true - '@oxfmt/binding-openharmony-arm64@0.36.0': + '@oxfmt/binding-openharmony-arm64@0.38.0': optional: true - '@oxfmt/binding-win32-arm64-msvc@0.36.0': + '@oxfmt/binding-win32-arm64-msvc@0.38.0': optional: true - '@oxfmt/binding-win32-ia32-msvc@0.36.0': + '@oxfmt/binding-win32-ia32-msvc@0.38.0': optional: true - '@oxfmt/binding-win32-x64-msvc@0.36.0': + '@oxfmt/binding-win32-x64-msvc@0.38.0': optional: true '@oxlint-tsgolint/darwin-arm64@0.16.0': @@ -8843,61 +9274,61 @@ snapshots: '@oxlint-tsgolint/win32-x64@0.16.0': optional: true - '@oxlint/binding-android-arm-eabi@1.51.0': + '@oxlint/binding-android-arm-eabi@1.53.0': optional: true - '@oxlint/binding-android-arm64@1.51.0': + '@oxlint/binding-android-arm64@1.53.0': optional: true - '@oxlint/binding-darwin-arm64@1.51.0': + '@oxlint/binding-darwin-arm64@1.53.0': optional: true - '@oxlint/binding-darwin-x64@1.51.0': + '@oxlint/binding-darwin-x64@1.53.0': optional: true - '@oxlint/binding-freebsd-x64@1.51.0': + '@oxlint/binding-freebsd-x64@1.53.0': 
optional: true - '@oxlint/binding-linux-arm-gnueabihf@1.51.0': + '@oxlint/binding-linux-arm-gnueabihf@1.53.0': optional: true - '@oxlint/binding-linux-arm-musleabihf@1.51.0': + '@oxlint/binding-linux-arm-musleabihf@1.53.0': optional: true - '@oxlint/binding-linux-arm64-gnu@1.51.0': + '@oxlint/binding-linux-arm64-gnu@1.53.0': optional: true - '@oxlint/binding-linux-arm64-musl@1.51.0': + '@oxlint/binding-linux-arm64-musl@1.53.0': optional: true - '@oxlint/binding-linux-ppc64-gnu@1.51.0': + '@oxlint/binding-linux-ppc64-gnu@1.53.0': optional: true - '@oxlint/binding-linux-riscv64-gnu@1.51.0': + '@oxlint/binding-linux-riscv64-gnu@1.53.0': optional: true - '@oxlint/binding-linux-riscv64-musl@1.51.0': + '@oxlint/binding-linux-riscv64-musl@1.53.0': optional: true - '@oxlint/binding-linux-s390x-gnu@1.51.0': + '@oxlint/binding-linux-s390x-gnu@1.53.0': optional: true - '@oxlint/binding-linux-x64-gnu@1.51.0': + '@oxlint/binding-linux-x64-gnu@1.53.0': optional: true - '@oxlint/binding-linux-x64-musl@1.51.0': + '@oxlint/binding-linux-x64-musl@1.53.0': optional: true - '@oxlint/binding-openharmony-arm64@1.51.0': + '@oxlint/binding-openharmony-arm64@1.53.0': optional: true - '@oxlint/binding-win32-arm64-msvc@1.51.0': + '@oxlint/binding-win32-arm64-msvc@1.53.0': optional: true - '@oxlint/binding-win32-ia32-msvc@1.51.0': + '@oxlint/binding-win32-ia32-msvc@1.53.0': optional: true - '@oxlint/binding-win32-x64-msvc@1.51.0': + '@oxlint/binding-win32-x64-msvc@1.53.0': optional: true '@pierre/diffs@1.0.11(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': @@ -8982,54 +9413,54 @@ snapshots: '@reflink/reflink-win32-x64-msvc': 0.1.19 optional: true - '@rolldown/binding-android-arm64@1.0.0-rc.7': + '@rolldown/binding-android-arm64@1.0.0-rc.9': optional: true - '@rolldown/binding-darwin-arm64@1.0.0-rc.7': + '@rolldown/binding-darwin-arm64@1.0.0-rc.9': optional: true - '@rolldown/binding-darwin-x64@1.0.0-rc.7': + '@rolldown/binding-darwin-x64@1.0.0-rc.9': optional: true - 
'@rolldown/binding-freebsd-x64@1.0.0-rc.7': + '@rolldown/binding-freebsd-x64@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.7': + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm64-musl@1.0.0-rc.7': + '@rolldown/binding-linux-arm64-musl@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-x64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-x64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-x64-musl@1.0.0-rc.7': + '@rolldown/binding-linux-x64-musl@1.0.0-rc.9': optional: true - '@rolldown/binding-openharmony-arm64@1.0.0-rc.7': + '@rolldown/binding-openharmony-arm64@1.0.0-rc.9': optional: true - '@rolldown/binding-wasm32-wasi@1.0.0-rc.7': + '@rolldown/binding-wasm32-wasi@1.0.0-rc.9': dependencies: '@napi-rs/wasm-runtime': 1.1.1 optional: true - '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.7': + '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.9': optional: true - '@rolldown/binding-win32-x64-msvc@1.0.0-rc.7': + '@rolldown/binding-win32-x64-msvc@1.0.0-rc.9': optional: true - '@rolldown/pluginutils@1.0.0-rc.7': {} + '@rolldown/pluginutils@1.0.0-rc.9': {} '@rollup/rollup-android-arm-eabi@4.59.0': optional: true @@ -9187,14 +9618,14 @@ snapshots: '@slack/logger@4.0.0': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@slack/oauth@3.0.4': dependencies: '@slack/logger': 4.0.0 '@slack/web-api': 7.14.1 '@types/jsonwebtoken': 9.0.10 - '@types/node': 25.3.5 + '@types/node': 25.4.0 jsonwebtoken: 9.0.3 transitivePeerDependencies: - debug @@ -9203,7 +9634,7 @@ snapshots: dependencies: '@slack/logger': 4.0.0 '@slack/web-api': 7.14.1 - 
'@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/ws': 8.18.1 eventemitter3: 5.0.4 ws: 8.19.0 @@ -9218,7 +9649,7 @@ snapshots: dependencies: '@slack/logger': 4.0.0 '@slack/types': 2.20.0 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/retry': 0.12.0 axios: 1.13.5 eventemitter3: 5.0.4 @@ -9855,6 +10286,67 @@ snapshots: dependencies: tslib: 2.8.1 + '@snazzah/davey-android-arm-eabi@0.1.10': + optional: true + + '@snazzah/davey-android-arm64@0.1.10': + optional: true + + '@snazzah/davey-darwin-arm64@0.1.10': + optional: true + + '@snazzah/davey-darwin-x64@0.1.10': + optional: true + + '@snazzah/davey-freebsd-x64@0.1.10': + optional: true + + '@snazzah/davey-linux-arm-gnueabihf@0.1.10': + optional: true + + '@snazzah/davey-linux-arm64-gnu@0.1.10': + optional: true + + '@snazzah/davey-linux-arm64-musl@0.1.10': + optional: true + + '@snazzah/davey-linux-x64-gnu@0.1.10': + optional: true + + '@snazzah/davey-linux-x64-musl@0.1.10': + optional: true + + '@snazzah/davey-wasm32-wasi@0.1.10': + dependencies: + '@napi-rs/wasm-runtime': 1.1.1 + optional: true + + '@snazzah/davey-win32-arm64-msvc@0.1.10': + optional: true + + '@snazzah/davey-win32-ia32-msvc@0.1.10': + optional: true + + '@snazzah/davey-win32-x64-msvc@0.1.10': + optional: true + + '@snazzah/davey@0.1.10': + optionalDependencies: + '@snazzah/davey-android-arm-eabi': 0.1.10 + '@snazzah/davey-android-arm64': 0.1.10 + '@snazzah/davey-darwin-arm64': 0.1.10 + '@snazzah/davey-darwin-x64': 0.1.10 + '@snazzah/davey-freebsd-x64': 0.1.10 + '@snazzah/davey-linux-arm-gnueabihf': 0.1.10 + '@snazzah/davey-linux-arm64-gnu': 0.1.10 + '@snazzah/davey-linux-arm64-musl': 0.1.10 + '@snazzah/davey-linux-x64-gnu': 0.1.10 + '@snazzah/davey-linux-x64-musl': 0.1.10 + '@snazzah/davey-wasm32-wasi': 0.1.10 + '@snazzah/davey-win32-arm64-msvc': 0.1.10 + '@snazzah/davey-win32-ia32-msvc': 0.1.10 + '@snazzah/davey-win32-x64-msvc': 0.1.10 + '@standard-schema/spec@1.1.0': {} '@swc/helpers@0.5.19': @@ -9982,7 +10474,7 @@ snapshots: 
'@types/body-parser@1.19.6': dependencies: '@types/connect': 3.4.38 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/bun@1.3.9': dependencies: @@ -10002,7 +10494,7 @@ snapshots: '@types/connect@3.4.38': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/deep-eql@4.0.2': {} @@ -10010,14 +10502,14 @@ snapshots: '@types/express-serve-static-core@4.19.8': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 '@types/express-serve-static-core@5.1.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 @@ -10046,7 +10538,7 @@ snapshots: '@types/jsonwebtoken@9.0.10': dependencies: '@types/ms': 2.1.0 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/linkify-it@5.0.0': {} @@ -10075,11 +10567,11 @@ snapshots: dependencies: undici-types: 6.21.0 - '@types/node@24.11.0': + '@types/node@24.12.0': dependencies: undici-types: 7.16.0 - '@types/node@25.3.5': + '@types/node@25.4.0': dependencies: undici-types: 7.18.2 @@ -10092,7 +10584,7 @@ snapshots: '@types/request@2.48.13': dependencies: '@types/caseless': 0.12.5 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/tough-cookie': 4.0.5 form-data: 2.5.4 @@ -10103,22 +10595,22 @@ snapshots: '@types/send@0.17.6': dependencies: '@types/mime': 1.3.5 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/send@1.2.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/serve-static@1.15.10': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/send': 0.17.6 '@types/serve-static@2.2.0': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/tough-cookie@4.0.5': {} @@ -10128,43 +10620,43 @@ snapshots: '@types/ws@8.18.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/yauzl@2.10.3': dependencies: - '@types/node': 25.3.5 + 
'@types/node': 25.4.0 optional: true - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-linux-arm@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-arm@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-linux-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-x64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-win32-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-win32-x64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview@7.0.0-dev.20260308.1': + '@typescript/native-preview@7.0.0-dev.20260311.1': optionalDependencies: - '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-arm': 7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-x64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-win32-x64': 7.0.0-dev.20260308.1 + '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260311.1 + '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260311.1 + '@typescript/native-preview-linux-arm': 7.0.0-dev.20260311.1 + '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260311.1 + '@typescript/native-preview-linux-x64': 7.0.0-dev.20260311.1 + '@typescript/native-preview-win32-arm64': 
7.0.0-dev.20260311.1 + '@typescript/native-preview-win32-x64': 7.0.0-dev.20260311.1 '@typespec/ts-http-runtime@0.3.3': dependencies: @@ -10205,29 +10697,29 @@ snapshots: - '@cypress/request' - supports-color - '@vitest/browser-playwright@4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser-playwright@4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': dependencies: - '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) playwright: 1.58.2 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - bufferutil - msw - utf-8-validate - vite - '@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': dependencies: - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) '@vitest/utils': 4.0.18 
magic-string: 0.30.21 pixelmatch: 7.1.0 pngjs: 7.0.0 sirv: 3.0.2 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) ws: 8.19.0 transitivePeerDependencies: - bufferutil @@ -10235,7 +10727,7 @@ snapshots: - utf-8-validate - vite - '@vitest/coverage-v8@4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18)': + '@vitest/coverage-v8@4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18)': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.18 @@ -10247,9 +10739,9 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) optionalDependencies: - '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) '@vitest/expect@4.0.18': dependencies: @@ -10260,13 +10752,13 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.0.3 - '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.18 estree-walker: 3.0.3 
magic-string: 0.30.21 optionalDependencies: - vite: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) '@vitest/pretty-format@4.0.18': dependencies: @@ -10363,13 +10855,14 @@ snapshots: acorn@8.16.0: {} - acpx@0.1.16(zod@4.3.6): + acpx@0.2.0(zod@4.3.6): dependencies: '@agentclientprotocol/sdk': 0.15.0(zod@4.3.6) commander: 14.0.3 skillflag: 0.1.4 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a - zod @@ -10382,6 +10875,8 @@ snapshots: agent-base@7.1.4: {} + agent-base@8.0.0: {} + ajv-formats@3.0.1(ajv@8.18.0): optionalDependencies: ajv: 8.18.0 @@ -10526,6 +11021,37 @@ snapshots: bare-events@2.8.2: {} + bare-fs@4.5.5: + dependencies: + bare-events: 2.8.2 + bare-path: 3.0.0 + bare-stream: 2.8.1(bare-events@2.8.2) + bare-url: 2.3.2 + fast-fifo: 1.3.2 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + + bare-os@3.7.1: {} + + bare-path@3.0.0: + dependencies: + bare-os: 3.7.1 + + bare-stream@2.8.1(bare-events@2.8.2): + dependencies: + streamx: 2.23.0 + teex: 1.0.1 + optionalDependencies: + bare-events: 2.8.2 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + + bare-url@2.3.2: + dependencies: + bare-path: 3.0.0 + base64-js@1.5.1: {} basic-auth@2.0.1: @@ -10613,7 +11139,7 @@ snapshots: bun-types@1.3.9: dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 optional: true bytes@3.1.2: {} @@ -10865,7 +11391,7 @@ snapshots: discord-api-types@0.38.37: {} - discord-api-types@0.38.41: {} + discord-api-types@0.38.42: {} doctypes@1.1.0: {} @@ -10881,7 +11407,7 @@ snapshots: dependencies: domelementtype: 2.3.0 - dompurify@3.3.2: + dompurify@3.3.3: optionalDependencies: '@types/trusted-types': 2.0.7 @@ -11507,6 +12033,13 @@ snapshots: transitivePeerDependencies: - supports-color + https-proxy-agent@8.0.0: + dependencies: + agent-base: 8.0.0 + debug: 4.4.3 + 
transitivePeerDependencies: + - supports-color + human-signals@1.1.1: {} iconv-lite@0.4.24: @@ -12305,6 +12838,81 @@ snapshots: ws: 8.19.0 zod: 4.3.6 + openclaw@2026.3.8(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.7)(node-llama-cpp@3.16.2(typescript@5.9.3)): + dependencies: + '@agentclientprotocol/sdk': 0.15.0(zod@4.3.6) + '@aws-sdk/client-bedrock': 3.1007.0 + '@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) + '@clack/prompts': 1.1.0 + '@discordjs/voice': 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1) + '@grammyjs/runner': 2.0.3(grammy@1.41.1) + '@grammyjs/transformer-throttler': 1.2.1(grammy@1.41.1) + '@homebridge/ciao': 1.3.5 + '@larksuiteoapi/node-sdk': 1.59.0 + '@line/bot-sdk': 10.6.0 + '@lydell/node-pty': 1.2.0-beta.3 + '@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-coding-agent': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.57.1 + '@mozilla/readability': 0.6.0 + '@napi-rs/canvas': 0.1.95 + '@sinclair/typebox': 0.34.48 + '@slack/bolt': 4.6.0(@types/express@5.0.6) + '@slack/web-api': 7.14.1 + '@whiskeysockets/baileys': 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5) + ajv: 8.18.0 + chalk: 5.6.2 + chokidar: 5.0.0 + cli-highlight: 2.1.11 + commander: 14.0.3 + croner: 10.0.1 + discord-api-types: 0.38.42 + dotenv: 17.3.1 + express: 5.2.1 + file-type: 21.3.1 + grammy: 1.41.1 + https-proxy-agent: 7.0.6 + ipaddr.js: 2.3.0 + jiti: 2.6.1 + json5: 2.2.3 + jszip: 3.10.1 + linkedom: 0.18.12 + long: 5.3.2 + markdown-it: 14.1.1 + node-edge-tts: 1.2.10 + node-llama-cpp: 3.16.2(typescript@5.9.3) + opusscript: 0.1.1 + osc-progress: 0.3.0 + pdfjs-dist: 5.5.207 + playwright-core: 1.58.2 + qrcode-terminal: 0.12.0 + sharp: 0.34.5 + sqlite-vec: 0.1.7-alpha.2 + tar: 7.5.11 + tslog: 4.10.2 + undici: 7.22.0 + ws: 8.19.0 + yaml: 2.8.2 + zod: 4.3.6 + transitivePeerDependencies: + - 
'@discordjs/opus' + - '@modelcontextprotocol/sdk' + - '@types/express' + - audio-decode + - aws-crt + - bufferutil + - canvas + - debug + - encoding + - ffmpeg-static + - hono + - jimp + - link-preview-js + - node-opus + - supports-color + - utf-8-validate + opus-decoder@0.7.11: dependencies: '@wasm-audio-decoders/common': 9.0.7 @@ -12325,29 +12933,29 @@ snapshots: osc-progress@0.3.0: {} - oxfmt@0.36.0: + oxfmt@0.38.0: dependencies: tinypool: 2.1.0 optionalDependencies: - '@oxfmt/binding-android-arm-eabi': 0.36.0 - '@oxfmt/binding-android-arm64': 0.36.0 - '@oxfmt/binding-darwin-arm64': 0.36.0 - '@oxfmt/binding-darwin-x64': 0.36.0 - '@oxfmt/binding-freebsd-x64': 0.36.0 - '@oxfmt/binding-linux-arm-gnueabihf': 0.36.0 - '@oxfmt/binding-linux-arm-musleabihf': 0.36.0 - '@oxfmt/binding-linux-arm64-gnu': 0.36.0 - '@oxfmt/binding-linux-arm64-musl': 0.36.0 - '@oxfmt/binding-linux-ppc64-gnu': 0.36.0 - '@oxfmt/binding-linux-riscv64-gnu': 0.36.0 - '@oxfmt/binding-linux-riscv64-musl': 0.36.0 - '@oxfmt/binding-linux-s390x-gnu': 0.36.0 - '@oxfmt/binding-linux-x64-gnu': 0.36.0 - '@oxfmt/binding-linux-x64-musl': 0.36.0 - '@oxfmt/binding-openharmony-arm64': 0.36.0 - '@oxfmt/binding-win32-arm64-msvc': 0.36.0 - '@oxfmt/binding-win32-ia32-msvc': 0.36.0 - '@oxfmt/binding-win32-x64-msvc': 0.36.0 + '@oxfmt/binding-android-arm-eabi': 0.38.0 + '@oxfmt/binding-android-arm64': 0.38.0 + '@oxfmt/binding-darwin-arm64': 0.38.0 + '@oxfmt/binding-darwin-x64': 0.38.0 + '@oxfmt/binding-freebsd-x64': 0.38.0 + '@oxfmt/binding-linux-arm-gnueabihf': 0.38.0 + '@oxfmt/binding-linux-arm-musleabihf': 0.38.0 + '@oxfmt/binding-linux-arm64-gnu': 0.38.0 + '@oxfmt/binding-linux-arm64-musl': 0.38.0 + '@oxfmt/binding-linux-ppc64-gnu': 0.38.0 + '@oxfmt/binding-linux-riscv64-gnu': 0.38.0 + '@oxfmt/binding-linux-riscv64-musl': 0.38.0 + '@oxfmt/binding-linux-s390x-gnu': 0.38.0 + '@oxfmt/binding-linux-x64-gnu': 0.38.0 + '@oxfmt/binding-linux-x64-musl': 0.38.0 + '@oxfmt/binding-openharmony-arm64': 0.38.0 + 
'@oxfmt/binding-win32-arm64-msvc': 0.38.0 + '@oxfmt/binding-win32-ia32-msvc': 0.38.0 + '@oxfmt/binding-win32-x64-msvc': 0.38.0 oxlint-tsgolint@0.16.0: optionalDependencies: @@ -12358,27 +12966,27 @@ snapshots: '@oxlint-tsgolint/win32-arm64': 0.16.0 '@oxlint-tsgolint/win32-x64': 0.16.0 - oxlint@1.51.0(oxlint-tsgolint@0.16.0): + oxlint@1.53.0(oxlint-tsgolint@0.16.0): optionalDependencies: - '@oxlint/binding-android-arm-eabi': 1.51.0 - '@oxlint/binding-android-arm64': 1.51.0 - '@oxlint/binding-darwin-arm64': 1.51.0 - '@oxlint/binding-darwin-x64': 1.51.0 - '@oxlint/binding-freebsd-x64': 1.51.0 - '@oxlint/binding-linux-arm-gnueabihf': 1.51.0 - '@oxlint/binding-linux-arm-musleabihf': 1.51.0 - '@oxlint/binding-linux-arm64-gnu': 1.51.0 - '@oxlint/binding-linux-arm64-musl': 1.51.0 - '@oxlint/binding-linux-ppc64-gnu': 1.51.0 - '@oxlint/binding-linux-riscv64-gnu': 1.51.0 - '@oxlint/binding-linux-riscv64-musl': 1.51.0 - '@oxlint/binding-linux-s390x-gnu': 1.51.0 - '@oxlint/binding-linux-x64-gnu': 1.51.0 - '@oxlint/binding-linux-x64-musl': 1.51.0 - '@oxlint/binding-openharmony-arm64': 1.51.0 - '@oxlint/binding-win32-arm64-msvc': 1.51.0 - '@oxlint/binding-win32-ia32-msvc': 1.51.0 - '@oxlint/binding-win32-x64-msvc': 1.51.0 + '@oxlint/binding-android-arm-eabi': 1.53.0 + '@oxlint/binding-android-arm64': 1.53.0 + '@oxlint/binding-darwin-arm64': 1.53.0 + '@oxlint/binding-darwin-x64': 1.53.0 + '@oxlint/binding-freebsd-x64': 1.53.0 + '@oxlint/binding-linux-arm-gnueabihf': 1.53.0 + '@oxlint/binding-linux-arm-musleabihf': 1.53.0 + '@oxlint/binding-linux-arm64-gnu': 1.53.0 + '@oxlint/binding-linux-arm64-musl': 1.53.0 + '@oxlint/binding-linux-ppc64-gnu': 1.53.0 + '@oxlint/binding-linux-riscv64-gnu': 1.53.0 + '@oxlint/binding-linux-riscv64-musl': 1.53.0 + '@oxlint/binding-linux-s390x-gnu': 1.53.0 + '@oxlint/binding-linux-x64-gnu': 1.53.0 + '@oxlint/binding-linux-x64-musl': 1.53.0 + '@oxlint/binding-openharmony-arm64': 1.53.0 + '@oxlint/binding-win32-arm64-msvc': 1.53.0 + 
'@oxlint/binding-win32-ia32-msvc': 1.53.0 + '@oxlint/binding-win32-x64-msvc': 1.53.0 oxlint-tsgolint: 0.16.0 p-finally@1.0.0: {} @@ -12594,7 +13202,7 @@ snapshots: '@protobufjs/path': 1.1.2 '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 - '@types/node': 25.3.5 + '@types/node': 25.4.0 long: 5.3.2 proxy-addr@2.0.7: @@ -12833,7 +13441,7 @@ snapshots: dependencies: glob: 10.5.0 - rolldown-plugin-dts@0.22.4(@typescript/native-preview@7.0.0-dev.20260308.1)(rolldown@1.0.0-rc.7)(typescript@5.9.3): + rolldown-plugin-dts@0.22.5(@typescript/native-preview@7.0.0-dev.20260311.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3): dependencies: '@babel/generator': 8.0.0-rc.2 '@babel/helper-validator-identifier': 8.0.0-rc.2 @@ -12844,33 +13452,33 @@ snapshots: dts-resolver: 2.1.3 get-tsconfig: 4.13.6 obug: 2.1.1 - rolldown: 1.0.0-rc.7 + rolldown: 1.0.0-rc.9 optionalDependencies: - '@typescript/native-preview': 7.0.0-dev.20260308.1 + '@typescript/native-preview': 7.0.0-dev.20260311.1 typescript: 5.9.3 transitivePeerDependencies: - oxc-resolver - rolldown@1.0.0-rc.7: + rolldown@1.0.0-rc.9: dependencies: '@oxc-project/types': 0.115.0 - '@rolldown/pluginutils': 1.0.0-rc.7 + '@rolldown/pluginutils': 1.0.0-rc.9 optionalDependencies: - '@rolldown/binding-android-arm64': 1.0.0-rc.7 - '@rolldown/binding-darwin-arm64': 1.0.0-rc.7 - '@rolldown/binding-darwin-x64': 1.0.0-rc.7 - '@rolldown/binding-freebsd-x64': 1.0.0-rc.7 - '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-rc.7 - '@rolldown/binding-linux-arm64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-arm64-musl': 1.0.0-rc.7 - '@rolldown/binding-linux-ppc64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-s390x-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-x64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-x64-musl': 1.0.0-rc.7 - '@rolldown/binding-openharmony-arm64': 1.0.0-rc.7 - '@rolldown/binding-wasm32-wasi': 1.0.0-rc.7 - '@rolldown/binding-win32-arm64-msvc': 1.0.0-rc.7 - '@rolldown/binding-win32-x64-msvc': 1.0.0-rc.7 + '@rolldown/binding-android-arm64': 
1.0.0-rc.9 + '@rolldown/binding-darwin-arm64': 1.0.0-rc.9 + '@rolldown/binding-darwin-x64': 1.0.0-rc.9 + '@rolldown/binding-freebsd-x64': 1.0.0-rc.9 + '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-rc.9 + '@rolldown/binding-linux-arm64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-arm64-musl': 1.0.0-rc.9 + '@rolldown/binding-linux-ppc64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-s390x-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-x64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-x64-musl': 1.0.0-rc.9 + '@rolldown/binding-openharmony-arm64': 1.0.0-rc.9 + '@rolldown/binding-wasm32-wasi': 1.0.0-rc.9 + '@rolldown/binding-win32-arm64-msvc': 1.0.0-rc.9 + '@rolldown/binding-win32-x64-msvc': 1.0.0-rc.9 rollup@4.59.0: dependencies: @@ -13114,9 +13722,10 @@ snapshots: skillflag@0.1.4: dependencies: '@clack/prompts': 1.1.0 - tar-stream: 3.1.7 + tar-stream: 3.1.8 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a sleep-promise@9.1.0: {} @@ -13300,13 +13909,15 @@ snapshots: array-back: 6.2.2 wordwrapjs: 5.1.1 - tar-stream@3.1.7: + tar-stream@3.1.8: dependencies: b4a: 1.8.0 + bare-fs: 4.5.5 fast-fifo: 1.3.2 streamx: 2.23.0 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a tar@7.5.11: @@ -13317,6 +13928,13 @@ snapshots: minizlib: 3.1.0 yallist: 5.0.0 + teex@1.0.1: + dependencies: + streamx: 2.23.0 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + text-decoder@1.2.7: dependencies: b4a: 1.8.0 @@ -13381,7 +13999,7 @@ snapshots: ts-algebra@2.0.0: {} - tsdown@0.21.0(@typescript/native-preview@7.0.0-dev.20260308.1)(typescript@5.9.3): + tsdown@0.21.2(@typescript/native-preview@7.0.0-dev.20260311.1)(typescript@5.9.3): dependencies: ansis: 4.2.0 cac: 7.0.0 @@ -13391,14 +14009,14 @@ snapshots: import-without-cache: 0.2.5 obug: 2.1.1 picomatch: 4.0.3 - rolldown: 1.0.0-rc.7 - rolldown-plugin-dts: 0.22.4(@typescript/native-preview@7.0.0-dev.20260308.1)(rolldown@1.0.0-rc.7)(typescript@5.9.3) + 
rolldown: 1.0.0-rc.9 + rolldown-plugin-dts: 0.22.5(@typescript/native-preview@7.0.0-dev.20260311.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3) semver: 7.7.4 tinyexec: 1.0.2 tinyglobby: 0.2.15 tree-kill: 1.2.2 unconfig-core: 7.5.0 - unrun: 0.2.30 + unrun: 0.2.32 optionalDependencies: typescript: 5.9.3 transitivePeerDependencies: @@ -13496,9 +14114,9 @@ snapshots: unpipe@1.0.0: {} - unrun@0.2.30: + unrun@0.2.32: dependencies: - rolldown: 1.0.0-rc.7 + rolldown: 1.0.0-rc.9 url-join@4.0.1: {} @@ -13537,7 +14155,7 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.3 - vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): + vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): dependencies: esbuild: 0.27.3 fdir: 6.5.0(picomatch@4.0.3) @@ -13546,17 +14164,17 @@ snapshots: rollup: 4.59.0 tinyglobby: 0.2.15 optionalDependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 fsevents: 2.3.3 jiti: 2.6.1 lightningcss: 1.30.2 tsx: 4.21.0 yaml: 2.8.2 - vitest@4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): dependencies: '@vitest/expect': 4.0.18 - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) '@vitest/pretty-format': 4.0.18 '@vitest/runner': 4.0.18 '@vitest/snapshot': 4.0.18 @@ -13573,12 +14191,12 @@ snapshots: tinyexec: 1.0.2 tinyglobby: 0.2.15 tinyrainbow: 3.0.3 - vite: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 
optionalDependencies: '@opentelemetry/api': 1.9.0 - '@types/node': 25.3.5 - '@vitest/browser-playwright': 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@types/node': 25.4.0 + '@vitest/browser-playwright': 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) transitivePeerDependencies: - jiti - less diff --git a/scripts/ios-write-version-xcconfig.sh b/scripts/ios-write-version-xcconfig.sh index e6214c9188c..b63d3e81adb 100755 --- a/scripts/ios-write-version-xcconfig.sh +++ b/scripts/ios-write-version-xcconfig.sh @@ -73,7 +73,7 @@ fi if [[ "${PACKAGE_VERSION}" =~ ^([0-9]{4}\.[0-9]{1,2}\.[0-9]{1,2})([.-]?beta[.-][0-9]+)?$ ]]; then MARKETING_VERSION="${BASH_REMATCH[1]}" else - echo "Unsupported package.json.version '${PACKAGE_VERSION}'. Expected 2026.3.9 or 2026.3.9-beta.1." >&2 + echo "Unsupported package.json.version '${PACKAGE_VERSION}'. Expected 2026.3.11 or 2026.3.11-beta.1." 
>&2 exit 1 fi diff --git a/src/acp/control-plane/manager.core.ts b/src/acp/control-plane/manager.core.ts index 558e1ca24a8..b15aa3bd72e 100644 --- a/src/acp/control-plane/manager.core.ts +++ b/src/acp/control-plane/manager.core.ts @@ -44,11 +44,11 @@ import { type TurnLatencyStats, } from "./manager.types.js"; import { + canonicalizeAcpSessionKey, createUnsupportedControlError, hasLegacyAcpIdentityProjection, normalizeAcpErrorCode, normalizeActorKey, - normalizeSessionKey, requireReadySessionMeta, resolveAcpAgentFromSessionKey, resolveAcpSessionResolutionError, @@ -87,7 +87,7 @@ export class AcpSessionManager { constructor(private readonly deps: AcpSessionManagerDeps = DEFAULT_DEPS) {} resolveSession(params: { cfg: OpenClawConfig; sessionKey: string }): AcpSessionResolution { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { return { kind: "none", @@ -213,7 +213,10 @@ export class AcpSessionManager { handle: AcpRuntimeHandle; meta: SessionAcpMeta; }> { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -321,7 +324,7 @@ export class AcpSessionManager { sessionKey: string; signal?: AbortSignal; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -397,7 +400,7 @@ export class AcpSessionManager { sessionKey: string; runtimeMode: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -452,7 
+455,7 @@ export class AcpSessionManager { key: string; value: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -525,7 +528,7 @@ export class AcpSessionManager { sessionKey: string; patch: Partial; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); const validatedPatch = validateRuntimeOptionPatch(params.patch); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); @@ -555,7 +558,7 @@ export class AcpSessionManager { cfg: OpenClawConfig; sessionKey: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -591,7 +594,10 @@ export class AcpSessionManager { } async runTurn(input: AcpRunTurnInput): Promise { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -738,7 +744,7 @@ export class AcpSessionManager { sessionKey: string; reason?: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -806,7 +812,10 @@ export class AcpSessionManager { } async closeSession(input: AcpCloseSessionInput): Promise { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: 
input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } diff --git a/src/acp/control-plane/manager.test.ts b/src/acp/control-plane/manager.test.ts index ebdf356ca9f..8152944834c 100644 --- a/src/acp/control-plane/manager.test.ts +++ b/src/acp/control-plane/manager.test.ts @@ -170,6 +170,57 @@ describe("AcpSessionManager", () => { expect(resolved.error.message).toContain("ACP metadata is missing"); }); + it("canonicalizes the main alias before ACP rehydrate after restart", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey; + if (sessionKey !== "agent:main:main") { + return null; + } + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + agent: "main", + runtimeSessionName: sessionKey, + }, + }; + }); + + const manager = new AcpSessionManager(); + const cfg = { + ...baseCfg, + session: { mainKey: "main" }, + agents: { list: [{ id: "main", default: true }] }, + } as OpenClawConfig; + + await manager.runTurn({ + cfg, + sessionKey: "main", + text: "after restart", + mode: "prompt", + requestId: "r-main", + }); + + expect(hoisted.readAcpSessionEntryMock).toHaveBeenCalledWith( + expect.objectContaining({ + cfg, + sessionKey: "agent:main:main", + }), + ); + expect(runtimeState.ensureSession).toHaveBeenCalledWith( + expect.objectContaining({ + agent: "main", + sessionKey: "agent:main:main", + }), + ); + }); + it("serializes concurrent turns for the same ACP session", async () => { const runtimeState = createRuntime(); hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ diff --git a/src/acp/control-plane/manager.utils.ts b/src/acp/control-plane/manager.utils.ts index 17729c6c2fc..90f7c516538 
100644 --- a/src/acp/control-plane/manager.utils.ts +++ b/src/acp/control-plane/manager.utils.ts @@ -1,6 +1,14 @@ import type { OpenClawConfig } from "../../config/config.js"; +import { + canonicalizeMainSessionAlias, + resolveMainSessionKey, +} from "../../config/sessions/main-session.js"; import type { SessionAcpMeta } from "../../config/sessions/types.js"; -import { normalizeAgentId, parseAgentSessionKey } from "../../routing/session-key.js"; +import { + normalizeAgentId, + normalizeMainKey, + parseAgentSessionKey, +} from "../../routing/session-key.js"; import { ACP_ERROR_CODES, AcpRuntimeError } from "../runtime/errors.js"; import type { AcpSessionResolution } from "./manager.types.js"; @@ -42,6 +50,33 @@ export function normalizeSessionKey(sessionKey: string): string { return sessionKey.trim(); } +export function canonicalizeAcpSessionKey(params: { + cfg: OpenClawConfig; + sessionKey: string; +}): string { + const normalized = normalizeSessionKey(params.sessionKey); + if (!normalized) { + return ""; + } + const lowered = normalized.toLowerCase(); + if (lowered === "global" || lowered === "unknown") { + return lowered; + } + const parsed = parseAgentSessionKey(lowered); + if (parsed) { + return canonicalizeMainSessionAlias({ + cfg: params.cfg, + agentId: parsed.agentId, + sessionKey: lowered, + }); + } + const mainKey = normalizeMainKey(params.cfg.session?.mainKey); + if (lowered === "main" || lowered === mainKey) { + return resolveMainSessionKey(params.cfg); + } + return lowered; +} + export function normalizeActorKey(sessionKey: string): string { return sessionKey.trim().toLowerCase(); } diff --git a/src/acp/translator.session-rate-limit.test.ts b/src/acp/translator.session-rate-limit.test.ts index d08ae1a1567..d0f774678a9 100644 --- a/src/acp/translator.session-rate-limit.test.ts +++ b/src/acp/translator.session-rate-limit.test.ts @@ -52,7 +52,7 @@ function createSetSessionModeRequest(sessionId: string, modeId: string): SetSess function 
createSetSessionConfigOptionRequest( sessionId: string, configId: string, - value: string, + value: string | boolean, ): SetSessionConfigOptionRequest { return { sessionId, @@ -644,6 +644,55 @@ describe("acp setSessionConfigOption bridge behavior", () => { sessionStore.clearAllSessionsForTest(); }); + + it("rejects non-string ACP config option values", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "bool-config-session", + kind: "direct", + updatedAt: Date.now(), + thinkingLevel: "minimal", + modelProvider: "openai", + model: "gpt-5.4", + }, + ], + }; + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("bool-config-session")); + + await expect( + agent.setSessionConfigOption( + createSetSessionConfigOptionRequest("bool-config-session", "thought_level", false), + ), + ).rejects.toThrow( + 'ACP bridge does not support non-string session config option values for "thought_level".', + ); + expect(request).not.toHaveBeenCalledWith( + "sessions.patch", + expect.objectContaining({ key: "bool-config-session" }), + ); + + sessionStore.clearAllSessionsForTest(); + }); }); describe("acp tool streaming bridge behavior", () => { diff --git a/src/acp/translator.ts b/src/acp/translator.ts index 585f97c8f43..bb52db7b26b 100644 --- a/src/acp/translator.ts +++ b/src/acp/translator.ts @@ -937,11 +937,16 @@ export class AcpGatewayAgent implements Agent { private resolveSessionConfigPatch( configId: string, - value: string, + value: string | boolean, ): { overrides: Partial; patch: Record; } { + if (typeof 
value !== "string") { + throw new Error( + `ACP bridge does not support non-string session config option values for "${configId}".`, + ); + } switch (configId) { case ACP_THOUGHT_LEVEL_CONFIG_ID: return { diff --git a/src/agents/auth-profiles/usage.test.ts b/src/agents/auth-profiles/usage.test.ts index 261eae6efd5..6dd5697cc99 100644 --- a/src/agents/auth-profiles/usage.test.ts +++ b/src/agents/auth-profiles/usage.test.ts @@ -207,7 +207,7 @@ describe("resolveProfilesUnavailableReason", () => { ).toBe("overloaded"); }); - it("falls back to rate_limit when active cooldown has no reason history", () => { + it("falls back to unknown when active cooldown has no reason history", () => { const now = Date.now(); const store = makeStore({ "anthropic:default": { @@ -221,7 +221,7 @@ describe("resolveProfilesUnavailableReason", () => { profileIds: ["anthropic:default"], now, }), - ).toBe("rate_limit"); + ).toBe("unknown"); }); it("ignores expired windows and returns null when no profile is actively unavailable", () => { diff --git a/src/agents/auth-profiles/usage.ts b/src/agents/auth-profiles/usage.ts index 273fd754595..20e1cbaa497 100644 --- a/src/agents/auth-profiles/usage.ts +++ b/src/agents/auth-profiles/usage.ts @@ -110,7 +110,11 @@ export function resolveProfilesUnavailableReason(params: { recordedReason = true; } if (!recordedReason) { - addScore("rate_limit", 1); + // No failure counts recorded for this cooldown window. Previously this + // defaulted to "rate_limit", which caused false "rate limit reached" + // warnings when the actual reason was unknown (e.g. transient network + // blip or server error without a classified failure count). 
+ addScore("unknown", 1); } } diff --git a/src/agents/failover-error.test.ts b/src/agents/failover-error.test.ts index db01c03d8c4..1548ce5496a 100644 --- a/src/agents/failover-error.test.ts +++ b/src/agents/failover-error.test.ts @@ -274,6 +274,8 @@ describe("failover-error", () => { it("infers timeout from common node error codes", () => { expect(resolveFailoverReasonFromError({ code: "ETIMEDOUT" })).toBe("timeout"); expect(resolveFailoverReasonFromError({ code: "ECONNRESET" })).toBe("timeout"); + expect(resolveFailoverReasonFromError({ code: "EHOSTDOWN" })).toBe("timeout"); + expect(resolveFailoverReasonFromError({ code: "EPIPE" })).toBe("timeout"); }); it("infers timeout from abort/error stop-reason messages", () => { diff --git a/src/agents/failover-error.ts b/src/agents/failover-error.ts index a39685e1b16..8c49df40acb 100644 --- a/src/agents/failover-error.ts +++ b/src/agents/failover-error.ts @@ -170,7 +170,9 @@ export function resolveFailoverReasonFromError(err: unknown): FailoverReason | n "ECONNREFUSED", "ENETUNREACH", "EHOSTUNREACH", + "EHOSTDOWN", "ENETRESET", + "EPIPE", "EAI_AGAIN", ].includes(code) ) { diff --git a/src/agents/memory-search.test.ts b/src/agents/memory-search.test.ts index 9372b4c7696..1d04b730351 100644 --- a/src/agents/memory-search.test.ts +++ b/src/agents/memory-search.test.ts @@ -131,6 +131,113 @@ describe("memory search config", () => { expect(resolved?.extraPaths).toEqual(["/shared/notes", "docs", "../team-notes"]); }); + it("normalizes multimodal settings", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + multimodal: { + enabled: true, + modalities: ["all"], + maxFileBytes: 8192, + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: ["image", "audio"], + maxFileBytes: 8192, + }); + }); + + it("keeps an explicit empty multimodal modalities 
list empty", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + multimodal: { + enabled: true, + modalities: [], + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: [], + maxFileBytes: 10 * 1024 * 1024, + }); + expect(resolved?.provider).toBe("gemini"); + }); + + it("does not enforce multimodal provider validation when no modalities are active", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "openai", + model: "text-embedding-3-small", + fallback: "openai", + multimodal: { + enabled: true, + modalities: [], + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: [], + maxFileBytes: 10 * 1024 * 1024, + }); + }); + + it("rejects multimodal memory on unsupported providers", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "openai", + model: "text-embedding-3-small", + multimodal: { enabled: true, modalities: ["image"] }, + }, + }, + }, + }); + expect(() => resolveMemorySearchConfig(cfg, "main")).toThrow( + /memorySearch\.multimodal requires memorySearch\.provider = "gemini"/, + ); + }); + + it("rejects multimodal memory when fallback is configured", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "openai", + multimodal: { enabled: true, modalities: ["image"] }, + }, + }, + }, + }); + expect(() => resolveMemorySearchConfig(cfg, "main")).toThrow( + /memorySearch\.multimodal does not support memorySearch\.fallback/, + ); + }); + it("includes batch defaults for openai without remote overrides", () => { const cfg = configWithDefaultProvider("openai"); const resolved = resolveMemorySearchConfig(cfg, 
"main"); diff --git a/src/agents/memory-search.ts b/src/agents/memory-search.ts index e14fd5a0b3b..d00dae70639 100644 --- a/src/agents/memory-search.ts +++ b/src/agents/memory-search.ts @@ -3,6 +3,12 @@ import path from "node:path"; import type { OpenClawConfig, MemorySearchConfig } from "../config/config.js"; import { resolveStateDir } from "../config/paths.js"; import type { SecretInput } from "../config/types.secrets.js"; +import { + isMemoryMultimodalEnabled, + normalizeMemoryMultimodalSettings, + supportsMemoryMultimodalEmbeddings, + type MemoryMultimodalSettings, +} from "../memory/multimodal.js"; import { clampInt, clampNumber, resolveUserPath } from "../utils.js"; import { resolveAgentConfig } from "./agent-scope.js"; @@ -10,6 +16,7 @@ export type ResolvedMemorySearchConfig = { enabled: boolean; sources: Array<"memory" | "sessions">; extraPaths: string[]; + multimodal: MemoryMultimodalSettings; provider: "openai" | "local" | "gemini" | "voyage" | "mistral" | "ollama" | "auto"; remote?: { baseUrl?: string; @@ -28,6 +35,7 @@ export type ResolvedMemorySearchConfig = { }; fallback: "openai" | "gemini" | "local" | "voyage" | "mistral" | "ollama" | "none"; model: string; + outputDimensionality?: number; local: { modelPath?: string; modelCacheDir?: string; @@ -193,6 +201,7 @@ function mergeConfig( ? DEFAULT_OLLAMA_MODEL : undefined; const model = overrides?.model ?? defaults?.model ?? modelDefault ?? ""; + const outputDimensionality = overrides?.outputDimensionality ?? defaults?.outputDimensionality; const local = { modelPath: overrides?.local?.modelPath ?? defaults?.local?.modelPath, modelCacheDir: overrides?.local?.modelCacheDir ?? defaults?.local?.modelCacheDir, @@ -202,6 +211,11 @@ function mergeConfig( .map((value) => value.trim()) .filter(Boolean); const extraPaths = Array.from(new Set(rawPaths)); + const multimodal = normalizeMemoryMultimodalSettings({ + enabled: overrides?.multimodal?.enabled ?? 
defaults?.multimodal?.enabled, + modalities: overrides?.multimodal?.modalities ?? defaults?.multimodal?.modalities, + maxFileBytes: overrides?.multimodal?.maxFileBytes ?? defaults?.multimodal?.maxFileBytes, + }); const vector = { enabled: overrides?.store?.vector?.enabled ?? defaults?.store?.vector?.enabled ?? true, extensionPath: @@ -305,6 +319,7 @@ function mergeConfig( enabled, sources, extraPaths, + multimodal, provider, remote, experimental: { @@ -312,6 +327,7 @@ function mergeConfig( }, fallback, model, + outputDimensionality, local, store, chunking: { tokens: Math.max(1, chunking.tokens), overlap }, @@ -362,5 +378,22 @@ export function resolveMemorySearchConfig( if (!resolved.enabled) { return null; } + const multimodalActive = isMemoryMultimodalEnabled(resolved.multimodal); + if ( + multimodalActive && + !supportsMemoryMultimodalEmbeddings({ + provider: resolved.provider, + model: resolved.model, + }) + ) { + throw new Error( + 'agents.*.memorySearch.multimodal requires memorySearch.provider = "gemini" and model = "gemini-embedding-2-preview".', + ); + } + if (multimodalActive && resolved.fallback !== "none") { + throw new Error( + 'agents.*.memorySearch.multimodal does not support memorySearch.fallback. Set fallback to "none".', + ); + } return resolved; } diff --git a/src/agents/model-fallback.test.ts b/src/agents/model-fallback.test.ts index 8bc1a6ecb47..f8422b4aa14 100644 --- a/src/agents/model-fallback.test.ts +++ b/src/agents/model-fallback.test.ts @@ -555,7 +555,7 @@ describe("runWithModelFallback", () => { usageStat: { cooldownUntil: Date.now() + 5 * 60_000, }, - expectedReason: "rate_limit", + expectedReason: "unknown", }); }); diff --git a/src/agents/model-fallback.ts b/src/agents/model-fallback.ts index cda7771d329..d14ede7658b 100644 --- a/src/agents/model-fallback.ts +++ b/src/agents/model-fallback.ts @@ -449,7 +449,7 @@ function resolveCooldownDecision(params: { store: params.authStore, profileIds: params.profileIds, now: params.now, - }) ?? 
"rate_limit"; + }) ?? "unknown"; const isPersistentAuthIssue = inferredReason === "auth" || inferredReason === "auth_permanent"; if (isPersistentAuthIssue) { return { @@ -483,7 +483,10 @@ function resolveCooldownDecision(params: { // limits, which are often model-scoped and can recover on a sibling model. const shouldAttemptDespiteCooldown = (params.isPrimary && (!params.requestedModel || shouldProbe)) || - (!params.isPrimary && (inferredReason === "rate_limit" || inferredReason === "overloaded")); + (!params.isPrimary && + (inferredReason === "rate_limit" || + inferredReason === "overloaded" || + inferredReason === "unknown")); if (!shouldAttemptDespiteCooldown) { return { type: "skip", @@ -588,13 +591,16 @@ export async function runWithModelFallback(params: { if ( decision.reason === "rate_limit" || decision.reason === "overloaded" || - decision.reason === "billing" + decision.reason === "billing" || + decision.reason === "unknown" ) { // Probe at most once per provider per fallback run when all profiles // are cooldowned. Re-probing every same-provider candidate can stall // cross-provider fallback on providers with long internal retries. 
const isTransientCooldownReason = - decision.reason === "rate_limit" || decision.reason === "overloaded"; + decision.reason === "rate_limit" || + decision.reason === "overloaded" || + decision.reason === "unknown"; if (isTransientCooldownReason && cooldownProbeUsedProviders.has(candidate.provider)) { const error = `Provider ${candidate.provider} is in cooldown (probe already attempted this run)`; attempts.push({ diff --git a/src/agents/models-config.plan.ts b/src/agents/models-config.plan.ts index 40777c2cd0d..601a0edfda1 100644 --- a/src/agents/models-config.plan.ts +++ b/src/agents/models-config.plan.ts @@ -6,6 +6,7 @@ import { type ExistingProviderConfig, } from "./models-config.merge.js"; import { + enforceSourceManagedProviderSecrets, normalizeProviders, resolveImplicitProviders, type ProviderConfig, @@ -86,6 +87,7 @@ async function resolveProvidersForMode(params: { export async function planOpenClawModelsJson(params: { cfg: OpenClawConfig; + sourceConfigForSecrets?: OpenClawConfig; agentDir: string; env: NodeJS.ProcessEnv; existingRaw: string; @@ -106,6 +108,8 @@ export async function planOpenClawModelsJson(params: { agentDir, env, secretDefaults: cfg.secrets?.defaults, + sourceProviders: params.sourceConfigForSecrets?.models?.providers, + sourceSecretDefaults: params.sourceConfigForSecrets?.secrets?.defaults, secretRefManagedProviders, }) ?? providers; const mergedProviders = await resolveProvidersForMode({ @@ -115,7 +119,14 @@ export async function planOpenClawModelsJson(params: { secretRefManagedProviders, explicitBaseUrlProviders: resolveExplicitBaseUrlProviders(cfg.models), }); - const nextContents = `${JSON.stringify({ providers: mergedProviders }, null, 2)}\n`; + const secretEnforcedProviders = + enforceSourceManagedProviderSecrets({ + providers: mergedProviders, + sourceProviders: params.sourceConfigForSecrets?.models?.providers, + sourceSecretDefaults: params.sourceConfigForSecrets?.secrets?.defaults, + secretRefManagedProviders, + }) ?? 
mergedProviders; + const nextContents = `${JSON.stringify({ providers: secretEnforcedProviders }, null, 2)}\n`; if (params.existingRaw === nextContents) { return { action: "noop" }; diff --git a/src/agents/models-config.providers.discovery.ts b/src/agents/models-config.providers.discovery.ts index dd0504d2a53..64e1a9abe61 100644 --- a/src/agents/models-config.providers.discovery.ts +++ b/src/agents/models-config.providers.discovery.ts @@ -10,6 +10,7 @@ import { } from "./huggingface-models.js"; import { discoverKilocodeModels } from "./kilocode-models.js"; import { + enrichOllamaModelsWithContext, OLLAMA_DEFAULT_CONTEXT_WINDOW, OLLAMA_DEFAULT_COST, OLLAMA_DEFAULT_MAX_TOKENS, @@ -46,38 +47,6 @@ type VllmModelsResponse = { }>; }; -async function queryOllamaContextWindow( - apiBase: string, - modelName: string, -): Promise { - try { - const response = await fetch(`${apiBase}/api/show`, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ name: modelName }), - signal: AbortSignal.timeout(3000), - }); - if (!response.ok) { - return undefined; - } - const data = (await response.json()) as { model_info?: Record }; - if (!data.model_info) { - return undefined; - } - for (const [key, value] of Object.entries(data.model_info)) { - if (key.endsWith(".context_length") && typeof value === "number" && Number.isFinite(value)) { - const contextWindow = Math.floor(value); - if (contextWindow > 0) { - return contextWindow; - } - } - } - return undefined; - } catch { - return undefined; - } -} - async function discoverOllamaModels( baseUrl?: string, opts?: { quiet?: boolean }, @@ -107,27 +76,18 @@ async function discoverOllamaModels( `Capping Ollama /api/show inspection to ${OLLAMA_SHOW_MAX_MODELS} models (received ${data.models.length})`, ); } - const discovered: ModelDefinitionConfig[] = []; - for (let index = 0; index < modelsToInspect.length; index += OLLAMA_SHOW_CONCURRENCY) { - const batch = modelsToInspect.slice(index, index + 
OLLAMA_SHOW_CONCURRENCY); - const batchDiscovered = await Promise.all( - batch.map(async (model) => { - const modelId = model.name; - const contextWindow = await queryOllamaContextWindow(apiBase, modelId); - return { - id: modelId, - name: modelId, - reasoning: isReasoningModelHeuristic(modelId), - input: ["text"], - cost: OLLAMA_DEFAULT_COST, - contextWindow: contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, - maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, - } satisfies ModelDefinitionConfig; - }), - ); - discovered.push(...batchDiscovered); - } - return discovered; + const discovered = await enrichOllamaModelsWithContext(apiBase, modelsToInspect, { + concurrency: OLLAMA_SHOW_CONCURRENCY, + }); + return discovered.map((model) => ({ + id: model.name, + name: model.name, + reasoning: isReasoningModelHeuristic(model.name), + input: ["text"], + cost: OLLAMA_DEFAULT_COST, + contextWindow: model.contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, + maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, + })); } catch (error) { if (!opts?.quiet) { log.warn(`Failed to discover Ollama models: ${String(error)}`); diff --git a/src/agents/models-config.providers.normalize-keys.test.ts b/src/agents/models-config.providers.normalize-keys.test.ts index f8422d797dd..b39705d8ec2 100644 --- a/src/agents/models-config.providers.normalize-keys.test.ts +++ b/src/agents/models-config.providers.normalize-keys.test.ts @@ -4,7 +4,10 @@ import path from "node:path"; import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; import { NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; -import { normalizeProviders } from "./models-config.providers.js"; +import { + enforceSourceManagedProviderSecrets, + normalizeProviders, +} from "./models-config.providers.js"; describe("normalizeProviders", () => { it("trims provider keys so image models remain discoverable for custom providers", async () => { @@ -136,4 +139,38 @@ describe("normalizeProviders", () => { await 
fs.rm(agentDir, { recursive: true, force: true }); } }); + + it("ignores non-object provider entries during source-managed enforcement", () => { + const providers = { + openai: null, + moonshot: { + baseUrl: "https://api.moonshot.ai/v1", + api: "openai-completions", + apiKey: "sk-runtime-moonshot", // pragma: allowlist secret + models: [], + }, + } as unknown as NonNullable["providers"]>; + + const sourceProviders: NonNullable["providers"]> = { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + models: [], + }, + moonshot: { + baseUrl: "https://api.moonshot.ai/v1", + api: "openai-completions", + apiKey: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" }, // pragma: allowlist secret + models: [], + }, + }; + + const enforced = enforceSourceManagedProviderSecrets({ + providers, + sourceProviders, + }); + expect((enforced as Record).openai).toBeNull(); + expect(enforced?.moonshot?.apiKey).toBe("MOONSHOT_API_KEY"); // pragma: allowlist secret + }); }); diff --git a/src/agents/models-config.providers.static.ts b/src/agents/models-config.providers.static.ts index 08b3d1c2a66..c525cb32f53 100644 --- a/src/agents/models-config.providers.static.ts +++ b/src/agents/models-config.providers.static.ts @@ -429,6 +429,24 @@ export function buildOpenrouterProvider(): ProviderConfig { contextWindow: OPENROUTER_DEFAULT_CONTEXT_WINDOW, maxTokens: OPENROUTER_DEFAULT_MAX_TOKENS, }, + { + id: "openrouter/hunter-alpha", + name: "Hunter Alpha", + reasoning: true, + input: ["text"], + cost: OPENROUTER_DEFAULT_COST, + contextWindow: 1048576, + maxTokens: 65536, + }, + { + id: "openrouter/healer-alpha", + name: "Healer Alpha", + reasoning: true, + input: ["text", "image"], + cost: OPENROUTER_DEFAULT_COST, + contextWindow: 262144, + maxTokens: 65536, + }, ], }; } diff --git a/src/agents/models-config.providers.ts 
b/src/agents/models-config.providers.ts index c63ed6865a8..411072f2d7a 100644 --- a/src/agents/models-config.providers.ts +++ b/src/agents/models-config.providers.ts @@ -4,6 +4,7 @@ import { DEFAULT_COPILOT_API_BASE_URL, resolveCopilotApiToken, } from "../providers/github-copilot-token.js"; +import { isRecord } from "../utils.js"; import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js"; import { ensureAuthProfileStore, listProfilesForProvider } from "./auth-profiles.js"; import { discoverBedrockModels } from "./bedrock-discovery.js"; @@ -70,6 +71,11 @@ export { resolveOllamaApiBase } from "./models-config.providers.discovery.js"; type ModelsConfig = NonNullable; export type ProviderConfig = NonNullable[string]; +type SecretDefaults = { + env?: string; + file?: string; + exec?: string; +}; const ENV_VAR_NAME_RE = /^[A-Z_][A-Z0-9_]*$/; @@ -97,13 +103,7 @@ function resolveAwsSdkApiKeyVarName(env: NodeJS.ProcessEnv = process.env): strin function normalizeHeaderValues(params: { headers: ProviderConfig["headers"] | undefined; - secretDefaults: - | { - env?: string; - file?: string; - exec?: string; - } - | undefined; + secretDefaults: SecretDefaults | undefined; }): { headers: ProviderConfig["headers"] | undefined; mutated: boolean } { const { headers } = params; if (!headers) { @@ -276,15 +276,155 @@ function normalizeAntigravityProvider(provider: ProviderConfig): ProviderConfig return normalizeProviderModels(provider, normalizeAntigravityModelId); } +function normalizeSourceProviderLookup( + providers: ModelsConfig["providers"] | undefined, +): Record { + if (!providers) { + return {}; + } + const out: Record = {}; + for (const [key, provider] of Object.entries(providers)) { + const normalizedKey = key.trim(); + if (!normalizedKey || !isRecord(provider)) { + continue; + } + out[normalizedKey] = provider; + } + return out; +} + +function resolveSourceManagedApiKeyMarker(params: { + sourceProvider: ProviderConfig | undefined; + 
sourceSecretDefaults: SecretDefaults | undefined; +}): string | undefined { + const sourceApiKeyRef = resolveSecretInputRef({ + value: params.sourceProvider?.apiKey, + defaults: params.sourceSecretDefaults, + }).ref; + if (!sourceApiKeyRef || !sourceApiKeyRef.id.trim()) { + return undefined; + } + return sourceApiKeyRef.source === "env" + ? sourceApiKeyRef.id.trim() + : resolveNonEnvSecretRefApiKeyMarker(sourceApiKeyRef.source); +} + +function resolveSourceManagedHeaderMarkers(params: { + sourceProvider: ProviderConfig | undefined; + sourceSecretDefaults: SecretDefaults | undefined; +}): Record { + const sourceHeaders = isRecord(params.sourceProvider?.headers) + ? (params.sourceProvider.headers as Record) + : undefined; + if (!sourceHeaders) { + return {}; + } + const markers: Record = {}; + for (const [headerName, headerValue] of Object.entries(sourceHeaders)) { + const sourceHeaderRef = resolveSecretInputRef({ + value: headerValue, + defaults: params.sourceSecretDefaults, + }).ref; + if (!sourceHeaderRef || !sourceHeaderRef.id.trim()) { + continue; + } + markers[headerName] = + sourceHeaderRef.source === "env" + ? 
resolveEnvSecretRefHeaderValueMarker(sourceHeaderRef.id) + : resolveNonEnvSecretRefHeaderValueMarker(sourceHeaderRef.source); + } + return markers; +} + +export function enforceSourceManagedProviderSecrets(params: { + providers: ModelsConfig["providers"]; + sourceProviders: ModelsConfig["providers"] | undefined; + sourceSecretDefaults?: SecretDefaults; + secretRefManagedProviders?: Set; +}): ModelsConfig["providers"] { + const { providers } = params; + if (!providers) { + return providers; + } + const sourceProvidersByKey = normalizeSourceProviderLookup(params.sourceProviders); + if (Object.keys(sourceProvidersByKey).length === 0) { + return providers; + } + + let nextProviders: Record | null = null; + for (const [providerKey, provider] of Object.entries(providers)) { + if (!isRecord(provider)) { + continue; + } + const sourceProvider = sourceProvidersByKey[providerKey.trim()]; + if (!sourceProvider) { + continue; + } + let nextProvider = provider; + let providerMutated = false; + + const sourceApiKeyMarker = resolveSourceManagedApiKeyMarker({ + sourceProvider, + sourceSecretDefaults: params.sourceSecretDefaults, + }); + if (sourceApiKeyMarker) { + params.secretRefManagedProviders?.add(providerKey.trim()); + if (nextProvider.apiKey !== sourceApiKeyMarker) { + providerMutated = true; + nextProvider = { + ...nextProvider, + apiKey: sourceApiKeyMarker, + }; + } + } + + const sourceHeaderMarkers = resolveSourceManagedHeaderMarkers({ + sourceProvider, + sourceSecretDefaults: params.sourceSecretDefaults, + }); + if (Object.keys(sourceHeaderMarkers).length > 0) { + const currentHeaders = isRecord(nextProvider.headers) + ? 
(nextProvider.headers as Record) + : undefined; + const nextHeaders = { + ...(currentHeaders as Record[string]>), + }; + let headersMutated = !currentHeaders; + for (const [headerName, marker] of Object.entries(sourceHeaderMarkers)) { + if (nextHeaders[headerName] === marker) { + continue; + } + headersMutated = true; + nextHeaders[headerName] = marker; + } + if (headersMutated) { + providerMutated = true; + nextProvider = { + ...nextProvider, + headers: nextHeaders, + }; + } + } + + if (!providerMutated) { + continue; + } + if (!nextProviders) { + nextProviders = { ...providers }; + } + nextProviders[providerKey] = nextProvider; + } + + return nextProviders ?? providers; +} + export function normalizeProviders(params: { providers: ModelsConfig["providers"]; agentDir: string; env?: NodeJS.ProcessEnv; - secretDefaults?: { - env?: string; - file?: string; - exec?: string; - }; + secretDefaults?: SecretDefaults; + sourceProviders?: ModelsConfig["providers"]; + sourceSecretDefaults?: SecretDefaults; secretRefManagedProviders?: Set; }): ModelsConfig["providers"] { const { providers } = params; @@ -434,7 +574,13 @@ export function normalizeProviders(params: { next[normalizedKey] = normalizedProvider; } - return mutated ? next : providers; + const normalizedProviders = mutated ? 
next : providers; + return enforceSourceManagedProviderSecrets({ + providers: normalizedProviders, + sourceProviders: params.sourceProviders, + sourceSecretDefaults: params.sourceSecretDefaults, + secretRefManagedProviders: params.secretRefManagedProviders, + }); } type ImplicitProviderParams = { diff --git a/src/agents/models-config.runtime-source-snapshot.test.ts b/src/agents/models-config.runtime-source-snapshot.test.ts index 4c5889769cc..cc033fb56a6 100644 --- a/src/agents/models-config.runtime-source-snapshot.test.ts +++ b/src/agents/models-config.runtime-source-snapshot.test.ts @@ -209,4 +209,152 @@ describe("models-config runtime source snapshot", () => { } }); }); + + it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => { + await withTempHome(async () => { + const sourceConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + gateway: { + auth: { + mode: "token", + }, + }, + }; + const runtimeConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-runtime-resolved", // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + gateway: { + auth: { + mode: "token", + }, + }, + }; + const incompatibleCandidate: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-runtime-resolved", // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + }; + + try { + setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); + await ensureOpenClawModelsJson(incompatibleCandidate); + + const parsed = await readGeneratedModelsJson<{ + providers: Record; + }>(); + 
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); + }); + + it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => { + await withTempHome(async () => { + const sourceConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: { + source: "env", + provider: "default", + id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret + }, + "X-Tenant-Token": { + source: "file", + provider: "vault", + id: "/providers/openai/tenantToken", + }, + }, + models: [], + }, + }, + }, + gateway: { + auth: { + mode: "token", + }, + }, + }; + const runtimeConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: "Bearer runtime-openai-token", + "X-Tenant-Token": "runtime-tenant-token", + }, + models: [], + }, + }, + }, + gateway: { + auth: { + mode: "token", + }, + }, + }; + const incompatibleCandidate: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: "Bearer runtime-openai-token", + "X-Tenant-Token": "runtime-tenant-token", + }, + models: [], + }, + }, + }, + }; + + try { + setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); + await ensureOpenClawModelsJson(incompatibleCandidate); + + const parsed = await readGeneratedModelsJson<{ + providers: Record }>; + }>(); + expect(parsed.providers.openai?.headers?.Authorization).toBe( + "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret + ); + expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); + 
}); }); diff --git a/src/agents/models-config.ts b/src/agents/models-config.ts index 99714a1a792..3e013799b0b 100644 --- a/src/agents/models-config.ts +++ b/src/agents/models-config.ts @@ -42,15 +42,31 @@ async function writeModelsFileAtomic(targetPath: string, contents: string): Prom await fs.rename(tempPath, targetPath); } -function resolveModelsConfigInput(config?: OpenClawConfig): OpenClawConfig { +function resolveModelsConfigInput(config?: OpenClawConfig): { + config: OpenClawConfig; + sourceConfigForSecrets: OpenClawConfig; +} { const runtimeSource = getRuntimeConfigSourceSnapshot(); if (!config) { - return runtimeSource ?? loadConfig(); + const loaded = loadConfig(); + return { + config: runtimeSource ?? loaded, + sourceConfigForSecrets: runtimeSource ?? loaded, + }; } if (!runtimeSource) { - return config; + return { + config, + sourceConfigForSecrets: config, + }; } - return projectConfigOntoRuntimeSourceSnapshot(config); + const projected = projectConfigOntoRuntimeSourceSnapshot(config); + return { + config: projected, + // If projection is skipped (for example incompatible top-level shape), + // keep managed secret persistence anchored to the active source snapshot. + sourceConfigForSecrets: projected === config ? runtimeSource : projected, + }; } async function withModelsJsonWriteLock(targetPath: string, run: () => Promise): Promise { @@ -76,7 +92,8 @@ export async function ensureOpenClawModelsJson( config?: OpenClawConfig, agentDirOverride?: string, ): Promise<{ agentDir: string; wrote: boolean }> { - const cfg = resolveModelsConfigInput(config); + const resolved = resolveModelsConfigInput(config); + const cfg = resolved.config; const agentDir = agentDirOverride?.trim() ? 
agentDirOverride.trim() : resolveOpenClawAgentDir(); const targetPath = path.join(agentDir, "models.json"); @@ -87,6 +104,7 @@ export async function ensureOpenClawModelsJson( const existingModelsFile = await readExistingModelsFile(targetPath); const plan = await planOpenClawModelsJson({ cfg, + sourceConfigForSecrets: resolved.sourceConfigForSecrets, agentDir, env, existingRaw: existingModelsFile.raw, diff --git a/src/agents/ollama-models.test.ts b/src/agents/ollama-models.test.ts new file mode 100644 index 00000000000..7877d40bdf9 --- /dev/null +++ b/src/agents/ollama-models.test.ts @@ -0,0 +1,61 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + enrichOllamaModelsWithContext, + resolveOllamaApiBase, + type OllamaTagModel, +} from "./ollama-models.js"; + +function jsonResponse(body: unknown, status = 200): Response { + return new Response(JSON.stringify(body), { + status, + headers: { "Content-Type": "application/json" }, + }); +} + +function requestUrl(input: string | URL | Request): string { + if (typeof input === "string") { + return input; + } + if (input instanceof URL) { + return input.toString(); + } + return input.url; +} + +function requestBody(body: BodyInit | null | undefined): string { + return typeof body === "string" ? 
body : "{}"; +} + +describe("ollama-models", () => { + afterEach(() => { + vi.unstubAllGlobals(); + }); + + it("strips /v1 when resolving the Ollama API base", () => { + expect(resolveOllamaApiBase("http://127.0.0.1:11434/v1")).toBe("http://127.0.0.1:11434"); + expect(resolveOllamaApiBase("http://127.0.0.1:11434///")).toBe("http://127.0.0.1:11434"); + }); + + it("enriches discovered models with context windows from /api/show", async () => { + const models: OllamaTagModel[] = [{ name: "llama3:8b" }, { name: "deepseek-r1:14b" }]; + const fetchMock = vi.fn(async (input: string | URL | Request, init?: RequestInit) => { + const url = requestUrl(input); + if (!url.endsWith("/api/show")) { + throw new Error(`Unexpected fetch: ${url}`); + } + const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + if (body.name === "llama3:8b") { + return jsonResponse({ model_info: { "llama.context_length": 65536 } }); + } + return jsonResponse({}); + }); + vi.stubGlobal("fetch", fetchMock); + + const enriched = await enrichOllamaModelsWithContext("http://127.0.0.1:11434", models); + + expect(enriched).toEqual([ + { name: "llama3:8b", contextWindow: 65536 }, + { name: "deepseek-r1:14b", contextWindow: undefined }, + ]); + }); +}); diff --git a/src/agents/ollama-models.ts b/src/agents/ollama-models.ts index 19d95605203..20406b3a80e 100644 --- a/src/agents/ollama-models.ts +++ b/src/agents/ollama-models.ts @@ -27,6 +27,12 @@ export type OllamaTagsResponse = { models?: OllamaTagModel[]; }; +export type OllamaModelWithContext = OllamaTagModel & { + contextWindow?: number; +}; + +const OLLAMA_SHOW_CONCURRENCY = 8; + /** * Derive the Ollama native API base URL from a configured base URL. 
* @@ -43,6 +49,58 @@ export function resolveOllamaApiBase(configuredBaseUrl?: string): string { return trimmed.replace(/\/v1$/i, ""); } +export async function queryOllamaContextWindow( + apiBase: string, + modelName: string, +): Promise { + try { + const response = await fetch(`${apiBase}/api/show`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ name: modelName }), + signal: AbortSignal.timeout(3000), + }); + if (!response.ok) { + return undefined; + } + const data = (await response.json()) as { model_info?: Record }; + if (!data.model_info) { + return undefined; + } + for (const [key, value] of Object.entries(data.model_info)) { + if (key.endsWith(".context_length") && typeof value === "number" && Number.isFinite(value)) { + const contextWindow = Math.floor(value); + if (contextWindow > 0) { + return contextWindow; + } + } + } + return undefined; + } catch { + return undefined; + } +} + +export async function enrichOllamaModelsWithContext( + apiBase: string, + models: OllamaTagModel[], + opts?: { concurrency?: number }, +): Promise { + const concurrency = Math.max(1, Math.floor(opts?.concurrency ?? OLLAMA_SHOW_CONCURRENCY)); + const enriched: OllamaModelWithContext[] = []; + for (let index = 0; index < models.length; index += concurrency) { + const batch = models.slice(index, index + concurrency); + const batchResults = await Promise.all( + batch.map(async (model) => ({ + ...model, + contextWindow: await queryOllamaContextWindow(apiBase, model.name), + })), + ); + enriched.push(...batchResults); + } + return enriched; +} + /** Heuristic: treat models with "r1", "reasoning", or "think" in the name as reasoning models. 
*/ export function isReasoningModelHeuristic(modelId: string): boolean { return /r1|reasoning|think|reason/i.test(modelId); diff --git a/src/agents/openai-responses.reasoning-replay.test.ts b/src/agents/openai-responses.reasoning-replay.test.ts index b5ccc50e4b4..0fcb02ece6d 100644 --- a/src/agents/openai-responses.reasoning-replay.test.ts +++ b/src/agents/openai-responses.reasoning-replay.test.ts @@ -30,6 +30,13 @@ function extractInputTypes(input: unknown[]) { .filter((t): t is string => typeof t === "string"); } +function extractInputMessages(input: unknown[]) { + return input.filter( + (item): item is Record => + !!item && typeof item === "object" && (item as Record).type === "message", + ); +} + const ZERO_USAGE = { input: 0, output: 0, @@ -184,4 +191,36 @@ describe("openai-responses reasoning replay", () => { expect(types).toContain("reasoning"); expect(types).toContain("message"); }); + + it.each(["commentary", "final_answer"] as const)( + "replays assistant message phase metadata for %s", + async (phase) => { + const assistantWithText = buildAssistantMessage({ + stopReason: "stop", + content: [ + buildReasoningPart(), + { + type: "text", + text: "hello", + textSignature: JSON.stringify({ v: 1, id: `msg_${phase}`, phase }), + }, + ], + }); + + const { input, types } = await runAbortedOpenAIResponsesStream({ + messages: [ + { role: "user", content: "Hi", timestamp: Date.now() }, + assistantWithText, + { role: "user", content: "Ok", timestamp: Date.now() }, + ], + }); + + expect(types).toContain("message"); + + const replayedMessage = extractInputMessages(input).find( + (item) => item.id === `msg_${phase}`, + ); + expect(replayedMessage?.phase).toBe(phase); + }, + ); }); diff --git a/src/agents/openai-ws-connection.test.ts b/src/agents/openai-ws-connection.test.ts index fb80f510ac1..2a7b95f7eb9 100644 --- a/src/agents/openai-ws-connection.test.ts +++ b/src/agents/openai-ws-connection.test.ts @@ -595,14 +595,12 @@ describe("OpenAIWebSocketManager", () => { 
manager.warmUp({ model: "gpt-5.2", - tools: [{ type: "function", function: { name: "exec", description: "Run a command" } }], + tools: [{ type: "function", name: "exec", description: "Run a command" }], }); const sent = JSON.parse(sock.sentMessages[0] ?? "{}") as Record; expect(sent["tools"]).toHaveLength(1); - expect((sent["tools"] as Array<{ function?: { name?: string } }>)[0]?.function?.name).toBe( - "exec", - ); + expect((sent["tools"] as Array<{ name?: string }>)[0]?.name).toBe("exec"); }); }); diff --git a/src/agents/openai-ws-connection.ts b/src/agents/openai-ws-connection.ts index a765c0f3780..2d9c6ffe7e6 100644 --- a/src/agents/openai-ws-connection.ts +++ b/src/agents/openai-ws-connection.ts @@ -37,12 +37,15 @@ export interface UsageInfo { total_tokens: number; } +export type OpenAIResponsesAssistantPhase = "commentary" | "final_answer"; + export type OutputItem = | { type: "message"; id: string; role: "assistant"; content: Array<{ type: "output_text"; text: string }>; + phase?: OpenAIResponsesAssistantPhase; status?: "in_progress" | "completed"; } | { @@ -190,6 +193,7 @@ export type InputItem = type: "message"; role: "system" | "developer" | "user" | "assistant"; content: string | ContentPart[]; + phase?: OpenAIResponsesAssistantPhase; } | { type: "function_call"; id?: string; call_id?: string; name: string; arguments: string } | { type: "function_call_output"; call_id: string; output: string } @@ -204,11 +208,10 @@ export type ToolChoice = export interface FunctionToolDefinition { type: "function"; - function: { - name: string; - description?: string; - parameters?: Record; - }; + name: string; + description?: string; + parameters?: Record; + strict?: boolean; } /** Standard response.create event payload (full turn) */ diff --git a/src/agents/openai-ws-stream.e2e.test.ts b/src/agents/openai-ws-stream.e2e.test.ts index 2b90d0dbc78..1146d71ffe3 100644 --- a/src/agents/openai-ws-stream.e2e.test.ts +++ b/src/agents/openai-ws-stream.e2e.test.ts @@ -14,6 +14,7 
@@ * Skipped in CI — no API key available and we avoid billable external calls. */ +import type { AssistantMessage, Context } from "@mariozechner/pi-ai"; import { describe, it, expect, afterEach } from "vitest"; import { createOpenAIWebSocketStreamFn, @@ -28,14 +29,13 @@ const testFn = LIVE ? it : it.skip; const model = { api: "openai-responses" as const, provider: "openai", - id: "gpt-4o-mini", - name: "gpt-4o-mini", - baseUrl: "", - reasoning: false, - input: { maxTokens: 128_000 }, - output: { maxTokens: 16_384 }, - cache: false, - compat: {}, + id: "gpt-5.2", + name: "gpt-5.2", + contextWindow: 128_000, + maxTokens: 4_096, + reasoning: true, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, } as unknown as Parameters>[0]; type StreamFnParams = Parameters>; @@ -47,6 +47,61 @@ function makeContext(userMessage: string): StreamFnParams[1] { } as unknown as StreamFnParams[1]; } +function makeToolContext(userMessage: string): StreamFnParams[1] { + return { + systemPrompt: "You are a precise assistant. 
Follow tool instructions exactly.", + messages: [{ role: "user" as const, content: userMessage }], + tools: [ + { + name: "noop", + description: "Return the supplied tool result to the user.", + parameters: { + type: "object", + additionalProperties: false, + properties: {}, + }, + }, + ], + } as unknown as Context; +} + +function makeToolResultMessage( + callId: string, + output: string, +): StreamFnParams[1]["messages"][number] { + return { + role: "toolResult" as const, + toolCallId: callId, + toolName: "noop", + content: [{ type: "text" as const, text: output }], + isError: false, + timestamp: Date.now(), + } as unknown as StreamFnParams[1]["messages"][number]; +} + +async function collectEvents( + stream: ReturnType>, +): Promise> { + const events: Array<{ type: string; message?: AssistantMessage }> = []; + for await (const event of stream as AsyncIterable<{ type: string; message?: AssistantMessage }>) { + events.push(event); + } + return events; +} + +function expectDone(events: Array<{ type: string; message?: AssistantMessage }>): AssistantMessage { + const done = events.find((event) => event.type === "done")?.message; + expect(done).toBeDefined(); + return done!; +} + +function assistantText(message: AssistantMessage): string { + return message.content + .filter((block) => block.type === "text") + .map((block) => block.text) + .join(""); +} + /** Each test gets a unique session ID to avoid cross-test interference. 
*/ const sessions: string[] = []; function freshSession(name: string): string { @@ -68,26 +123,14 @@ describe("OpenAI WebSocket e2e", () => { async () => { const sid = freshSession("single"); const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); - const stream = streamFn(model, makeContext("What is 2+2?"), {}); + const stream = streamFn(model, makeContext("What is 2+2?"), { transport: "websocket" }); + const done = expectDone(await collectEvents(stream)); - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } - - const done = events.find((e) => e.type === "done") as - | { type: "done"; message: { content: Array<{ type: string; text?: string }> } } - | undefined; - expect(done).toBeDefined(); - expect(done!.message.content.length).toBeGreaterThan(0); - - const text = done!.message.content - .filter((c) => c.type === "text") - .map((c) => c.text) - .join(""); + expect(done.content.length).toBeGreaterThan(0); + const text = assistantText(done); expect(text).toMatch(/4/); }, - 30_000, + 45_000, ); testFn( @@ -96,19 +139,80 @@ describe("OpenAI WebSocket e2e", () => { const sid = freshSession("temp"); const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); const stream = streamFn(model, makeContext("Pick a random number between 1 and 1000."), { + transport: "websocket", temperature: 0.8, }); - - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } + const events = await collectEvents(stream); // Stream must complete (done or error with fallback) — must NOT hang. 
const hasTerminal = events.some((e) => e.type === "done" || e.type === "error"); expect(hasTerminal).toBe(true); }, - 30_000, + 45_000, + ); + + testFn( + "reuses the websocket session for tool-call follow-up turns", + async () => { + const sid = freshSession("tool-roundtrip"); + const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); + const firstContext = makeToolContext( + "Call the tool `noop` with {}. After the tool result arrives, reply with exactly the tool output and nothing else.", + ); + const firstEvents = await collectEvents( + streamFn(model, firstContext, { + transport: "websocket", + toolChoice: "required", + maxTokens: 128, + } as unknown as StreamFnParams[2]), + ); + const firstDone = expectDone(firstEvents); + const toolCall = firstDone.content.find((block) => block.type === "toolCall") as + | { type: "toolCall"; id: string; name: string } + | undefined; + expect(toolCall?.name).toBe("noop"); + expect(toolCall?.id).toBeTruthy(); + + const secondContext = { + ...firstContext, + messages: [ + ...firstContext.messages, + firstDone, + makeToolResultMessage(toolCall!.id, "TOOL_OK"), + ], + } as unknown as StreamFnParams[1]; + const secondDone = expectDone( + await collectEvents( + streamFn(model, secondContext, { + transport: "websocket", + maxTokens: 128, + }), + ), + ); + + expect(assistantText(secondDone)).toMatch(/TOOL_OK/); + }, + 60_000, + ); + + testFn( + "supports websocket warm-up before the first request", + async () => { + const sid = freshSession("warmup"); + const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); + const done = expectDone( + await collectEvents( + streamFn(model, makeContext("Reply with the word warmed."), { + transport: "websocket", + openaiWsWarmup: true, + maxTokens: 32, + } as unknown as StreamFnParams[2]), + ), + ); + + expect(assistantText(done).toLowerCase()).toContain("warmed"); + }, + 45_000, ); testFn( @@ -119,16 +223,13 @@ describe("OpenAI WebSocket e2e", () => { 
expect(hasWsSession(sid)).toBe(false); - const stream = streamFn(model, makeContext("Say hello."), {}); - for await (const _ of stream as AsyncIterable) { - /* consume */ - } + await collectEvents(streamFn(model, makeContext("Say hello."), { transport: "websocket" })); expect(hasWsSession(sid)).toBe(true); releaseWsSession(sid); expect(hasWsSession(sid)).toBe(false); }, - 30_000, + 45_000, ); testFn( @@ -137,15 +238,11 @@ describe("OpenAI WebSocket e2e", () => { const sid = freshSession("fallback"); const streamFn = createOpenAIWebSocketStreamFn("sk-invalid-key", sid); const stream = streamFn(model, makeContext("Hello"), {}); - - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } + const events = await collectEvents(stream); const hasTerminal = events.some((e) => e.type === "done" || e.type === "error"); expect(hasTerminal).toBe(true); }, - 30_000, + 45_000, ); }); diff --git a/src/agents/openai-ws-stream.test.ts b/src/agents/openai-ws-stream.test.ts index a9c3679f561..cd3425bec83 100644 --- a/src/agents/openai-ws-stream.test.ts +++ b/src/agents/openai-ws-stream.test.ts @@ -224,6 +224,7 @@ type FakeMessage = | { role: "assistant"; content: unknown[]; + phase?: "commentary" | "final_answer"; stopReason: string; api: string; provider: string; @@ -247,6 +248,7 @@ function userMsg(text: string): FakeMessage { function assistantMsg( textBlocks: string[], toolCalls: Array<{ id: string; name: string; args: Record }> = [], + phase?: "commentary" | "final_answer", ): FakeMessage { const content: unknown[] = []; for (const t of textBlocks) { @@ -258,6 +260,7 @@ function assistantMsg( return { role: "assistant", content, + phase, stopReason: toolCalls.length > 0 ? 
"toolUse" : "stop", api: "openai-responses", provider: "openai", @@ -302,6 +305,7 @@ function makeResponseObject( id: string, outputText?: string, toolCallName?: string, + phase?: "commentary" | "final_answer", ): ResponseObject { const output: ResponseObject["output"] = []; if (outputText) { @@ -310,6 +314,7 @@ function makeResponseObject( id: "item_1", role: "assistant", content: [{ type: "output_text", text: outputText }], + phase, }); } if (toolCallName) { @@ -357,18 +362,16 @@ describe("convertTools", () => { expect(result).toHaveLength(1); expect(result[0]).toMatchObject({ type: "function", - function: { - name: "exec", - description: "Run a command", - parameters: { type: "object", properties: { cmd: { type: "string" } } }, - }, + name: "exec", + description: "Run a command", + parameters: { type: "object", properties: { cmd: { type: "string" } } }, }); }); it("handles tools without description", () => { const tools = [{ name: "ping", description: "", parameters: {} }]; const result = convertTools(tools as Parameters[0]); - expect(result[0]?.function?.name).toBe("ping"); + expect(result[0]?.name).toBe("ping"); }); }); @@ -391,6 +394,19 @@ describe("convertMessagesToInputItems", () => { expect(items[0]).toMatchObject({ type: "message", role: "assistant", content: "Hi there." 
}); }); + it("preserves assistant phase on replayed assistant messages", () => { + const items = convertMessagesToInputItems([ + assistantMsg(["Working on it."], [], "commentary"), + ] as Parameters[0]); + expect(items).toHaveLength(1); + expect(items[0]).toMatchObject({ + type: "message", + role: "assistant", + content: "Working on it.", + phase: "commentary", + }); + }); + it("converts an assistant message with a tool call", () => { const msg = assistantMsg( ["Let me run that."], @@ -408,10 +424,58 @@ describe("convertMessagesToInputItems", () => { call_id: "call_1", name: "exec", }); + expect(textItem).not.toHaveProperty("phase"); const fc = fcItem as { arguments: string }; expect(JSON.parse(fc.arguments)).toEqual({ cmd: "ls" }); }); + it("preserves assistant phase on commentary text before tool calls", () => { + const msg = assistantMsg( + ["Let me run that."], + [{ id: "call_1", name: "exec", args: { cmd: "ls" } }], + "commentary", + ); + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + const textItem = items.find((i) => i.type === "message"); + expect(textItem).toMatchObject({ + type: "message", + role: "assistant", + content: "Let me run that.", + phase: "commentary", + }); + }); + + it("preserves assistant phase from textSignature metadata without local phase field", () => { + const msg = { + role: "assistant" as const, + content: [ + { + type: "text" as const, + text: "Working on it.", + textSignature: JSON.stringify({ v: 1, id: "msg_sig", phase: "commentary" }), + }, + ], + stopReason: "stop", + api: "openai-responses", + provider: "openai", + model: "gpt-5.2", + usage: {}, + timestamp: 0, + }; + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + expect(items).toHaveLength(1); + expect(items[0]).toMatchObject({ + type: "message", + role: "assistant", + content: "Working on it.", + phase: "commentary", + }); + }); + it("converts a tool 
result message", () => { const items = convertMessagesToInputItems([toolResultMsg("call_1", "file.txt")] as Parameters< typeof convertMessagesToInputItems @@ -518,6 +582,34 @@ describe("convertMessagesToInputItems", () => { expect((items[0] as { content?: unknown }).content).toBe("Here is my answer."); }); + it("replays reasoning blocks from thinking signatures", () => { + const msg = { + role: "assistant" as const, + content: [ + { + type: "thinking" as const, + thinking: "internal reasoning...", + thinkingSignature: JSON.stringify({ + type: "reasoning", + id: "rs_test", + summary: [], + }), + }, + { type: "text" as const, text: "Here is my answer." }, + ], + stopReason: "stop", + api: "openai-responses", + provider: "openai", + model: "gpt-5.2", + usage: {}, + timestamp: 0, + }; + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + expect(items.map((item) => item.type)).toEqual(["reasoning", "message"]); + }); + it("returns empty array for empty messages", () => { expect(convertMessagesToInputItems([])).toEqual([]); }); @@ -594,6 +686,16 @@ describe("buildAssistantMessageFromResponse", () => { expect(msg.content).toEqual([]); expect(msg.stopReason).toBe("stop"); }); + + it("preserves phase from assistant message output items", () => { + const response = makeResponseObject("resp_8", "Final answer", undefined, "final_answer"); + const msg = buildAssistantMessageFromResponse(response, modelInfo) as { + phase?: string; + content: Array<{ type: string; text?: string }>; + }; + expect(msg.phase).toBe("final_answer"); + expect(msg.content[0]?.text).toBe("Final answer"); + }); }); // ───────────────────────────────────────────────────────────────────────────── @@ -633,6 +735,7 @@ describe("createOpenAIWebSocketStreamFn", () => { releaseWsSession("sess-fallback"); releaseWsSession("sess-incremental"); releaseWsSession("sess-full"); + releaseWsSession("sess-phase"); releaseWsSession("sess-tools"); 
releaseWsSession("sess-store-default"); releaseWsSession("sess-store-compat"); @@ -795,6 +898,40 @@ describe("createOpenAIWebSocketStreamFn", () => { expect(doneEvent?.message.content[0]?.text).toBe("Hello back!"); }); + it("keeps assistant phase on completed WebSocket responses", async () => { + const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-phase"); + const stream = streamFn( + modelStub as Parameters[0], + contextStub as Parameters[1], + ); + + const events: unknown[] = []; + const done = (async () => { + for await (const ev of await resolveStream(stream)) { + events.push(ev); + } + })(); + + await new Promise((r) => setImmediate(r)); + const manager = MockManager.lastInstance!; + manager.simulateEvent({ + type: "response.completed", + response: makeResponseObject("resp_phase", "Working...", "exec", "commentary"), + }); + + await done; + + const doneEvent = events.find((e) => (e as { type?: string }).type === "done") as + | { + type: string; + reason: string; + message: { phase?: string; stopReason: string }; + } + | undefined; + expect(doneEvent?.message.phase).toBe("commentary"); + expect(doneEvent?.message.stopReason).toBe("toolUse"); + }); + it("falls back to HTTP when WebSocket connect fails (session pre-broken via flag)", async () => { // Set the class-level flag BEFORE calling streamFn so the new instance // fails on connect(). We patch the static default via MockManager directly. 
diff --git a/src/agents/openai-ws-stream.ts b/src/agents/openai-ws-stream.ts index dd82ced9e95..5b7a80f52ec 100644 --- a/src/agents/openai-ws-stream.ts +++ b/src/agents/openai-ws-stream.ts @@ -37,6 +37,7 @@ import { type ContentPart, type FunctionToolDefinition, type InputItem, + type OpenAIResponsesAssistantPhase, type OpenAIWebSocketManagerOptions, type ResponseObject, } from "./openai-ws-connection.js"; @@ -100,6 +101,8 @@ export function hasWsSession(sessionId: string): boolean { // ───────────────────────────────────────────────────────────────────────────── type AnyMessage = Message & { role: string; content: unknown }; +type AssistantMessageWithPhase = AssistantMessage & { phase?: OpenAIResponsesAssistantPhase }; +type ReplayModelInfo = { input?: ReadonlyArray }; function toNonEmptyString(value: unknown): string | null { if (typeof value !== "string") { @@ -109,6 +112,50 @@ function toNonEmptyString(value: unknown): string | null { return trimmed.length > 0 ? trimmed : null; } +function normalizeAssistantPhase(value: unknown): OpenAIResponsesAssistantPhase | undefined { + return value === "commentary" || value === "final_answer" ? value : undefined; +} + +function encodeAssistantTextSignature(params: { + id: string; + phase?: OpenAIResponsesAssistantPhase; +}): string { + return JSON.stringify({ + v: 1, + id: params.id, + ...(params.phase ? { phase: params.phase } : {}), + }); +} + +function parseAssistantTextSignature( + value: unknown, +): { id: string; phase?: OpenAIResponsesAssistantPhase } | null { + if (typeof value !== "string" || value.trim().length === 0) { + return null; + } + if (!value.startsWith("{")) { + return { id: value }; + } + try { + const parsed = JSON.parse(value) as { v?: unknown; id?: unknown; phase?: unknown }; + if (parsed.v !== 1 || typeof parsed.id !== "string") { + return null; + } + return { + id: parsed.id, + ...(normalizeAssistantPhase(parsed.phase) + ? 
{ phase: normalizeAssistantPhase(parsed.phase) } + : {}), + }; + } catch { + return null; + } +} + +function supportsImageInput(modelOverride?: ReplayModelInfo): boolean { + return !Array.isArray(modelOverride?.input) || modelOverride.input.includes("image"); +} + /** Convert pi-ai content (string | ContentPart[]) to plain text. */ function contentToText(content: unknown): string { if (typeof content === "string") { @@ -117,30 +164,50 @@ function contentToText(content: unknown): string { if (!Array.isArray(content)) { return ""; } - return (content as Array<{ type?: string; text?: string }>) - .filter((p) => p.type === "text" && typeof p.text === "string") - .map((p) => p.text as string) + return content + .filter( + (part): part is { type?: string; text?: string } => Boolean(part) && typeof part === "object", + ) + .filter( + (part) => + (part.type === "text" || part.type === "input_text" || part.type === "output_text") && + typeof part.text === "string", + ) + .map((part) => part.text as string) .join(""); } /** Convert pi-ai content to OpenAI ContentPart[]. */ -function contentToOpenAIParts(content: unknown): ContentPart[] { +function contentToOpenAIParts(content: unknown, modelOverride?: ReplayModelInfo): ContentPart[] { if (typeof content === "string") { return content ? 
[{ type: "input_text", text: content }] : []; } if (!Array.isArray(content)) { return []; } + + const includeImages = supportsImageInput(modelOverride); const parts: ContentPart[] = []; for (const part of content as Array<{ type?: string; text?: string; data?: string; mimeType?: string; + source?: unknown; }>) { - if (part.type === "text" && typeof part.text === "string") { + if ( + (part.type === "text" || part.type === "input_text" || part.type === "output_text") && + typeof part.text === "string" + ) { parts.push({ type: "input_text", text: part.text }); - } else if (part.type === "image" && typeof part.data === "string") { + continue; + } + + if (!includeImages) { + continue; + } + + if (part.type === "image" && typeof part.data === "string") { parts.push({ type: "input_image", source: { @@ -149,11 +216,60 @@ function contentToOpenAIParts(content: unknown): ContentPart[] { data: part.data, }, }); + continue; + } + + if ( + part.type === "input_image" && + part.source && + typeof part.source === "object" && + typeof (part.source as { type?: unknown }).type === "string" + ) { + parts.push({ + type: "input_image", + source: part.source as + | { type: "url"; url: string } + | { type: "base64"; media_type: string; data: string }, + }); } } return parts; } +function parseReasoningItem(value: unknown): Extract | null { + if (!value || typeof value !== "object") { + return null; + } + const record = value as { + type?: unknown; + content?: unknown; + encrypted_content?: unknown; + summary?: unknown; + }; + if (record.type !== "reasoning") { + return null; + } + return { + type: "reasoning", + ...(typeof record.content === "string" ? { content: record.content } : {}), + ...(typeof record.encrypted_content === "string" + ? { encrypted_content: record.encrypted_content } + : {}), + ...(typeof record.summary === "string" ? 
{ summary: record.summary } : {}), + }; +} + +function parseThinkingSignature(value: unknown): Extract | null { + if (typeof value !== "string" || value.trim().length === 0) { + return null; + } + try { + return parseReasoningItem(JSON.parse(value)); + } catch { + return null; + } +} + /** Convert pi-ai tool array to OpenAI FunctionToolDefinition[]. */ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] { if (!tools || tools.length === 0) { @@ -161,11 +277,9 @@ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] } return tools.map((tool) => ({ type: "function" as const, - function: { - name: tool.name, - description: typeof tool.description === "string" ? tool.description : undefined, - parameters: (tool.parameters ?? {}) as Record, - }, + name: tool.name, + description: typeof tool.description === "string" ? tool.description : undefined, + parameters: (tool.parameters ?? {}) as Record, })); } @@ -173,14 +287,24 @@ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] * Convert the full pi-ai message history to an OpenAI `input` array. * Handles user messages, assistant text+tool-call messages, and tool results. 
*/ -export function convertMessagesToInputItems(messages: Message[]): InputItem[] { +export function convertMessagesToInputItems( + messages: Message[], + modelOverride?: ReplayModelInfo, +): InputItem[] { const items: InputItem[] = []; for (const msg of messages) { - const m = msg as AnyMessage; + const m = msg as AnyMessage & { + phase?: unknown; + toolCallId?: unknown; + toolUseId?: unknown; + }; if (m.role === "user") { - const parts = contentToOpenAIParts(m.content); + const parts = contentToOpenAIParts(m.content, modelOverride); + if (parts.length === 0) { + continue; + } items.push({ type: "message", role: "user", @@ -194,87 +318,116 @@ export function convertMessagesToInputItems(messages: Message[]): InputItem[] { if (m.role === "assistant") { const content = m.content; + let assistantPhase = normalizeAssistantPhase(m.phase); if (Array.isArray(content)) { - // Collect text blocks and tool calls separately const textParts: string[] = []; - for (const block of content as Array<{ - type?: string; - text?: string; - id?: string; - name?: string; - arguments?: Record; - thinking?: string; - }>) { - if (block.type === "text" && typeof block.text === "string") { - textParts.push(block.text); - } else if (block.type === "thinking" && typeof block.thinking === "string") { - // Skip thinking blocks — not sent back to the model - } else if (block.type === "toolCall") { - // Push accumulated text first - if (textParts.length > 0) { - items.push({ - type: "message", - role: "assistant", - content: textParts.join(""), - }); - textParts.length = 0; - } - const callId = toNonEmptyString(block.id); - const toolName = toNonEmptyString(block.name); - if (!callId || !toolName) { - continue; - } - // Push function_call item - items.push({ - type: "function_call", - call_id: callId, - name: toolName, - arguments: - typeof block.arguments === "string" - ? block.arguments - : JSON.stringify(block.arguments ?? 
{}), - }); + const pushAssistantText = () => { + if (textParts.length === 0) { + return; } - } - if (textParts.length > 0) { items.push({ type: "message", role: "assistant", content: textParts.join(""), + ...(assistantPhase ? { phase: assistantPhase } : {}), }); - } - } else { - const text = contentToText(m.content); - if (text) { + textParts.length = 0; + }; + + for (const block of content as Array<{ + type?: string; + text?: string; + textSignature?: unknown; + id?: unknown; + name?: unknown; + arguments?: unknown; + thinkingSignature?: unknown; + }>) { + if (block.type === "text" && typeof block.text === "string") { + const parsedSignature = parseAssistantTextSignature(block.textSignature); + if (!assistantPhase) { + assistantPhase = parsedSignature?.phase; + } + textParts.push(block.text); + continue; + } + + if (block.type === "thinking") { + pushAssistantText(); + const reasoningItem = parseThinkingSignature(block.thinkingSignature); + if (reasoningItem) { + items.push(reasoningItem); + } + continue; + } + + if (block.type !== "toolCall") { + continue; + } + + pushAssistantText(); + const callIdRaw = toNonEmptyString(block.id); + const toolName = toNonEmptyString(block.name); + if (!callIdRaw || !toolName) { + continue; + } + const [callId, itemId] = callIdRaw.split("|", 2); items.push({ - type: "message", - role: "assistant", - content: text, + type: "function_call", + ...(itemId ? { id: itemId } : {}), + call_id: callId, + name: toolName, + arguments: + typeof block.arguments === "string" + ? block.arguments + : JSON.stringify(block.arguments ?? {}), }); } + + pushAssistantText(); + continue; } + + const text = contentToText(content); + if (!text) { + continue; + } + items.push({ + type: "message", + role: "assistant", + content: text, + ...(assistantPhase ? 
{ phase: assistantPhase } : {}), + }); continue; } - if (m.role === "toolResult") { - const tr = m as unknown as { - toolCallId?: string; - toolUseId?: string; - content: unknown; - isError: boolean; - }; - const callId = toNonEmptyString(tr.toolCallId) ?? toNonEmptyString(tr.toolUseId); - if (!callId) { - continue; - } - const outputText = contentToText(tr.content); - items.push({ - type: "function_call_output", - call_id: callId, - output: outputText, - }); + if (m.role !== "toolResult") { continue; } + + const toolCallId = toNonEmptyString(m.toolCallId) ?? toNonEmptyString(m.toolUseId); + if (!toolCallId) { + continue; + } + const [callId] = toolCallId.split("|", 2); + const parts = Array.isArray(m.content) ? contentToOpenAIParts(m.content, modelOverride) : []; + const textOutput = contentToText(m.content); + const imageParts = parts.filter((part) => part.type === "input_image"); + items.push({ + type: "function_call_output", + call_id: callId, + output: textOutput || (imageParts.length > 0 ? "(see attached image)" : ""), + }); + if (imageParts.length > 0) { + items.push({ + type: "message", + role: "user", + content: [ + { type: "input_text", text: "Attached image(s) from tool result:" }, + ...imageParts, + ], + }); + } } return items; @@ -289,12 +442,24 @@ export function buildAssistantMessageFromResponse( modelInfo: { api: string; provider: string; id: string }, ): AssistantMessage { const content: (TextContent | ToolCall)[] = []; + let assistantPhase: OpenAIResponsesAssistantPhase | undefined; for (const item of response.output ?? []) { if (item.type === "message") { + const itemPhase = normalizeAssistantPhase(item.phase); + if (itemPhase) { + assistantPhase = itemPhase; + } for (const part of item.content ?? []) { if (part.type === "output_text" && part.text) { - content.push({ type: "text", text: part.text }); + content.push({ + type: "text", + text: part.text, + textSignature: encodeAssistantTextSignature({ + id: item.id, + ...(itemPhase ? 
{ phase: itemPhase } : {}), + }), + }); } } } else if (item.type === "function_call") { @@ -321,7 +486,7 @@ export function buildAssistantMessageFromResponse( const hasToolCalls = content.some((c) => c.type === "toolCall"); const stopReason: StopReason = hasToolCalls ? "toolUse" : "stop"; - return buildAssistantMessage({ + const message = buildAssistantMessage({ model: modelInfo, content, stopReason, @@ -331,6 +496,10 @@ export function buildAssistantMessageFromResponse( totalTokens: response.usage?.total_tokens ?? 0, }), }); + + return assistantPhase + ? ({ ...message, phase: assistantPhase } as AssistantMessageWithPhase) + : message; } // ───────────────────────────────────────────────────────────────────────────── @@ -504,6 +673,7 @@ export function createOpenAIWebSocketStreamFn( if (resolveWsWarmup(options) && !session.warmUpAttempted) { session.warmUpAttempted = true; + let warmupFailed = false; try { await runWarmUp({ manager: session.manager, @@ -517,10 +687,33 @@ export function createOpenAIWebSocketStreamFn( if (signal?.aborted) { throw warmErr instanceof Error ? warmErr : new Error(String(warmErr)); } + warmupFailed = true; log.warn( `[ws-stream] warm-up failed for session=${sessionId}; continuing without warm-up. error=${String(warmErr)}`, ); } + if (warmupFailed && !session.manager.isConnected()) { + try { + session.manager.close(); + } catch { + /* ignore */ + } + try { + await session.manager.connect(apiKey); + session.everConnected = true; + log.debug(`[ws-stream] reconnected after warm-up failure for session=${sessionId}`); + } catch (reconnectErr) { + session.broken = true; + wsRegistry.delete(sessionId); + if (transport === "websocket") { + throw reconnectErr instanceof Error ? reconnectErr : new Error(String(reconnectErr)); + } + log.warn( + `[ws-stream] reconnect after warm-up failed for session=${sessionId}; falling back to HTTP. 
error=${String(reconnectErr)}`, + ); + return fallbackToHttp(model, context, options, eventStream, opts.signal); + } + } } // ── 3. Compute incremental vs full input ───────────────────────────── @@ -537,16 +730,16 @@ export function createOpenAIWebSocketStreamFn( log.debug( `[ws-stream] session=${sessionId}: no new tool results found; sending full context`, ); - inputItems = buildFullInput(context); + inputItems = buildFullInput(context, model); } else { - inputItems = convertMessagesToInputItems(toolResults); + inputItems = convertMessagesToInputItems(toolResults, model); } log.debug( `[ws-stream] session=${sessionId}: incremental send (${inputItems.length} tool results) previous_response_id=${prevResponseId}`, ); } else { // First turn: send full context - inputItems = buildFullInput(context); + inputItems = buildFullInput(context, model); log.debug( `[ws-stream] session=${sessionId}: full context send (${inputItems.length} items)`, ); @@ -605,10 +798,9 @@ export function createOpenAIWebSocketStreamFn( ...extraParams, }; const nextPayload = await options?.onPayload?.(payload, model); - const requestPayload = - nextPayload && typeof nextPayload === "object" - ? (nextPayload as Parameters[0]) - : (payload as Parameters[0]); + const requestPayload = (nextPayload ?? payload) as Parameters< + OpenAIWebSocketManager["send"] + >[0]; try { session.manager.send(requestPayload); @@ -734,8 +926,8 @@ export function createOpenAIWebSocketStreamFn( // ───────────────────────────────────────────────────────────────────────────── /** Build full input items from context (system prompt is passed via `instructions` field). 
*/ -function buildFullInput(context: Context): InputItem[] { - return convertMessagesToInputItems(context.messages); +function buildFullInput(context: Context, model: ReplayModelInfo): InputItem[] { + return convertMessagesToInputItems(context.messages, model); } /** diff --git a/src/agents/openclaw-tools.session-status.test.ts b/src/agents/openclaw-tools.session-status.test.ts index db45e8d48b8..193deb6304f 100644 --- a/src/agents/openclaw-tools.session-status.test.ts +++ b/src/agents/openclaw-tools.session-status.test.ts @@ -2,6 +2,22 @@ import { describe, expect, it, vi } from "vitest"; const loadSessionStoreMock = vi.fn(); const updateSessionStoreMock = vi.fn(); +const callGatewayMock = vi.fn(); + +const createMockConfig = () => ({ + session: { mainKey: "main", scope: "per-sender" }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + }, + }, + tools: { + agentToAgent: { enabled: false }, + }, +}); + +let mockConfig: Record = createMockConfig(); vi.mock("../config/sessions.js", async (importOriginal) => { const actual = await importOriginal(); @@ -22,19 +38,15 @@ vi.mock("../config/sessions.js", async (importOriginal) => { }; }); +vi.mock("../gateway/call.js", () => ({ + callGateway: (opts: unknown) => callGatewayMock(opts), +})); + vi.mock("../config/config.js", async (importOriginal) => { const actual = await importOriginal(); return { ...actual, - loadConfig: () => ({ - session: { mainKey: "main", scope: "per-sender" }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - }, - }, - }), + loadConfig: () => mockConfig, }; }); @@ -82,13 +94,17 @@ import { createOpenClawTools } from "./openclaw-tools.js"; function resetSessionStore(store: Record) { loadSessionStoreMock.mockClear(); updateSessionStoreMock.mockClear(); + callGatewayMock.mockClear(); loadSessionStoreMock.mockReturnValue(store); + callGatewayMock.mockResolvedValue({}); + mockConfig = createMockConfig(); } -function 
getSessionStatusTool(agentSessionKey = "main") { - const tool = createOpenClawTools({ agentSessionKey }).find( - (candidate) => candidate.name === "session_status", - ); +function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) { + const tool = createOpenClawTools({ + agentSessionKey, + sandboxed: options?.sandboxed, + }).find((candidate) => candidate.name === "session_status"); expect(tool).toBeDefined(); if (!tool) { throw new Error("missing session_status tool"); @@ -176,6 +192,153 @@ describe("session_status tool", () => { ); }); + it("blocks sandboxed child session_status access outside its tree before store lookup", async () => { + resetSessionStore({ + "agent:main:subagent:child": { + sessionId: "s-child", + updatedAt: 20, + }, + "agent:main:main": { + sessionId: "s-parent", + updatedAt: 10, + }, + }); + mockConfig = { + session: { mainKey: "main", scope: "per-sender" }, + tools: { + sessions: { visibility: "all" }, + agentToAgent: { enabled: true, allow: ["*"] }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + sandbox: { sessionToolsVisibility: "spawned" }, + }, + }, + }; + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.list") { + return { sessions: [] }; + } + return {}; + }); + + const tool = getSessionStatusTool("agent:main:subagent:child", { + sandboxed: true, + }); + const expectedError = "Session status visibility is restricted to the current session tree"; + + await expect( + tool.execute("call6", { + sessionKey: "agent:main:main", + model: "anthropic/claude-sonnet-4-5", + }), + ).rejects.toThrow(expectedError); + + await expect( + tool.execute("call7", { + sessionKey: "agent:main:subagent:missing", + }), + ).rejects.toThrow(expectedError); + + expect(loadSessionStoreMock).not.toHaveBeenCalled(); + expect(updateSessionStoreMock).not.toHaveBeenCalled(); + 
expect(callGatewayMock).toHaveBeenCalledTimes(2); + expect(callGatewayMock).toHaveBeenNthCalledWith(1, { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy: "agent:main:subagent:child", + }, + }); + expect(callGatewayMock).toHaveBeenNthCalledWith(2, { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy: "agent:main:subagent:child", + }, + }); + }); + + it("keeps legacy main requester keys for sandboxed session tree checks", async () => { + resetSessionStore({ + "agent:main:main": { + sessionId: "s-main", + updatedAt: 10, + }, + "agent:main:subagent:child": { + sessionId: "s-child", + updatedAt: 20, + }, + }); + mockConfig = { + session: { mainKey: "main", scope: "per-sender" }, + tools: { + sessions: { visibility: "all" }, + agentToAgent: { enabled: true, allow: ["*"] }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + sandbox: { sessionToolsVisibility: "spawned" }, + }, + }, + }; + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.list") { + return { + sessions: + request.params?.spawnedBy === "main" ? 
[{ key: "agent:main:subagent:child" }] : [], + }; + } + return {}; + }); + + const tool = getSessionStatusTool("main", { + sandboxed: true, + }); + + const mainResult = await tool.execute("call8", {}); + const mainDetails = mainResult.details as { ok?: boolean; sessionKey?: string }; + expect(mainDetails.ok).toBe(true); + expect(mainDetails.sessionKey).toBe("agent:main:main"); + + const childResult = await tool.execute("call9", { + sessionKey: "agent:main:subagent:child", + }); + const childDetails = childResult.details as { ok?: boolean; sessionKey?: string }; + expect(childDetails.ok).toBe(true); + expect(childDetails.sessionKey).toBe("agent:main:subagent:child"); + + expect(callGatewayMock).toHaveBeenCalledTimes(2); + expect(callGatewayMock).toHaveBeenNthCalledWith(1, { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy: "main", + }, + }); + expect(callGatewayMock).toHaveBeenNthCalledWith(2, { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy: "main", + }, + }); + }); + it("scopes bare session keys to the requester agent", async () => { loadSessionStoreMock.mockClear(); updateSessionStoreMock.mockClear(); diff --git a/src/agents/openclaw-tools.subagents.sessions-spawn-depth-limits.test.ts b/src/agents/openclaw-tools.subagents.sessions-spawn-depth-limits.test.ts index b9c86bf7472..34fcbfbafd4 100644 --- a/src/agents/openclaw-tools.subagents.sessions-spawn-depth-limits.test.ts +++ b/src/agents/openclaw-tools.subagents.sessions-spawn-depth-limits.test.ts @@ -85,7 +85,10 @@ describe("sessions_spawn depth + child limits", () => { }); it("rejects spawning when caller depth reaches maxSpawnDepth", async () => { - const tool = createSessionsSpawnTool({ agentSessionKey: "agent:main:subagent:parent" }); + const tool = createSessionsSpawnTool({ + agentSessionKey: "agent:main:subagent:parent", + workspaceDir: "/parent/workspace", + }); const result 
= await tool.execute("call-depth-reject", { task: "hello" }); expect(result.details).toMatchObject({ @@ -109,8 +112,13 @@ describe("sessions_spawn depth + child limits", () => { const calls = callGatewayMock.mock.calls.map( (call) => call[0] as { method?: string; params?: Record }, ); - const agentCall = calls.find((entry) => entry.method === "agent"); - expect(agentCall?.params?.spawnedBy).toBe("agent:main:subagent:parent"); + const spawnedByPatch = calls.find( + (entry) => + entry.method === "sessions.patch" && + entry.params?.spawnedBy === "agent:main:subagent:parent", + ); + expect(spawnedByPatch?.params?.key).toMatch(/^agent:main:subagent:/); + expect(typeof spawnedByPatch?.params?.spawnedWorkspaceDir).toBe("string"); const spawnDepthPatch = calls.find( (entry) => entry.method === "sessions.patch" && entry.params?.spawnDepth === 2, diff --git a/src/agents/openclaw-tools.ts b/src/agents/openclaw-tools.ts index 8473e4a06e8..a400ac133cd 100644 --- a/src/agents/openclaw-tools.ts +++ b/src/agents/openclaw-tools.ts @@ -200,6 +200,7 @@ export function createOpenClawTools( createSessionStatusTool({ agentSessionKey: options?.agentSessionKey, config: options?.config, + sandboxed: options?.sandboxed, }), ...(webSearchTool ? [webSearchTool] : []), ...(webFetchTool ? [webFetchTool] : []), diff --git a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts index 608483b99bf..9ed183a6910 100644 --- a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts +++ b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts @@ -106,6 +106,9 @@ describe("isBillingErrorMessage", () => { "Payment Required", "HTTP 402 Payment Required", "plans & billing", + // Venice returns "Insufficient USD or Diem balance" which has extra words + // between "insufficient" and "balance" + "Insufficient USD or Diem balance to complete request. 
Visit https://venice.ai/settings/api to add credits.", ]; for (const sample of samples) { expect(isBillingErrorMessage(sample)).toBe(true); @@ -149,6 +152,11 @@ describe("isBillingErrorMessage", () => { expect(longResponse.length).toBeGreaterThan(512); expect(isBillingErrorMessage(longResponse)).toBe(false); }); + it("does not false-positive on short non-billing text that mentions insufficient and balance", () => { + const sample = "The evidence is insufficient to reconcile the final balance after compaction."; + expect(isBillingErrorMessage(sample)).toBe(false); + expect(classifyFailoverReason(sample)).toBeNull(); + }); it("still matches explicit 402 markers in long payloads", () => { const longStructuredError = '{"error":{"code":402,"message":"payment required","details":"' + "x".repeat(700) + '"}}'; @@ -439,6 +447,18 @@ describe("isLikelyContextOverflowError", () => { expect(isLikelyContextOverflowError(sample)).toBe(false); } }); + + it("excludes billing errors even when text matches context overflow patterns", () => { + const samples = [ + "402 Payment Required: request token limit exceeded for this billing plan", + "insufficient credits: request size exceeds your current plan limits", + "Your credit balance is too low. 
Maximum request token limit exceeded.", + ]; + for (const sample of samples) { + expect(isBillingErrorMessage(sample)).toBe(true); + expect(isLikelyContextOverflowError(sample)).toBe(false); + } + }); }); describe("isTransientHttpError", () => { @@ -515,6 +535,23 @@ describe("isFailoverErrorMessage", () => { } }); + it("matches network errno codes in serialized error messages", () => { + const samples = [ + "Error: connect ETIMEDOUT 10.0.0.1:443", + "Error: connect ESOCKETTIMEDOUT 10.0.0.1:443", + "Error: connect EHOSTUNREACH 10.0.0.1:443", + "Error: connect ENETUNREACH 10.0.0.1:443", + "Error: write EPIPE", + "Error: read ENETRESET", + "Error: connect EHOSTDOWN 192.168.1.1:443", + ]; + for (const sample of samples) { + expect(isTimeoutErrorMessage(sample)).toBe(true); + expect(classifyFailoverReason(sample)).toBe("timeout"); + expect(isFailoverErrorMessage(sample)).toBe(true); + } + }); + it("does not classify MALFORMED_FUNCTION_CALL as timeout", () => { const sample = "Unhandled stop reason: MALFORMED_FUNCTION_CALL"; expect(isTimeoutErrorMessage(sample)).toBe(false); @@ -638,6 +675,12 @@ describe("classifyFailoverReason", () => { expect(classifyFailoverReason(TOGETHER_ENGINE_OVERLOADED_MESSAGE)).toBe("overloaded"); expect(classifyFailoverReason(GROQ_TOO_MANY_REQUESTS_MESSAGE)).toBe("rate_limit"); expect(classifyFailoverReason(GROQ_SERVICE_UNAVAILABLE_MESSAGE)).toBe("overloaded"); + // Venice 402 billing error with extra words between "insufficient" and "balance" + expect( + classifyFailoverReason( + "Insufficient USD or Diem balance to complete request. 
Visit https://venice.ai/settings/api to add credits.", + ), + ).toBe("billing"); }); it("classifies internal and compatibility error messages", () => { diff --git a/src/agents/pi-embedded-helpers/errors.ts b/src/agents/pi-embedded-helpers/errors.ts index 181ba89d8ce..e9bfd92951e 100644 --- a/src/agents/pi-embedded-helpers/errors.ts +++ b/src/agents/pi-embedded-helpers/errors.ts @@ -138,6 +138,13 @@ export function isLikelyContextOverflowError(errorMessage?: string): boolean { return false; } + // Billing/quota errors can contain patterns like "request size exceeds" or + // "maximum token limit exceeded" that match the context overflow heuristic. + // Billing is a more specific error class — exclude it early. + if (isBillingErrorMessage(errorMessage)) { + return false; + } + if (CONTEXT_WINDOW_TOO_SMALL_RE.test(errorMessage)) { return false; } diff --git a/src/agents/pi-embedded-helpers/failover-matches.ts b/src/agents/pi-embedded-helpers/failover-matches.ts index a7948703f39..ffe0c428f55 100644 --- a/src/agents/pi-embedded-helpers/failover-matches.ts +++ b/src/agents/pi-embedded-helpers/failover-matches.ts @@ -37,6 +37,13 @@ const ERROR_PATTERNS = { "fetch failed", "socket hang up", /\beconn(?:refused|reset|aborted)\b/i, + /\benetunreach\b/i, + /\behostunreach\b/i, + /\behostdown\b/i, + /\benetreset\b/i, + /\betimedout\b/i, + /\besockettimedout\b/i, + /\bepipe\b/i, /\benotfound\b/i, /\beai_again\b/i, /without sending (?:any )?chunks?/i, @@ -52,6 +59,7 @@ const ERROR_PATTERNS = { "credit balance", "plans & billing", "insufficient balance", + "insufficient usd or diem balance", ], authPermanent: [ /api[_ ]?key[_ ]?(?:revoked|invalid|deactivated|deleted)/i, diff --git a/src/agents/pi-embedded-runner-extraparams.test.ts b/src/agents/pi-embedded-runner-extraparams.test.ts index 500df72cced..3f6fb7a2f5a 100644 --- a/src/agents/pi-embedded-runner-extraparams.test.ts +++ b/src/agents/pi-embedded-runner-extraparams.test.ts @@ -276,7 +276,7 @@ 
describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { model: "deepseek/deepseek-r1" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -308,7 +308,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -332,7 +332,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning_effort: "high" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -357,7 +357,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning: { max_tokens: 256 } }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -381,7 +381,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning_effort: "medium" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -588,7 +588,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { thinking: "off" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); 
payloads.push(payload); return {} as ReturnType; }; @@ -619,7 +619,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { thinking: "off" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -650,7 +650,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -674,7 +674,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { tool_choice: "required" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -699,7 +699,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -749,7 +749,7 @@ describe("applyExtraParamsToAgent", () => { ], tool_choice: { type: "tool", name: "read" }, }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -793,7 +793,7 @@ describe("applyExtraParamsToAgent", () => { }, ], }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -832,7 +832,7 @@ describe("applyExtraParamsToAgent", () => { }, ], }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); 
payloads.push(payload); return {} as ReturnType; }; @@ -896,7 +896,7 @@ describe("applyExtraParamsToAgent", () => { }, }, }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -943,7 +943,7 @@ describe("applyExtraParamsToAgent", () => { }, }, }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -1081,7 +1081,7 @@ describe("applyExtraParamsToAgent", () => { expect(calls).toHaveLength(1); expect(calls[0]?.transport).toBe("auto"); - expect(calls[0]?.openaiWsWarmup).toBe(true); + expect(calls[0]?.openaiWsWarmup).toBe(false); }); it("lets runtime options override OpenAI default transport", () => { diff --git a/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts b/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts index 2d658aada32..0aa665e0635 100644 --- a/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts +++ b/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts @@ -981,7 +981,7 @@ describe("runEmbeddedPiAgent auth profile rotation", () => { }), ).rejects.toMatchObject({ name: "FailoverError", - reason: "rate_limit", + reason: "unknown", provider: "openai", model: "mock-1", }); @@ -1153,7 +1153,7 @@ describe("runEmbeddedPiAgent auth profile rotation", () => { }), ).rejects.toMatchObject({ name: "FailoverError", - reason: "rate_limit", + reason: "unknown", provider: "openai", model: "mock-1", }); diff --git a/src/agents/pi-embedded-runner/compact.hooks.test.ts b/src/agents/pi-embedded-runner/compact.hooks.test.ts index 9ef2a3efe76..dc1511a5e05 100644 --- a/src/agents/pi-embedded-runner/compact.hooks.test.ts +++ b/src/agents/pi-embedded-runner/compact.hooks.test.ts @@ -7,6 +7,7 @@ const { sessionCompactImpl, triggerInternalHook, sanitizeSessionHistoryMock, + 
contextEngineCompactMock, } = vi.hoisted(() => ({ hookRunner: { hasHooks: vi.fn(), @@ -28,6 +29,14 @@ const { })), triggerInternalHook: vi.fn(), sanitizeSessionHistoryMock: vi.fn(async (params: { messages: unknown[] }) => params.messages), + contextEngineCompactMock: vi.fn(async () => ({ + ok: true as boolean, + compacted: true as boolean, + reason: undefined as string | undefined, + result: { summary: "engine-summary", tokensAfter: 50 } as + | { summary: string; tokensAfter: number } + | undefined, + })), })); vi.mock("../../plugins/hook-runner-global.js", () => ({ @@ -123,6 +132,27 @@ vi.mock("../session-write-lock.js", () => ({ resolveSessionLockMaxHoldFromTimeout: vi.fn(() => 0), })); +vi.mock("../../context-engine/index.js", () => ({ + ensureContextEnginesInitialized: vi.fn(), + resolveContextEngine: vi.fn(async () => ({ + info: { ownsCompaction: true }, + compact: contextEngineCompactMock, + })), +})); + +vi.mock("../../process/command-queue.js", () => ({ + enqueueCommandInLane: vi.fn((_lane: unknown, task: () => unknown) => task()), +})); + +vi.mock("./lanes.js", () => ({ + resolveSessionLane: vi.fn(() => "test-session-lane"), + resolveGlobalLane: vi.fn(() => "test-global-lane"), +})); + +vi.mock("../context-window-guard.js", () => ({ + resolveContextWindowInfo: vi.fn(() => ({ tokens: 128_000 })), +})); + vi.mock("../bootstrap-files.js", () => ({ makeBootstrapWarn: vi.fn(() => () => {}), resolveBootstrapContextForRun: vi.fn(async () => ({ contextFiles: [] })), @@ -160,7 +190,7 @@ vi.mock("../transcript-policy.js", () => ({ })); vi.mock("./extensions.js", () => ({ - buildEmbeddedExtensionFactories: vi.fn(() => []), + buildEmbeddedExtensionFactories: vi.fn(() => ({ factories: [] })), })); vi.mock("./history.js", () => ({ @@ -251,7 +281,7 @@ vi.mock("./utils.js", () => ({ import { getApiProvider, unregisterApiProviders } from "@mariozechner/pi-ai"; import { getCustomApiRegistrySourceId } from "../custom-api-registry.js"; -import { compactEmbeddedPiSessionDirect 
} from "./compact.js"; +import { compactEmbeddedPiSessionDirect, compactEmbeddedPiSession } from "./compact.js"; const sessionHook = (action: string) => triggerInternalHook.mock.calls.find( @@ -436,3 +466,103 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { expect(result.ok).toBe(true); }); }); + +describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { + beforeEach(() => { + hookRunner.hasHooks.mockReset(); + hookRunner.runBeforeCompaction.mockReset(); + hookRunner.runAfterCompaction.mockReset(); + contextEngineCompactMock.mockReset(); + contextEngineCompactMock.mockResolvedValue({ + ok: true, + compacted: true, + reason: undefined, + result: { summary: "engine-summary", tokensAfter: 50 }, + }); + resolveModelMock.mockReset(); + resolveModelMock.mockReturnValue({ + model: { provider: "openai", api: "responses", id: "fake", input: [] }, + error: null, + authStorage: { setRuntimeApiKey: vi.fn() }, + modelRegistry: {}, + }); + }); + + it("fires before_compaction with sentinel -1 and after_compaction on success", async () => { + hookRunner.hasHooks.mockReturnValue(true); + + const result = await compactEmbeddedPiSession({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + messageChannel: "telegram", + customInstructions: "focus on decisions", + enqueue: (task) => task(), + }); + + expect(result.ok).toBe(true); + expect(result.compacted).toBe(true); + + expect(hookRunner.runBeforeCompaction).toHaveBeenCalledWith( + { messageCount: -1, sessionFile: "/tmp/session.jsonl" }, + expect.objectContaining({ + sessionKey: "agent:main:session-1", + messageProvider: "telegram", + }), + ); + expect(hookRunner.runAfterCompaction).toHaveBeenCalledWith( + { + messageCount: -1, + compactedCount: -1, + tokenCount: 50, + sessionFile: "/tmp/session.jsonl", + }, + expect.objectContaining({ + sessionKey: "agent:main:session-1", + messageProvider: "telegram", + }), + ); + }); + + it("does 
not fire after_compaction when compaction fails", async () => { + hookRunner.hasHooks.mockReturnValue(true); + contextEngineCompactMock.mockResolvedValue({ + ok: false, + compacted: false, + reason: "nothing to compact", + result: undefined, + }); + + const result = await compactEmbeddedPiSession({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + customInstructions: "focus on decisions", + enqueue: (task) => task(), + }); + + expect(result.ok).toBe(false); + expect(hookRunner.runBeforeCompaction).toHaveBeenCalled(); + expect(hookRunner.runAfterCompaction).not.toHaveBeenCalled(); + }); + + it("catches and logs hook exceptions without aborting compaction", async () => { + hookRunner.hasHooks.mockReturnValue(true); + hookRunner.runBeforeCompaction.mockRejectedValue(new Error("hook boom")); + + const result = await compactEmbeddedPiSession({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + customInstructions: "focus on decisions", + enqueue: (task) => task(), + }); + + expect(result.ok).toBe(true); + expect(result.compacted).toBe(true); + expect(contextEngineCompactMock).toHaveBeenCalled(); + }); +}); diff --git a/src/agents/pi-embedded-runner/compact.ts b/src/agents/pi-embedded-runner/compact.ts index 91f99571db4..feba0f81493 100644 --- a/src/agents/pi-embedded-runner/compact.ts +++ b/src/agents/pi-embedded-runner/compact.ts @@ -936,6 +936,43 @@ export async function compactEmbeddedPiSession( modelContextWindow: ceModel?.contextWindow, defaultTokens: DEFAULT_CONTEXT_TOKENS, }); + // When the context engine owns compaction, its compact() implementation + // bypasses compactEmbeddedPiSessionDirect (which fires the hooks internally). + // Fire before_compaction / after_compaction hooks here so plugin subscribers + // are notified regardless of which engine is active. 
+ const engineOwnsCompaction = contextEngine.info.ownsCompaction === true; + const hookRunner = engineOwnsCompaction ? getGlobalHookRunner() : null; + const hookSessionKey = params.sessionKey?.trim() || params.sessionId; + const { sessionAgentId } = resolveSessionAgentIds({ + sessionKey: params.sessionKey, + config: params.config, + }); + const resolvedMessageProvider = params.messageChannel ?? params.messageProvider; + const hookCtx = { + sessionId: params.sessionId, + agentId: sessionAgentId, + sessionKey: hookSessionKey, + workspaceDir: resolveUserPath(params.workspaceDir), + messageProvider: resolvedMessageProvider, + }; + // Engine-owned compaction doesn't load the transcript at this level, so + // message counts are unavailable. We pass sessionFile so hook subscribers + // can read the transcript themselves if they need exact counts. + if (hookRunner?.hasHooks("before_compaction")) { + try { + await hookRunner.runBeforeCompaction( + { + messageCount: -1, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (err) { + log.warn("before_compaction hook failed", { + errorMessage: err instanceof Error ? err.message : String(err), + }); + } + } const result = await contextEngine.compact({ sessionId: params.sessionId, sessionFile: params.sessionFile, @@ -944,6 +981,23 @@ export async function compactEmbeddedPiSession( force: params.trigger === "manual", runtimeContext: params as Record, }); + if (result.ok && result.compacted && hookRunner?.hasHooks("after_compaction")) { + try { + await hookRunner.runAfterCompaction( + { + messageCount: -1, + compactedCount: -1, + tokenCount: result.result?.tokensAfter, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (err) { + log.warn("after_compaction hook failed", { + errorMessage: err instanceof Error ? 
err.message : String(err), + }); + } + } return { ok: result.ok, compacted: result.compacted, diff --git a/src/agents/pi-embedded-runner/model.test.ts b/src/agents/pi-embedded-runner/model.test.ts index 5789dfaad75..062369d9a96 100644 --- a/src/agents/pi-embedded-runner/model.test.ts +++ b/src/agents/pi-embedded-runner/model.test.ts @@ -382,6 +382,40 @@ describe("resolveModel", () => { expect(result.model?.reasoning).toBe(true); }); + it("matches prefixed OpenRouter native ids in configured fallback models", () => { + const cfg = { + models: { + providers: { + openrouter: { + baseUrl: "https://openrouter.ai/api/v1", + api: "openai-completions", + models: [ + { + ...makeModel("openrouter/healer-alpha"), + reasoning: true, + input: ["text", "image"], + contextWindow: 262144, + maxTokens: 65536, + }, + ], + }, + }, + }, + } as OpenClawConfig; + + const result = resolveModel("openrouter", "openrouter/healer-alpha", "/tmp/agent", cfg); + + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + provider: "openrouter", + id: "openrouter/healer-alpha", + reasoning: true, + input: ["text", "image"], + contextWindow: 262144, + maxTokens: 65536, + }); + }); + it("prefers configured provider api metadata over discovered registry model", () => { mockDiscoveredModel({ provider: "onehub", diff --git a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts index dfe42ff1835..c9bc2304f97 100644 --- a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts @@ -250,7 +250,7 @@ export function createOpenAIDefaultTransportWrapper(baseStreamFn: StreamFn | und const mergedOptions = { ...options, transport: options?.transport ?? "auto", - openaiWsWarmup: typedOptions?.openaiWsWarmup ?? true, + openaiWsWarmup: typedOptions?.openaiWsWarmup ?? 
false, } as SimpleStreamOptions; return underlying(model, context, mergedOptions); }; diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts index 8c7afc834d2..8c320f765be 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts @@ -9,16 +9,18 @@ export function makeOverflowError(message: string = DEFAULT_OVERFLOW_ERROR_MESSA export function makeCompactionSuccess(params: { summary: string; - firstKeptEntryId: string; - tokensBefore: number; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; }) { return { ok: true as const, compacted: true as const, result: { summary: params.summary, - firstKeptEntryId: params.firstKeptEntryId, - tokensBefore: params.tokensBefore, + ...(params.firstKeptEntryId ? { firstKeptEntryId: params.firstKeptEntryId } : {}), + ...(params.tokensBefore !== undefined ? { tokensBefore: params.tokensBefore } : {}), + ...(params.tokensAfter !== undefined ? 
{ tokensAfter: params.tokensAfter } : {}), }, }; } @@ -55,8 +57,9 @@ type MockCompactDirect = { compacted: true; result: { summary: string; - firstKeptEntryId: string; - tokensBefore: number; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; }; }) => unknown; }; diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts index 5980170be62..7a2550ba1e9 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts @@ -2,9 +2,13 @@ import "./run.overflow-compaction.mocks.shared.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { isCompactionFailureError, isLikelyContextOverflowError } from "../pi-embedded-helpers.js"; -vi.mock("../../utils.js", () => ({ - resolveUserPath: vi.fn((p: string) => p), -})); +vi.mock(import("../../utils.js"), async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveUserPath: vi.fn((p: string) => p), + }; +}); import { log } from "./logger.js"; import { runEmbeddedPiAgent } from "./run.js"; @@ -16,6 +20,7 @@ import { queueOverflowAttemptWithOversizedToolOutput, } from "./run.overflow-compaction.fixture.js"; import { + mockedContextEngine, mockedCompactDirect, mockedRunEmbeddedAttempt, mockedSessionLikelyHasOversizedToolResults, @@ -30,6 +35,11 @@ const mockedIsLikelyContextOverflowError = vi.mocked(isLikelyContextOverflowErro describe("overflow compaction in run loop", () => { beforeEach(() => { vi.clearAllMocks(); + mockedRunEmbeddedAttempt.mockReset(); + mockedCompactDirect.mockReset(); + mockedSessionLikelyHasOversizedToolResults.mockReset(); + mockedTruncateOversizedToolResultsInSession.mockReset(); + mockedContextEngine.info.ownsCompaction = false; mockedIsCompactionFailureError.mockImplementation((msg?: string) => { if (!msg) { return false; @@ -72,7 +82,9 @@ 
describe("overflow compaction in run loop", () => { expect(mockedCompactDirect).toHaveBeenCalledTimes(1); expect(mockedCompactDirect).toHaveBeenCalledWith( - expect.objectContaining({ authProfileId: "test-profile" }), + expect.objectContaining({ + runtimeContext: expect.objectContaining({ authProfileId: "test-profile" }), + }), ); expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(2); expect(log.warn).toHaveBeenCalledWith( diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts index 22dee7b49cd..51f711508b1 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts @@ -6,6 +6,25 @@ import type { PluginHookBeforePromptBuildResult, } from "../../plugins/types.js"; +type MockCompactionResult = + | { + ok: true; + compacted: true; + result: { + summary: string; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; + }; + reason?: string; + } + | { + ok: false; + compacted: false; + reason: string; + result?: undefined; + }; + export const mockedGlobalHookRunner = { hasHooks: vi.fn((_hookName: string) => false), runBeforeAgentStart: vi.fn( @@ -26,12 +45,35 @@ export const mockedGlobalHookRunner = { _ctx: PluginHookAgentContext, ): Promise => undefined, ), + runBeforeCompaction: vi.fn(async () => undefined), + runAfterCompaction: vi.fn(async () => undefined), }; +export const mockedContextEngine = { + info: { ownsCompaction: false as boolean }, + compact: vi.fn<(params: unknown) => Promise>(async () => ({ + ok: false as const, + compacted: false as const, + reason: "nothing to compact", + })), +}; + +export const mockedContextEngineCompact = vi.mocked(mockedContextEngine.compact); +export const mockedEnsureRuntimePluginsLoaded: (...args: unknown[]) => void = vi.fn(); + vi.mock("../../plugins/hook-runner-global.js", () => ({ getGlobalHookRunner: 
vi.fn(() => mockedGlobalHookRunner), })); +vi.mock("../../context-engine/index.js", () => ({ + ensureContextEnginesInitialized: vi.fn(), + resolveContextEngine: vi.fn(async () => mockedContextEngine), +})); + +vi.mock("../runtime-plugins.js", () => ({ + ensureRuntimePluginsLoaded: mockedEnsureRuntimePluginsLoaded, +})); + vi.mock("../auth-profiles.js", () => ({ isProfileInCooldown: vi.fn(() => false), markAuthProfileFailure: vi.fn(async () => {}), @@ -141,9 +183,13 @@ vi.mock("../../process/command-queue.js", () => ({ enqueueCommandInLane: vi.fn((_lane: string, task: () => unknown) => task()), })); -vi.mock("../../utils/message-channel.js", () => ({ - isMarkdownCapableMessageChannel: vi.fn(() => true), -})); +vi.mock(import("../../utils/message-channel.js"), async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isMarkdownCapableMessageChannel: vi.fn(() => true), + }; +}); vi.mock("../agent-paths.js", () => ({ resolveOpenClawAgentDir: vi.fn(() => "/tmp/agent-dir"), diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts index 45bab82e1b8..c697ac9526a 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts @@ -1,5 +1,8 @@ import { vi } from "vitest"; -import { compactEmbeddedPiSessionDirect } from "./compact.js"; +import { + mockedContextEngine, + mockedContextEngineCompact, +} from "./run.overflow-compaction.mocks.shared.js"; import { runEmbeddedAttempt } from "./run/attempt.js"; import { sessionLikelyHasOversizedToolResults, @@ -7,13 +10,14 @@ import { } from "./tool-result-truncation.js"; export const mockedRunEmbeddedAttempt = vi.mocked(runEmbeddedAttempt); -export const mockedCompactDirect = vi.mocked(compactEmbeddedPiSessionDirect); +export const mockedCompactDirect = mockedContextEngineCompact; export const 
mockedSessionLikelyHasOversizedToolResults = vi.mocked( sessionLikelyHasOversizedToolResults, ); export const mockedTruncateOversizedToolResultsInSession = vi.mocked( truncateOversizedToolResultsInSession, ); +export { mockedContextEngine }; export const overflowBaseRunParams = { sessionId: "test-session", diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts index 19b4a81d279..b29394eedfd 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts @@ -11,6 +11,7 @@ import { } from "./run.overflow-compaction.fixture.js"; import { mockedGlobalHookRunner } from "./run.overflow-compaction.mocks.shared.js"; import { + mockedContextEngine, mockedCompactDirect, mockedRunEmbeddedAttempt, mockedSessionLikelyHasOversizedToolResults, @@ -22,6 +23,25 @@ const mockedPickFallbackThinkingLevel = vi.mocked(pickFallbackThinkingLevel); describe("runEmbeddedPiAgent overflow compaction trigger routing", () => { beforeEach(() => { vi.clearAllMocks(); + mockedRunEmbeddedAttempt.mockReset(); + mockedCompactDirect.mockReset(); + mockedSessionLikelyHasOversizedToolResults.mockReset(); + mockedTruncateOversizedToolResultsInSession.mockReset(); + mockedGlobalHookRunner.runBeforeAgentStart.mockReset(); + mockedGlobalHookRunner.runBeforeCompaction.mockReset(); + mockedGlobalHookRunner.runAfterCompaction.mockReset(); + mockedContextEngine.info.ownsCompaction = false; + mockedCompactDirect.mockResolvedValue({ + ok: false, + compacted: false, + reason: "nothing to compact", + }); + mockedSessionLikelyHasOversizedToolResults.mockReturnValue(false); + mockedTruncateOversizedToolResultsInSession.mockResolvedValue({ + truncated: false, + truncatedCount: 0, + reason: "no oversized tool results", + }); mockedGlobalHookRunner.hasHooks.mockImplementation(() => false); }); @@ -81,8 +101,12 @@ describe("runEmbeddedPiAgent overflow 
compaction trigger routing", () => { expect(mockedCompactDirect).toHaveBeenCalledTimes(1); expect(mockedCompactDirect).toHaveBeenCalledWith( expect.objectContaining({ - trigger: "overflow", - authProfileId: "test-profile", + sessionId: "test-session", + sessionFile: "/tmp/session.json", + runtimeContext: expect.objectContaining({ + trigger: "overflow", + authProfileId: "test-profile", + }), }), ); }); @@ -132,6 +156,63 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => { expect(result.meta.error?.kind).toBe("context_overflow"); }); + it("fires compaction hooks during overflow recovery for ownsCompaction engines", async () => { + mockedContextEngine.info.ownsCompaction = true; + mockedGlobalHookRunner.hasHooks.mockImplementation( + (hookName) => hookName === "before_compaction" || hookName === "after_compaction", + ); + mockedRunEmbeddedAttempt + .mockResolvedValueOnce(makeAttemptResult({ promptError: makeOverflowError() })) + .mockResolvedValueOnce(makeAttemptResult({ promptError: null })); + mockedCompactDirect.mockResolvedValueOnce({ + ok: true, + compacted: true, + result: { + summary: "engine-owned compaction", + tokensAfter: 50, + }, + }); + + await runEmbeddedPiAgent(overflowBaseRunParams); + + expect(mockedGlobalHookRunner.runBeforeCompaction).toHaveBeenCalledWith( + { messageCount: -1, sessionFile: "/tmp/session.json" }, + expect.objectContaining({ + sessionKey: "test-key", + }), + ); + expect(mockedGlobalHookRunner.runAfterCompaction).toHaveBeenCalledWith( + { + messageCount: -1, + compactedCount: -1, + tokenCount: 50, + sessionFile: "/tmp/session.json", + }, + expect.objectContaining({ + sessionKey: "test-key", + }), + ); + }); + + it("guards thrown engine-owned overflow compaction attempts", async () => { + mockedContextEngine.info.ownsCompaction = true; + mockedGlobalHookRunner.hasHooks.mockImplementation( + (hookName) => hookName === "before_compaction" || hookName === "after_compaction", + ); + 
mockedRunEmbeddedAttempt.mockResolvedValueOnce( + makeAttemptResult({ promptError: makeOverflowError() }), + ); + mockedCompactDirect.mockRejectedValueOnce(new Error("engine boom")); + + const result = await runEmbeddedPiAgent(overflowBaseRunParams); + + expect(mockedCompactDirect).toHaveBeenCalledTimes(1); + expect(mockedGlobalHookRunner.runBeforeCompaction).toHaveBeenCalledTimes(1); + expect(mockedGlobalHookRunner.runAfterCompaction).not.toHaveBeenCalled(); + expect(result.meta.error?.kind).toBe("context_overflow"); + expect(result.payloads?.[0]?.isError).toBe(true); + }); + it("returns retry_limit when repeated retries never converge", async () => { mockedRunEmbeddedAttempt.mockClear(); mockedCompactDirect.mockClear(); diff --git a/src/agents/pi-embedded-runner/run.ts b/src/agents/pi-embedded-runner/run.ts index 7f5f4f525b7..09d5adda724 100644 --- a/src/agents/pi-embedded-runner/run.ts +++ b/src/agents/pi-embedded-runner/run.ts @@ -553,7 +553,7 @@ export async function runEmbeddedPiAgent( resolveProfilesUnavailableReason({ store: authStore, profileIds, - }) ?? "rate_limit" + }) ?? "unknown" ); } const classified = classifyFailoverReason(params.message); @@ -669,14 +669,15 @@ export async function runEmbeddedPiAgent( ? (resolveProfilesUnavailableReason({ store: authStore, profileIds: autoProfileCandidates, - }) ?? "rate_limit") + }) ?? 
"unknown") : null; const allowTransientCooldownProbe = params.allowTransientCooldownProbe === true && allAutoProfilesInCooldown && (unavailableReason === "rate_limit" || unavailableReason === "overloaded" || - unavailableReason === "billing"); + unavailableReason === "billing" || + unavailableReason === "unknown"); let didTransientCooldownProbe = false; while (profileIndex < profileCandidates.length) { @@ -1027,37 +1028,84 @@ export async function runEmbeddedPiAgent( log.warn( `context overflow detected (attempt ${overflowCompactionAttempts}/${MAX_OVERFLOW_COMPACTION_ATTEMPTS}); attempting auto-compaction for ${provider}/${modelId}`, ); - const compactResult = await contextEngine.compact({ - sessionId: params.sessionId, - sessionFile: params.sessionFile, - tokenBudget: ctxInfo.tokens, - force: true, - compactionTarget: "budget", - runtimeContext: { - sessionKey: params.sessionKey, - messageChannel: params.messageChannel, - messageProvider: params.messageProvider, - agentAccountId: params.agentAccountId, - authProfileId: lastProfileId, - workspaceDir: resolvedWorkspace, - agentDir, - config: params.config, - skillsSnapshot: params.skillsSnapshot, - senderIsOwner: params.senderIsOwner, - provider, - model: modelId, - runId: params.runId, - thinkLevel, - reasoningLevel: params.reasoningLevel, - bashElevated: params.bashElevated, - extraSystemPrompt: params.extraSystemPrompt, - ownerNumbers: params.ownerNumbers, - trigger: "overflow", - diagId: overflowDiagId, - attempt: overflowCompactionAttempts, - maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS, - }, - }); + let compactResult: Awaited>; + // When the engine owns compaction, hooks are not fired inside + // compactEmbeddedPiSessionDirect (which is bypassed). Fire them + // here so subscribers (memory extensions, usage trackers) are + // notified even on overflow-recovery compactions. 
+ const overflowEngineOwnsCompaction = contextEngine.info.ownsCompaction === true; + const overflowHookRunner = overflowEngineOwnsCompaction ? hookRunner : null; + if (overflowHookRunner?.hasHooks("before_compaction")) { + try { + await overflowHookRunner.runBeforeCompaction( + { messageCount: -1, sessionFile: params.sessionFile }, + hookCtx, + ); + } catch (hookErr) { + log.warn( + `before_compaction hook failed during overflow recovery: ${String(hookErr)}`, + ); + } + } + try { + compactResult = await contextEngine.compact({ + sessionId: params.sessionId, + sessionFile: params.sessionFile, + tokenBudget: ctxInfo.tokens, + force: true, + compactionTarget: "budget", + runtimeContext: { + sessionKey: params.sessionKey, + messageChannel: params.messageChannel, + messageProvider: params.messageProvider, + agentAccountId: params.agentAccountId, + authProfileId: lastProfileId, + workspaceDir: resolvedWorkspace, + agentDir, + config: params.config, + skillsSnapshot: params.skillsSnapshot, + senderIsOwner: params.senderIsOwner, + provider, + model: modelId, + runId: params.runId, + thinkLevel, + reasoningLevel: params.reasoningLevel, + bashElevated: params.bashElevated, + extraSystemPrompt: params.extraSystemPrompt, + ownerNumbers: params.ownerNumbers, + trigger: "overflow", + diagId: overflowDiagId, + attempt: overflowCompactionAttempts, + maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS, + }, + }); + } catch (compactErr) { + log.warn( + `contextEngine.compact() threw during overflow recovery for ${provider}/${modelId}: ${String(compactErr)}`, + ); + compactResult = { ok: false, compacted: false, reason: String(compactErr) }; + } + if ( + compactResult.ok && + compactResult.compacted && + overflowHookRunner?.hasHooks("after_compaction") + ) { + try { + await overflowHookRunner.runAfterCompaction( + { + messageCount: -1, + compactedCount: -1, + tokenCount: compactResult.result?.tokensAfter, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (hookErr) { + 
log.warn( + `after_compaction hook failed during overflow recovery: ${String(hookErr)}`, + ); + } + } if (compactResult.compacted) { autoCompactionCount += 1; log.info(`auto-compaction succeeded for ${provider}/${modelId}; retrying prompt`); diff --git a/src/agents/pi-embedded-runner/run/attempt.test.ts b/src/agents/pi-embedded-runner/run/attempt.test.ts index 9821adc0e0b..0203721224f 100644 --- a/src/agents/pi-embedded-runner/run/attempt.test.ts +++ b/src/agents/pi-embedded-runner/run/attempt.test.ts @@ -13,6 +13,7 @@ import { shouldInjectOllamaCompatNumCtx, decodeHtmlEntitiesInObject, wrapOllamaCompatNumCtx, + wrapStreamFnRepairMalformedToolCallArguments, wrapStreamFnTrimToolCallNames, } from "./attempt.js"; @@ -430,6 +431,182 @@ describe("wrapStreamFnTrimToolCallNames", () => { }); }); +describe("wrapStreamFnRepairMalformedToolCallArguments", () => { + function createFakeStream(params: { events: unknown[]; resultMessage: unknown }): { + result: () => Promise; + [Symbol.asyncIterator]: () => AsyncIterator; + } { + return { + async result() { + return params.resultMessage; + }, + [Symbol.asyncIterator]() { + return (async function* () { + for (const event of params.events) { + yield event; + } + })(); + }, + }; + } + + async function invokeWrappedStream(baseFn: (...args: never[]) => unknown) { + const wrappedFn = wrapStreamFnRepairMalformedToolCallArguments(baseFn as never); + return await wrappedFn({} as never, {} as never, {} as never); + } + + it("repairs anthropic-compatible tool arguments when trailing junk follows valid JSON", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const streamedToolCall = { type: "toolCall", name: "read", arguments: {} }; + const endMessageToolCall = { type: "toolCall", name: "read", arguments: {} }; + const finalToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const endMessage = { role: 
"assistant", content: [endMessageToolCall] }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp/report.txt"}', + partial: partialMessage, + }, + { + type: "toolcall_delta", + contentIndex: 0, + delta: "xx", + partial: partialMessage, + }, + { + type: "toolcall_end", + contentIndex: 0, + toolCall: streamedToolCall, + partial: partialMessage, + message: endMessage, + }, + ], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + const result = await stream.result(); + + expect(partialToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(streamedToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(endMessageToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(finalToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(result).toBe(finalMessage); + }); + + it("keeps incomplete partial JSON unchanged until a complete object exists", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp', + partial: partialMessage, + }, + ], + resultMessage: { role: "assistant", content: [partialToolCall] }, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + + expect(partialToolCall.arguments).toEqual({}); + }); + + it("does not repair tool arguments when trailing junk exceeds the Kimi-specific allowance", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const streamedToolCall = { type: "toolCall", name: "read", arguments: {} }; + 
const partialMessage = { role: "assistant", content: [partialToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp/report.txt"}oops', + partial: partialMessage, + }, + { + type: "toolcall_end", + contentIndex: 0, + toolCall: streamedToolCall, + partial: partialMessage, + }, + ], + resultMessage: { role: "assistant", content: [partialToolCall] }, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + + expect(partialToolCall.arguments).toEqual({}); + expect(streamedToolCall.arguments).toEqual({}); + }); + + it("clears a cached repair when later deltas make the trailing suffix invalid", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const streamedToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp/report.txt"}', + partial: partialMessage, + }, + { + type: "toolcall_delta", + contentIndex: 0, + delta: "x", + partial: partialMessage, + }, + { + type: "toolcall_delta", + contentIndex: 0, + delta: "yzq", + partial: partialMessage, + }, + { + type: "toolcall_end", + contentIndex: 0, + toolCall: streamedToolCall, + partial: partialMessage, + }, + ], + resultMessage: { role: "assistant", content: [partialToolCall] }, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + + expect(partialToolCall.arguments).toEqual({}); + expect(streamedToolCall.arguments).toEqual({}); + }); +}); + describe("isOllamaCompatProvider", () => { it("detects native ollama provider id", () => { expect( diff --git a/src/agents/pi-embedded-runner/run/attempt.ts b/src/agents/pi-embedded-runner/run/attempt.ts index 
0014475a880..2f77b46aff5 100644 --- a/src/agents/pi-embedded-runner/run/attempt.ts +++ b/src/agents/pi-embedded-runner/run/attempt.ts @@ -436,6 +436,281 @@ export function wrapStreamFnTrimToolCallNames( }; } +function extractBalancedJsonPrefix(raw: string): string | null { + let start = 0; + while (start < raw.length && /\s/.test(raw[start] ?? "")) { + start += 1; + } + const startChar = raw[start]; + if (startChar !== "{" && startChar !== "[") { + return null; + } + + let depth = 0; + let inString = false; + let escaped = false; + for (let i = start; i < raw.length; i += 1) { + const char = raw[i]; + if (char === undefined) { + break; + } + if (inString) { + if (escaped) { + escaped = false; + } else if (char === "\\") { + escaped = true; + } else if (char === '"') { + inString = false; + } + continue; + } + if (char === '"') { + inString = true; + continue; + } + if (char === "{" || char === "[") { + depth += 1; + continue; + } + if (char === "}" || char === "]") { + depth -= 1; + if (depth === 0) { + return raw.slice(start, i + 1); + } + } + } + return null; +} + +const MAX_TOOLCALL_REPAIR_BUFFER_CHARS = 64_000; +const MAX_TOOLCALL_REPAIR_TRAILING_CHARS = 3; +const TOOLCALL_REPAIR_ALLOWED_TRAILING_RE = /^[^\s{}[\]":,\\]{1,3}$/; + +function shouldAttemptMalformedToolCallRepair(partialJson: string, delta: string): boolean { + if (/[}\]]/.test(delta)) { + return true; + } + const trimmedDelta = delta.trim(); + return ( + trimmedDelta.length > 0 && + trimmedDelta.length <= MAX_TOOLCALL_REPAIR_TRAILING_CHARS && + /[}\]]/.test(partialJson) + ); +} + +type ToolCallArgumentRepair = { + args: Record; + trailingSuffix: string; +}; + +function tryParseMalformedToolCallArguments(raw: string): ToolCallArgumentRepair | undefined { + if (!raw.trim()) { + return undefined; + } + try { + JSON.parse(raw); + return undefined; + } catch { + const jsonPrefix = extractBalancedJsonPrefix(raw); + if (!jsonPrefix) { + return undefined; + } + const suffix = 
raw.slice(raw.indexOf(jsonPrefix) + jsonPrefix.length).trim(); + if ( + suffix.length === 0 || + suffix.length > MAX_TOOLCALL_REPAIR_TRAILING_CHARS || + !TOOLCALL_REPAIR_ALLOWED_TRAILING_RE.test(suffix) + ) { + return undefined; + } + try { + const parsed = JSON.parse(jsonPrefix) as unknown; + return parsed && typeof parsed === "object" && !Array.isArray(parsed) + ? { args: parsed as Record, trailingSuffix: suffix } + : undefined; + } catch { + return undefined; + } + } +} + +function repairToolCallArgumentsInMessage( + message: unknown, + contentIndex: number, + repairedArgs: Record, +): void { + if (!message || typeof message !== "object") { + return; + } + const content = (message as { content?: unknown }).content; + if (!Array.isArray(content)) { + return; + } + const block = content[contentIndex]; + if (!block || typeof block !== "object") { + return; + } + const typedBlock = block as { type?: unknown; arguments?: unknown }; + if (!isToolCallBlockType(typedBlock.type)) { + return; + } + typedBlock.arguments = repairedArgs; +} + +function clearToolCallArgumentsInMessage(message: unknown, contentIndex: number): void { + if (!message || typeof message !== "object") { + return; + } + const content = (message as { content?: unknown }).content; + if (!Array.isArray(content)) { + return; + } + const block = content[contentIndex]; + if (!block || typeof block !== "object") { + return; + } + const typedBlock = block as { type?: unknown; arguments?: unknown }; + if (!isToolCallBlockType(typedBlock.type)) { + return; + } + typedBlock.arguments = {}; +} + +function repairMalformedToolCallArgumentsInMessage( + message: unknown, + repairedArgsByIndex: Map>, +): void { + if (!message || typeof message !== "object") { + return; + } + const content = (message as { content?: unknown }).content; + if (!Array.isArray(content)) { + return; + } + for (const [index, repairedArgs] of repairedArgsByIndex.entries()) { + repairToolCallArgumentsInMessage(message, index, repairedArgs); + 
} +} + +function wrapStreamRepairMalformedToolCallArguments( + stream: ReturnType, +): ReturnType { + const partialJsonByIndex = new Map(); + const repairedArgsByIndex = new Map>(); + const disabledIndices = new Set(); + const loggedRepairIndices = new Set(); + const originalResult = stream.result.bind(stream); + stream.result = async () => { + const message = await originalResult(); + repairMalformedToolCallArgumentsInMessage(message, repairedArgsByIndex); + partialJsonByIndex.clear(); + repairedArgsByIndex.clear(); + disabledIndices.clear(); + loggedRepairIndices.clear(); + return message; + }; + + const originalAsyncIterator = stream[Symbol.asyncIterator].bind(stream); + (stream as { [Symbol.asyncIterator]: typeof originalAsyncIterator })[Symbol.asyncIterator] = + function () { + const iterator = originalAsyncIterator(); + return { + async next() { + const result = await iterator.next(); + if (!result.done && result.value && typeof result.value === "object") { + const event = result.value as { + type?: unknown; + contentIndex?: unknown; + delta?: unknown; + partial?: unknown; + message?: unknown; + toolCall?: unknown; + }; + if ( + typeof event.contentIndex === "number" && + Number.isInteger(event.contentIndex) && + event.type === "toolcall_delta" && + typeof event.delta === "string" + ) { + if (disabledIndices.has(event.contentIndex)) { + return result; + } + const nextPartialJson = + (partialJsonByIndex.get(event.contentIndex) ?? 
"") + event.delta; + if (nextPartialJson.length > MAX_TOOLCALL_REPAIR_BUFFER_CHARS) { + partialJsonByIndex.delete(event.contentIndex); + repairedArgsByIndex.delete(event.contentIndex); + disabledIndices.add(event.contentIndex); + return result; + } + partialJsonByIndex.set(event.contentIndex, nextPartialJson); + if (shouldAttemptMalformedToolCallRepair(nextPartialJson, event.delta)) { + const repair = tryParseMalformedToolCallArguments(nextPartialJson); + if (repair) { + repairedArgsByIndex.set(event.contentIndex, repair.args); + repairToolCallArgumentsInMessage(event.partial, event.contentIndex, repair.args); + repairToolCallArgumentsInMessage(event.message, event.contentIndex, repair.args); + if (!loggedRepairIndices.has(event.contentIndex)) { + loggedRepairIndices.add(event.contentIndex); + log.warn( + `repairing kimi-coding tool call arguments after ${repair.trailingSuffix.length} trailing chars`, + ); + } + } else { + repairedArgsByIndex.delete(event.contentIndex); + clearToolCallArgumentsInMessage(event.partial, event.contentIndex); + clearToolCallArgumentsInMessage(event.message, event.contentIndex); + } + } + } + if ( + typeof event.contentIndex === "number" && + Number.isInteger(event.contentIndex) && + event.type === "toolcall_end" + ) { + const repairedArgs = repairedArgsByIndex.get(event.contentIndex); + if (repairedArgs) { + if (event.toolCall && typeof event.toolCall === "object") { + (event.toolCall as { arguments?: unknown }).arguments = repairedArgs; + } + repairToolCallArgumentsInMessage(event.partial, event.contentIndex, repairedArgs); + repairToolCallArgumentsInMessage(event.message, event.contentIndex, repairedArgs); + } + partialJsonByIndex.delete(event.contentIndex); + disabledIndices.delete(event.contentIndex); + loggedRepairIndices.delete(event.contentIndex); + } + } + return result; + }, + async return(value?: unknown) { + return iterator.return?.(value) ?? 
{ done: true as const, value: undefined }; + }, + async throw(error?: unknown) { + return iterator.throw?.(error) ?? { done: true as const, value: undefined }; + }, + }; + }; + + return stream; +} + +export function wrapStreamFnRepairMalformedToolCallArguments(baseFn: StreamFn): StreamFn { + return (model, context, options) => { + const maybeStream = baseFn(model, context, options); + if (maybeStream && typeof maybeStream === "object" && "then" in maybeStream) { + return Promise.resolve(maybeStream).then((stream) => + wrapStreamRepairMalformedToolCallArguments(stream), + ); + } + return wrapStreamRepairMalformedToolCallArguments(maybeStream); + }; +} + +function shouldRepairMalformedAnthropicToolCallArguments(provider?: string): boolean { + return normalizeProviderId(provider ?? "") === "kimi-coding"; +} + // --------------------------------------------------------------------------- // xAI / Grok: decode HTML entities in tool call arguments // --------------------------------------------------------------------------- @@ -1379,6 +1654,15 @@ export async function runEmbeddedAttempt( allowedToolNames, ); + if ( + params.model.api === "anthropic-messages" && + shouldRepairMalformedAnthropicToolCallArguments(params.provider) + ) { + activeSession.agent.streamFn = wrapStreamFnRepairMalformedToolCallArguments( + activeSession.agent.streamFn, + ); + } + if (isXaiProvider(params.provider, params.modelId)) { activeSession.agent.streamFn = wrapStreamFnDecodeXaiToolCallArguments( activeSession.agent.streamFn, @@ -1774,6 +2058,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? 
undefined, }, ) .catch((err) => { @@ -1982,6 +2268,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? undefined, }, ) .catch((err) => { @@ -2042,6 +2330,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? undefined, }, ) .catch((err) => { diff --git a/src/agents/pi-embedded-runner/run/payloads.errors.test.ts b/src/agents/pi-embedded-runner/run/payloads.errors.test.ts index 4268e177dfc..a2e7873aedf 100644 --- a/src/agents/pi-embedded-runner/run/payloads.errors.test.ts +++ b/src/agents/pi-embedded-runner/run/payloads.errors.test.ts @@ -101,6 +101,18 @@ describe("buildEmbeddedRunPayloads", () => { expect(payloads[0]?.isError).toBe(true); }); + it("does not emit a synthetic billing error for successful turns with stale errorMessage", () => { + const payloads = buildPayloads({ + lastAssistant: makeAssistant({ + stopReason: "stop", + errorMessage: "insufficient credits for embedding model", + content: [{ type: "text", text: "Handle payment required errors in your API." 
}], + }), + }); + + expectSinglePayloadText(payloads, "Handle payment required errors in your API."); + }); + it("suppresses raw error JSON even when errorMessage is missing", () => { const payloads = buildPayloads({ assistantTexts: [errorJsonPretty], diff --git a/src/agents/pi-embedded-runner/run/payloads.ts b/src/agents/pi-embedded-runner/run/payloads.ts index 16a78ec2e97..c0e0ded136e 100644 --- a/src/agents/pi-embedded-runner/run/payloads.ts +++ b/src/agents/pi-embedded-runner/run/payloads.ts @@ -128,16 +128,17 @@ export function buildEmbeddedRunPayloads(params: { const useMarkdown = params.toolResultFormat === "markdown"; const suppressAssistantArtifacts = params.didSendDeterministicApprovalPrompt === true; const lastAssistantErrored = params.lastAssistant?.stopReason === "error"; - const errorText = params.lastAssistant - ? suppressAssistantArtifacts - ? undefined - : formatAssistantErrorText(params.lastAssistant, { - cfg: params.config, - sessionKey: params.sessionKey, - provider: params.provider, - model: params.model, - }) - : undefined; + const errorText = + params.lastAssistant && lastAssistantErrored + ? suppressAssistantArtifacts + ? undefined + : formatAssistantErrorText(params.lastAssistant, { + cfg: params.config, + sessionKey: params.sessionKey, + provider: params.provider, + model: params.model, + }) + : undefined; const rawErrorMessage = lastAssistantErrored ? 
params.lastAssistant?.errorMessage?.trim() || undefined : undefined; diff --git a/src/agents/pi-embedded-utils.test.ts b/src/agents/pi-embedded-utils.test.ts index 6a5ce710c85..ab84a375d94 100644 --- a/src/agents/pi-embedded-utils.test.ts +++ b/src/agents/pi-embedded-utils.test.ts @@ -134,6 +134,20 @@ describe("extractAssistantText", () => { ); }); + it("preserves response when errorMessage set from background failure (#13935)", () => { + const responseText = "Handle payment required errors in your API."; + const msg = makeAssistantMessage({ + role: "assistant", + errorMessage: "insufficient credits for embedding model", + stopReason: "stop", + content: [{ type: "text", text: responseText }], + timestamp: Date.now(), + }); + + const result = extractAssistantText(msg); + expect(result).toBe(responseText); + }); + it("strips Minimax tool invocations with extra attributes", () => { const msg = makeAssistantMessage({ role: "assistant", diff --git a/src/agents/pi-embedded-utils.ts b/src/agents/pi-embedded-utils.ts index da1dd7911b8..375df11654d 100644 --- a/src/agents/pi-embedded-utils.ts +++ b/src/agents/pi-embedded-utils.ts @@ -245,7 +245,9 @@ export function extractAssistantText(msg: AssistantMessage): string { }) ?? ""; // Only apply keyword-based error rewrites when the assistant message is actually an error. // Otherwise normal prose that *mentions* errors (e.g. "context overflow") can get clobbered. - const errorContext = msg.stopReason === "error" || Boolean(msg.errorMessage?.trim()); + // Gate on stopReason only — a non-error response with an errorMessage set (e.g. from a + // background tool failure) should not have its content rewritten (#13935). 
+ const errorContext = msg.stopReason === "error"; return sanitizeUserFacingText(extracted, { errorContext }); } diff --git a/src/agents/pi-tools.workspace-only-false.test.ts b/src/agents/pi-tools.workspace-only-false.test.ts index fb18260db09..146eb943c49 100644 --- a/src/agents/pi-tools.workspace-only-false.test.ts +++ b/src/agents/pi-tools.workspace-only-false.test.ts @@ -3,10 +3,14 @@ import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -vi.mock("@mariozechner/pi-ai/oauth", () => ({ - getOAuthApiKey: () => undefined, - getOAuthProviders: () => [], -})); +vi.mock("@mariozechner/pi-ai", async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + getOAuthApiKey: () => undefined, + getOAuthProviders: () => [], + }; +}); import { createOpenClawCodingTools } from "./pi-tools.js"; diff --git a/src/agents/sandbox/fs-bridge-mutation-helper.test.ts b/src/agents/sandbox/fs-bridge-mutation-helper.test.ts index f2d3974f0cc..57f22cc84b6 100644 --- a/src/agents/sandbox/fs-bridge-mutation-helper.test.ts +++ b/src/agents/sandbox/fs-bridge-mutation-helper.test.ts @@ -3,7 +3,10 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; -import { SANDBOX_PINNED_MUTATION_PYTHON } from "./fs-bridge-mutation-helper.js"; +import { + buildPinnedWritePlan, + SANDBOX_PINNED_MUTATION_PYTHON, +} from "./fs-bridge-mutation-helper.js"; async function withTempRoot(prefix: string, run: (root: string) => Promise): Promise { const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); @@ -22,6 +25,35 @@ function runMutation(args: string[], input?: string) { }); } +function runWritePlan(args: string[], input?: string) { + const plan = buildPinnedWritePlan({ + check: { + target: { + hostPath: args[1] ?? "", + containerPath: args[1] ?? "", + relativePath: path.posix.join(args[2] ?? "", args[3] ?? 
""), + writable: true, + }, + options: { + action: "write files", + requireWritable: true, + }, + }, + pinned: { + mountRootPath: args[1] ?? "", + relativeParentPath: args[2] ?? "", + basename: args[3] ?? "", + }, + mkdir: args[4] === "1", + }); + + return spawnSync("sh", ["-c", plan.script, "moltbot-sandbox-fs", ...(plan.args ?? [])], { + input, + encoding: "utf8", + stdio: ["pipe", "pipe", "pipe"], + }); +} + describe("sandbox pinned mutation helper", () => { it("writes through a pinned directory fd", async () => { await withTempRoot("openclaw-mutation-helper-", async (root) => { @@ -37,6 +69,26 @@ describe("sandbox pinned mutation helper", () => { }); }); + it.runIf(process.platform !== "win32")( + "preserves stdin payload bytes when the pinned write plan runs through sh", + async () => { + await withTempRoot("openclaw-mutation-helper-", async (root) => { + const workspace = path.join(root, "workspace"); + await fs.mkdir(workspace, { recursive: true }); + + const result = runWritePlan( + ["write", workspace, "nested/deeper", "note.txt", "1"], + "hello", + ); + + expect(result.status).toBe(0); + await expect( + fs.readFile(path.join(workspace, "nested", "deeper", "note.txt"), "utf8"), + ).resolves.toBe("hello"); + }); + }, + ); + it.runIf(process.platform !== "win32")( "rejects symlink-parent writes instead of materializing a temp file outside the mount", async () => { diff --git a/src/agents/sandbox/fs-bridge-mutation-helper.ts b/src/agents/sandbox/fs-bridge-mutation-helper.ts index fc50c5ab756..3c6edb2c2cb 100644 --- a/src/agents/sandbox/fs-bridge-mutation-helper.ts +++ b/src/agents/sandbox/fs-bridge-mutation-helper.ts @@ -257,7 +257,13 @@ function buildPinnedMutationPlan(params: { return { checks: params.checks, recheckBeforeCommand: true, - script: ["set -eu", "python3 - \"$@\" <<'PY'", SANDBOX_PINNED_MUTATION_PYTHON, "PY"].join("\n"), + // Feed the helper source over fd 3 so stdin stays available for write payload bytes. 
+ script: [ + "set -eu", + "python3 /dev/fd/3 \"$@\" 3<<'PY'", + SANDBOX_PINNED_MUTATION_PYTHON, + "PY", + ].join("\n"), args: params.args, }; } diff --git a/src/agents/sandbox/fs-bridge-path-safety.ts b/src/agents/sandbox/fs-bridge-path-safety.ts index dfc6c6692a1..9ca4c52e537 100644 --- a/src/agents/sandbox/fs-bridge-path-safety.ts +++ b/src/agents/sandbox/fs-bridge-path-safety.ts @@ -24,6 +24,11 @@ export type PinnedSandboxEntry = { basename: string; }; +export type AnchoredSandboxEntry = { + canonicalParentPath: string; + basename: string; +}; + export type PinnedSandboxDirectoryEntry = { mountRootPath: string; relativePath: string; @@ -154,6 +159,48 @@ export class SandboxFsPathGuard { }; } + async resolveAnchoredSandboxEntry( + target: SandboxResolvedFsPath, + action: string, + ): Promise { + const basename = path.posix.basename(target.containerPath); + if (!basename || basename === "." || basename === "/") { + throw new Error(`Invalid sandbox entry target: ${target.containerPath}`); + } + const parentPath = normalizeContainerPath(path.posix.dirname(target.containerPath)); + const canonicalParentPath = await this.resolveCanonicalContainerPath({ + containerPath: parentPath, + allowFinalSymlinkForUnlink: false, + }); + this.resolveRequiredMount(canonicalParentPath, action); + return { + canonicalParentPath, + basename, + }; + } + + async resolveAnchoredPinnedEntry( + target: SandboxResolvedFsPath, + action: string, + ): Promise { + const anchoredTarget = await this.resolveAnchoredSandboxEntry(target, action); + const mount = this.resolveRequiredMount(anchoredTarget.canonicalParentPath, action); + const relativeParentPath = path.posix.relative( + mount.containerRoot, + anchoredTarget.canonicalParentPath, + ); + if (relativeParentPath.startsWith("..") || path.posix.isAbsolute(relativeParentPath)) { + throw new Error( + `Sandbox path escapes allowed mounts; cannot ${action}: ${target.containerPath}`, + ); + } + return { + mountRootPath: mount.containerRoot, + 
relativeParentPath: relativeParentPath === "." ? "" : relativeParentPath, + basename: anchoredTarget.basename, + }; + } + resolvePinnedDirectoryEntry( target: SandboxResolvedFsPath, action: string, diff --git a/src/agents/sandbox/fs-bridge-shell-command-plans.ts b/src/agents/sandbox/fs-bridge-shell-command-plans.ts index 2987472762b..4bcd1ae04de 100644 --- a/src/agents/sandbox/fs-bridge-shell-command-plans.ts +++ b/src/agents/sandbox/fs-bridge-shell-command-plans.ts @@ -1,4 +1,4 @@ -import type { PathSafetyCheck } from "./fs-bridge-path-safety.js"; +import type { AnchoredSandboxEntry, PathSafetyCheck } from "./fs-bridge-path-safety.js"; import type { SandboxResolvedFsPath } from "./fs-paths.js"; export type SandboxFsCommandPlan = { @@ -10,11 +10,14 @@ export type SandboxFsCommandPlan = { allowFailure?: boolean; }; -export function buildStatPlan(target: SandboxResolvedFsPath): SandboxFsCommandPlan { +export function buildStatPlan( + target: SandboxResolvedFsPath, + anchoredTarget: AnchoredSandboxEntry, +): SandboxFsCommandPlan { return { checks: [{ target, options: { action: "stat files" } }], - script: 'set -eu; stat -c "%F|%s|%Y" -- "$1"', - args: [target.containerPath], + script: 'set -eu\ncd -- "$1"\nstat -c "%F|%s|%Y" -- "$2"', + args: [anchoredTarget.canonicalParentPath, anchoredTarget.basename], allowFailure: true, }; } diff --git a/src/agents/sandbox/fs-bridge.anchored-ops.test.ts b/src/agents/sandbox/fs-bridge.anchored-ops.test.ts index 9b15f02adf5..48e7e9e23f8 100644 --- a/src/agents/sandbox/fs-bridge.anchored-ops.test.ts +++ b/src/agents/sandbox/fs-bridge.anchored-ops.test.ts @@ -4,7 +4,12 @@ import { describe, expect, it } from "vitest"; import { createSandbox, createSandboxFsBridge, + dockerExecResult, + findCallsByScriptFragment, + findCallByDockerArg, + findCallByScriptFragment, getDockerArg, + getDockerScript, installFsBridgeTestHarness, mockedExecDockerRaw, withTempDir, @@ -66,6 +71,13 @@ describe("sandbox fs bridge anchored ops", () => { }); const 
pinnedCases = [ + { + name: "write pins canonical parent + basename", + invoke: (bridge: ReturnType) => + bridge.writeFile({ filePath: "nested/file.txt", data: "updated" }), + expectedArgs: ["write", "/workspace", "nested", "file.txt", "1"], + forbiddenArgs: ["/workspace/nested/file.txt"], + }, { name: "mkdirp pins mount root + relative path", invoke: (bridge: ReturnType) => @@ -108,7 +120,7 @@ describe("sandbox fs bridge anchored ops", () => { const opCall = mockedExecDockerRaw.mock.calls.find( ([args]) => typeof args[5] === "string" && - args[5].includes("python3 - \"$@\" <<'PY'") && + args[5].includes("python3 /dev/fd/3 \"$@\" 3<<'PY'") && getDockerArg(args, 1) === testCase.expectedArgs[0], ); expect(opCall).toBeDefined(); @@ -121,4 +133,74 @@ describe("sandbox fs bridge anchored ops", () => { }); }); }); + + it.runIf(process.platform !== "win32")( + "write resolves symlink parents to canonical pinned paths", + async () => { + await withTempDir("openclaw-fs-bridge-contract-write-", async (stateDir) => { + const workspaceDir = path.join(stateDir, "workspace"); + const realDir = path.join(workspaceDir, "real"); + await fs.mkdir(realDir, { recursive: true }); + await fs.symlink(realDir, path.join(workspaceDir, "alias")); + + mockedExecDockerRaw.mockImplementation(async (args) => { + const script = getDockerScript(args); + if (script.includes('readlink -f -- "$cursor"')) { + const target = getDockerArg(args, 1); + return dockerExecResult(`${target.replace("/workspace/alias", "/workspace/real")}\n`); + } + if (script.includes('stat -c "%F|%s|%Y"')) { + return dockerExecResult("regular file|1|2"); + } + return dockerExecResult(""); + }); + + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + + await bridge.writeFile({ filePath: "alias/note.txt", data: "updated" }); + + const writeCall = findCallByDockerArg(1, "write"); + expect(writeCall).toBeDefined(); + const args = writeCall?.[0] ?? 
[]; + expect(getDockerArg(args, 2)).toBe("/workspace"); + expect(getDockerArg(args, 3)).toBe("real"); + expect(getDockerArg(args, 4)).toBe("note.txt"); + expect(args).not.toContain("alias"); + + const canonicalCalls = findCallsByScriptFragment('readlink -f -- "$cursor"'); + expect( + canonicalCalls.some(([callArgs]) => getDockerArg(callArgs, 1) === "/workspace/alias"), + ).toBe(true); + }); + }, + ); + + it("stat anchors parent + basename", async () => { + await withTempDir("openclaw-fs-bridge-contract-stat-", async (stateDir) => { + const workspaceDir = path.join(stateDir, "workspace"); + await fs.mkdir(path.join(workspaceDir, "nested"), { recursive: true }); + await fs.writeFile(path.join(workspaceDir, "nested", "file.txt"), "bye", "utf8"); + + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + + await bridge.stat({ filePath: "nested/file.txt" }); + + const statCall = findCallByScriptFragment('stat -c "%F|%s|%Y" -- "$2"'); + expect(statCall).toBeDefined(); + const args = statCall?.[0] ?? 
[]; + expect(getDockerArg(args, 1)).toBe("/workspace/nested"); + expect(getDockerArg(args, 2)).toBe("file.txt"); + expect(args).not.toContain("/workspace/nested/file.txt"); + }); + }); }); diff --git a/src/agents/sandbox/fs-bridge.shell.test.ts b/src/agents/sandbox/fs-bridge.shell.test.ts index 24b7d9faba4..1685759ad38 100644 --- a/src/agents/sandbox/fs-bridge.shell.test.ts +++ b/src/agents/sandbox/fs-bridge.shell.test.ts @@ -129,6 +129,10 @@ describe("sandbox fs bridge shell compatibility", () => { await bridge.writeFile({ filePath: "b.txt", data: "hello" }); const scripts = getScriptsFromCalls(); + expect(scripts.some((script) => script.includes("python3 - \"$@\" <<'PY'"))).toBe(false); + expect(scripts.some((script) => script.includes("python3 /dev/fd/3 \"$@\" 3<<'PY'"))).toBe( + true, + ); expect(scripts.some((script) => script.includes('cat >"$1"'))).toBe(false); expect(scripts.some((script) => script.includes('cat >"$tmp"'))).toBe(false); expect(scripts.some((script) => script.includes("os.replace("))).toBe(true); diff --git a/src/agents/sandbox/fs-bridge.ts b/src/agents/sandbox/fs-bridge.ts index 83504d9b908..7a9a22d4459 100644 --- a/src/agents/sandbox/fs-bridge.ts +++ b/src/agents/sandbox/fs-bridge.ts @@ -118,7 +118,10 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { const buffer = Buffer.isBuffer(params.data) ? params.data : Buffer.from(params.data, params.encoding ?? 
"utf8"); - const pinnedWriteTarget = this.pathGuard.resolvePinnedEntry(target, "write files"); + const pinnedWriteTarget = await this.pathGuard.resolveAnchoredPinnedEntry( + target, + "write files", + ); await this.runCheckedCommand({ ...buildPinnedWritePlan({ check: writeCheck, @@ -218,7 +221,11 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { signal?: AbortSignal; }): Promise { const target = this.resolveResolvedPath(params); - const result = await this.runPlannedCommand(buildStatPlan(target), params.signal); + const anchoredTarget = await this.pathGuard.resolveAnchoredSandboxEntry(target, "stat files"); + const result = await this.runPlannedCommand( + buildStatPlan(target, anchoredTarget), + params.signal, + ); if (result.code !== 0) { const stderr = result.stderr.toString("utf8"); if (stderr.includes("No such file or directory")) { diff --git a/src/agents/sessions-spawn-hooks.test.ts b/src/agents/sessions-spawn-hooks.test.ts index e7abc2dba9f..89004289369 100644 --- a/src/agents/sessions-spawn-hooks.test.ts +++ b/src/agents/sessions-spawn-hooks.test.ts @@ -380,4 +380,36 @@ describe("sessions_spawn subagent lifecycle hooks", () => { emitLifecycleHooks: true, }); }); + + it("cleans up the provisional session when lineage patching fails after thread binding", async () => { + const callGatewayMock = getCallGatewayMock(); + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.patch" && typeof request.params?.spawnedBy === "string") { + throw new Error("lineage patch failed"); + } + if (request.method === "sessions.delete") { + return { ok: true }; + } + return {}; + }); + + const result = await executeDiscordThreadSessionSpawn("call9"); + + expect(result.details).toMatchObject({ + status: "error", + error: "lineage patch failed", + }); + expect(hookRunnerMocks.runSubagentSpawned).not.toHaveBeenCalled(); + 
expect(hookRunnerMocks.runSubagentEnded).not.toHaveBeenCalled(); + const methods = getGatewayMethods(); + expect(methods).toContain("sessions.delete"); + expect(methods).not.toContain("agent"); + const deleteCall = findGatewayRequest("sessions.delete"); + expect(deleteCall?.params).toMatchObject({ + key: (result.details as { childSessionKey?: string }).childSessionKey, + deleteTranscript: true, + emitLifecycleHooks: true, + }); + }); }); diff --git a/src/agents/subagent-spawn.attachments.test.ts b/src/agents/subagent-spawn.attachments.test.ts index b564e77a906..9fe774fa284 100644 --- a/src/agents/subagent-spawn.attachments.test.ts +++ b/src/agents/subagent-spawn.attachments.test.ts @@ -1,6 +1,7 @@ +import fs from "node:fs"; import os from "node:os"; import path from "node:path"; -import { beforeEach, describe, expect, it, vi } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { resetSubagentRegistryForTests } from "./subagent-registry.js"; import { decodeStrictBase64, spawnSubagentDirect } from "./subagent-spawn.js"; @@ -31,6 +32,7 @@ let configOverride: Record = { }, }, }; +let workspaceDirOverride = ""; vi.mock("../config/config.js", async (importOriginal) => { const actual = await importOriginal(); @@ -61,7 +63,7 @@ vi.mock("./agent-scope.js", async (importOriginal) => { const actual = await importOriginal(); return { ...actual, - resolveAgentWorkspaceDir: () => path.join(os.tmpdir(), "agent-workspace"), + resolveAgentWorkspaceDir: () => workspaceDirOverride, }; }); @@ -145,6 +147,16 @@ describe("spawnSubagentDirect filename validation", () => { resetSubagentRegistryForTests(); callGatewayMock.mockClear(); setupGatewayMock(); + workspaceDirOverride = fs.mkdtempSync( + path.join(os.tmpdir(), `openclaw-subagent-attachments-${process.pid}-${Date.now()}-`), + ); + }); + + afterEach(() => { + if (workspaceDirOverride) { + fs.rmSync(workspaceDirOverride, { recursive: true, force: true }); + workspaceDirOverride = ""; + } 
}); const ctx = { @@ -210,4 +222,43 @@ describe("spawnSubagentDirect filename validation", () => { expect(result.status).toBe("error"); expect(result.error).toMatch(/attachments_invalid_name/); }); + + it("removes materialized attachments when lineage patching fails", async () => { + const calls: Array<{ method?: string; params?: Record }> = []; + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + calls.push(request); + if (request.method === "sessions.patch" && typeof request.params?.spawnedBy === "string") { + throw new Error("lineage patch failed"); + } + if (request.method === "sessions.delete") { + return { ok: true }; + } + return {}; + }); + + const result = await spawnSubagentDirect( + { + task: "test", + attachments: [{ name: "file.txt", content: validContent, encoding: "base64" }], + }, + ctx, + ); + + expect(result).toMatchObject({ + status: "error", + error: "lineage patch failed", + }); + const attachmentsRoot = path.join(workspaceDirOverride, ".openclaw", "attachments"); + const retainedDirs = fs.existsSync(attachmentsRoot) + ? 
fs.readdirSync(attachmentsRoot).filter((entry) => !entry.startsWith(".")) + : []; + expect(retainedDirs).toHaveLength(0); + const deleteCall = calls.find((entry) => entry.method === "sessions.delete"); + expect(deleteCall?.params).toMatchObject({ + key: expect.stringMatching(/^agent:main:subagent:/), + deleteTranscript: true, + emitLifecycleHooks: false, + }); + }); }); diff --git a/src/agents/subagent-spawn.ts b/src/agents/subagent-spawn.ts index be5dac37f83..a4a6229c715 100644 --- a/src/agents/subagent-spawn.ts +++ b/src/agents/subagent-spawn.ts @@ -153,6 +153,25 @@ async function cleanupProvisionalSession( } } +async function cleanupFailedSpawnBeforeAgentStart(params: { + childSessionKey: string; + attachmentAbsDir?: string; + emitLifecycleHooks?: boolean; + deleteTranscript?: boolean; +}): Promise { + if (params.attachmentAbsDir) { + try { + await fs.rm(params.attachmentAbsDir, { recursive: true, force: true }); + } catch { + // Best-effort cleanup only. + } + } + await cleanupProvisionalSession(params.childSessionKey, { + emitLifecycleHooks: params.emitLifecycleHooks, + deleteTranscript: params.deleteTranscript, + }); +} + function resolveSpawnMode(params: { requestedMode?: SpawnSubagentMode; threadRequested: boolean; @@ -561,10 +580,32 @@ export async function spawnSubagentDirect( explicitWorkspaceDir: toolSpawnMetadata.workspaceDir, }), }); + const spawnLineagePatchError = await patchChildSession({ + spawnedBy: spawnedByKey, + ...(spawnedMetadata.workspaceDir ? 
{ spawnedWorkspaceDir: spawnedMetadata.workspaceDir } : {}), + }); + if (spawnLineagePatchError) { + await cleanupFailedSpawnBeforeAgentStart({ + childSessionKey, + attachmentAbsDir, + emitLifecycleHooks: threadBindingReady, + deleteTranscript: true, + }); + return { + status: "error", + error: spawnLineagePatchError, + childSessionKey, + }; + } const childIdem = crypto.randomUUID(); let childRunId: string = childIdem; try { + const { + spawnedBy: _spawnedBy, + workspaceDir: _workspaceDir, + ...publicSpawnedMetadata + } = spawnedMetadata; const response = await callGateway<{ runId: string }>({ method: "agent", params: { @@ -581,7 +622,7 @@ export async function spawnSubagentDirect( thinking: thinkingOverride, timeout: runTimeoutSeconds, label: label || undefined, - ...spawnedMetadata, + ...publicSpawnedMetadata, }, timeoutMs: 10_000, }); diff --git a/src/agents/tool-catalog.test.ts b/src/agents/tool-catalog.test.ts new file mode 100644 index 00000000000..120a744432c --- /dev/null +++ b/src/agents/tool-catalog.test.ts @@ -0,0 +1,11 @@ +import { describe, expect, it } from "vitest"; +import { resolveCoreToolProfilePolicy } from "./tool-catalog.js"; + +describe("tool-catalog", () => { + it("includes web_search and web_fetch in the coding profile policy", () => { + const policy = resolveCoreToolProfilePolicy("coding"); + expect(policy).toBeDefined(); + expect(policy!.allow).toContain("web_search"); + expect(policy!.allow).toContain("web_fetch"); + }); +}); diff --git a/src/agents/tool-catalog.ts b/src/agents/tool-catalog.ts index bbada8e7bc9..5ba7ff3b3dc 100644 --- a/src/agents/tool-catalog.ts +++ b/src/agents/tool-catalog.ts @@ -86,7 +86,7 @@ const CORE_TOOL_DEFINITIONS: CoreToolDefinition[] = [ label: "web_search", description: "Search the web", sectionId: "web", - profiles: [], + profiles: ["coding"], includeInOpenClawGroup: true, }, { @@ -94,7 +94,7 @@ const CORE_TOOL_DEFINITIONS: CoreToolDefinition[] = [ label: "web_fetch", description: "Fetch web content", 
sectionId: "web", - profiles: [], + profiles: ["coding"], includeInOpenClawGroup: true, }, { diff --git a/src/agents/tools/session-status-tool.ts b/src/agents/tools/session-status-tool.ts index 2277b6e8ad2..29d8204b750 100644 --- a/src/agents/tools/session-status-tool.ts +++ b/src/agents/tools/session-status-tool.ts @@ -19,6 +19,7 @@ import { import { buildAgentMainSessionKey, DEFAULT_AGENT_ID, + parseAgentSessionKey, resolveAgentIdFromSessionKey, } from "../../routing/session-key.js"; import { applyModelOverrideToSessionEntry } from "../../sessions/model-overrides.js"; @@ -36,10 +37,12 @@ import { import type { AnyAgentTool } from "./common.js"; import { readStringParam } from "./common.js"; import { + createSessionVisibilityGuard, shouldResolveSessionIdInput, - resolveInternalSessionKey, - resolveMainSessionAlias, createAgentToAgentPolicy, + resolveEffectiveSessionToolsVisibility, + resolveInternalSessionKey, + resolveSandboxedSessionToolContext, } from "./sessions-helpers.js"; const SessionStatusToolSchema = Type.Object({ @@ -175,6 +178,7 @@ async function resolveModelOverride(params: { export function createSessionStatusTool(opts?: { agentSessionKey?: string; config?: OpenClawConfig; + sandboxed?: boolean; }): AnyAgentTool { return { label: "Session Status", @@ -185,18 +189,70 @@ export function createSessionStatusTool(opts?: { execute: async (_toolCallId, args) => { const params = args as Record; const cfg = opts?.config ?? loadConfig(); - const { mainKey, alias } = resolveMainSessionAlias(cfg); + const { mainKey, alias, effectiveRequesterKey } = resolveSandboxedSessionToolContext({ + cfg, + agentSessionKey: opts?.agentSessionKey, + sandboxed: opts?.sandboxed, + }); const a2aPolicy = createAgentToAgentPolicy(cfg); + const requesterAgentId = resolveAgentIdFromSessionKey( + opts?.agentSessionKey ?? 
effectiveRequesterKey, + ); + const visibilityRequesterKey = effectiveRequesterKey.trim(); + const usesLegacyMainAlias = alias === mainKey; + const isLegacyMainVisibilityKey = (sessionKey: string) => { + const trimmed = sessionKey.trim(); + return usesLegacyMainAlias && (trimmed === "main" || trimmed === mainKey); + }; + const resolveVisibilityMainSessionKey = (sessionAgentId: string) => { + const requesterParsed = parseAgentSessionKey(visibilityRequesterKey); + if ( + resolveAgentIdFromSessionKey(visibilityRequesterKey) === sessionAgentId && + (requesterParsed?.rest === mainKey || isLegacyMainVisibilityKey(visibilityRequesterKey)) + ) { + return visibilityRequesterKey; + } + return buildAgentMainSessionKey({ + agentId: sessionAgentId, + mainKey, + }); + }; + const normalizeVisibilityTargetSessionKey = (sessionKey: string, sessionAgentId: string) => { + const trimmed = sessionKey.trim(); + if (!trimmed) { + return trimmed; + } + if (trimmed.startsWith("agent:")) { + const parsed = parseAgentSessionKey(trimmed); + if (parsed?.rest === mainKey) { + return resolveVisibilityMainSessionKey(sessionAgentId); + } + return trimmed; + } + // Preserve legacy bare main keys for requester tree checks. + if (isLegacyMainVisibilityKey(trimmed)) { + return resolveVisibilityMainSessionKey(sessionAgentId); + } + return trimmed; + }; + const visibilityGuard = + opts?.sandboxed === true + ? await createSessionVisibilityGuard({ + action: "status", + requesterSessionKey: visibilityRequesterKey, + visibility: resolveEffectiveSessionToolsVisibility({ + cfg, + sandboxed: true, + }), + a2aPolicy, + }) + : null; const requestedKeyParam = readStringParam(params, "sessionKey"); let requestedKeyRaw = requestedKeyParam ?? opts?.agentSessionKey; if (!requestedKeyRaw?.trim()) { throw new Error("sessionKey required"); } - - const requesterAgentId = resolveAgentIdFromSessionKey( - opts?.agentSessionKey ?? 
requestedKeyRaw, - ); const ensureAgentAccess = (targetAgentId: string) => { if (targetAgentId === requesterAgentId) { return; @@ -213,7 +269,14 @@ export function createSessionStatusTool(opts?: { }; if (requestedKeyRaw.startsWith("agent:")) { - ensureAgentAccess(resolveAgentIdFromSessionKey(requestedKeyRaw)); + const requestedAgentId = resolveAgentIdFromSessionKey(requestedKeyRaw); + ensureAgentAccess(requestedAgentId); + const access = visibilityGuard?.check( + normalizeVisibilityTargetSessionKey(requestedKeyRaw, requestedAgentId), + ); + if (access && !access.allowed) { + throw new Error(access.error); + } } const isExplicitAgentKey = requestedKeyRaw.startsWith("agent:"); @@ -258,6 +321,15 @@ export function createSessionStatusTool(opts?: { throw new Error(`Unknown ${kind}: ${requestedKeyRaw}`); } + if (visibilityGuard && !requestedKeyRaw.startsWith("agent:")) { + const access = visibilityGuard.check( + normalizeVisibilityTargetSessionKey(resolved.key, agentId), + ); + if (!access.allowed) { + throw new Error(access.error); + } + } + const configured = resolveDefaultModelForAgent({ cfg, agentId }); const modelRaw = readStringParam(params, "model"); let changedModel = false; diff --git a/src/agents/tools/sessions-access.ts b/src/agents/tools/sessions-access.ts index 6574c2296cf..47bd0806f7b 100644 --- a/src/agents/tools/sessions-access.ts +++ b/src/agents/tools/sessions-access.ts @@ -14,7 +14,7 @@ export type AgentToAgentPolicy = { isAllowed: (requesterAgentId: string, targetAgentId: string) => boolean; }; -export type SessionAccessAction = "history" | "send" | "list"; +export type SessionAccessAction = "history" | "send" | "list" | "status"; export type SessionAccessResult = | { allowed: true } @@ -130,6 +130,9 @@ function actionPrefix(action: SessionAccessAction): string { if (action === "send") { return "Session send"; } + if (action === "status") { + return "Session status"; + } return "Session list"; } @@ -140,6 +143,9 @@ function a2aDisabledMessage(action: 
SessionAccessAction): string { if (action === "send") { return "Agent-to-agent messaging is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent sends."; } + if (action === "status") { + return "Agent-to-agent status is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent access."; + } return "Agent-to-agent listing is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent visibility."; } @@ -150,6 +156,9 @@ function a2aDeniedMessage(action: SessionAccessAction): string { if (action === "send") { return "Agent-to-agent messaging denied by tools.agentToAgent.allow."; } + if (action === "status") { + return "Agent-to-agent status denied by tools.agentToAgent.allow."; + } return "Agent-to-agent listing denied by tools.agentToAgent.allow."; } @@ -160,6 +169,9 @@ function crossVisibilityMessage(action: SessionAccessAction): string { if (action === "send") { return "Session send visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access."; } + if (action === "status") { + return "Session status visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access."; + } return "Session list visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access."; } diff --git a/src/agents/tools/sessions-helpers.ts b/src/agents/tools/sessions-helpers.ts index 5b5f94699c6..e638438758c 100644 --- a/src/agents/tools/sessions-helpers.ts +++ b/src/agents/tools/sessions-helpers.ts @@ -166,9 +166,9 @@ export function extractAssistantText(message: unknown): string | undefined { normalizeText: (text) => text.trim(), }) ?? 
""; const stopReason = (message as { stopReason?: unknown }).stopReason; - const errorMessage = (message as { errorMessage?: unknown }).errorMessage; - const errorContext = - stopReason === "error" || (typeof errorMessage === "string" && Boolean(errorMessage.trim())); + // Gate on stopReason only — a non-error response with a stale/background errorMessage + // should not have its content rewritten with error templates (#13935). + const errorContext = stopReason === "error"; return joined ? sanitizeUserFacingText(joined, { errorContext }) : undefined; } diff --git a/src/agents/tools/sessions.test.ts b/src/agents/tools/sessions.test.ts index aa831027f68..ce849e45d07 100644 --- a/src/agents/tools/sessions.test.ts +++ b/src/agents/tools/sessions.test.ts @@ -199,6 +199,16 @@ describe("extractAssistantText", () => { "Firebase downgraded us to the free Spark plan. Check whether billing should be re-enabled.", ); }); + + it("preserves successful turns with stale background errorMessage", () => { + const message = { + role: "assistant", + stopReason: "end_turn", + errorMessage: "insufficient credits for embedding model", + content: [{ type: "text", text: "Handle payment required errors in your API." 
}], + }; + expect(extractAssistantText(message)).toBe("Handle payment required errors in your API."); + }); }); describe("resolveAnnounceTarget", () => { diff --git a/src/auto-reply/reply/agent-runner-execution.ts b/src/auto-reply/reply/agent-runner-execution.ts index 2f6c27519b0..bdbd68ac2e4 100644 --- a/src/auto-reply/reply/agent-runner-execution.ts +++ b/src/auto-reply/reply/agent-runner-execution.ts @@ -6,8 +6,10 @@ import { getCliSessionId } from "../../agents/cli-session.js"; import { runWithModelFallback } from "../../agents/model-fallback.js"; import { isCliProvider } from "../../agents/model-selection.js"; import { + BILLING_ERROR_USER_MESSAGE, isCompactionFailureError, isContextOverflowError, + isBillingErrorMessage, isLikelyContextOverflowError, isTransientHttpError, sanitizeUserFacingText, @@ -514,8 +516,9 @@ export async function runAgentTurnWithFallback(params: { break; } catch (err) { const message = err instanceof Error ? err.message : String(err); - const isContextOverflow = isLikelyContextOverflowError(message); - const isCompactionFailure = isCompactionFailureError(message); + const isBilling = isBillingErrorMessage(message); + const isContextOverflow = !isBilling && isLikelyContextOverflowError(message); + const isCompactionFailure = !isBilling && isCompactionFailureError(message); const isSessionCorruption = /function call turn comes immediately after/i.test(message); const isRoleOrderingError = /incorrect role information|roles must alternate/i.test(message); const isTransientHttp = isTransientHttpError(message); @@ -610,11 +613,13 @@ export async function runAgentTurnWithFallback(params: { ? sanitizeUserFacingText(message, { errorContext: true }) : message; const trimmedMessage = safeMessage.replace(/\.\s*$/, ""); - const fallbackText = isContextOverflow - ? "⚠️ Context overflow — prompt too large for this model. Try a shorter message or a larger-context model." - : isRoleOrderingError - ? "⚠️ Message ordering conflict - please try again. 
If this persists, use /new to start a fresh session." - : `⚠️ Agent failed before reply: ${trimmedMessage}.\nLogs: openclaw logs --follow`; + const fallbackText = isBilling + ? BILLING_ERROR_USER_MESSAGE + : isContextOverflow + ? "⚠️ Context overflow — prompt too large for this model. Try a shorter message or a larger-context model." + : isRoleOrderingError + ? "⚠️ Message ordering conflict - please try again. If this persists, use /new to start a fresh session." + : `⚠️ Agent failed before reply: ${trimmedMessage}.\nLogs: openclaw logs --follow`; return { kind: "final", diff --git a/src/auto-reply/reply/agent-runner-payloads.test.ts b/src/auto-reply/reply/agent-runner-payloads.test.ts index 94088b2b5b8..26f23d7a42c 100644 --- a/src/auto-reply/reply/agent-runner-payloads.test.ts +++ b/src/auto-reply/reply/agent-runner-payloads.test.ts @@ -169,6 +169,50 @@ describe("buildReplyPayloads media filter integration", () => { expect(replyPayloads).toHaveLength(0); }); + it("drops all final payloads when block pipeline streamed successfully", async () => { + const pipeline: Parameters[0]["blockReplyPipeline"] = { + didStream: () => true, + isAborted: () => false, + hasSentPayload: () => false, + enqueue: () => {}, + flush: async () => {}, + stop: () => {}, + hasBuffered: () => false, + }; + // shouldDropFinalPayloads short-circuits to [] when the pipeline streamed + // without aborting, so hasSentPayload is never reached. + const { replyPayloads } = await buildReplyPayloads({ + ...baseParams, + blockStreamingEnabled: true, + blockReplyPipeline: pipeline, + replyToMode: "all", + payloads: [{ text: "response", replyToId: "post-123" }], + }); + + expect(replyPayloads).toHaveLength(0); + }); + + it("deduplicates final payloads against directly sent block keys regardless of replyToId", async () => { + // When block streaming is not active but directlySentBlockKeys has entries + // (e.g. from pre-tool flush), the key should match even if replyToId differs. 
+ const { createBlockReplyContentKey } = await import("./block-reply-pipeline.js"); + const directlySentBlockKeys = new Set(); + directlySentBlockKeys.add( + createBlockReplyContentKey({ text: "response", replyToId: "post-1" }), + ); + + const { replyPayloads } = await buildReplyPayloads({ + ...baseParams, + blockStreamingEnabled: false, + blockReplyPipeline: null, + directlySentBlockKeys, + replyToMode: "off", + payloads: [{ text: "response" }], + }); + + expect(replyPayloads).toHaveLength(0); + }); + it("does not suppress same-target replies when accountId differs", async () => { const { replyPayloads } = await buildReplyPayloads({ ...baseParams, diff --git a/src/auto-reply/reply/agent-runner-payloads.ts b/src/auto-reply/reply/agent-runner-payloads.ts index 263dea9fd54..9e89c921407 100644 --- a/src/auto-reply/reply/agent-runner-payloads.ts +++ b/src/auto-reply/reply/agent-runner-payloads.ts @@ -5,7 +5,7 @@ import type { OriginatingChannelType } from "../templating.js"; import { SILENT_REPLY_TOKEN } from "../tokens.js"; import type { ReplyPayload } from "../types.js"; import { formatBunFetchSocketError, isBunFetchSocketError } from "./agent-runner-utils.js"; -import { createBlockReplyPayloadKey, type BlockReplyPipeline } from "./block-reply-pipeline.js"; +import { createBlockReplyContentKey, type BlockReplyPipeline } from "./block-reply-pipeline.js"; import { resolveOriginAccountId, resolveOriginMessageProvider, @@ -213,7 +213,7 @@ export async function buildReplyPayloads(params: { ) : params.directlySentBlockKeys?.size ? mediaFilteredPayloads.filter( - (payload) => !params.directlySentBlockKeys!.has(createBlockReplyPayloadKey(payload)), + (payload) => !params.directlySentBlockKeys!.has(createBlockReplyContentKey(payload)), ) : mediaFilteredPayloads; const replyPayloads = suppressMessagingToolReplies ? 
[] : filteredPayloads; diff --git a/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts b/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts index 659ccfe7951..14731dbb0ff 100644 --- a/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts +++ b/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts @@ -1628,3 +1628,72 @@ describe("runReplyAgent transient HTTP retry", () => { expect(payload?.text).toContain("Recovered response"); }); }); + +describe("runReplyAgent billing error classification", () => { + // Regression guard for the runner-level catch block in runAgentTurnWithFallback. + // Billing errors from providers like OpenRouter can contain token/size wording that + // matches context overflow heuristics. This test verifies the final user-visible + // message is the billing-specific one, not the "Context overflow" fallback. + it("returns billing message for mixed-signal error (billing text + overflow patterns)", async () => { + runEmbeddedPiAgentMock.mockRejectedValueOnce( + new Error("402 Payment Required: request token limit exceeded for this billing plan"), + ); + + const typing = createMockTypingController(); + const sessionCtx = { + Provider: "telegram", + MessageSid: "msg", + } as unknown as TemplateContext; + const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings; + const followupRun = { + prompt: "hello", + summaryLine: "hello", + enqueuedAt: Date.now(), + run: { + sessionId: "session", + sessionKey: "main", + messageProvider: "telegram", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + config: {}, + skillsSnapshot: {}, + provider: "anthropic", + model: "claude", + thinkLevel: "low", + verboseLevel: "off", + elevatedLevel: "off", + bashElevated: { + enabled: false, + allowed: false, + defaultLevel: "off", + }, + timeoutMs: 1_000, + blockReplyBreak: "message_end", + }, + } as unknown as FollowupRun; + + const result = await runReplyAgent({ + commandBody: "hello", + followupRun, + queueKey: 
"main", + resolvedQueue, + shouldSteer: false, + shouldFollowup: false, + isActive: false, + isStreaming: false, + typing, + sessionCtx, + defaultModel: "anthropic/claude", + resolvedVerboseLevel: "off", + isNewSession: false, + blockStreamingEnabled: false, + resolvedBlockStreamingBreak: "message_end", + shouldInjectGroupIntro: false, + typingMode: "instant", + }); + + const payload = Array.isArray(result) ? result[0] : result; + expect(payload?.text).toContain("billing error"); + expect(payload?.text).not.toContain("Context overflow"); + }); +}); diff --git a/src/auto-reply/reply/block-reply-pipeline.test.ts b/src/auto-reply/reply/block-reply-pipeline.test.ts new file mode 100644 index 00000000000..92564033df5 --- /dev/null +++ b/src/auto-reply/reply/block-reply-pipeline.test.ts @@ -0,0 +1,79 @@ +import { describe, expect, it } from "vitest"; +import { + createBlockReplyContentKey, + createBlockReplyPayloadKey, + createBlockReplyPipeline, +} from "./block-reply-pipeline.js"; + +describe("createBlockReplyPayloadKey", () => { + it("produces different keys for payloads differing only by replyToId", () => { + const a = createBlockReplyPayloadKey({ text: "hello world", replyToId: "post-1" }); + const b = createBlockReplyPayloadKey({ text: "hello world", replyToId: "post-2" }); + const c = createBlockReplyPayloadKey({ text: "hello world" }); + expect(a).not.toBe(b); + expect(a).not.toBe(c); + }); + + it("produces different keys for payloads with different text", () => { + const a = createBlockReplyPayloadKey({ text: "hello" }); + const b = createBlockReplyPayloadKey({ text: "world" }); + expect(a).not.toBe(b); + }); + + it("produces different keys for payloads with different media", () => { + const a = createBlockReplyPayloadKey({ text: "hello", mediaUrl: "file:///a.png" }); + const b = createBlockReplyPayloadKey({ text: "hello", mediaUrl: "file:///b.png" }); + expect(a).not.toBe(b); + }); + + it("trims whitespace from text for key comparison", () => { + const a = 
createBlockReplyPayloadKey({ text: " hello " }); + const b = createBlockReplyPayloadKey({ text: "hello" }); + expect(a).toBe(b); + }); +}); + +describe("createBlockReplyContentKey", () => { + it("produces the same key for payloads differing only by replyToId", () => { + const a = createBlockReplyContentKey({ text: "hello world", replyToId: "post-1" }); + const b = createBlockReplyContentKey({ text: "hello world", replyToId: "post-2" }); + const c = createBlockReplyContentKey({ text: "hello world" }); + expect(a).toBe(b); + expect(a).toBe(c); + }); +}); + +describe("createBlockReplyPipeline dedup with threading", () => { + it("keeps separate deliveries for same text with different replyToId", async () => { + const sent: Array<{ text?: string; replyToId?: string }> = []; + const pipeline = createBlockReplyPipeline({ + onBlockReply: async (payload) => { + sent.push({ text: payload.text, replyToId: payload.replyToId }); + }, + timeoutMs: 5000, + }); + + pipeline.enqueue({ text: "response text", replyToId: "thread-root-1" }); + pipeline.enqueue({ text: "response text", replyToId: undefined }); + await pipeline.flush(); + + expect(sent).toEqual([ + { text: "response text", replyToId: "thread-root-1" }, + { text: "response text", replyToId: undefined }, + ]); + }); + + it("hasSentPayload matches regardless of replyToId", async () => { + const pipeline = createBlockReplyPipeline({ + onBlockReply: async () => {}, + timeoutMs: 5000, + }); + + pipeline.enqueue({ text: "response text", replyToId: "thread-root-1" }); + await pipeline.flush(); + + // Final payload with no replyToId should be recognized as already sent + expect(pipeline.hasSentPayload({ text: "response text" })).toBe(true); + expect(pipeline.hasSentPayload({ text: "response text", replyToId: "other-id" })).toBe(true); + }); +}); diff --git a/src/auto-reply/reply/block-reply-pipeline.ts b/src/auto-reply/reply/block-reply-pipeline.ts index 752c70a1da2..9ce85334238 100644 --- 
a/src/auto-reply/reply/block-reply-pipeline.ts +++ b/src/auto-reply/reply/block-reply-pipeline.ts @@ -48,6 +48,19 @@ export function createBlockReplyPayloadKey(payload: ReplyPayload): string { }); } +export function createBlockReplyContentKey(payload: ReplyPayload): string { + const text = payload.text?.trim() ?? ""; + const mediaList = payload.mediaUrls?.length + ? payload.mediaUrls + : payload.mediaUrl + ? [payload.mediaUrl] + : []; + // Content-only key used for final-payload suppression after block streaming. + // This intentionally ignores replyToId so a streamed threaded payload and the + // later final payload still collapse when they carry the same content. + return JSON.stringify({ text, mediaList }); +} + const withTimeout = async ( promise: Promise, timeoutMs: number, @@ -80,6 +93,7 @@ export function createBlockReplyPipeline(params: { }): BlockReplyPipeline { const { onBlockReply, timeoutMs, coalescing, buffer } = params; const sentKeys = new Set(); + const sentContentKeys = new Set(); const pendingKeys = new Set(); const seenKeys = new Set(); const bufferedKeys = new Set(); @@ -95,6 +109,7 @@ export function createBlockReplyPipeline(params: { return; } const payloadKey = createBlockReplyPayloadKey(payload); + const contentKey = createBlockReplyContentKey(payload); if (!bypassSeenCheck) { if (seenKeys.has(payloadKey)) { return; @@ -130,6 +145,7 @@ export function createBlockReplyPipeline(params: { return; } sentKeys.add(payloadKey); + sentContentKeys.add(contentKey); didStream = true; }) .catch((err) => { @@ -238,8 +254,8 @@ export function createBlockReplyPipeline(params: { didStream: () => didStream, isAborted: () => aborted, hasSentPayload: (payload) => { - const payloadKey = createBlockReplyPayloadKey(payload); - return sentKeys.has(payloadKey); + const payloadKey = createBlockReplyContentKey(payload); + return sentContentKeys.has(payloadKey); }, }; } diff --git a/src/auto-reply/reply/reply-delivery.ts b/src/auto-reply/reply/reply-delivery.ts index 
acf04e73a3e..cacd6b083cb 100644 --- a/src/auto-reply/reply/reply-delivery.ts +++ b/src/auto-reply/reply/reply-delivery.ts @@ -2,7 +2,7 @@ import { logVerbose } from "../../globals.js"; import { SILENT_REPLY_TOKEN } from "../tokens.js"; import type { BlockReplyContext, ReplyPayload } from "../types.js"; import type { BlockReplyPipeline } from "./block-reply-pipeline.js"; -import { createBlockReplyPayloadKey } from "./block-reply-pipeline.js"; +import { createBlockReplyContentKey } from "./block-reply-pipeline.js"; import { parseReplyDirectives } from "./reply-directives.js"; import { applyReplyTagsToPayload, isRenderablePayload } from "./reply-payloads.js"; import type { TypingSignaler } from "./typing-mode.js"; @@ -128,7 +128,7 @@ export function createBlockReplyDeliveryHandler(params: { } else if (params.blockStreamingEnabled) { // Send directly when flushing before tool execution (no pipeline but streaming enabled). // Track sent key to avoid duplicate in final payloads. - params.directlySentBlockKeys.add(createBlockReplyPayloadKey(blockPayload)); + params.directlySentBlockKeys.add(createBlockReplyContentKey(blockPayload)); await params.onBlockReply(blockPayload); } // When streaming is disabled entirely, blocks are accumulated in final text instead. 
diff --git a/src/browser/proxy-files.test.ts b/src/browser/proxy-files.test.ts new file mode 100644 index 00000000000..1d7ea9566bb --- /dev/null +++ b/src/browser/proxy-files.test.ts @@ -0,0 +1,54 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { afterEach, beforeEach, describe, expect, it } from "vitest"; +import { MEDIA_MAX_BYTES } from "../media/store.js"; +import { createTempHomeEnv, type TempHomeEnv } from "../test-utils/temp-home.js"; +import { persistBrowserProxyFiles } from "./proxy-files.js"; + +describe("persistBrowserProxyFiles", () => { + let tempHome: TempHomeEnv; + + beforeEach(async () => { + tempHome = await createTempHomeEnv("openclaw-browser-proxy-files-"); + }); + + afterEach(async () => { + await tempHome.restore(); + }); + + it("persists browser proxy files under the shared media store", async () => { + const sourcePath = "/tmp/proxy-file.txt"; + const mapping = await persistBrowserProxyFiles([ + { + path: sourcePath, + base64: Buffer.from("hello from browser proxy").toString("base64"), + mimeType: "text/plain", + }, + ]); + + const savedPath = mapping.get(sourcePath); + expect(typeof savedPath).toBe("string"); + expect(path.normalize(savedPath ?? "")).toContain( + `${path.sep}.openclaw${path.sep}media${path.sep}browser${path.sep}`, + ); + await expect(fs.readFile(savedPath ?? 
"", "utf8")).resolves.toBe("hello from browser proxy"); + }); + + it("rejects browser proxy files that exceed the shared media size limit", async () => { + const oversized = Buffer.alloc(MEDIA_MAX_BYTES + 1, 0x41); + + await expect( + persistBrowserProxyFiles([ + { + path: "/tmp/oversized.bin", + base64: oversized.toString("base64"), + mimeType: "application/octet-stream", + }, + ]), + ).rejects.toThrow("Media exceeds 5MB limit"); + + await expect( + fs.stat(path.join(tempHome.home, ".openclaw", "media", "browser")), + ).rejects.toThrow(); + }); +}); diff --git a/src/browser/proxy-files.ts b/src/browser/proxy-files.ts index b18820a4594..1d39d71a09e 100644 --- a/src/browser/proxy-files.ts +++ b/src/browser/proxy-files.ts @@ -13,7 +13,7 @@ export async function persistBrowserProxyFiles(files: BrowserProxyFile[] | undef const mapping = new Map(); for (const file of files) { const buffer = Buffer.from(file.base64, "base64"); - const saved = await saveMediaBuffer(buffer, file.mimeType, "browser", buffer.byteLength); + const saved = await saveMediaBuffer(buffer, file.mimeType, "browser"); mapping.set(file.path, saved.path); } return mapping; diff --git a/src/channels/plugins/onboarding/helpers.ts b/src/channels/plugins/onboarding/helpers.ts index 6eab25fd239..77d03a4127a 100644 --- a/src/channels/plugins/onboarding/helpers.ts +++ b/src/channels/plugins/onboarding/helpers.ts @@ -164,11 +164,11 @@ export function setAccountAllowFromForChannel(params: { }); } -export function setTopLevelChannelAllowFrom(params: { +function patchTopLevelChannelConfig(params: { cfg: OpenClawConfig; channel: string; - allowFrom: string[]; enabled?: boolean; + patch: Record; }): OpenClawConfig { const channelConfig = (params.cfg.channels?.[params.channel] as Record | undefined) ?? {}; @@ -179,12 +179,26 @@ export function setTopLevelChannelAllowFrom(params: { [params.channel]: { ...channelConfig, ...(params.enabled ? 
{ enabled: true } : {}), - allowFrom: params.allowFrom, + ...params.patch, }, }, }; } +export function setTopLevelChannelAllowFrom(params: { + cfg: OpenClawConfig; + channel: string; + allowFrom: string[]; + enabled?: boolean; +}): OpenClawConfig { + return patchTopLevelChannelConfig({ + cfg: params.cfg, + channel: params.channel, + enabled: params.enabled, + patch: { allowFrom: params.allowFrom }, + }); +} + export function setTopLevelChannelDmPolicyWithAllowFrom(params: { cfg: OpenClawConfig; channel: string; @@ -199,17 +213,14 @@ export function setTopLevelChannelDmPolicyWithAllowFrom(params: { undefined; const allowFrom = params.dmPolicy === "open" ? addWildcardAllowFrom(existingAllowFrom) : undefined; - return { - ...params.cfg, - channels: { - ...params.cfg.channels, - [params.channel]: { - ...channelConfig, - dmPolicy: params.dmPolicy, - ...(allowFrom ? { allowFrom } : {}), - }, + return patchTopLevelChannelConfig({ + cfg: params.cfg, + channel: params.channel, + patch: { + dmPolicy: params.dmPolicy, + ...(allowFrom ? { allowFrom } : {}), }, - }; + }); } export function setTopLevelChannelGroupPolicy(params: { @@ -218,19 +229,12 @@ export function setTopLevelChannelGroupPolicy(params: { groupPolicy: GroupPolicy; enabled?: boolean; }): OpenClawConfig { - const channelConfig = - (params.cfg.channels?.[params.channel] as Record | undefined) ?? {}; - return { - ...params.cfg, - channels: { - ...params.cfg.channels, - [params.channel]: { - ...channelConfig, - ...(params.enabled ? 
{ enabled: true } : {}), - groupPolicy: params.groupPolicy, - }, - }, - }; + return patchTopLevelChannelConfig({ + cfg: params.cfg, + channel: params.channel, + enabled: params.enabled, + patch: { groupPolicy: params.groupPolicy }, + }); } export function setChannelDmPolicyWithAllowFrom(params: { diff --git a/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts b/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts index e98351cfa61..943c8a8ba9b 100644 --- a/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts +++ b/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts @@ -1,4 +1,4 @@ -import { describe, vi } from "vitest"; +import { describe, expect, it, vi } from "vitest"; import type { ReplyPayload } from "../../../auto-reply/types.js"; import { installSendPayloadContractSuite, @@ -34,4 +34,92 @@ describe("whatsappOutbound sendPayload", () => { chunking: { mode: "split", longTextLength: 5000, maxChunkLength: 4000 }, createHarness, }); + + it("trims leading whitespace for direct text sends", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); + + await whatsappOutbound.sendText!({ + cfg: {}, + to: "5511999999999@c.us", + text: "\n \thello", + deps: { sendWhatsApp }, + }); + + expect(sendWhatsApp).toHaveBeenCalledWith("5511999999999@c.us", "hello", { + verbose: false, + cfg: {}, + accountId: undefined, + gifPlayback: undefined, + }); + }); + + it("trims leading whitespace for direct media captions", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); + + await whatsappOutbound.sendMedia!({ + cfg: {}, + to: "5511999999999@c.us", + text: "\n \tcaption", + mediaUrl: "/tmp/test.png", + deps: { sendWhatsApp }, + }); + + expect(sendWhatsApp).toHaveBeenCalledWith("5511999999999@c.us", "caption", { + verbose: false, + cfg: {}, + mediaUrl: "/tmp/test.png", + mediaLocalRoots: undefined, + accountId: undefined, + gifPlayback: undefined, + }); + }); + + it("trims 
leading whitespace for sendPayload text and caption delivery", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); + + await whatsappOutbound.sendPayload!({ + cfg: {}, + to: "5511999999999@c.us", + text: "", + payload: { text: "\n\nhello" }, + deps: { sendWhatsApp }, + }); + await whatsappOutbound.sendPayload!({ + cfg: {}, + to: "5511999999999@c.us", + text: "", + payload: { text: "\n\ncaption", mediaUrl: "/tmp/test.png" }, + deps: { sendWhatsApp }, + }); + + expect(sendWhatsApp).toHaveBeenNthCalledWith(1, "5511999999999@c.us", "hello", { + verbose: false, + cfg: {}, + accountId: undefined, + gifPlayback: undefined, + }); + expect(sendWhatsApp).toHaveBeenNthCalledWith(2, "5511999999999@c.us", "caption", { + verbose: false, + cfg: {}, + mediaUrl: "/tmp/test.png", + mediaLocalRoots: undefined, + accountId: undefined, + gifPlayback: undefined, + }); + }); + + it("skips whitespace-only text payloads", async () => { + const sendWhatsApp = vi.fn(); + + const result = await whatsappOutbound.sendPayload!({ + cfg: {}, + to: "5511999999999@c.us", + text: "", + payload: { text: "\n \t" }, + deps: { sendWhatsApp }, + }); + + expect(result).toEqual({ channel: "whatsapp", messageId: "" }); + expect(sendWhatsApp).not.toHaveBeenCalled(); + }); }); diff --git a/src/channels/plugins/outbound/whatsapp.ts b/src/channels/plugins/outbound/whatsapp.ts index e5de15241ae..58004676e6e 100644 --- a/src/channels/plugins/outbound/whatsapp.ts +++ b/src/channels/plugins/outbound/whatsapp.ts @@ -5,6 +5,10 @@ import { resolveWhatsAppOutboundTarget } from "../../../whatsapp/resolve-outboun import type { ChannelOutboundAdapter } from "../types.js"; import { sendTextMediaPayload } from "./direct-text-media.js"; +function trimLeadingWhitespace(text: string | undefined): string { + return text?.trimStart() ?? 
""; +} + export const whatsappOutbound: ChannelOutboundAdapter = { deliveryMode: "gateway", chunker: chunkText, @@ -13,12 +17,32 @@ export const whatsappOutbound: ChannelOutboundAdapter = { pollMaxOptions: 12, resolveTarget: ({ to, allowFrom, mode }) => resolveWhatsAppOutboundTarget({ to, allowFrom, mode }), - sendPayload: async (ctx) => - await sendTextMediaPayload({ channel: "whatsapp", ctx, adapter: whatsappOutbound }), + sendPayload: async (ctx) => { + const text = trimLeadingWhitespace(ctx.payload.text); + const hasMedia = Boolean(ctx.payload.mediaUrl) || (ctx.payload.mediaUrls?.length ?? 0) > 0; + if (!text && !hasMedia) { + return { channel: "whatsapp", messageId: "" }; + } + return await sendTextMediaPayload({ + channel: "whatsapp", + ctx: { + ...ctx, + payload: { + ...ctx.payload, + text, + }, + }, + adapter: whatsappOutbound, + }); + }, sendText: async ({ cfg, to, text, accountId, deps, gifPlayback }) => { + const normalizedText = trimLeadingWhitespace(text); + if (!normalizedText) { + return { channel: "whatsapp", messageId: "" }; + } const send = deps?.sendWhatsApp ?? (await import("../../../web/outbound.js")).sendMessageWhatsApp; - const result = await send(to, text, { + const result = await send(to, normalizedText, { verbose: false, cfg, accountId: accountId ?? undefined, @@ -27,9 +51,10 @@ export const whatsappOutbound: ChannelOutboundAdapter = { return { channel: "whatsapp", ...result }; }, sendMedia: async ({ cfg, to, text, mediaUrl, mediaLocalRoots, accountId, deps, gifPlayback }) => { + const normalizedText = trimLeadingWhitespace(text); const send = deps?.sendWhatsApp ?? 
(await import("../../../web/outbound.js")).sendMessageWhatsApp; - const result = await send(to, text, { + const result = await send(to, normalizedText, { verbose: false, cfg, mediaUrl, diff --git a/src/cli/daemon-cli.coverage.test.ts b/src/cli/daemon-cli.coverage.test.ts index d897eee11cc..8faf44cdde3 100644 --- a/src/cli/daemon-cli.coverage.test.ts +++ b/src/cli/daemon-cli.coverage.test.ts @@ -10,7 +10,7 @@ const resolveGatewayProgramArguments = vi.fn(async (_opts?: unknown) => ({ const serviceInstall = vi.fn().mockResolvedValue(undefined); const serviceUninstall = vi.fn().mockResolvedValue(undefined); const serviceStop = vi.fn().mockResolvedValue(undefined); -const serviceRestart = vi.fn().mockResolvedValue(undefined); +const serviceRestart = vi.fn().mockResolvedValue({ outcome: "completed" }); const serviceIsLoaded = vi.fn().mockResolvedValue(false); const serviceReadCommand = vi.fn().mockResolvedValue(null); const serviceReadRuntime = vi.fn().mockResolvedValue({ status: "running" }); @@ -48,20 +48,24 @@ vi.mock("../daemon/program-args.js", () => ({ resolveGatewayProgramArguments: (opts: unknown) => resolveGatewayProgramArguments(opts), })); -vi.mock("../daemon/service.js", () => ({ - resolveGatewayService: () => ({ - label: "LaunchAgent", - loadedText: "loaded", - notLoadedText: "not loaded", - install: serviceInstall, - uninstall: serviceUninstall, - stop: serviceStop, - restart: serviceRestart, - isLoaded: serviceIsLoaded, - readCommand: serviceReadCommand, - readRuntime: serviceReadRuntime, - }), -})); +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: () => ({ + label: "LaunchAgent", + loadedText: "loaded", + notLoadedText: "not loaded", + install: serviceInstall, + uninstall: serviceUninstall, + stop: serviceStop, + restart: serviceRestart, + isLoaded: serviceIsLoaded, + readCommand: serviceReadCommand, + readRuntime: serviceReadRuntime, + }), + }; +}); 
vi.mock("../daemon/legacy.js", () => ({ findLegacyGatewayServices: async () => [], diff --git a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts index a785cde4d9b..188e7090915 100644 --- a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts @@ -65,7 +65,7 @@ describe("runServiceRestart config pre-flight (#35862)", () => { service.restart.mockClear(); service.isLoaded.mockResolvedValue(true); service.readCommand.mockResolvedValue({ environment: {} }); - service.restart.mockResolvedValue(undefined); + service.restart.mockResolvedValue({ outcome: "completed" }); vi.unstubAllEnvs(); vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); @@ -163,7 +163,7 @@ describe("runServiceStart config pre-flight (#35862)", () => { service.isLoaded.mockClear(); service.restart.mockClear(); service.isLoaded.mockResolvedValue(true); - service.restart.mockResolvedValue(undefined); + service.restart.mockResolvedValue({ outcome: "completed" }); }); it("aborts start when config is invalid", async () => { diff --git a/src/cli/daemon-cli/lifecycle-core.test.ts b/src/cli/daemon-cli/lifecycle-core.test.ts index 8fa7ded1bde..ff66bd17653 100644 --- a/src/cli/daemon-cli/lifecycle-core.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.test.ts @@ -40,11 +40,12 @@ vi.mock("../../runtime.js", () => ({ })); let runServiceRestart: typeof import("./lifecycle-core.js").runServiceRestart; +let runServiceStart: typeof import("./lifecycle-core.js").runServiceStart; let runServiceStop: typeof import("./lifecycle-core.js").runServiceStop; describe("runServiceRestart token drift", () => { beforeAll(async () => { - ({ runServiceRestart, runServiceStop } = await import("./lifecycle-core.js")); + ({ runServiceRestart, runServiceStart, runServiceStop } = await import("./lifecycle-core.js")); }); beforeEach(() => { @@ -64,7 +65,7 @@ describe("runServiceRestart 
token drift", () => { service.readCommand.mockResolvedValue({ environment: { OPENCLAW_GATEWAY_TOKEN: "service-token" }, }); - service.restart.mockResolvedValue(undefined); + service.restart.mockResolvedValue({ outcome: "completed" }); vi.unstubAllEnvs(); vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); @@ -176,4 +177,41 @@ describe("runServiceRestart token drift", () => { expect(payload.result).toBe("restarted"); expect(payload.message).toContain("unmanaged process"); }); + + it("skips restart health checks when restart is only scheduled", async () => { + const postRestartCheck = vi.fn(async () => {}); + service.restart.mockResolvedValue({ outcome: "scheduled" }); + + const result = await runServiceRestart({ + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true }, + postRestartCheck, + }); + + expect(result).toBe(true); + expect(postRestartCheck).not.toHaveBeenCalled(); + const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); + const payload = JSON.parse(jsonLine ?? "{}") as { result?: string; message?: string }; + expect(payload.result).toBe("scheduled"); + expect(payload.message).toBe("restart scheduled, gateway will restart momentarily"); + }); + + it("emits scheduled when service start routes through a scheduled restart", async () => { + service.restart.mockResolvedValue({ outcome: "scheduled" }); + + await runServiceStart({ + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true }, + }); + + expect(service.isLoaded).toHaveBeenCalledTimes(1); + const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); + const payload = JSON.parse(jsonLine ?? 
"{}") as { result?: string; message?: string }; + expect(payload.result).toBe("scheduled"); + expect(payload.message).toBe("restart scheduled, gateway will restart momentarily"); + }); }); diff --git a/src/cli/daemon-cli/lifecycle-core.ts b/src/cli/daemon-cli/lifecycle-core.ts index 75bba03b418..a1ad4073584 100644 --- a/src/cli/daemon-cli/lifecycle-core.ts +++ b/src/cli/daemon-cli/lifecycle-core.ts @@ -3,6 +3,8 @@ import { readBestEffortConfig, readConfigFileSnapshot } from "../../config/confi import { formatConfigIssueLines } from "../../config/issue-format.js"; import { resolveIsNixMode } from "../../config/paths.js"; import { checkTokenDrift } from "../../daemon/service-audit.js"; +import type { GatewayServiceRestartResult } from "../../daemon/service-types.js"; +import { describeGatewayServiceRestart } from "../../daemon/service.js"; import type { GatewayService } from "../../daemon/service.js"; import { renderSystemdUnavailableHints } from "../../daemon/systemd-hints.js"; import { isSystemdUserServiceAvailable } from "../../daemon/systemd.js"; @@ -223,7 +225,20 @@ export async function runServiceStart(params: { } try { - await params.service.restart({ env: process.env, stdout }); + const restartResult = await params.service.restart({ env: process.env, stdout }); + const restartStatus = describeGatewayServiceRestart(params.serviceNoun, restartResult); + if (restartStatus.scheduled) { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, loaded), + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return; + } } catch (err) { const hints = params.renderStartHints(); fail(`${params.serviceNoun} start failed: ${String(err)}`, hints); @@ -317,7 +332,7 @@ export async function runServiceRestart(params: { renderStartHints: () => string[]; opts?: DaemonLifecycleOptions; checkTokenDrift?: boolean; - postRestartCheck?: (ctx: RestartPostCheckContext) => 
Promise; + postRestartCheck?: (ctx: RestartPostCheckContext) => Promise; onNotLoaded?: (ctx: NotLoadedActionContext) => Promise; }): Promise { const json = Boolean(params.opts?.json); @@ -402,11 +417,42 @@ export async function runServiceRestart(params: { } try { + let restartResult: GatewayServiceRestartResult = { outcome: "completed" }; if (loaded) { - await params.service.restart({ env: process.env, stdout }); + restartResult = await params.service.restart({ env: process.env, stdout }); + } + let restartStatus = describeGatewayServiceRestart(params.serviceNoun, restartResult); + if (restartStatus.scheduled) { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, loaded), + warnings: warnings.length ? warnings : undefined, + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return true; } if (params.postRestartCheck) { - await params.postRestartCheck({ json, stdout, warnings, fail }); + const postRestartResult = await params.postRestartCheck({ json, stdout, warnings, fail }); + if (postRestartResult) { + restartStatus = describeGatewayServiceRestart(params.serviceNoun, postRestartResult); + if (restartStatus.scheduled) { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, loaded), + warnings: warnings.length ? 
warnings : undefined, + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return true; + } + } } let restarted = loaded; if (loaded) { diff --git a/src/cli/daemon-cli/lifecycle.test.ts b/src/cli/daemon-cli/lifecycle.test.ts index f1e87fc4938..61899e4e78c 100644 --- a/src/cli/daemon-cli/lifecycle.test.ts +++ b/src/cli/daemon-cli/lifecycle.test.ts @@ -132,6 +132,7 @@ describe("runDaemonRestart health checks", () => { programArguments: ["openclaw", "gateway", "--port", "18789"], environment: {}, }); + service.restart.mockResolvedValue({ outcome: "completed" }); runServiceRestart.mockImplementation(async (params: RestartParams) => { const fail = (message: string, hints?: string[]) => { @@ -204,6 +205,25 @@ describe("runDaemonRestart health checks", () => { expect(waitForGatewayHealthyRestart).toHaveBeenCalledTimes(2); }); + it("skips stale-pid retry health checks when the retry restart is only scheduled", async () => { + const unhealthy: RestartHealthSnapshot = { + healthy: false, + staleGatewayPids: [1993], + runtime: { status: "stopped" }, + portUsage: { port: 18789, status: "busy", listeners: [], hints: [] }, + }; + waitForGatewayHealthyRestart.mockResolvedValueOnce(unhealthy); + terminateStaleGatewayPids.mockResolvedValue([1993]); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + const result = await runDaemonRestart({ json: true }); + + expect(result).toBe(true); + expect(terminateStaleGatewayPids).toHaveBeenCalledWith([1993]); + expect(service.restart).toHaveBeenCalledTimes(1); + expect(waitForGatewayHealthyRestart).toHaveBeenCalledTimes(1); + }); + it("fails restart when gateway remains unhealthy", async () => { const unhealthy: RestartHealthSnapshot = { healthy: false, diff --git a/src/cli/daemon-cli/lifecycle.ts b/src/cli/daemon-cli/lifecycle.ts index 7fa7396d0b0..2b0775b0c48 100644 --- a/src/cli/daemon-cli/lifecycle.ts +++ b/src/cli/daemon-cli/lifecycle.ts @@ -286,7 +286,10 @@ export async function 
runDaemonRestart(opts: DaemonLifecycleOptions = {}): Promi } await terminateStaleGatewayPids(health.staleGatewayPids); - await service.restart({ env: process.env, stdout }); + const retryRestart = await service.restart({ env: process.env, stdout }); + if (retryRestart.outcome === "scheduled") { + return retryRestart; + } health = await waitForGatewayHealthyRestart({ service, port: restartPort, diff --git a/src/cli/skills-cli.format.ts b/src/cli/skills-cli.format.ts index 580f17b2d40..045281bc7d1 100644 --- a/src/cli/skills-cli.format.ts +++ b/src/cli/skills-cli.format.ts @@ -1,4 +1,5 @@ import type { SkillStatusEntry, SkillStatusReport } from "../agents/skills-status.js"; +import { stripAnsi } from "../terminal/ansi.js"; import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { shortenHomePath } from "../utils.js"; @@ -42,6 +43,32 @@ function normalizeSkillEmoji(emoji?: string): string { return (emoji ?? "📦").replaceAll("\uFE0E", "\uFE0F"); } +const REMAINING_ESC_SEQUENCE_REGEX = new RegExp( + String.raw`\u001b(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])`, + "g", +); +const JSON_CONTROL_CHAR_REGEX = new RegExp(String.raw`[\u0000-\u001f\u007f-\u009f]`, "g"); + +function sanitizeJsonString(value: string): string { + return stripAnsi(value) + .replace(REMAINING_ESC_SEQUENCE_REGEX, "") + .replace(JSON_CONTROL_CHAR_REGEX, ""); +} + +function sanitizeJsonValue(value: unknown): unknown { + if (typeof value === "string") { + return sanitizeJsonString(value); + } + if (Array.isArray(value)) { + return value.map((item) => sanitizeJsonValue(item)); + } + if (value && typeof value === "object") { + return Object.fromEntries( + Object.entries(value).map(([key, entryValue]) => [key, sanitizeJsonValue(entryValue)]), + ); + } + return value; +} function formatSkillName(skill: SkillStatusEntry): string { const emoji = normalizeSkillEmoji(skill.emoji); return `${emoji} ${theme.command(skill.name)}`; @@ -71,7 +98,7 @@ 
export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti const skills = opts.eligible ? report.skills.filter((s) => s.eligible) : report.skills; if (opts.json) { - const jsonReport = { + const jsonReport = sanitizeJsonValue({ workspaceDir: report.workspaceDir, managedSkillsDir: report.managedSkillsDir, skills: skills.map((s) => ({ @@ -87,7 +114,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti homepage: s.homepage, missing: s.missing, })), - }; + }); return JSON.stringify(jsonReport, null, 2); } @@ -154,7 +181,7 @@ export function formatSkillInfo( } if (opts.json) { - return JSON.stringify(skill, null, 2); + return JSON.stringify(sanitizeJsonValue(skill), null, 2); } const lines: string[] = []; @@ -251,7 +278,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp if (opts.json) { return JSON.stringify( - { + sanitizeJsonValue({ summary: { total: report.skills.length, eligible: eligible.length, @@ -267,7 +294,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp missing: s.missing, install: s.install, })), - }, + }), null, 2, ); diff --git a/src/cli/skills-cli.test.ts b/src/cli/skills-cli.test.ts index e87f8b2d313..27031fc0fdf 100644 --- a/src/cli/skills-cli.test.ts +++ b/src/cli/skills-cli.test.ts @@ -243,5 +243,46 @@ describe("skills-cli", () => { const parsed = JSON.parse(output) as Record; assert(parsed); }); + + it("sanitizes ANSI and C1 controls in skills list JSON output", () => { + const report = createMockReport([ + createMockSkill({ + name: "json-skill", + emoji: "\u001b[31m📧\u001b[0m\u009f", + description: "desc\u0093\u001b[2J\u001b[33m colored\u001b[0m", + }), + ]); + + const output = formatSkillsList(report, { json: true }); + const parsed = JSON.parse(output) as { + skills: Array<{ emoji: string; description: string }>; + }; + + expect(parsed.skills[0]?.emoji).toBe("📧"); + expect(parsed.skills[0]?.description).toBe("desc colored"); 
+ expect(output).not.toContain("\\u001b"); + }); + + it("sanitizes skills info JSON output", () => { + const report = createMockReport([ + createMockSkill({ + name: "info-json", + emoji: "\u001b[31m🎙\u001b[0m\u009f", + description: "hi\u0091", + homepage: "https://example.com/\u0092docs", + }), + ]); + + const output = formatSkillInfo(report, "info-json", { json: true }); + const parsed = JSON.parse(output) as { + emoji: string; + description: string; + homepage: string; + }; + + expect(parsed.emoji).toBe("🎙"); + expect(parsed.description).toBe("hi"); + expect(parsed.homepage).toBe("https://example.com/docs"); + }); }); }); diff --git a/src/commands/configure.daemon.test.ts b/src/commands/configure.daemon.test.ts index 9a7aa76e0c8..11b54dc6b19 100644 --- a/src/commands/configure.daemon.test.ts +++ b/src/commands/configure.daemon.test.ts @@ -1,13 +1,22 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; -const withProgress = vi.hoisted(() => vi.fn(async (_opts, run) => run({ setLabel: vi.fn() }))); +const progressSetLabel = vi.hoisted(() => vi.fn()); +const withProgress = vi.hoisted(() => + vi.fn(async (_opts, run) => run({ setLabel: progressSetLabel })), +); const loadConfig = vi.hoisted(() => vi.fn()); const resolveGatewayInstallToken = vi.hoisted(() => vi.fn()); const buildGatewayInstallPlan = vi.hoisted(() => vi.fn()); const note = vi.hoisted(() => vi.fn()); const serviceIsLoaded = vi.hoisted(() => vi.fn(async () => false)); const serviceInstall = vi.hoisted(() => vi.fn(async () => {})); +const serviceRestart = vi.hoisted(() => + vi.fn<() => Promise<{ outcome: "completed" } | { outcome: "scheduled" }>>(async () => ({ + outcome: "completed", + })), +); const ensureSystemdUserLingerInteractive = vi.hoisted(() => vi.fn(async () => {})); +const select = vi.hoisted(() => vi.fn(async () => "node")); vi.mock("../cli/progress.js", () => ({ withProgress, @@ -32,7 +41,7 @@ vi.mock("../terminal/note.js", () => ({ vi.mock("./configure.shared.js", () => ({ 
confirm: vi.fn(async () => true), - select: vi.fn(async () => "node"), + select, })); vi.mock("./daemon-runtime.js", () => ({ @@ -40,12 +49,17 @@ vi.mock("./daemon-runtime.js", () => ({ GATEWAY_DAEMON_RUNTIME_OPTIONS: [{ value: "node", label: "Node" }], })); -vi.mock("../daemon/service.js", () => ({ - resolveGatewayService: vi.fn(() => ({ - isLoaded: serviceIsLoaded, - install: serviceInstall, - })), -})); +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: vi.fn(() => ({ + isLoaded: serviceIsLoaded, + install: serviceInstall, + restart: serviceRestart, + })), + }; +}); vi.mock("./onboard-helpers.js", () => ({ guardCancel: (value: unknown) => value, @@ -60,8 +74,10 @@ const { maybeInstallDaemon } = await import("./configure.daemon.js"); describe("maybeInstallDaemon", () => { beforeEach(() => { vi.clearAllMocks(); + progressSetLabel.mockReset(); serviceIsLoaded.mockResolvedValue(false); serviceInstall.mockResolvedValue(undefined); + serviceRestart.mockResolvedValue({ outcome: "completed" }); loadConfig.mockReturnValue({}); resolveGatewayInstallToken.mockResolvedValue({ token: undefined, @@ -152,4 +168,19 @@ describe("maybeInstallDaemon", () => { expect(serviceInstall).toHaveBeenCalledTimes(1); }); + + it("shows restart scheduled when a loaded service defers restart handoff", async () => { + serviceIsLoaded.mockResolvedValue(true); + select.mockResolvedValueOnce("restart"); + serviceRestart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeInstallDaemon({ + runtime: { log: vi.fn(), error: vi.fn(), exit: vi.fn() }, + port: 18789, + }); + + expect(serviceRestart).toHaveBeenCalledTimes(1); + expect(serviceInstall).not.toHaveBeenCalled(); + expect(progressSetLabel).toHaveBeenLastCalledWith("Gateway service restart scheduled."); + }); }); diff --git a/src/commands/configure.daemon.ts b/src/commands/configure.daemon.ts index 4f943982a38..64272c9e2bc 100644 --- 
a/src/commands/configure.daemon.ts +++ b/src/commands/configure.daemon.ts @@ -1,6 +1,6 @@ import { withProgress } from "../cli/progress.js"; import { loadConfig } from "../config/config.js"; -import { resolveGatewayService } from "../daemon/service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "../daemon/service.js"; import { isNonFatalSystemdInstallProbeError } from "../daemon/systemd.js"; import type { RuntimeEnv } from "../runtime.js"; import { note } from "../terminal/note.js"; @@ -50,11 +50,13 @@ export async function maybeInstallDaemon(params: { { label: "Gateway service", indeterminate: true, delayMs: 0 }, async (progress) => { progress.setLabel("Restarting Gateway service…"); - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); - progress.setLabel("Gateway service restarted."); + progress.setLabel( + describeGatewayServiceRestart("Gateway", restartResult).progressMessage, + ); }, ); shouldCheckLinger = true; diff --git a/src/commands/doctor-gateway-daemon-flow.test.ts b/src/commands/doctor-gateway-daemon-flow.test.ts new file mode 100644 index 00000000000..02c0b885bb0 --- /dev/null +++ b/src/commands/doctor-gateway-daemon-flow.test.ts @@ -0,0 +1,194 @@ +import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; + +const service = vi.hoisted(() => ({ + isLoaded: vi.fn(), + readRuntime: vi.fn(), + restart: vi.fn(), + install: vi.fn(), + readCommand: vi.fn(), +})); +const note = vi.hoisted(() => vi.fn()); +const sleep = vi.hoisted(() => vi.fn(async () => {})); +const healthCommand = vi.hoisted(() => vi.fn(async () => {})); +const inspectPortUsage = vi.hoisted(() => vi.fn()); +const readLastGatewayErrorLine = vi.hoisted(() => vi.fn(async () => null)); + +vi.mock("../config/config.js", () => ({ + resolveGatewayPort: vi.fn(() => 18789), +})); + +vi.mock("../daemon/constants.js", () => ({ + resolveGatewayLaunchAgentLabel: vi.fn(() => 
"ai.openclaw.gateway"), + resolveNodeLaunchAgentLabel: vi.fn(() => "ai.openclaw.node"), +})); + +vi.mock("../daemon/diagnostics.js", () => ({ + readLastGatewayErrorLine, +})); + +vi.mock("../daemon/launchd.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isLaunchAgentListed: vi.fn(async () => false), + isLaunchAgentLoaded: vi.fn(async () => false), + launchAgentPlistExists: vi.fn(async () => false), + repairLaunchAgentBootstrap: vi.fn(async () => ({ ok: true })), + }; +}); + +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: () => service, + }; +}); + +vi.mock("../daemon/systemd-hints.js", () => ({ + renderSystemdUnavailableHints: vi.fn(() => []), +})); + +vi.mock("../daemon/systemd.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isSystemdUserServiceAvailable: vi.fn(async () => true), + }; +}); + +vi.mock("../infra/ports.js", () => ({ + inspectPortUsage, + formatPortDiagnostics: vi.fn(() => []), +})); + +vi.mock("../infra/wsl.js", () => ({ + isWSL: vi.fn(async () => false), +})); + +vi.mock("../terminal/note.js", () => ({ + note, +})); + +vi.mock("../utils.js", () => ({ + sleep, +})); + +vi.mock("./daemon-install-helpers.js", () => ({ + buildGatewayInstallPlan: vi.fn(), + gatewayInstallErrorHint: vi.fn(() => "hint"), +})); + +vi.mock("./doctor-format.js", () => ({ + buildGatewayRuntimeHints: vi.fn(() => []), + formatGatewayRuntimeSummary: vi.fn(() => null), +})); + +vi.mock("./gateway-install-token.js", () => ({ + resolveGatewayInstallToken: vi.fn(), +})); + +vi.mock("./health-format.js", () => ({ + formatHealthCheckFailure: vi.fn(() => "health failed"), +})); + +vi.mock("./health.js", () => ({ + healthCommand, +})); + +describe("maybeRepairGatewayDaemon", () => { + let maybeRepairGatewayDaemon: typeof 
import("./doctor-gateway-daemon-flow.js").maybeRepairGatewayDaemon; + const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + + beforeAll(async () => { + ({ maybeRepairGatewayDaemon } = await import("./doctor-gateway-daemon-flow.js")); + }); + + beforeEach(() => { + vi.clearAllMocks(); + service.isLoaded.mockResolvedValue(true); + service.readRuntime.mockResolvedValue({ status: "running" }); + service.restart.mockResolvedValue({ outcome: "completed" }); + inspectPortUsage.mockResolvedValue({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + }); + + afterEach(() => { + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", originalPlatformDescriptor); + } + }); + + function setPlatform(platform: NodeJS.Platform) { + if (!originalPlatformDescriptor) { + return; + } + Object.defineProperty(process, "platform", { + ...originalPlatformDescriptor, + value: platform, + }); + } + + function createPrompter(confirmImpl: (message: string) => boolean) { + return { + confirm: vi.fn(), + confirmRepair: vi.fn(), + confirmAggressive: vi.fn(), + confirmSkipInNonInteractive: vi.fn(async ({ message }: { message: string }) => + confirmImpl(message), + ), + select: vi.fn(), + shouldRepair: false, + shouldForce: false, + }; + } + + it("skips restart verification when a running service restart is only scheduled", async () => { + setPlatform("linux"); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeRepairGatewayDaemon({ + cfg: { gateway: {} }, + runtime: { log: vi.fn(), error: vi.fn(), exit: vi.fn() }, + prompter: createPrompter((message) => message === "Restart gateway service now?"), + options: { deep: false }, + gatewayDetailsMessage: "details", + healthOk: false, + }); + + expect(service.restart).toHaveBeenCalledTimes(1); + expect(note).toHaveBeenCalledWith( + "restart scheduled, gateway will restart momentarily", + "Gateway", + ); + expect(sleep).not.toHaveBeenCalled(); + 
expect(healthCommand).not.toHaveBeenCalled(); + }); + + it("skips start verification when a stopped service start is only scheduled", async () => { + setPlatform("linux"); + service.readRuntime.mockResolvedValue({ status: "stopped" }); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeRepairGatewayDaemon({ + cfg: { gateway: {} }, + runtime: { log: vi.fn(), error: vi.fn(), exit: vi.fn() }, + prompter: createPrompter((message) => message === "Start gateway service now?"), + options: { deep: false }, + gatewayDetailsMessage: "details", + healthOk: false, + }); + + expect(service.restart).toHaveBeenCalledTimes(1); + expect(note).toHaveBeenCalledWith( + "restart scheduled, gateway will restart momentarily", + "Gateway", + ); + expect(sleep).not.toHaveBeenCalled(); + expect(healthCommand).not.toHaveBeenCalled(); + }); +}); diff --git a/src/commands/doctor-gateway-daemon-flow.ts b/src/commands/doctor-gateway-daemon-flow.ts index 4fd8df3490b..c476efa615f 100644 --- a/src/commands/doctor-gateway-daemon-flow.ts +++ b/src/commands/doctor-gateway-daemon-flow.ts @@ -12,7 +12,7 @@ import { launchAgentPlistExists, repairLaunchAgentBootstrap, } from "../daemon/launchd.js"; -import { resolveGatewayService } from "../daemon/service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "../daemon/service.js"; import { renderSystemdUnavailableHints } from "../daemon/systemd-hints.js"; import { isSystemdUserServiceAvailable } from "../daemon/systemd.js"; import { formatPortDiagnostics, inspectPortUsage } from "../infra/ports.js"; @@ -235,11 +235,16 @@ export async function maybeRepairGatewayDaemon(params: { initialValue: true, }); if (start) { - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); - await sleep(1500); + const restartStatus = describeGatewayServiceRestart("Gateway", restartResult); + if (!restartStatus.scheduled) { + await sleep(1500); + } else { + 
note(restartStatus.message, "Gateway"); + } } } @@ -257,10 +262,15 @@ export async function maybeRepairGatewayDaemon(params: { initialValue: true, }); if (restart) { - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); + const restartStatus = describeGatewayServiceRestart("Gateway", restartResult); + if (restartStatus.scheduled) { + note(restartStatus.message, "Gateway"); + return; + } await sleep(1500); try { await healthCommand({ json: false, timeoutMs: 10_000 }, params.runtime); diff --git a/src/commands/ollama-setup.test.ts b/src/commands/ollama-setup.test.ts index 2313588f180..124254c53b2 100644 --- a/src/commands/ollama-setup.test.ts +++ b/src/commands/ollama-setup.test.ts @@ -30,6 +30,53 @@ function jsonResponse(body: unknown, status = 200): Response { }); } +function requestUrl(input: string | URL | Request): string { + if (typeof input === "string") { + return input; + } + if (input instanceof URL) { + return input.toString(); + } + return input.url; +} + +function requestBody(body: BodyInit | null | undefined): string { + return typeof body === "string" ? body : "{}"; +} + +function createOllamaFetchMock(params: { + tags?: string[]; + show?: Record; + meResponses?: Response[]; + pullResponse?: Response; + tagsError?: Error; +}) { + const meResponses = [...(params.meResponses ?? [])]; + return vi.fn(async (input: string | URL | Request, init?: RequestInit) => { + const url = requestUrl(input); + if (url.endsWith("/api/tags")) { + if (params.tagsError) { + throw params.tagsError; + } + return jsonResponse({ models: (params.tags ?? []).map((name) => ({ name })) }); + } + if (url.endsWith("/api/show")) { + const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + const contextWindow = body.name ? params.show?.[body.name] : undefined; + return contextWindow + ? 
jsonResponse({ model_info: { "llama.context_length": contextWindow } }) + : jsonResponse({}); + } + if (url.endsWith("/api/me")) { + return meResponses.shift() ?? jsonResponse({ username: "testuser" }); + } + if (url.endsWith("/api/pull")) { + return params.pullResponse ?? new Response('{"status":"success"}\n', { status: 200 }); + } + throw new Error(`Unexpected fetch: ${url}`); + }); +} + describe("ollama setup", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -45,9 +92,7 @@ describe("ollama setup", () => { note: vi.fn(async () => undefined), } as unknown as WizardPrompter; - const fetchMock = vi - .fn() - .mockResolvedValueOnce(jsonResponse({ models: [{ name: "llama3:8b" }] })); + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); const result = await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -62,10 +107,7 @@ describe("ollama setup", () => { note: vi.fn(async () => undefined), } as unknown as WizardPrompter; - const fetchMock = vi - .fn() - .mockResolvedValueOnce(jsonResponse({ models: [{ name: "llama3:8b" }] })) - .mockResolvedValueOnce(jsonResponse({ username: "testuser" })); + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); const result = await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -80,11 +122,7 @@ describe("ollama setup", () => { note: vi.fn(async () => undefined), } as unknown as WizardPrompter; - const fetchMock = vi - .fn() - .mockResolvedValueOnce( - jsonResponse({ models: [{ name: "llama3:8b" }, { name: "glm-4.7-flash" }] }), - ); + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b", "glm-4.7-flash"] }); vi.stubGlobal("fetch", fetchMock); const result = await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -103,13 +141,13 @@ describe("ollama setup", () => { note: vi.fn(async () => undefined), } as unknown as WizardPrompter; - const fetchMock = vi - .fn() - .mockResolvedValueOnce(jsonResponse({ models: [{ name: 
"llama3:8b" }] })) - .mockResolvedValueOnce( + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + meResponses: [ jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), - ) - .mockResolvedValueOnce(jsonResponse({ username: "testuser" })); + jsonResponse({ username: "testuser" }), + ], + }); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -127,13 +165,13 @@ describe("ollama setup", () => { note: vi.fn(async () => undefined), } as unknown as WizardPrompter; - const fetchMock = vi - .fn() - .mockResolvedValueOnce(jsonResponse({ models: [{ name: "llama3:8b" }] })) - .mockResolvedValueOnce( + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + meResponses: [ jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), - ) - .mockResolvedValueOnce(jsonResponse({ username: "testuser" })); + jsonResponse({ username: "testuser" }), + ], + }); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -148,15 +186,16 @@ describe("ollama setup", () => { note: vi.fn(async () => undefined), } as unknown as WizardPrompter; - const fetchMock = vi - .fn() - .mockResolvedValueOnce(jsonResponse({ models: [{ name: "llama3:8b" }] })); + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter }); - expect(fetchMock).toHaveBeenCalledTimes(1); - expect(fetchMock.mock.calls[0][0]).toContain("/api/tags"); + expect(fetchMock).toHaveBeenCalledTimes(2); + expect(fetchMock.mock.calls[0]?.[0]).toContain("/api/tags"); + expect(fetchMock.mock.calls.some((call) => requestUrl(call[0]).includes("/api/me"))).toBe( + false, + ); }); it("suggested models appear first in model list (cloud+local)", async () => { @@ -166,14 +205,9 @@ describe("ollama setup", () => { note: vi.fn(async () => undefined), } as unknown as WizardPrompter; - const 
fetchMock = vi - .fn() - .mockResolvedValueOnce( - jsonResponse({ - models: [{ name: "llama3:8b" }, { name: "glm-4.7-flash" }, { name: "deepseek-r1:14b" }], - }), - ) - .mockResolvedValueOnce(jsonResponse({ username: "testuser" })); + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b", "glm-4.7-flash", "deepseek-r1:14b"], + }); vi.stubGlobal("fetch", fetchMock); const result = await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -189,6 +223,27 @@ describe("ollama setup", () => { ]); }); + it("uses /api/show context windows when building Ollama model configs", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("local"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + show: { "llama3:8b": 65536 }, + }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + const model = result.config.models?.providers?.ollama?.models?.find( + (m) => m.id === "llama3:8b", + ); + + expect(model?.contextWindow).toBe(65536); + }); + describe("ensureOllamaModelPulled", () => { it("pulls model when not available locally", async () => { const progress = { update: vi.fn(), stop: vi.fn() }; @@ -196,12 +251,10 @@ describe("ollama setup", () => { progress: vi.fn(() => progress), } as unknown as WizardPrompter; - const fetchMock = vi - .fn() - // /api/tags — model not present - .mockResolvedValueOnce(jsonResponse({ models: [{ name: "llama3:8b" }] })) - // /api/pull - .mockResolvedValueOnce(new Response('{"status":"success"}\n', { status: 200 })); + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + pullResponse: new Response('{"status":"success"}\n', { status: 200 }), + }); vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ @@ -219,9 +272,7 @@ describe("ollama setup", () => { it("skips pull when model is 
already available", async () => { const prompter = {} as unknown as WizardPrompter; - const fetchMock = vi - .fn() - .mockResolvedValueOnce(jsonResponse({ models: [{ name: "glm-4.7-flash" }] })); + const fetchMock = createOllamaFetchMock({ tags: ["glm-4.7-flash"] }); vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ @@ -268,10 +319,10 @@ describe("ollama setup", () => { }); it("uses discovered model when requested non-interactive download fails", async () => { - const fetchMock = vi - .fn() - .mockResolvedValueOnce(jsonResponse({ models: [{ name: "qwen2.5-coder:7b" }] })) - .mockResolvedValueOnce(new Response('{"error":"disk full"}\n', { status: 200 })); + const fetchMock = createOllamaFetchMock({ + tags: ["qwen2.5-coder:7b"], + pullResponse: new Response('{"error":"disk full"}\n', { status: 200 }), + }); vi.stubGlobal("fetch", fetchMock); const runtime = { @@ -306,10 +357,10 @@ describe("ollama setup", () => { }); it("normalizes ollama/ prefix in non-interactive custom model download", async () => { - const fetchMock = vi - .fn() - .mockResolvedValueOnce(jsonResponse({ models: [] })) - .mockResolvedValueOnce(new Response('{"status":"success"}\n', { status: 200 })); + const fetchMock = createOllamaFetchMock({ + tags: [], + pullResponse: new Response('{"status":"success"}\n', { status: 200 }), + }); vi.stubGlobal("fetch", fetchMock); const runtime = { @@ -328,14 +379,14 @@ describe("ollama setup", () => { }); const pullRequest = fetchMock.mock.calls[1]?.[1]; - expect(JSON.parse(String(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); + expect(JSON.parse(requestBody(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); expect(result.agents?.defaults?.model).toEqual( expect.objectContaining({ primary: "ollama/llama3.2:latest" }), ); }); it("accepts cloud models in non-interactive mode without pulling", async () => { - const fetchMock = vi.fn().mockResolvedValueOnce(jsonResponse({ models: [] })); + const fetchMock = 
createOllamaFetchMock({ tags: [] }); vi.stubGlobal("fetch", fetchMock); const runtime = { @@ -363,7 +414,9 @@ describe("ollama setup", () => { }); it("exits when Ollama is unreachable", async () => { - const fetchMock = vi.fn().mockRejectedValueOnce(new Error("connect ECONNREFUSED")); + const fetchMock = createOllamaFetchMock({ + tagsError: new Error("connect ECONNREFUSED"), + }); vi.stubGlobal("fetch", fetchMock); const runtime = { diff --git a/src/commands/ollama-setup.ts b/src/commands/ollama-setup.ts index 7af3e18cff1..f6aec85dafc 100644 --- a/src/commands/ollama-setup.ts +++ b/src/commands/ollama-setup.ts @@ -2,8 +2,10 @@ import { upsertAuthProfileWithLock } from "../agents/auth-profiles.js"; import { OLLAMA_DEFAULT_BASE_URL, buildOllamaModelDefinition, + enrichOllamaModelsWithContext, fetchOllamaModels, resolveOllamaApiBase, + type OllamaModelWithContext, } from "../agents/ollama-models.js"; import type { OpenClawConfig } from "../config/config.js"; import type { RuntimeEnv } from "../runtime.js"; @@ -239,14 +241,20 @@ async function pullOllamaModelNonInteractive( return true; } -function buildOllamaModelsConfig(modelNames: string[]) { - return modelNames.map((name) => buildOllamaModelDefinition(name)); +function buildOllamaModelsConfig( + modelNames: string[], + discoveredModelsByName?: Map, +) { + return modelNames.map((name) => + buildOllamaModelDefinition(name, discoveredModelsByName?.get(name)?.contextWindow), + ); } function applyOllamaProviderConfig( cfg: OpenClawConfig, baseUrl: string, modelNames: string[], + discoveredModelsByName?: Map, ): OpenClawConfig { return { ...cfg, @@ -259,7 +267,7 @@ function applyOllamaProviderConfig( baseUrl, api: "ollama", apiKey: "OLLAMA_API_KEY", // pragma: allowlist secret - models: buildOllamaModelsConfig(modelNames), + models: buildOllamaModelsConfig(modelNames, discoveredModelsByName), }, }, }, @@ -299,7 +307,6 @@ export async function promptAndConfigureOllama(params: { // 2. 
Check reachability const { reachable, models } = await fetchOllamaModels(baseUrl); - const modelNames = models.map((m) => m.name); if (!reachable) { await prompter.note( @@ -314,6 +321,10 @@ export async function promptAndConfigureOllama(params: { throw new WizardCancelledError("Ollama not reachable"); } + const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50)); + const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model])); + const modelNames = models.map((m) => m.name); + // 3. Mode selection const mode = (await prompter.select({ message: "Ollama mode", @@ -387,7 +398,12 @@ export async function promptAndConfigureOllama(params: { await storeOllamaCredential(params.agentDir); const defaultModelId = suggestedModels[0] ?? OLLAMA_DEFAULT_MODEL; - const config = applyOllamaProviderConfig(params.cfg, baseUrl, orderedModelNames); + const config = applyOllamaProviderConfig( + params.cfg, + baseUrl, + orderedModelNames, + discoveredModelsByName, + ); return { config, defaultModelId }; } @@ -405,7 +421,6 @@ export async function configureOllamaNonInteractive(params: { const baseUrl = resolveOllamaApiBase(configuredBaseUrl); const { reachable, models } = await fetchOllamaModels(baseUrl); - const modelNames = models.map((m) => m.name); const explicitModel = normalizeOllamaModelName(opts.customModelId); if (!reachable) { @@ -421,6 +436,10 @@ export async function configureOllamaNonInteractive(params: { await storeOllamaCredential(); + const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50)); + const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model])); + const modelNames = models.map((m) => m.name); + // Apply local suggested model ordering. 
const suggestedModels = OLLAMA_SUGGESTED_MODELS_LOCAL; const orderedModelNames = [ @@ -478,7 +497,12 @@ export async function configureOllamaNonInteractive(params: { } } - const config = applyOllamaProviderConfig(params.nextConfig, baseUrl, allModelNames); + const config = applyOllamaProviderConfig( + params.nextConfig, + baseUrl, + allModelNames, + discoveredModelsByName, + ); const modelRef = `ollama/${defaultModelId}`; runtime.log(`Default Ollama model: ${defaultModelId}`); return applyAgentDefaultModelPrimary(config, modelRef); diff --git a/src/commands/onboard-custom.test.ts b/src/commands/onboard-custom.test.ts index b04f7bc08ab..bc1a1927bdc 100644 --- a/src/commands/onboard-custom.test.ts +++ b/src/commands/onboard-custom.test.ts @@ -1,5 +1,6 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js"; +import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-models.js"; import type { OpenClawConfig } from "../config/config.js"; import { defaultRuntime } from "../runtime.js"; import { @@ -133,6 +134,23 @@ describe("promptCustomApiConfig", () => { expect(result.config.agents?.defaults?.models?.["custom/llama3"]?.alias).toBe("local"); }); + it("defaults custom onboarding to the native Ollama base URL", async () => { + const prompter = createTestPrompter({ + text: ["http://localhost:11434", "", "llama3", "custom", ""], + select: ["plaintext", "openai"], + }); + stubFetchSequence([{ ok: true }]); + + await runPromptCustomApi(prompter); + + expect(prompter.text).toHaveBeenCalledWith( + expect.objectContaining({ + message: "API Base URL", + initialValue: OLLAMA_DEFAULT_BASE_URL, + }), + ); + }); + it("retries when verification fails", async () => { const prompter = createTestPrompter({ text: ["http://localhost:11434/v1", "", "bad-model", "good-model", "custom", ""], diff --git a/src/commands/onboard-custom.ts b/src/commands/onboard-custom.ts index a05922aafe0..874018a74ea 
100644 --- a/src/commands/onboard-custom.ts +++ b/src/commands/onboard-custom.ts @@ -1,6 +1,7 @@ import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js"; import { DEFAULT_PROVIDER } from "../agents/defaults.js"; import { buildModelAliasIndex, modelKey } from "../agents/model-selection.js"; +import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-models.js"; import type { OpenClawConfig } from "../config/config.js"; import type { ModelProviderConfig } from "../config/types.models.js"; import { isSecretRef, type SecretInput } from "../config/types.secrets.js"; @@ -16,7 +17,6 @@ import { applyPrimaryModel } from "./model-picker.js"; import { normalizeAlias } from "./models/shared.js"; import type { SecretInputMode } from "./onboard-types.js"; -const DEFAULT_OLLAMA_BASE_URL = "http://127.0.0.1:11434/v1"; const DEFAULT_CONTEXT_WINDOW = CONTEXT_WINDOW_HARD_MIN_TOKENS; const DEFAULT_MAX_TOKENS = 4096; const VERIFY_TIMEOUT_MS = 30_000; @@ -389,7 +389,7 @@ async function promptBaseUrlAndKey(params: { }): Promise<{ baseUrl: string; apiKey?: SecretInput; resolvedApiKey: string }> { const baseUrlInput = await params.prompter.text({ message: "API Base URL", - initialValue: params.initialBaseUrl ?? DEFAULT_OLLAMA_BASE_URL, + initialValue: params.initialBaseUrl ?? 
OLLAMA_DEFAULT_BASE_URL, placeholder: "https://api.example.com/v1", validate: (val) => { try { diff --git a/src/commands/status.service-summary.test.ts b/src/commands/status.service-summary.test.ts index fb51d8036e4..f1a688ea092 100644 --- a/src/commands/status.service-summary.test.ts +++ b/src/commands/status.service-summary.test.ts @@ -10,7 +10,7 @@ function createService(overrides: Partial): GatewayService { install: vi.fn(async () => {}), uninstall: vi.fn(async () => {}), stop: vi.fn(async () => {}), - restart: vi.fn(async () => {}), + restart: vi.fn(async () => ({ outcome: "completed" as const })), isLoaded: vi.fn(async () => false), readCommand: vi.fn(async () => null), readRuntime: vi.fn(async () => ({ status: "stopped" as const })), diff --git a/src/commands/status.summary.redaction.test.ts b/src/commands/status.summary.redaction.test.ts index 02eaecbcb35..26e28887560 100644 --- a/src/commands/status.summary.redaction.test.ts +++ b/src/commands/status.summary.redaction.test.ts @@ -22,6 +22,7 @@ function createRecentSessionRow() { describe("redactSensitiveStatusSummary", () => { it("removes sensitive session and path details while preserving summary structure", () => { const input: StatusSummary = { + runtimeVersion: "2026.3.8", heartbeat: { defaultAgentId: "main", agents: [{ agentId: "main", enabled: true, every: "5m", everyMs: 300_000 }], @@ -50,6 +51,7 @@ describe("redactSensitiveStatusSummary", () => { expect(redacted.sessions.recent).toEqual([]); expect(redacted.sessions.byAgent[0]?.path).toBe("[redacted]"); expect(redacted.sessions.byAgent[0]?.recent).toEqual([]); + expect(redacted.runtimeVersion).toBe("2026.3.8"); expect(redacted.heartbeat).toEqual(input.heartbeat); expect(redacted.channelSummary).toEqual(input.channelSummary); }); diff --git a/src/commands/status.summary.test.ts b/src/commands/status.summary.test.ts new file mode 100644 index 00000000000..addda823a23 --- /dev/null +++ b/src/commands/status.summary.test.ts @@ -0,0 +1,85 @@ +import { 
beforeEach, describe, expect, it, vi } from "vitest"; + +vi.mock("../agents/context.js", () => ({ + resolveContextTokensForModel: vi.fn(() => 200_000), +})); + +vi.mock("../agents/defaults.js", () => ({ + DEFAULT_CONTEXT_TOKENS: 200_000, + DEFAULT_MODEL: "gpt-5.2", + DEFAULT_PROVIDER: "openai", +})); + +vi.mock("../agents/model-selection.js", () => ({ + resolveConfiguredModelRef: vi.fn(() => ({ + provider: "openai", + model: "gpt-5.2", + })), +})); + +vi.mock("../config/config.js", () => ({ + loadConfig: vi.fn(() => ({})), +})); + +vi.mock("../config/sessions.js", () => ({ + loadSessionStore: vi.fn(() => ({})), + resolveFreshSessionTotalTokens: vi.fn(() => undefined), + resolveMainSessionKey: vi.fn(() => "main"), + resolveStorePath: vi.fn(() => "/tmp/sessions.json"), +})); + +vi.mock("../gateway/session-utils.js", () => ({ + classifySessionKey: vi.fn(() => "direct"), + listAgentsForGateway: vi.fn(() => ({ + defaultId: "main", + agents: [{ id: "main" }], + })), + resolveSessionModelRef: vi.fn(() => ({ + provider: "openai", + model: "gpt-5.2", + })), +})); + +vi.mock("../infra/channel-summary.js", () => ({ + buildChannelSummary: vi.fn(async () => ["ok"]), +})); + +vi.mock("../infra/heartbeat-runner.js", () => ({ + resolveHeartbeatSummaryForAgent: vi.fn(() => ({ + enabled: true, + every: "5m", + everyMs: 300_000, + })), +})); + +vi.mock("../infra/system-events.js", () => ({ + peekSystemEvents: vi.fn(() => []), +})); + +vi.mock("../routing/session-key.js", () => ({ + parseAgentSessionKey: vi.fn(() => null), +})); + +vi.mock("../version.js", () => ({ + resolveRuntimeServiceVersion: vi.fn(() => "2026.3.8"), +})); + +vi.mock("./status.link-channel.js", () => ({ + resolveLinkChannelContext: vi.fn(async () => undefined), +})); + +describe("getStatusSummary", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("includes runtimeVersion in the status payload", async () => { + const { getStatusSummary } = await import("./status.summary.js"); + + const summary = 
await getStatusSummary(); + + expect(summary.runtimeVersion).toBe("2026.3.8"); + expect(summary.heartbeat.defaultAgentId).toBe("main"); + expect(summary.channelSummary).toEqual(["ok"]); + }); +}); diff --git a/src/commands/status.summary.ts b/src/commands/status.summary.ts index 3a71464973f..79a51f0d9d3 100644 --- a/src/commands/status.summary.ts +++ b/src/commands/status.summary.ts @@ -19,6 +19,7 @@ import { buildChannelSummary } from "../infra/channel-summary.js"; import { resolveHeartbeatSummaryForAgent } from "../infra/heartbeat-runner.js"; import { peekSystemEvents } from "../infra/system-events.js"; import { parseAgentSessionKey } from "../routing/session-key.js"; +import { resolveRuntimeServiceVersion } from "../version.js"; import { resolveLinkChannelContext } from "./status.link-channel.js"; import type { HeartbeatStatus, SessionStatus, StatusSummary } from "./status.types.js"; @@ -210,6 +211,7 @@ export async function getStatusSummary( const totalSessions = allSessions.length; const summary: StatusSummary = { + runtimeVersion: resolveRuntimeServiceVersion(process.env), linkChannel: linkContext ? 
{ id: linkContext.plugin.id, diff --git a/src/commands/status.types.ts b/src/commands/status.types.ts index a3e0a5ca8e2..ec157b3488a 100644 --- a/src/commands/status.types.ts +++ b/src/commands/status.types.ts @@ -34,6 +34,7 @@ export type HeartbeatStatus = { }; export type StatusSummary = { + runtimeVersion?: string | null; linkChannel?: { id: ChannelId; label: string; diff --git a/src/config/config.discord.test.ts b/src/config/config.discord.test.ts index 8afde31b9e3..0bf5484dbe3 100644 --- a/src/config/config.discord.test.ts +++ b/src/config/config.discord.test.ts @@ -36,7 +36,7 @@ describe("config discord", () => { requireMention: false, users: ["steipete"], channels: { - general: { allow: true }, + general: { allow: true, autoThread: true }, }, }, }, @@ -54,6 +54,7 @@ describe("config discord", () => { expect(cfg.channels?.discord?.actions?.channels).toBe(true); expect(cfg.channels?.discord?.guilds?.["123"]?.slug).toBe("friends-of-openclaw"); expect(cfg.channels?.discord?.guilds?.["123"]?.channels?.general?.allow).toBe(true); + expect(cfg.channels?.discord?.guilds?.["123"]?.channels?.general?.autoThread).toBe(true); }, ); }); diff --git a/src/config/config.plugin-validation.test.ts b/src/config/config.plugin-validation.test.ts index 02eab6789ea..464a5f37ced 100644 --- a/src/config/config.plugin-validation.test.ts +++ b/src/config/config.plugin-validation.test.ts @@ -38,12 +38,15 @@ describe("config plugin validation", () => { let enumPluginDir = ""; let bluebubblesPluginDir = ""; let voiceCallSchemaPluginDir = ""; - const envSnapshot = { - OPENCLAW_STATE_DIR: process.env.OPENCLAW_STATE_DIR, - OPENCLAW_PLUGIN_MANIFEST_CACHE_MS: process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS, - }; + const suiteEnv = () => + ({ + ...process.env, + OPENCLAW_STATE_DIR: path.join(suiteHome, ".openclaw"), + OPENCLAW_PLUGIN_MANIFEST_CACHE_MS: "10000", + }) satisfies NodeJS.ProcessEnv; - const validateInSuite = (raw: unknown) => validateConfigObjectWithPlugins(raw); + const 
validateInSuite = (raw: unknown) => + validateConfigObjectWithPlugins(raw, { env: suiteEnv() }); beforeAll(async () => { fixtureRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-config-plugin-validation-")); @@ -102,8 +105,6 @@ describe("config plugin validation", () => { id: "voice-call-schema-fixture", schema: voiceCallManifest.configSchema, }); - process.env.OPENCLAW_STATE_DIR = path.join(suiteHome, ".openclaw"); - process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS = "10000"; clearPluginManifestRegistryCache(); // Warm the plugin manifest cache once so path-based validations can reuse // parsed manifests across test cases. @@ -118,16 +119,6 @@ describe("config plugin validation", () => { afterAll(async () => { await fs.rm(fixtureRoot, { recursive: true, force: true }); clearPluginManifestRegistryCache(); - if (envSnapshot.OPENCLAW_STATE_DIR === undefined) { - delete process.env.OPENCLAW_STATE_DIR; - } else { - process.env.OPENCLAW_STATE_DIR = envSnapshot.OPENCLAW_STATE_DIR; - } - if (envSnapshot.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS === undefined) { - delete process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS; - } else { - process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS = envSnapshot.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS; - } }); it("reports missing plugin refs across load paths, entries, and allowlist surfaces", async () => { @@ -279,6 +270,31 @@ describe("config plugin validation", () => { expect(res.ok).toBe(true); }); + it("accepts voice-call OpenAI TTS speed, instructions, and baseUrl config fields", async () => { + const res = validateInSuite({ + agents: { list: [{ id: "pi" }] }, + plugins: { + enabled: true, + load: { paths: [voiceCallSchemaPluginDir] }, + entries: { + "voice-call-schema-fixture": { + config: { + tts: { + openai: { + baseUrl: "http://localhost:8880/v1", + voice: "alloy", + speed: 1.5, + instructions: "Speak in a cheerful tone", + }, + }, + }, + }, + }, + }, + }); + expect(res.ok).toBe(true); + }); + it("accepts known plugin ids and valid 
channel/heartbeat enums", async () => { const res = validateInSuite({ agents: { diff --git a/src/config/config.schema-regressions.test.ts b/src/config/config.schema-regressions.test.ts index 4125cb1b3d4..3e605e06c35 100644 --- a/src/config/config.schema-regressions.test.ts +++ b/src/config/config.schema-regressions.test.ts @@ -184,4 +184,31 @@ describe("config schema regressions", () => { expect(res.ok).toBe(false); }); + + it("accepts signal accountUuid for loop protection", () => { + const res = validateConfigObject({ + channels: { + signal: { + accountUuid: "a1b2c3d4-e5f6-7890-abcd-ef1234567890", + }, + }, + }); + + expect(res.ok).toBe(true); + }); + + it("accepts telegram actions editMessage and createForumTopic", () => { + const res = validateConfigObject({ + channels: { + telegram: { + actions: { + editMessage: true, + createForumTopic: false, + }, + }, + }, + }); + + expect(res.ok).toBe(true); + }); }); diff --git a/src/config/io.ts b/src/config/io.ts index 2b542bba755..fba17f253aa 100644 --- a/src/config/io.ts +++ b/src/config/io.ts @@ -164,6 +164,32 @@ function hashConfigRaw(raw: string | null): string { .digest("hex"); } +async function tightenStateDirPermissionsIfNeeded(params: { + configPath: string; + env: NodeJS.ProcessEnv; + homedir: () => string; + fsModule: typeof fs; +}): Promise { + if (process.platform === "win32") { + return; + } + const stateDir = resolveStateDir(params.env, params.homedir); + const configDir = path.dirname(params.configPath); + if (path.resolve(configDir) !== path.resolve(stateDir)) { + return; + } + try { + const stat = await params.fsModule.promises.stat(configDir); + const mode = stat.mode & 0o777; + if ((mode & 0o077) === 0) { + return; + } + await params.fsModule.promises.chmod(configDir, 0o700); + } catch { + // Best-effort hardening only; callers still need the config write to proceed. 
+ } +} + function formatConfigValidationFailure(pathLabel: string, issueMessage: string): string { const match = issueMessage.match(OPEN_DM_POLICY_ALLOW_FROM_RE); const policyPath = match?.groups?.policyPath?.trim(); @@ -1136,6 +1162,12 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) { const dir = path.dirname(configPath); await deps.fs.promises.mkdir(dir, { recursive: true, mode: 0o700 }); + await tightenStateDirPermissionsIfNeeded({ + configPath, + env: deps.env, + homedir: deps.homedir, + fsModule: deps.fs, + }); const outputConfigBase = envRefMap && changedPaths ? (restoreEnvRefsFromMap(cfgToWrite, "", envRefMap, changedPaths) as OpenClawConfig) diff --git a/src/config/io.write-config.test.ts b/src/config/io.write-config.test.ts index 6b73b9fbd30..68709725d83 100644 --- a/src/config/io.write-config.test.ts +++ b/src/config/io.write-config.test.ts @@ -142,6 +142,28 @@ describe("config io write", () => { }); }); + it.runIf(process.platform !== "win32")( + "tightens world-writable state dir when writing the default config", + async () => { + await withSuiteHome(async (home) => { + const stateDir = path.join(home, ".openclaw"); + await fs.mkdir(stateDir, { recursive: true, mode: 0o777 }); + await fs.chmod(stateDir, 0o777); + + const io = createConfigIO({ + env: {} as NodeJS.ProcessEnv, + homedir: () => home, + logger: silentLogger, + }); + + await io.writeConfigFile({ gateway: { mode: "local" } }); + + const stat = await fs.stat(stateDir); + expect(stat.mode & 0o777).toBe(0o700); + }); + }, + ); + it('shows actionable guidance for dmPolicy="open" without wildcard allowFrom', async () => { await withSuiteHome(async (home) => { const io = createConfigIO({ diff --git a/src/config/schema.help.quality.test.ts b/src/config/schema.help.quality.test.ts index 04d5200bfbb..965eed0e55d 100644 --- a/src/config/schema.help.quality.test.ts +++ b/src/config/schema.help.quality.test.ts @@ -72,6 +72,10 @@ const TARGET_KEYS = [ 
"agents.defaults.memorySearch.fallback", "agents.defaults.memorySearch.sources", "agents.defaults.memorySearch.extraPaths", + "agents.defaults.memorySearch.multimodal", + "agents.defaults.memorySearch.multimodal.enabled", + "agents.defaults.memorySearch.multimodal.modalities", + "agents.defaults.memorySearch.multimodal.maxFileBytes", "agents.defaults.memorySearch.experimental.sessionMemory", "agents.defaults.memorySearch.remote.baseUrl", "agents.defaults.memorySearch.remote.apiKey", @@ -83,6 +87,7 @@ const TARGET_KEYS = [ "agents.defaults.memorySearch.remote.batch.timeoutMinutes", "agents.defaults.memorySearch.local.modelPath", "agents.defaults.memorySearch.store.path", + "agents.defaults.memorySearch.outputDimensionality", "agents.defaults.memorySearch.store.vector.enabled", "agents.defaults.memorySearch.store.vector.extensionPath", "agents.defaults.memorySearch.query.hybrid.enabled", diff --git a/src/config/schema.help.ts b/src/config/schema.help.ts index 908829cbf33..3db7f40fe73 100644 --- a/src/config/schema.help.ts +++ b/src/config/schema.help.ts @@ -778,13 +778,23 @@ export const FIELD_HELP: Record = { "agents.defaults.memorySearch.sources": 'Chooses which sources are indexed: "memory" reads MEMORY.md + memory files, and "sessions" includes transcript history. Keep ["memory"] unless you need recall from prior chat transcripts.', "agents.defaults.memorySearch.extraPaths": - "Adds extra directories or .md files to the memory index beyond default memory files. Use this when key reference docs live elsewhere in your repo; keep paths small and intentional to avoid noisy recall.", + "Adds extra directories or .md files to the memory index beyond default memory files. 
Use this when key reference docs live elsewhere in your repo; when multimodal memory is enabled, matching image/audio files under these paths are also eligible for indexing.", + "agents.defaults.memorySearch.multimodal": + 'Optional multimodal memory settings for indexing image and audio files from configured extra paths. Keep this off unless your embedding model explicitly supports cross-modal embeddings, and set `memorySearch.fallback` to "none" while it is enabled. Matching files are uploaded to the configured remote embedding provider during indexing.', + "agents.defaults.memorySearch.multimodal.enabled": + "Enables image/audio memory indexing from extraPaths. This currently requires Gemini embedding-2, keeps the default memory roots Markdown-only, disables memory-search fallback providers, and uploads matching binary content to the configured remote embedding provider.", + "agents.defaults.memorySearch.multimodal.modalities": + 'Selects which multimodal file types are indexed from extraPaths: "image", "audio", or "all". Keep this narrow to avoid indexing large binary corpora unintentionally.', + "agents.defaults.memorySearch.multimodal.maxFileBytes": + "Sets the maximum bytes allowed per multimodal file before it is skipped during memory indexing. Use this to cap upload cost and indexing latency, or raise it for short high-quality audio clips.", "agents.defaults.memorySearch.experimental.sessionMemory": "Indexes session transcripts into memory search so responses can reference prior chat turns. Keep this off unless transcript recall is needed, because indexing cost and storage usage both increase.", "agents.defaults.memorySearch.provider": 'Selects the embedding backend used to build/query memory vectors: "openai", "gemini", "voyage", "mistral", "ollama", or "local". 
Keep your most reliable provider here and configure fallback for resilience.', "agents.defaults.memorySearch.model": "Embedding model override used by the selected memory provider when a non-default model is required. Set this only when you need explicit recall quality/cost tuning beyond provider defaults.", + "agents.defaults.memorySearch.outputDimensionality": + "Gemini embedding-2 only: chooses the output vector size for memory embeddings. Use 768, 1536, or 3072 (default), and expect a full reindex when you change it because stored vector dimensions must stay consistent.", "agents.defaults.memorySearch.remote.baseUrl": "Overrides the embedding API endpoint, such as an OpenAI-compatible proxy or custom Gemini base URL. Use this only when routing through your own gateway or vendor endpoint; keep provider defaults otherwise.", "agents.defaults.memorySearch.remote.apiKey": diff --git a/src/config/schema.labels.ts b/src/config/schema.labels.ts index c643cf91cd9..01b8d0f57dd 100644 --- a/src/config/schema.labels.ts +++ b/src/config/schema.labels.ts @@ -319,6 +319,10 @@ export const FIELD_LABELS: Record = { "agents.defaults.memorySearch.enabled": "Enable Memory Search", "agents.defaults.memorySearch.sources": "Memory Search Sources", "agents.defaults.memorySearch.extraPaths": "Extra Memory Paths", + "agents.defaults.memorySearch.multimodal": "Memory Search Multimodal", + "agents.defaults.memorySearch.multimodal.enabled": "Enable Memory Search Multimodal", + "agents.defaults.memorySearch.multimodal.modalities": "Memory Search Multimodal Modalities", + "agents.defaults.memorySearch.multimodal.maxFileBytes": "Memory Search Multimodal Max File Bytes", "agents.defaults.memorySearch.experimental.sessionMemory": "Memory Search Session Index (Experimental)", "agents.defaults.memorySearch.provider": "Memory Search Provider", @@ -331,6 +335,7 @@ export const FIELD_LABELS: Record = { "agents.defaults.memorySearch.remote.batch.pollIntervalMs": "Remote Batch Poll Interval (ms)", 
"agents.defaults.memorySearch.remote.batch.timeoutMinutes": "Remote Batch Timeout (min)", "agents.defaults.memorySearch.model": "Memory Search Model", + "agents.defaults.memorySearch.outputDimensionality": "Memory Search Output Dimensionality", "agents.defaults.memorySearch.fallback": "Memory Search Fallback", "agents.defaults.memorySearch.local.modelPath": "Local Embedding Model Path", "agents.defaults.memorySearch.store.path": "Memory Search Index Path", diff --git a/src/config/sessions/types.ts b/src/config/sessions/types.ts index 817f9efc3d8..0ae44b2db7a 100644 --- a/src/config/sessions/types.ts +++ b/src/config/sessions/types.ts @@ -78,6 +78,8 @@ export type SessionEntry = { sessionFile?: string; /** Parent session key that spawned this session (used for sandbox session-tool scoping). */ spawnedBy?: string; + /** Workspace inherited by spawned sessions and reused on later turns for the same child session. */ + spawnedWorkspaceDir?: string; /** True after a thread/topic session has been forked from its parent transcript once. */ forkedFromParent?: boolean; /** Subagent spawn depth (0 = main, 1 = sub-agent, 2 = sub-sub-agent). */ diff --git a/src/config/types.discord.ts b/src/config/types.discord.ts index 2d2e674f6b6..2d005dd7d7a 100644 --- a/src/config/types.discord.ts +++ b/src/config/types.discord.ts @@ -52,6 +52,8 @@ export type DiscordGuildChannelConfig = { systemPrompt?: string; /** If false, omit thread starter context for this channel (default: true). */ includeThreadStarter?: boolean; + /** If true, automatically create a thread for each new message in this channel. 
*/ + autoThread?: boolean; }; export type DiscordReactionNotificationMode = "off" | "own" | "all" | "allowlist"; diff --git a/src/config/types.gateway.ts b/src/config/types.gateway.ts index 58b061682a1..422bbc82eed 100644 --- a/src/config/types.gateway.ts +++ b/src/config/types.gateway.ts @@ -186,6 +186,8 @@ export type GatewayTailscaleConfig = { }; export type GatewayRemoteConfig = { + /** Whether remote gateway surfaces are enabled. Default: true when absent. */ + enabled?: boolean; /** Remote Gateway WebSocket URL (ws:// or wss://). */ url?: string; /** Transport for macOS remote connections (ssh tunnel or direct WS). */ diff --git a/src/config/types.tools.ts b/src/config/types.tools.ts index e352f858c39..aaf6cb33e79 100644 --- a/src/config/types.tools.ts +++ b/src/config/types.tools.ts @@ -319,6 +319,15 @@ export type MemorySearchConfig = { sources?: Array<"memory" | "sessions">; /** Extra paths to include in memory search (directories or .md files). */ extraPaths?: string[]; + /** Optional multimodal file indexing for selected extra paths. */ + multimodal?: { + /** Enable image/audio embeddings from extraPaths. */ + enabled?: boolean; + /** Which non-text file types to index. */ + modalities?: Array<"image" | "audio" | "all">; + /** Max bytes allowed per multimodal file before it is skipped. */ + maxFileBytes?: number; + }; /** Experimental memory search settings. */ experimental?: { /** Enable session transcript indexing (experimental, default: false). */ @@ -347,6 +356,11 @@ export type MemorySearchConfig = { fallback?: "openai" | "gemini" | "local" | "voyage" | "mistral" | "ollama" | "none"; /** Embedding model id (remote) or alias (local). */ model?: string; + /** + * Gemini embedding-2 models only: output vector dimensions. + * Supported values today are 768, 1536, and 3072. + */ + outputDimensionality?: number; /** Local embedding settings (node-llama-cpp). */ local?: { /** GGUF model path or hf: URI. 
*/ diff --git a/src/config/types.tts.ts b/src/config/types.tts.ts index 3d898ff9c57..a6232f9de5a 100644 --- a/src/config/types.tts.ts +++ b/src/config/types.tts.ts @@ -61,6 +61,10 @@ export type TtsConfig = { baseUrl?: string; model?: string; voice?: string; + /** Playback speed (0.25–4.0, default 1.0). */ + speed?: number; + /** System-level instructions for the TTS model (gpt-4o-mini-tts only). */ + instructions?: string; }; /** Microsoft Edge (node-edge-tts) configuration. */ edge?: { diff --git a/src/config/validation.ts b/src/config/validation.ts index 90d733e0818..686dbb0ed43 100644 --- a/src/config/validation.ts +++ b/src/config/validation.ts @@ -297,17 +297,23 @@ type ValidateConfigWithPluginsResult = warnings: ConfigValidationIssue[]; }; -export function validateConfigObjectWithPlugins(raw: unknown): ValidateConfigWithPluginsResult { - return validateConfigObjectWithPluginsBase(raw, { applyDefaults: true }); +export function validateConfigObjectWithPlugins( + raw: unknown, + params?: { env?: NodeJS.ProcessEnv }, +): ValidateConfigWithPluginsResult { + return validateConfigObjectWithPluginsBase(raw, { applyDefaults: true, env: params?.env }); } -export function validateConfigObjectRawWithPlugins(raw: unknown): ValidateConfigWithPluginsResult { - return validateConfigObjectWithPluginsBase(raw, { applyDefaults: false }); +export function validateConfigObjectRawWithPlugins( + raw: unknown, + params?: { env?: NodeJS.ProcessEnv }, +): ValidateConfigWithPluginsResult { + return validateConfigObjectWithPluginsBase(raw, { applyDefaults: false, env: params?.env }); } function validateConfigObjectWithPluginsBase( raw: unknown, - opts: { applyDefaults: boolean }, + opts: { applyDefaults: boolean; env?: NodeJS.ProcessEnv }, ): ValidateConfigWithPluginsResult { const base = opts.applyDefaults ? 
validateConfigObject(raw) : validateConfigObjectRaw(raw); if (!base.ok) { @@ -345,6 +351,7 @@ function validateConfigObjectWithPluginsBase( const registry = loadPluginManifestRegistry({ config, workspaceDir: workspaceDir ?? undefined, + env: opts.env, }); for (const diag of registry.diagnostics) { diff --git a/src/config/zod-schema.agent-runtime.ts b/src/config/zod-schema.agent-runtime.ts index 3ede7218b80..d5b9eeedb16 100644 --- a/src/config/zod-schema.agent-runtime.ts +++ b/src/config/zod-schema.agent-runtime.ts @@ -553,6 +553,16 @@ export const MemorySearchSchema = z enabled: z.boolean().optional(), sources: z.array(z.union([z.literal("memory"), z.literal("sessions")])).optional(), extraPaths: z.array(z.string()).optional(), + multimodal: z + .object({ + enabled: z.boolean().optional(), + modalities: z + .array(z.union([z.literal("image"), z.literal("audio"), z.literal("all")])) + .optional(), + maxFileBytes: z.number().int().positive().optional(), + }) + .strict() + .optional(), experimental: z .object({ sessionMemory: z.boolean().optional(), @@ -599,6 +609,7 @@ export const MemorySearchSchema = z ]) .optional(), model: z.string().optional(), + outputDimensionality: z.number().int().positive().optional(), local: z .object({ modelPath: z.string().optional(), diff --git a/src/config/zod-schema.core.ts b/src/config/zod-schema.core.ts index 066a33f0f4f..305efab4b26 100644 --- a/src/config/zod-schema.core.ts +++ b/src/config/zod-schema.core.ts @@ -404,6 +404,8 @@ export const TtsConfigSchema = z baseUrl: z.string().optional(), model: z.string().optional(), voice: z.string().optional(), + speed: z.number().min(0.25).max(4).optional(), + instructions: z.string().optional(), }) .strict() .optional(), diff --git a/src/config/zod-schema.providers-core.ts b/src/config/zod-schema.providers-core.ts index 0bb676fa5ad..d68ac63759c 100644 --- a/src/config/zod-schema.providers-core.ts +++ b/src/config/zod-schema.providers-core.ts @@ -244,7 +244,9 @@ export const 
TelegramAccountSchemaBase = z sendMessage: z.boolean().optional(), poll: z.boolean().optional(), deleteMessage: z.boolean().optional(), + editMessage: z.boolean().optional(), sticker: z.boolean().optional(), + createForumTopic: z.boolean().optional(), }) .strict() .optional(), @@ -977,6 +979,7 @@ export const SignalAccountSchemaBase = z enabled: z.boolean().optional(), configWrites: z.boolean().optional(), account: z.string().optional(), + accountUuid: z.string().optional(), httpUrl: z.string().optional(), httpHost: z.string().optional(), httpPort: z.number().int().positive().optional(), diff --git a/src/config/zod-schema.tts.test.ts b/src/config/zod-schema.tts.test.ts new file mode 100644 index 00000000000..70398e81054 --- /dev/null +++ b/src/config/zod-schema.tts.test.ts @@ -0,0 +1,36 @@ +import { describe, expect, it } from "vitest"; +import { TtsConfigSchema } from "./zod-schema.core.js"; + +describe("TtsConfigSchema openai speed and instructions", () => { + it("accepts speed and instructions in openai section", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + voice: "alloy", + speed: 1.5, + instructions: "Speak in a cheerful tone", + }, + }), + ).not.toThrow(); + }); + + it("rejects out-of-range openai speed", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + speed: 5.0, + }, + }), + ).toThrow(); + }); + + it("rejects openai speed below minimum", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + speed: 0.1, + }, + }), + ).toThrow(); + }); +}); diff --git a/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts b/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts index 9da88bbb4a3..2c7eb20a3c6 100644 --- a/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts +++ b/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts @@ -217,6 +217,9 @@ describe("dispatchCronDelivery — double-announce guard", () => { payloads: [{ text: "Detailed child result, everything finished 
successfully." }], }), ); + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ skipQueue: true }), + ); }); it("normal text delivery sends exactly once and sets deliveryAttempted=true", async () => { @@ -304,4 +307,69 @@ describe("dispatchCronDelivery — double-announce guard", () => { expect(deliverOutboundPayloads).not.toHaveBeenCalled(); expect(state.deliveryAttempted).toBe(false); }); + + it("text delivery always bypasses the write-ahead queue", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]); + + const params = makeBaseParams({ synthesizedText: "Daily digest ready." }); + const state = await dispatchCronDelivery(params); + + expect(state.delivered).toBe(true); + expect(state.deliveryAttempted).toBe(true); + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); + + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "telegram", + to: "123456", + payloads: [{ text: "Daily digest ready." }], + skipQueue: true, + }), + ); + }); + + it("structured/thread delivery also bypasses the write-ahead queue", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]); + + const params = makeBaseParams({ synthesizedText: "Report attached." 
}); + // Simulate structured content so useDirectDelivery path is taken (no retryTransient) + (params as Record).deliveryPayloadHasStructuredContent = true; + await dispatchCronDelivery(params); + + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ skipQueue: true }), + ); + }); + + it("transient retry delivers exactly once with skipQueue on both attempts", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + + // First call throws a transient error, second call succeeds. + vi.mocked(deliverOutboundPayloads) + .mockRejectedValueOnce(new Error("gateway timeout")) + .mockResolvedValueOnce([{ ok: true } as never]); + + vi.stubEnv("OPENCLAW_TEST_FAST", "1"); + try { + const params = makeBaseParams({ synthesizedText: "Retry test." }); + const state = await dispatchCronDelivery(params); + + expect(state.delivered).toBe(true); + expect(state.deliveryAttempted).toBe(true); + // Two calls total: first failed transiently, second succeeded. + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(2); + + const calls = vi.mocked(deliverOutboundPayloads).mock.calls; + expect(calls[0][0]).toEqual(expect.objectContaining({ skipQueue: true })); + expect(calls[1][0]).toEqual(expect.objectContaining({ skipQueue: true })); + } finally { + vi.unstubAllEnvs(); + } + }); }); diff --git a/src/cron/isolated-agent/delivery-dispatch.ts b/src/cron/isolated-agent/delivery-dispatch.ts index fa9a295a777..a5dc0190b72 100644 --- a/src/cron/isolated-agent/delivery-dispatch.ts +++ b/src/cron/isolated-agent/delivery-dispatch.ts @@ -157,7 +157,9 @@ function isTransientDirectCronDeliveryError(error: unknown): boolean { } function resolveDirectCronRetryDelaysMs(): readonly number[] { - return process.env.OPENCLAW_TEST_FAST === "1" ? 
[8, 16, 32] : [5_000, 10_000, 20_000]; + return process.env.NODE_ENV === "test" && process.env.OPENCLAW_TEST_FAST === "1" + ? [8, 16, 32] + : [5_000, 10_000, 20_000]; } async function retryTransientDirectCronDelivery(params: { @@ -256,6 +258,12 @@ export async function dispatchCronDelivery( bestEffort: params.deliveryBestEffort, deps: createOutboundSendDeps(params.deps), abortSignal: params.abortSignal, + // Isolated cron direct delivery uses its own transient retry loop. + // Keep all attempts out of the write-ahead delivery queue so a + // late-successful first send cannot leave behind a failed queue + // entry that replays on the next restart. + // See: https://github.com/openclaw/openclaw/issues/40545 + skipQueue: true, }); const deliveryResults = options?.retryTransient ? await retryTransientDirectCronDelivery({ diff --git a/src/daemon/launchd-restart-handoff.test.ts b/src/daemon/launchd-restart-handoff.test.ts new file mode 100644 index 00000000000..d685e64d851 --- /dev/null +++ b/src/daemon/launchd-restart-handoff.test.ts @@ -0,0 +1,43 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; + +const spawnMock = vi.hoisted(() => vi.fn()); +const unrefMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + spawn: (...args: unknown[]) => spawnMock(...args), +})); + +import { scheduleDetachedLaunchdRestartHandoff } from "./launchd-restart-handoff.js"; + +afterEach(() => { + spawnMock.mockReset(); + unrefMock.mockReset(); + spawnMock.mockReturnValue({ pid: 4242, unref: unrefMock }); +}); + +describe("scheduleDetachedLaunchdRestartHandoff", () => { + it("waits for the caller pid before kickstarting launchd", () => { + const env = { + HOME: "/Users/test", + OPENCLAW_PROFILE: "default", + }; + spawnMock.mockReturnValue({ pid: 4242, unref: unrefMock }); + + const result = scheduleDetachedLaunchdRestartHandoff({ + env, + mode: "kickstart", + waitForPid: 9876, + }); + + expect(result).toEqual({ ok: true, pid: 4242 }); + 
expect(spawnMock).toHaveBeenCalledTimes(1); + const [, args] = spawnMock.mock.calls[0] as [string, string[]]; + expect(args[0]).toBe("-c"); + expect(args[2]).toBe("openclaw-launchd-restart-handoff"); + expect(args[6]).toBe("9876"); + expect(args[1]).toContain('while kill -0 "$wait_pid" >/dev/null 2>&1; do'); + expect(args[1]).toContain('launchctl kickstart -k "$service_target" >/dev/null 2>&1'); + expect(args[1]).not.toContain("sleep 1"); + expect(unrefMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/src/daemon/launchd-restart-handoff.ts b/src/daemon/launchd-restart-handoff.ts new file mode 100644 index 00000000000..ff2fa9dc612 --- /dev/null +++ b/src/daemon/launchd-restart-handoff.ts @@ -0,0 +1,138 @@ +import { spawn } from "node:child_process"; +import os from "node:os"; +import path from "node:path"; +import { resolveGatewayLaunchAgentLabel } from "./constants.js"; + +export type LaunchdRestartHandoffMode = "kickstart" | "start-after-exit"; + +export type LaunchdRestartHandoffResult = { + ok: boolean; + pid?: number; + detail?: string; +}; + +export type LaunchdRestartTarget = { + domain: string; + label: string; + plistPath: string; + serviceTarget: string; +}; + +function resolveGuiDomain(): string { + if (typeof process.getuid !== "function") { + return "gui/501"; + } + return `gui/${process.getuid()}`; +} + +function resolveLaunchAgentLabel(env?: Record): string { + const envLabel = env?.OPENCLAW_LAUNCHD_LABEL?.trim(); + if (envLabel) { + return envLabel; + } + return resolveGatewayLaunchAgentLabel(env?.OPENCLAW_PROFILE); +} + +export function resolveLaunchdRestartTarget( + env: Record = process.env, +): LaunchdRestartTarget { + const domain = resolveGuiDomain(); + const label = resolveLaunchAgentLabel(env); + const home = env.HOME?.trim() || os.homedir(); + const plistPath = path.join(home, "Library", "LaunchAgents", `${label}.plist`); + return { + domain, + label, + plistPath, + serviceTarget: `${domain}/${label}`, + }; +} + +export function 
isCurrentProcessLaunchdServiceLabel( + label: string, + env: NodeJS.ProcessEnv = process.env, +): boolean { + const launchdLabel = + env.LAUNCH_JOB_LABEL?.trim() || env.LAUNCH_JOB_NAME?.trim() || env.XPC_SERVICE_NAME?.trim(); + if (launchdLabel) { + return launchdLabel === label; + } + const configuredLabel = env.OPENCLAW_LAUNCHD_LABEL?.trim(); + return Boolean(configuredLabel && configuredLabel === label); +} + +function buildLaunchdRestartScript(mode: LaunchdRestartHandoffMode): string { + const waitForCallerPid = `wait_pid="$4" +if [ -n "$wait_pid" ] && [ "$wait_pid" -gt 1 ] 2>/dev/null; then + while kill -0 "$wait_pid" >/dev/null 2>&1; do + sleep 0.1 + done +fi +`; + + if (mode === "kickstart") { + return `service_target="$1" +domain="$2" +plist_path="$3" +${waitForCallerPid} +if ! launchctl kickstart -k "$service_target" >/dev/null 2>&1; then + launchctl enable "$service_target" >/dev/null 2>&1 + if launchctl bootstrap "$domain" "$plist_path" >/dev/null 2>&1; then + launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + fi +fi +`; + } + + return `service_target="$1" +domain="$2" +plist_path="$3" +${waitForCallerPid} +if ! launchctl start "$service_target" >/dev/null 2>&1; then + launchctl enable "$service_target" >/dev/null 2>&1 + if launchctl bootstrap "$domain" "$plist_path" >/dev/null 2>&1; then + launchctl start "$service_target" >/dev/null 2>&1 || launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + else + launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + fi +fi +`; +} + +export function scheduleDetachedLaunchdRestartHandoff(params: { + env?: Record; + mode: LaunchdRestartHandoffMode; + waitForPid?: number; +}): LaunchdRestartHandoffResult { + const target = resolveLaunchdRestartTarget(params.env); + const waitForPid = + typeof params.waitForPid === "number" && Number.isFinite(params.waitForPid) + ? 
Math.floor(params.waitForPid) + : 0; + try { + const child = spawn( + "/bin/sh", + [ + "-c", + buildLaunchdRestartScript(params.mode), + "openclaw-launchd-restart-handoff", + target.serviceTarget, + target.domain, + target.plistPath, + String(waitForPid), + ], + { + detached: true, + stdio: "ignore", + env: { ...process.env, ...params.env }, + }, + ); + child.unref(); + return { ok: true, pid: child.pid ?? undefined }; + } catch (err) { + return { + ok: false, + detail: err instanceof Error ? err.message : String(err), + }; + } +} diff --git a/src/daemon/launchd.test.ts b/src/daemon/launchd.test.ts index 99e5e1f933e..3acd239afe1 100644 --- a/src/daemon/launchd.test.ts +++ b/src/daemon/launchd.test.ts @@ -18,11 +18,17 @@ const state = vi.hoisted(() => ({ listOutput: "", printOutput: "", bootstrapError: "", + kickstartError: "", + kickstartFailuresRemaining: 0, dirs: new Set(), dirModes: new Map(), files: new Map(), fileModes: new Map(), })); +const launchdRestartHandoffState = vi.hoisted(() => ({ + isCurrentProcessLaunchdServiceLabel: vi.fn<(label: string) => boolean>(() => false), + scheduleDetachedLaunchdRestartHandoff: vi.fn((_params: unknown) => ({ ok: true, pid: 7331 })), +})); const defaultProgramArguments = ["node", "-e", "process.exit(0)"]; function normalizeLaunchctlArgs(file: string, args: string[]): string[] { @@ -49,10 +55,21 @@ vi.mock("./exec-file.js", () => ({ if (call[0] === "bootstrap" && state.bootstrapError) { return { stdout: "", stderr: state.bootstrapError, code: 1 }; } + if (call[0] === "kickstart" && state.kickstartError && state.kickstartFailuresRemaining > 0) { + state.kickstartFailuresRemaining -= 1; + return { stdout: "", stderr: state.kickstartError, code: 1 }; + } return { stdout: "", stderr: "", code: 0 }; }), })); +vi.mock("./launchd-restart-handoff.js", () => ({ + isCurrentProcessLaunchdServiceLabel: (label: string) => + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel(label), + scheduleDetachedLaunchdRestartHandoff: 
(params: unknown) => + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff(params), +})); + vi.mock("node:fs/promises", async (importOriginal) => { const actual = await importOriginal(); const wrapped = { @@ -109,10 +126,19 @@ beforeEach(() => { state.listOutput = ""; state.printOutput = ""; state.bootstrapError = ""; + state.kickstartError = ""; + state.kickstartFailuresRemaining = 0; state.dirs.clear(); state.dirModes.clear(); state.files.clear(); state.fileModes.clear(); + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReset(); + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReturnValue(false); + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff.mockReset(); + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff.mockReturnValue({ + ok: true, + pid: 7331, + }); vi.clearAllMocks(); }); @@ -304,9 +330,28 @@ describe("launchd install", () => { expect(state.fileModes.get(plistPath)).toBe(0o644); }); - it("restarts LaunchAgent with bootout-enable-bootstrap-kickstart order", async () => { + it("restarts LaunchAgent with kickstart and no bootout", async () => { const env = createDefaultLaunchdEnv(); - await restartLaunchAgent({ + const result = await restartLaunchAgent({ + env, + stdout: new PassThrough(), + }); + + const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; + const label = "ai.openclaw.gateway"; + const serviceId = `${domain}/${label}`; + expect(result).toEqual({ outcome: "completed" }); + expect(state.launchctlCalls).toContainEqual(["kickstart", "-k", serviceId]); + expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); + expect(state.launchctlCalls.some((call) => call[0] === "bootstrap")).toBe(false); + }); + + it("falls back to bootstrap when kickstart cannot find the service", async () => { + const env = createDefaultLaunchdEnv(); + state.kickstartError = "Could not find service"; + state.kickstartFailuresRemaining = 1; + + const result = await restartLaunchAgent({ env, stdout: new PassThrough(), }); @@ -315,8 +360,8 @@ describe("launchd install", () => { const label = "ai.openclaw.gateway"; const plistPath = resolveLaunchAgentPlistPath(env); const serviceId = `${domain}/${label}`; - const bootoutIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootout" && c[1] === serviceId, + const kickstartCalls = state.launchctlCalls.filter( + (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); const enableIndex = state.launchctlCalls.findIndex( (c) => c[0] === "enable" && c[1] === serviceId, @@ -324,53 +369,46 @@ describe("launchd install", () => { const bootstrapIndex = state.launchctlCalls.findIndex( (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, ); - const kickstartIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, - ); - expect(bootoutIndex).toBeGreaterThanOrEqual(0); + expect(result).toEqual({ outcome: "completed" }); + expect(kickstartCalls).toHaveLength(2); expect(enableIndex).toBeGreaterThanOrEqual(0); expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(kickstartIndex).toBeGreaterThanOrEqual(0); - expect(bootoutIndex).toBeLessThan(enableIndex); - expect(enableIndex).toBeLessThan(bootstrapIndex); - 
expect(bootstrapIndex).toBeLessThan(kickstartIndex); + expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); }); - it("waits for previous launchd pid to exit before bootstrapping", async () => { + it("surfaces the original kickstart failure when the service is still loaded", async () => { const env = createDefaultLaunchdEnv(); - state.printOutput = ["state = running", "pid = 4242"].join("\n"); - const killSpy = vi.spyOn(process, "kill"); - killSpy - .mockImplementationOnce(() => true) - .mockImplementationOnce(() => { - const err = new Error("no such process") as NodeJS.ErrnoException; - err.code = "ESRCH"; - throw err; - }); + state.kickstartError = "Input/output error"; + state.kickstartFailuresRemaining = 1; - vi.useFakeTimers(); - try { - const restartPromise = restartLaunchAgent({ + await expect( + restartLaunchAgent({ env, stdout: new PassThrough(), - }); - await vi.advanceTimersByTimeAsync(250); - await restartPromise; - expect(killSpy).toHaveBeenCalledWith(4242, 0); - const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const bootoutIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootout" && c[1] === `${domain}/${label}`, - ); - const bootstrapIndex = state.launchctlCalls.findIndex((c) => c[0] === "bootstrap"); - expect(bootoutIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(bootoutIndex).toBeLessThan(bootstrapIndex); - } finally { - vi.useRealTimers(); - killSpy.mockRestore(); - } + }), + ).rejects.toThrow("launchctl kickstart failed: Input/output error"); + + expect(state.launchctlCalls.some((call) => call[0] === "enable")).toBe(false); + expect(state.launchctlCalls.some((call) => call[0] === "bootstrap")).toBe(false); + }); + + it("hands restart off to a detached helper when invoked from the current LaunchAgent", async () => { + const env = createDefaultLaunchdEnv(); + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReturnValue(true); + + const result = await restartLaunchAgent({ + env, + stdout: new PassThrough(), + }); + + expect(result).toEqual({ outcome: "scheduled" }); + expect(launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff).toHaveBeenCalledWith({ + env, + mode: "kickstart", + waitForPid: process.pid, + }); + expect(state.launchctlCalls).toEqual([]); }); it("shows actionable guidance when launchctl gui domain does not support bootstrap", async () => { diff --git a/src/daemon/launchd.ts b/src/daemon/launchd.ts index 492eb2e4d6e..68ae1b43edd 100644 --- a/src/daemon/launchd.ts +++ b/src/daemon/launchd.ts @@ -12,6 +12,10 @@ import { buildLaunchAgentPlist as buildLaunchAgentPlistImpl, readLaunchAgentProgramArgumentsFromFile, } from "./launchd-plist.js"; +import { + isCurrentProcessLaunchdServiceLabel, + scheduleDetachedLaunchdRestartHandoff, +} from "./launchd-restart-handoff.js"; import { formatLine, toPosixPath, writeFormattedLines } from "./output.js"; import { resolveGatewayStateDir, resolveHomeDir } 
from "./paths.js"; import { parseKeyValueOutput } from "./runtime-parse.js"; @@ -23,6 +27,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; const LAUNCH_AGENT_DIR_MODE = 0o755; @@ -352,34 +357,6 @@ function isUnsupportedGuiDomain(detail: string): boolean { ); } -const RESTART_PID_WAIT_TIMEOUT_MS = 10_000; -const RESTART_PID_WAIT_INTERVAL_MS = 200; - -async function sleepMs(ms: number): Promise { - await new Promise((resolve) => { - setTimeout(resolve, ms); - }); -} - -async function waitForPidExit(pid: number): Promise { - if (!Number.isFinite(pid) || pid <= 1) { - return; - } - const deadline = Date.now() + RESTART_PID_WAIT_TIMEOUT_MS; - while (Date.now() < deadline) { - try { - process.kill(pid, 0); - } catch (err) { - const code = (err as NodeJS.ErrnoException).code; - if (code === "ESRCH" || code === "EPERM") { - return; - } - return; - } - await sleepMs(RESTART_PID_WAIT_INTERVAL_MS); - } -} - export async function stopLaunchAgent({ stdout, env }: GatewayServiceControlArgs): Promise { const domain = resolveGuiDomain(); const label = resolveLaunchAgentLabel({ env }); @@ -471,29 +448,53 @@ export async function installLaunchAgent({ export async function restartLaunchAgent({ stdout, env, -}: GatewayServiceControlArgs): Promise { +}: GatewayServiceControlArgs): Promise { const serviceEnv = env ?? (process.env as GatewayServiceEnv); const domain = resolveGuiDomain(); const label = resolveLaunchAgentLabel({ env: serviceEnv }); const plistPath = resolveLaunchAgentPlistPath(serviceEnv); + const serviceTarget = `${domain}/${label}`; - const runtime = await execLaunchctl(["print", `${domain}/${label}`]); - const previousPid = - runtime.code === 0 - ? 
parseLaunchctlPrint(runtime.stdout || runtime.stderr || "").pid - : undefined; - - const stop = await execLaunchctl(["bootout", `${domain}/${label}`]); - if (stop.code !== 0 && !isLaunchctlNotLoaded(stop)) { - throw new Error(`launchctl bootout failed: ${stop.stderr || stop.stdout}`.trim()); - } - if (typeof previousPid === "number") { - await waitForPidExit(previousPid); + // Restart requests issued from inside the managed gateway process tree need a + // detached handoff. A direct `kickstart -k` would terminate the caller before + // it can finish the restart command. + if (isCurrentProcessLaunchdServiceLabel(label)) { + const handoff = scheduleDetachedLaunchdRestartHandoff({ + env: serviceEnv, + mode: "kickstart", + waitForPid: process.pid, + }); + if (!handoff.ok) { + throw new Error(`launchd restart handoff failed: ${handoff.detail ?? "unknown error"}`); + } + try { + stdout.write(`${formatLine("Scheduled LaunchAgent restart", serviceTarget)}\n`); + } catch (err: unknown) { + if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { + throw err; + } + } + return { outcome: "scheduled" }; } - // launchd can persist "disabled" state after bootout; clear it before bootstrap - // (matches the same guard in installLaunchAgent). - await execLaunchctl(["enable", `${domain}/${label}`]); + const start = await execLaunchctl(["kickstart", "-k", serviceTarget]); + if (start.code === 0) { + try { + stdout.write(`${formatLine("Restarted LaunchAgent", serviceTarget)}\n`); + } catch (err: unknown) { + if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { + throw err; + } + } + return { outcome: "completed" }; + } + + if (!isLaunchctlNotLoaded(start)) { + throw new Error(`launchctl kickstart failed: ${start.stderr || start.stdout}`.trim()); + } + + // If the service was previously booted out, re-register the plist and retry. 
+ await execLaunchctl(["enable", serviceTarget]); const boot = await execLaunchctl(["bootstrap", domain, plistPath]); if (boot.code !== 0) { const detail = (boot.stderr || boot.stdout).trim(); @@ -511,15 +512,16 @@ export async function restartLaunchAgent({ throw new Error(`launchctl bootstrap failed: ${detail}`); } - const start = await execLaunchctl(["kickstart", "-k", `${domain}/${label}`]); - if (start.code !== 0) { - throw new Error(`launchctl kickstart failed: ${start.stderr || start.stdout}`.trim()); + const retry = await execLaunchctl(["kickstart", "-k", serviceTarget]); + if (retry.code !== 0) { + throw new Error(`launchctl kickstart failed: ${retry.stderr || retry.stdout}`.trim()); } try { - stdout.write(`${formatLine("Restarted LaunchAgent", `${domain}/${label}`)}\n`); + stdout.write(`${formatLine("Restarted LaunchAgent", serviceTarget)}\n`); } catch (err: unknown) { if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { throw err; } } + return { outcome: "completed" }; } diff --git a/src/daemon/schtasks.ts b/src/daemon/schtasks.ts index af09d2ca564..ddca704f6a4 100644 --- a/src/daemon/schtasks.ts +++ b/src/daemon/schtasks.ts @@ -16,6 +16,7 @@ import type { GatewayServiceInstallArgs, GatewayServiceManageArgs, GatewayServiceRenderArgs, + GatewayServiceRestartResult, } from "./service-types.js"; function resolveTaskName(env: GatewayServiceEnv): string { @@ -316,7 +317,7 @@ export async function stopScheduledTask({ stdout, env }: GatewayServiceControlAr export async function restartScheduledTask({ stdout, env, -}: GatewayServiceControlArgs): Promise { +}: GatewayServiceControlArgs): Promise { await assertSchtasksAvailable(); const taskName = resolveTaskName(env ?? 
(process.env as GatewayServiceEnv)); await execSchtasks(["/End", "/TN", taskName]); @@ -325,6 +326,7 @@ export async function restartScheduledTask({ throw new Error(`schtasks run failed: ${res.stderr || res.stdout}`.trim()); } stdout.write(`${formatLine("Restarted Scheduled Task", taskName)}\n`); + return { outcome: "completed" }; } export async function isScheduledTaskInstalled(args: GatewayServiceEnvArgs): Promise { diff --git a/src/daemon/service-types.ts b/src/daemon/service-types.ts index ae7d8d1a28f..202930bd6ce 100644 --- a/src/daemon/service-types.ts +++ b/src/daemon/service-types.ts @@ -19,6 +19,8 @@ export type GatewayServiceControlArgs = { env?: GatewayServiceEnv; }; +export type GatewayServiceRestartResult = { outcome: "completed" } | { outcome: "scheduled" }; + export type GatewayServiceEnvArgs = { env?: GatewayServiceEnv; }; diff --git a/src/daemon/service.test.ts b/src/daemon/service.test.ts index 19811e49699..ea2c53e8e1a 100644 --- a/src/daemon/service.test.ts +++ b/src/daemon/service.test.ts @@ -1,5 +1,5 @@ import { afterEach, describe, expect, it } from "vitest"; -import { resolveGatewayService } from "./service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "./service.js"; const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); @@ -37,4 +37,13 @@ describe("resolveGatewayService", () => { setPlatform("aix"); expect(() => resolveGatewayService()).toThrow("Gateway service install not supported on aix"); }); + + it("describes scheduled restart handoffs consistently", () => { + expect(describeGatewayServiceRestart("Gateway", { outcome: "scheduled" })).toEqual({ + scheduled: true, + daemonActionResult: "scheduled", + message: "restart scheduled, gateway will restart momentarily", + progressMessage: "Gateway service restart scheduled.", + }); + }); }); diff --git a/src/daemon/service.ts b/src/daemon/service.ts index 9685ed1ece5..8083ce4b5e1 100644 --- a/src/daemon/service.ts +++ 
b/src/daemon/service.ts @@ -24,6 +24,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; import { installSystemdService, @@ -41,6 +42,7 @@ export type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; function ignoreInstallResult( @@ -58,12 +60,37 @@ export type GatewayService = { install: (args: GatewayServiceInstallArgs) => Promise; uninstall: (args: GatewayServiceManageArgs) => Promise; stop: (args: GatewayServiceControlArgs) => Promise; - restart: (args: GatewayServiceControlArgs) => Promise; + restart: (args: GatewayServiceControlArgs) => Promise; isLoaded: (args: GatewayServiceEnvArgs) => Promise; readCommand: (env: GatewayServiceEnv) => Promise; readRuntime: (env: GatewayServiceEnv) => Promise; }; +export function describeGatewayServiceRestart( + serviceNoun: string, + result: GatewayServiceRestartResult, +): { + scheduled: boolean; + daemonActionResult: "restarted" | "scheduled"; + message: string; + progressMessage: string; +} { + if (result.outcome === "scheduled") { + return { + scheduled: true, + daemonActionResult: "scheduled", + message: `restart scheduled, ${serviceNoun.toLowerCase()} will restart momentarily`, + progressMessage: `${serviceNoun} service restart scheduled.`, + }; + } + return { + scheduled: false, + daemonActionResult: "restarted", + message: `${serviceNoun} service restarted.`, + progressMessage: `${serviceNoun} service restarted.`, + }; +} + type SupportedGatewayServicePlatform = "darwin" | "linux" | "win32"; const GATEWAY_SERVICE_REGISTRY: Record = { diff --git a/src/daemon/systemd.ts b/src/daemon/systemd.ts index bce7593e24e..62ab2dfa146 100644 --- a/src/daemon/systemd.ts +++ b/src/daemon/systemd.ts @@ -20,6 +20,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from 
"./service-types.js"; import { enableSystemdUserLinger, @@ -570,13 +571,14 @@ export async function stopSystemdService({ export async function restartSystemdService({ stdout, env, -}: GatewayServiceControlArgs): Promise { +}: GatewayServiceControlArgs): Promise { await runSystemdServiceAction({ stdout, env, action: "restart", label: "Restarted systemd service", }); + return { outcome: "completed" }; } export async function isSystemdServiceEnabled(args: GatewayServiceEnvArgs): Promise { diff --git a/src/discord/monitor.test.ts b/src/discord/monitor.test.ts index 10c7dc66747..9471a3fe6bc 100644 --- a/src/discord/monitor.test.ts +++ b/src/discord/monitor.test.ts @@ -38,6 +38,7 @@ const makeEntries = ( requireMention: value.requireMention, reactionNotifications: value.reactionNotifications, users: value.users, + roles: value.roles, channels: value.channels, }; } @@ -730,6 +731,17 @@ describe("discord reaction notification gating", () => { }, expected: true, }, + { + name: "all mode blocks non-allowlisted guild member", + input: { + mode: "all" as const, + botId: "bot-1", + messageAuthorId: "user-1", + userId: "user-2", + guildInfo: { users: ["trusted-user"] }, + }, + expected: false, + }, { name: "own mode with bot-authored message", input: { @@ -750,6 +762,17 @@ describe("discord reaction notification gating", () => { }, expected: false, }, + { + name: "own mode still blocks member outside users allowlist", + input: { + mode: "own" as const, + botId: "bot-1", + messageAuthorId: "bot-1", + userId: "user-3", + guildInfo: { users: ["trusted-user"] }, + }, + expected: false, + }, { name: "allowlist mode without match", input: { @@ -769,7 +792,7 @@ describe("discord reaction notification gating", () => { messageAuthorId: "user-1", userId: "123", userName: "steipete", - allowlist: ["123", "other"] as string[], + guildInfo: { users: ["123", "other"] }, }, expected: true, }, @@ -781,7 +804,7 @@ describe("discord reaction notification gating", () => { messageAuthorId: 
"user-1", userId: "999", userName: "trusted-user", - allowlist: ["trusted-user"] as string[], + guildInfo: { users: ["trusted-user"] }, }, expected: false, }, @@ -793,21 +816,29 @@ describe("discord reaction notification gating", () => { messageAuthorId: "user-1", userId: "999", userName: "trusted-user", - allowlist: ["trusted-user"] as string[], + guildInfo: { users: ["trusted-user"] }, allowNameMatching: true, }, expected: true, }, + { + name: "allowlist mode matches allowed role", + input: { + mode: "allowlist" as const, + botId: "bot-1", + messageAuthorId: "user-1", + userId: "999", + guildInfo: { roles: ["role:trusted-role"] }, + memberRoleIds: ["trusted-role"], + }, + expected: true, + }, ]); for (const testCase of cases) { expect( shouldEmitDiscordReactionNotification({ ...testCase.input, - allowlist: - "allowlist" in testCase.input && testCase.input.allowlist - ? [...testCase.input.allowlist] - : undefined, }), testCase.name, ).toBe(testCase.expected); @@ -863,6 +894,7 @@ function makeReactionEvent(overrides?: { messageAuthorId?: string; messageFetch?: ReturnType; guild?: { name?: string; id?: string }; + memberRoleIds?: string[]; }) { const userId = overrides?.userId ?? "user-1"; const messageId = overrides?.messageId ?? "msg-1"; @@ -882,6 +914,7 @@ function makeReactionEvent(overrides?: { message_id: messageId, emoji: { name: overrides?.emojiName ?? "👍", id: null }, guild: overrides?.guild, + rawMember: overrides?.memberRoleIds ? 
{ roles: overrides.memberRoleIds } : undefined, user: { id: userId, bot: false, @@ -1059,7 +1092,31 @@ describe("discord DM reaction handling", () => { expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); }); - it("still processes guild reactions (no regression)", async () => { + it("blocks guild reactions for sender outside users allowlist", async () => { + const data = makeReactionEvent({ + guildId: "guild-123", + userId: "attacker-user", + botAsAuthor: true, + guild: { id: "guild-123", name: "Test Guild" }, + }); + const client = makeReactionClient({ channelType: ChannelType.GuildText }); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ + guildEntries: makeEntries({ + "guild-123": { + users: ["user:trusted-user"], + }, + }), + }), + ); + + await listener.handle(data, client); + + expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); + expect(resolveAgentRouteMock).not.toHaveBeenCalled(); + }); + + it("allows guild reactions for sender in channel role allowlist override", async () => { resolveAgentRouteMock.mockReturnValueOnce({ agentId: "default", channel: "discord", @@ -1069,11 +1126,27 @@ describe("discord DM reaction handling", () => { const data = makeReactionEvent({ guildId: "guild-123", + userId: "member-user", botAsAuthor: true, - guild: { name: "Test Guild" }, + guild: { id: "guild-123", name: "Test Guild" }, + memberRoleIds: ["trusted-role"], }); const client = makeReactionClient({ channelType: ChannelType.GuildText }); - const listener = new DiscordReactionListener(makeReactionListenerParams()); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ + guildEntries: makeEntries({ + "guild-123": { + roles: ["role:blocked-role"], + channels: { + "channel-1": { + allow: true, + roles: ["role:trusted-role"], + }, + }, + }, + }), + }), + ); await listener.handle(data, client); diff --git a/src/discord/monitor/allow-list.ts b/src/discord/monitor/allow-list.ts index b736928e276..7c1250cb8ef 100644 --- 
a/src/discord/monitor/allow-list.ts +++ b/src/discord/monitor/allow-list.ts @@ -556,6 +556,9 @@ export function shouldEmitDiscordReactionNotification(params: { userId: string; userName?: string; userTag?: string; + channelConfig?: DiscordChannelConfigResolved | null; + guildInfo?: DiscordGuildEntryResolved | null; + memberRoleIds?: string[]; allowlist?: string[]; allowNameMatching?: boolean; }) { @@ -563,26 +566,31 @@ export function shouldEmitDiscordReactionNotification(params: { if (mode === "off") { return false; } + const accessGuildInfo = + params.guildInfo ?? + (params.allowlist ? ({ users: params.allowlist } satisfies DiscordGuildEntryResolved) : null); + const { hasAccessRestrictions, memberAllowed } = resolveDiscordMemberAccessState({ + channelConfig: params.channelConfig, + guildInfo: accessGuildInfo, + memberRoleIds: params.memberRoleIds ?? [], + sender: { + id: params.userId, + name: params.userName, + tag: params.userTag, + }, + allowNameMatching: params.allowNameMatching, + }); + if (mode === "allowlist") { + return hasAccessRestrictions && memberAllowed; + } + if (hasAccessRestrictions && !memberAllowed) { + return false; + } if (mode === "all") { return true; } if (mode === "own") { return Boolean(params.botId && params.messageAuthorId === params.botId); } - if (mode === "allowlist") { - const list = normalizeDiscordAllowList(params.allowlist, ["discord:", "user:", "pk:"]); - if (!list) { - return false; - } - return allowListMatches( - list, - { - id: params.userId, - name: params.userName, - tag: params.userTag, - }, - { allowNameMatching: params.allowNameMatching }, - ); - } return false; } diff --git a/src/discord/monitor/listeners.ts b/src/discord/monitor/listeners.ts index 056a1ad7116..824cb5fb19a 100644 --- a/src/discord/monitor/listeners.ts +++ b/src/discord/monitor/listeners.ts @@ -24,6 +24,7 @@ import { normalizeDiscordSlug, resolveDiscordAllowListMatch, resolveDiscordChannelConfigWithFallback, + resolveDiscordMemberAccessState, 
resolveGroupDmAllow, resolveDiscordGuildEntry, shouldEmitDiscordReactionNotification, @@ -294,6 +295,7 @@ async function runDiscordReactionHandler(params: { type DiscordReactionIngressAuthorizationParams = { accountId: string; user: User; + memberRoleIds: string[]; isDirectMessage: boolean; isGroupDm: boolean; isGuildMessage: boolean; @@ -308,7 +310,7 @@ type DiscordReactionIngressAuthorizationParams = { groupPolicy: "open" | "allowlist" | "disabled"; allowNameMatching: boolean; guildInfo: import("./allow-list.js").DiscordGuildEntryResolved | null; - channelConfig?: { allowed?: boolean } | null; + channelConfig?: import("./allow-list.js").DiscordChannelConfigResolved | null; }; async function authorizeDiscordReactionIngress( @@ -383,6 +385,20 @@ async function authorizeDiscordReactionIngress( if (params.channelConfig?.allowed === false) { return { allowed: false, reason: "guild-channel-denied" }; } + const { hasAccessRestrictions, memberAllowed } = resolveDiscordMemberAccessState({ + channelConfig: params.channelConfig, + guildInfo: params.guildInfo, + memberRoleIds: params.memberRoleIds, + sender: { + id: params.user.id, + name: params.user.username, + tag: formatDiscordUserTag(params.user), + }, + allowNameMatching: params.allowNameMatching, + }); + if (hasAccessRestrictions && !memberAllowed) { + return { allowed: false, reason: "guild-member-denied" }; + } return { allowed: true }; } @@ -434,9 +450,13 @@ async function handleDiscordReactionEvent( channelType === ChannelType.PublicThread || channelType === ChannelType.PrivateThread || channelType === ChannelType.AnnouncementThread; + const memberRoleIds = Array.isArray(data.rawMember?.roles) + ? 
data.rawMember.roles.map((roleId: string) => String(roleId)) + : []; const reactionIngressBase: Omit = { accountId: params.accountId, user, + memberRoleIds, isDirectMessage, isGroupDm, isGuildMessage, @@ -452,17 +472,18 @@ async function handleDiscordReactionEvent( allowNameMatching: params.allowNameMatching, guildInfo, }; - const ingressAccess = await authorizeDiscordReactionIngress(reactionIngressBase); - if (!ingressAccess.allowed) { - logVerbose(`discord reaction blocked sender=${user.id} (reason=${ingressAccess.reason})`); - return; + // Guild reactions need resolved channel/thread config before member access + // can mirror the normal message preflight path. + if (!isGuildMessage) { + const ingressAccess = await authorizeDiscordReactionIngress(reactionIngressBase); + if (!ingressAccess.allowed) { + logVerbose(`discord reaction blocked sender=${user.id} (reason=${ingressAccess.reason})`); + return; + } } let parentId = "parentId" in channel ? (channel.parentId ?? undefined) : undefined; let parentName: string | undefined; let parentSlug = ""; - const memberRoleIds = Array.isArray(data.rawMember?.roles) - ? 
data.rawMember.roles.map((roleId: string) => String(roleId)) - : []; let reactionBase: { baseText: string; contextKey: string } | null = null; const resolveReactionBase = () => { if (reactionBase) { @@ -507,6 +528,7 @@ async function handleDiscordReactionEvent( const shouldNotifyReaction = (options: { mode: "off" | "own" | "all" | "allowlist"; messageAuthorId?: string; + channelConfig?: ReturnType; }) => shouldEmitDiscordReactionNotification({ mode: options.mode, @@ -515,7 +537,9 @@ async function handleDiscordReactionEvent( userId: user.id, userName: user.username, userTag: formatDiscordUserTag(user), - allowlist: guildInfo?.users, + channelConfig: options.channelConfig, + guildInfo, + memberRoleIds, allowNameMatching: params.allowNameMatching, }); const emitReactionWithAuthor = (message: { author?: User } | null) => { @@ -550,10 +574,12 @@ async function handleDiscordReactionEvent( ...reactionIngressBase, channelConfig, }); - const authorizeThreadChannelAccess = async (channelInfo: { parentId?: string } | null) => { + const resolveThreadChannelAccess = async (channelInfo: { parentId?: string } | null) => { parentId = channelInfo?.parentId; await loadThreadParentInfo(); - return await authorizeReactionIngressForChannel(resolveThreadChannelConfig()); + const channelConfig = resolveThreadChannelConfig(); + const access = await authorizeReactionIngressForChannel(channelConfig); + return { access, channelConfig }; }; // Parallelize async operations for thread channels @@ -572,16 +598,18 @@ async function handleDiscordReactionEvent( // Fast path: for "all" and "allowlist" modes, we don't need to fetch the message if (reactionMode === "all" || reactionMode === "allowlist") { const channelInfo = await channelInfoPromise; - const threadAccess = await authorizeThreadChannelAccess(channelInfo); + const { access: threadAccess, channelConfig: threadChannelConfig } = + await resolveThreadChannelAccess(channelInfo); if (!threadAccess.allowed) { return; } - - // For allowlist 
mode, check if user is in allowlist first - if (reactionMode === "allowlist") { - if (!shouldNotifyReaction({ mode: reactionMode })) { - return; - } + if ( + !shouldNotifyReaction({ + mode: reactionMode, + channelConfig: threadChannelConfig, + }) + ) { + return; } const { baseText } = resolveReactionBase(); @@ -593,13 +621,20 @@ async function handleDiscordReactionEvent( const messagePromise = data.message.fetch().catch(() => null); const [channelInfo, message] = await Promise.all([channelInfoPromise, messagePromise]); - const threadAccess = await authorizeThreadChannelAccess(channelInfo); + const { access: threadAccess, channelConfig: threadChannelConfig } = + await resolveThreadChannelAccess(channelInfo); if (!threadAccess.allowed) { return; } const messageAuthorId = message?.author?.id ?? undefined; - if (!shouldNotifyReaction({ mode: reactionMode, messageAuthorId })) { + if ( + !shouldNotifyReaction({ + mode: reactionMode, + messageAuthorId, + channelConfig: threadChannelConfig, + }) + ) { return; } @@ -634,11 +669,8 @@ async function handleDiscordReactionEvent( // Fast path: for "all" and "allowlist" modes, we don't need to fetch the message if (reactionMode === "all" || reactionMode === "allowlist") { - // For allowlist mode, check if user is in allowlist first - if (reactionMode === "allowlist") { - if (!shouldNotifyReaction({ mode: reactionMode })) { - return; - } + if (!shouldNotifyReaction({ mode: reactionMode, channelConfig })) { + return; } const { baseText } = resolveReactionBase(); @@ -649,7 +681,7 @@ async function handleDiscordReactionEvent( // For "own" mode, we need to fetch the message to check the author const message = await data.message.fetch().catch(() => null); const messageAuthorId = message?.author?.id ?? 
undefined; - if (!shouldNotifyReaction({ mode: reactionMode, messageAuthorId })) { + if (!shouldNotifyReaction({ mode: reactionMode, messageAuthorId, channelConfig })) { return; } diff --git a/src/gateway/chat-abort.ts b/src/gateway/chat-abort.ts index 0210f9223f7..4be479153f6 100644 --- a/src/gateway/chat-abort.ts +++ b/src/gateway/chat-abort.ts @@ -6,6 +6,8 @@ export type ChatAbortControllerEntry = { sessionKey: string; startedAtMs: number; expiresAtMs: number; + ownerConnId?: string; + ownerDeviceId?: string; }; export function isChatStopCommandText(text: string): boolean { diff --git a/src/gateway/protocol/schema/agent.ts b/src/gateway/protocol/schema/agent.ts index 75d560ba92b..11369a4ed4a 100644 --- a/src/gateway/protocol/schema/agent.ts +++ b/src/gateway/protocol/schema/agent.ts @@ -1,6 +1,5 @@ import { Type } from "@sinclair/typebox"; -import { INPUT_PROVENANCE_KIND_VALUES } from "../../../sessions/input-provenance.js"; -import { NonEmptyString, SessionLabelString } from "./primitives.js"; +import { InputProvenanceSchema, NonEmptyString, SessionLabelString } from "./primitives.js"; export const AgentInternalEventSchema = Type.Object( { @@ -96,22 +95,9 @@ export const AgentParamsSchema = Type.Object( lane: Type.Optional(Type.String()), extraSystemPrompt: Type.Optional(Type.String()), internalEvents: Type.Optional(Type.Array(AgentInternalEventSchema)), - inputProvenance: Type.Optional( - Type.Object( - { - kind: Type.String({ enum: [...INPUT_PROVENANCE_KIND_VALUES] }), - originSessionId: Type.Optional(Type.String()), - sourceSessionKey: Type.Optional(Type.String()), - sourceChannel: Type.Optional(Type.String()), - sourceTool: Type.Optional(Type.String()), - }, - { additionalProperties: false }, - ), - ), + inputProvenance: Type.Optional(InputProvenanceSchema), idempotencyKey: NonEmptyString, label: Type.Optional(SessionLabelString), - spawnedBy: Type.Optional(Type.String()), - workspaceDir: Type.Optional(Type.String()), }, { additionalProperties: false }, ); 
diff --git a/src/gateway/protocol/schema/logs-chat.ts b/src/gateway/protocol/schema/logs-chat.ts index 5545bd443f1..5c4003acb8e 100644 --- a/src/gateway/protocol/schema/logs-chat.ts +++ b/src/gateway/protocol/schema/logs-chat.ts @@ -1,6 +1,5 @@ import { Type } from "@sinclair/typebox"; -import { INPUT_PROVENANCE_KIND_VALUES } from "../../../sessions/input-provenance.js"; -import { ChatSendSessionKeyString, NonEmptyString } from "./primitives.js"; +import { ChatSendSessionKeyString, InputProvenanceSchema, NonEmptyString } from "./primitives.js"; export const LogsTailParamsSchema = Type.Object( { @@ -40,18 +39,7 @@ export const ChatSendParamsSchema = Type.Object( deliver: Type.Optional(Type.Boolean()), attachments: Type.Optional(Type.Array(Type.Unknown())), timeoutMs: Type.Optional(Type.Integer({ minimum: 0 })), - systemInputProvenance: Type.Optional( - Type.Object( - { - kind: Type.String({ enum: [...INPUT_PROVENANCE_KIND_VALUES] }), - originSessionId: Type.Optional(Type.String()), - sourceSessionKey: Type.Optional(Type.String()), - sourceChannel: Type.Optional(Type.String()), - sourceTool: Type.Optional(Type.String()), - }, - { additionalProperties: false }, - ), - ), + systemInputProvenance: Type.Optional(InputProvenanceSchema), systemProvenanceReceipt: Type.Optional(Type.String()), idempotencyKey: NonEmptyString, }, diff --git a/src/gateway/protocol/schema/primitives.ts b/src/gateway/protocol/schema/primitives.ts index 6ac6a71b64a..2983c834f35 100644 --- a/src/gateway/protocol/schema/primitives.ts +++ b/src/gateway/protocol/schema/primitives.ts @@ -5,6 +5,7 @@ import { FILE_SECRET_REF_ID_PATTERN, SECRET_PROVIDER_ALIAS_PATTERN, } from "../../../secrets/ref-contract.js"; +import { INPUT_PROVENANCE_KIND_VALUES } from "../../../sessions/input-provenance.js"; import { SESSION_LABEL_MAX_LENGTH } from "../../../sessions/session-label.js"; import { GATEWAY_CLIENT_IDS, GATEWAY_CLIENT_MODES } from "../client-info.js"; @@ -18,6 +19,16 @@ export const SessionLabelString = 
Type.String({ minLength: 1, maxLength: SESSION_LABEL_MAX_LENGTH, }); +export const InputProvenanceSchema = Type.Object( + { + kind: Type.String({ enum: [...INPUT_PROVENANCE_KIND_VALUES] }), + originSessionId: Type.Optional(Type.String()), + sourceSessionKey: Type.Optional(Type.String()), + sourceChannel: Type.Optional(Type.String()), + sourceTool: Type.Optional(Type.String()), + }, + { additionalProperties: false }, +); export const GatewayClientIdSchema = Type.Union( Object.values(GATEWAY_CLIENT_IDS).map((value) => Type.Literal(value)), diff --git a/src/gateway/protocol/schema/sessions.ts b/src/gateway/protocol/schema/sessions.ts index 83f09e8ecba..30595c15698 100644 --- a/src/gateway/protocol/schema/sessions.ts +++ b/src/gateway/protocol/schema/sessions.ts @@ -71,6 +71,7 @@ export const SessionsPatchParamsSchema = Type.Object( execNode: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), model: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), spawnedBy: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), + spawnedWorkspaceDir: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), spawnDepth: Type.Optional(Type.Union([Type.Integer({ minimum: 0 }), Type.Null()])), subagentRole: Type.Optional( Type.Union([Type.Literal("orchestrator"), Type.Literal("leaf"), Type.Null()]), diff --git a/src/gateway/server-methods/agent.test.ts b/src/gateway/server-methods/agent.test.ts index fbc8b056c34..5dfa27b20ce 100644 --- a/src/gateway/server-methods/agent.test.ts +++ b/src/gateway/server-methods/agent.test.ts @@ -405,30 +405,53 @@ describe("gateway agent handler", () => { expect(callArgs.bestEffortDeliver).toBe(false); }); - it("only forwards workspaceDir for spawned subagent runs", async () => { + it("rejects public spawned-run metadata fields", async () => { primeMainAgentRun(); mocks.agentCommand.mockClear(); - - await invokeAgent( - { - message: "normal run", - sessionKey: "agent:main:main", - workspaceDir: "/tmp/ignored", - idempotencyKey: 
"workspace-ignored", - }, - { reqId: "workspace-ignored-1" }, - ); - await vi.waitFor(() => expect(mocks.agentCommand).toHaveBeenCalled()); - const normalCall = mocks.agentCommand.mock.calls.at(-1)?.[0] as { workspaceDir?: string }; - expect(normalCall.workspaceDir).toBeUndefined(); - mocks.agentCommand.mockClear(); + const respond = vi.fn(); await invokeAgent( { message: "spawned run", sessionKey: "agent:main:main", spawnedBy: "agent:main:subagent:parent", - workspaceDir: "/tmp/inherited", + workspaceDir: "/tmp/injected", + idempotencyKey: "workspace-rejected", + } as AgentParams, + { reqId: "workspace-rejected-1", respond }, + ); + + expect(mocks.agentCommand).not.toHaveBeenCalled(); + expect(respond).toHaveBeenCalledWith( + false, + undefined, + expect.objectContaining({ + message: expect.stringContaining("invalid agent params"), + }), + ); + }); + + it("only forwards workspaceDir for spawned sessions with stored workspace inheritance", async () => { + primeMainAgentRun(); + mockMainSessionEntry({ + spawnedBy: "agent:main:subagent:parent", + spawnedWorkspaceDir: "/tmp/inherited", + }); + mocks.updateSessionStore.mockImplementation(async (_path, updater) => { + const store: Record = { + "agent:main:main": buildExistingMainStoreEntry({ + spawnedBy: "agent:main:subagent:parent", + spawnedWorkspaceDir: "/tmp/inherited", + }), + }; + return await updater(store); + }); + mocks.agentCommand.mockClear(); + + await invokeAgent( + { + message: "spawned run", + sessionKey: "agent:main:main", idempotencyKey: "workspace-forwarded", }, { reqId: "workspace-forwarded-1" }, diff --git a/src/gateway/server-methods/agent.ts b/src/gateway/server-methods/agent.ts index a6d437e6792..98466f91044 100644 --- a/src/gateway/server-methods/agent.ts +++ b/src/gateway/server-methods/agent.ts @@ -190,24 +190,20 @@ export const agentHandlers: GatewayRequestHandlers = { timeout?: number; bestEffortDeliver?: boolean; label?: string; - spawnedBy?: string; inputProvenance?: InputProvenance; - 
workspaceDir?: string; }; const senderIsOwner = resolveSenderIsOwnerFromClient(client); const cfg = loadConfig(); const idem = request.idempotencyKey; const normalizedSpawned = normalizeSpawnedRunMetadata({ - spawnedBy: request.spawnedBy, groupId: request.groupId, groupChannel: request.groupChannel, groupSpace: request.groupSpace, - workspaceDir: request.workspaceDir, }); let resolvedGroupId: string | undefined = normalizedSpawned.groupId; let resolvedGroupChannel: string | undefined = normalizedSpawned.groupChannel; let resolvedGroupSpace: string | undefined = normalizedSpawned.groupSpace; - let spawnedByValue = normalizedSpawned.spawnedBy; + let spawnedByValue: string | undefined; const inputProvenance = normalizeInputProvenance(request.inputProvenance); const cached = context.dedupe.get(`agent:${idem}`); if (cached) { @@ -359,11 +355,7 @@ export const agentHandlers: GatewayRequestHandlers = { const sessionId = entry?.sessionId ?? randomUUID(); const labelValue = request.label?.trim() || entry?.label; const sessionAgent = resolveAgentIdFromSessionKey(canonicalKey); - spawnedByValue = canonicalizeSpawnedByForAgent( - cfg, - sessionAgent, - spawnedByValue || entry?.spawnedBy, - ); + spawnedByValue = canonicalizeSpawnedByForAgent(cfg, sessionAgent, entry?.spawnedBy); let inheritedGroup: | { groupId?: string; groupChannel?: string; groupSpace?: string } | undefined; @@ -400,6 +392,7 @@ export const agentHandlers: GatewayRequestHandlers = { providerOverride: entry?.providerOverride, label: labelValue, spawnedBy: spawnedByValue, + spawnedWorkspaceDir: entry?.spawnedWorkspaceDir, spawnDepth: entry?.spawnDepth, channel: entry?.channel ?? request.channel?.trim(), groupId: resolvedGroupId ?? entry?.groupId, @@ -628,7 +621,7 @@ export const agentHandlers: GatewayRequestHandlers = { // Internal-only: allow workspace override for spawned subagent runs. 
workspaceDir: resolveIngressWorkspaceOverrideForSpawnedRun({ spawnedBy: spawnedByValue, - workspaceDir: request.workspaceDir, + workspaceDir: sessionEntry?.spawnedWorkspaceDir, }), senderIsOwner, }, diff --git a/src/gateway/server-methods/browser.profile-from-body.test.ts b/src/gateway/server-methods/browser.profile-from-body.test.ts index 972fca9f848..3b2caf8dbdc 100644 --- a/src/gateway/server-methods/browser.profile-from-body.test.ts +++ b/src/gateway/server-methods/browser.profile-from-body.test.ts @@ -100,4 +100,42 @@ describe("browser.request profile selection", () => { }), ); }); + + it.each([ + { + method: "POST", + path: "/profiles/create", + body: { name: "poc", cdpUrl: "http://10.0.0.42:9222" }, + }, + { + method: "DELETE", + path: "/profiles/poc", + body: undefined, + }, + { + method: "POST", + path: "profiles/create", + body: { name: "poc", cdpUrl: "http://10.0.0.42:9222" }, + }, + { + method: "DELETE", + path: "profiles/poc", + body: undefined, + }, + ])("blocks persistent profile mutations for $method $path", async ({ method, path, body }) => { + const { respond, nodeRegistry } = await runBrowserRequest({ + method, + path, + body, + }); + + expect(nodeRegistry.invoke).not.toHaveBeenCalled(); + expect(respond).toHaveBeenCalledWith( + false, + undefined, + expect.objectContaining({ + message: "browser.request cannot create or delete persistent browser profiles", + }), + ); + }); }); diff --git a/src/gateway/server-methods/browser.ts b/src/gateway/server-methods/browser.ts index bda77ad98e4..0bb2db3dafd 100644 --- a/src/gateway/server-methods/browser.ts +++ b/src/gateway/server-methods/browser.ts @@ -20,6 +20,26 @@ type BrowserRequestParams = { timeoutMs?: number; }; +function normalizeBrowserRequestPath(value: string): string { + const trimmed = value.trim(); + if (!trimmed) { + return trimmed; + } + const withLeadingSlash = trimmed.startsWith("/") ? 
trimmed : `/${trimmed}`; + if (withLeadingSlash.length <= 1) { + return withLeadingSlash; + } + return withLeadingSlash.replace(/\/+$/, ""); +} + +function isPersistentBrowserProfileMutation(method: string, path: string): boolean { + const normalizedPath = normalizeBrowserRequestPath(path); + if (method === "POST" && normalizedPath === "/profiles/create") { + return true; + } + return method === "DELETE" && /^\/profiles\/[^/]+$/.test(normalizedPath); +} + function resolveRequestedProfile(params: { query?: Record; body?: unknown; @@ -167,6 +187,17 @@ export const browserHandlers: GatewayRequestHandlers = { ); return; } + if (isPersistentBrowserProfileMutation(methodRaw, path)) { + respond( + false, + undefined, + errorShape( + ErrorCodes.INVALID_REQUEST, + "browser.request cannot create or delete persistent browser profiles", + ), + ); + return; + } const cfg = loadConfig(); let nodeTarget: NodeSession | null = null; diff --git a/src/gateway/server-methods/chat.abort-authorization.test.ts b/src/gateway/server-methods/chat.abort-authorization.test.ts new file mode 100644 index 00000000000..6fbf0478df3 --- /dev/null +++ b/src/gateway/server-methods/chat.abort-authorization.test.ts @@ -0,0 +1,147 @@ +import { describe, expect, it, vi } from "vitest"; +import { chatHandlers } from "./chat.js"; + +function createActiveRun(sessionKey: string, owner?: { connId?: string; deviceId?: string }) { + const now = Date.now(); + return { + controller: new AbortController(), + sessionId: `${sessionKey}-session`, + sessionKey, + startedAtMs: now, + expiresAtMs: now + 30_000, + ownerConnId: owner?.connId, + ownerDeviceId: owner?.deviceId, + }; +} + +function createContext(overrides: Record = {}) { + return { + chatAbortControllers: new Map(), + chatRunBuffers: new Map(), + chatDeltaSentAt: new Map(), + chatAbortedRuns: new Map(), + removeChatRun: vi + .fn() + .mockImplementation((run: string) => ({ sessionKey: "main", clientRunId: run })), + agentRunSeq: new Map(), + broadcast: 
vi.fn(), + nodeSendToSession: vi.fn(), + logGateway: { warn: vi.fn() }, + ...overrides, + }; +} + +async function invokeChatAbort(params: { + context: ReturnType; + request: { sessionKey: string; runId?: string }; + client?: { + connId?: string; + connect?: { + device?: { id?: string }; + scopes?: string[]; + }; + } | null; +}) { + const respond = vi.fn(); + await chatHandlers["chat.abort"]({ + params: params.request, + respond: respond as never, + context: params.context as never, + req: {} as never, + client: (params.client ?? null) as never, + isWebchatConnect: () => false, + }); + return respond; +} + +describe("chat.abort authorization", () => { + it("rejects explicit run aborts from other clients", async () => { + const context = createContext({ + chatAbortControllers: new Map([ + ["run-1", createActiveRun("main", { connId: "conn-owner", deviceId: "dev-owner" })], + ]), + }); + + const respond = await invokeChatAbort({ + context, + request: { sessionKey: "main", runId: "run-1" }, + client: { + connId: "conn-other", + connect: { device: { id: "dev-other" }, scopes: ["operator.write"] }, + }, + }); + + const [ok, payload, error] = respond.mock.calls.at(-1) ?? []; + expect(ok).toBe(false); + expect(payload).toBeUndefined(); + expect(error).toMatchObject({ code: "INVALID_REQUEST", message: "unauthorized" }); + expect(context.chatAbortControllers.has("run-1")).toBe(true); + }); + + it("allows the same paired device to abort after reconnecting", async () => { + const context = createContext({ + chatAbortControllers: new Map([ + ["run-1", createActiveRun("main", { connId: "conn-old", deviceId: "dev-1" })], + ]), + }); + + const respond = await invokeChatAbort({ + context, + request: { sessionKey: "main", runId: "run-1" }, + client: { + connId: "conn-new", + connect: { device: { id: "dev-1" }, scopes: ["operator.write"] }, + }, + }); + + const [ok, payload] = respond.mock.calls.at(-1) ?? 
[]; + expect(ok).toBe(true); + expect(payload).toMatchObject({ aborted: true, runIds: ["run-1"] }); + expect(context.chatAbortControllers.has("run-1")).toBe(false); + }); + + it("only aborts session-scoped runs owned by the requester", async () => { + const context = createContext({ + chatAbortControllers: new Map([ + ["run-mine", createActiveRun("main", { deviceId: "dev-1" })], + ["run-other", createActiveRun("main", { deviceId: "dev-2" })], + ]), + }); + + const respond = await invokeChatAbort({ + context, + request: { sessionKey: "main" }, + client: { + connId: "conn-1", + connect: { device: { id: "dev-1" }, scopes: ["operator.write"] }, + }, + }); + + const [ok, payload] = respond.mock.calls.at(-1) ?? []; + expect(ok).toBe(true); + expect(payload).toMatchObject({ aborted: true, runIds: ["run-mine"] }); + expect(context.chatAbortControllers.has("run-mine")).toBe(false); + expect(context.chatAbortControllers.has("run-other")).toBe(true); + }); + + it("allows operator.admin clients to bypass owner checks", async () => { + const context = createContext({ + chatAbortControllers: new Map([ + ["run-1", createActiveRun("main", { connId: "conn-owner", deviceId: "dev-owner" })], + ]), + }); + + const respond = await invokeChatAbort({ + context, + request: { sessionKey: "main", runId: "run-1" }, + client: { + connId: "conn-admin", + connect: { device: { id: "dev-admin" }, scopes: ["operator.admin"] }, + }, + }); + + const [ok, payload] = respond.mock.calls.at(-1) ?? 
[]; + expect(ok).toBe(true); + expect(payload).toMatchObject({ aborted: true, runIds: ["run-1"] }); + }); +}); diff --git a/src/gateway/server-methods/chat.directive-tags.test.ts b/src/gateway/server-methods/chat.directive-tags.test.ts index 1415ef6d6f7..06b642b28c5 100644 --- a/src/gateway/server-methods/chat.directive-tags.test.ts +++ b/src/gateway/server-methods/chat.directive-tags.test.ts @@ -656,6 +656,49 @@ describe("chat directive tag stripping for non-streaming final payloads", () => ); }); + it("chat.send does not inherit external delivery context for UI clients on main sessions when deliver is enabled", async () => { + createTranscriptFixture("openclaw-chat-send-main-ui-deliver-no-route-"); + mockState.finalText = "ok"; + mockState.sessionEntry = { + deliveryContext: { + channel: "telegram", + to: "telegram:200482621", + accountId: "default", + }, + lastChannel: "telegram", + lastTo: "telegram:200482621", + lastAccountId: "default", + }; + const respond = vi.fn(); + const context = createChatContext(); + + await runNonStreamingChatSend({ + context, + respond, + idempotencyKey: "idem-main-ui-deliver-no-route", + client: { + connect: { + client: { + mode: GATEWAY_CLIENT_MODES.UI, + id: "openclaw-tui", + }, + }, + } as unknown, + sessionKey: "agent:main:main", + deliver: true, + expectBroadcast: false, + }); + + expect(mockState.lastDispatchCtx).toEqual( + expect.objectContaining({ + OriginatingChannel: "webchat", + OriginatingTo: undefined, + ExplicitDeliverRoute: false, + AccountId: undefined, + }), + ); + }); + it("chat.send inherits external delivery context for CLI clients on configured main sessions", async () => { createTranscriptFixture("openclaw-chat-send-config-main-cli-routes-"); mockState.mainSessionKey = "work"; diff --git a/src/gateway/server-methods/chat.ts b/src/gateway/server-methods/chat.ts index 71669080382..857868c59a5 100644 --- a/src/gateway/server-methods/chat.ts +++ b/src/gateway/server-methods/chat.ts @@ -20,12 +20,12 @@ import { } 
from "../../utils/directive-tags.js"; import { INTERNAL_MESSAGE_CHANNEL, + isGatewayCliClient, isWebchatClient, normalizeMessageChannel, } from "../../utils/message-channel.js"; import { abortChatRunById, - abortChatRunsForSessionKey, type ChatAbortControllerEntry, type ChatAbortOps, isChatStopCommandText, @@ -33,6 +33,7 @@ import { } from "../chat-abort.js"; import { type ChatImageContent, parseMessageWithAttachments } from "../chat-attachments.js"; import { stripEnvelopeFromMessage, stripEnvelopeFromMessages } from "../chat-sanitize.js"; +import { ADMIN_SCOPE } from "../method-scopes.js"; import { GATEWAY_CLIENT_CAPS, GATEWAY_CLIENT_MODES, @@ -83,6 +84,12 @@ type AbortedPartialSnapshot = { abortOrigin: AbortOrigin; }; +type ChatAbortRequester = { + connId?: string; + deviceId?: string; + isAdmin: boolean; +}; + const CHAT_HISTORY_TEXT_MAX_CHARS = 12_000; const CHAT_HISTORY_MAX_SINGLE_MESSAGE_BYTES = 128 * 1024; const CHAT_HISTORY_OVERSIZED_PLACEHOLDER = "[chat.history omitted: message too large]"; @@ -175,21 +182,27 @@ function resolveChatSendOriginatingRoute(params: { typeof sessionScopeParts[1] === "string" && sessionChannelHint === routeChannelCandidate; const isFromWebchatClient = isWebchatClient(params.client); + const isFromGatewayCliClient = isGatewayCliClient(params.client); + const hasClientMetadata = + (typeof params.client?.mode === "string" && params.client.mode.trim().length > 0) || + (typeof params.client?.id === "string" && params.client.id.trim().length > 0); const configuredMainKey = (params.mainKey ?? "main").trim().toLowerCase(); const isConfiguredMainSessionScope = normalizedSessionScopeHead.length > 0 && normalizedSessionScopeHead === configuredMainKey; + const canInheritConfiguredMainRoute = + isConfiguredMainSessionScope && + params.hasConnectedClient && + (isFromGatewayCliClient || !hasClientMetadata); - // Webchat/Control UI clients never inherit external delivery routes, even when - // accessing channel-scoped sessions. 
External routes are only for non-webchat - // clients where the session key explicitly encodes an external target. - // Preserve the old configured-main contract: any connected non-webchat client - // may inherit the last external route even when client metadata is absent. + // Webchat clients never inherit external delivery routes. Configured-main + // sessions are stricter than channel-scoped sessions: only CLI callers, or + // legacy callers with no client metadata, may inherit the last external route. const canInheritDeliverableRoute = Boolean( !isFromWebchatClient && sessionChannelHint && sessionChannelHint !== INTERNAL_MESSAGE_CHANNEL && ((!isChannelAgnosticSessionScope && (isChannelScopedSession || hasLegacyChannelPeerShape)) || - (isConfiguredMainSessionScope && params.hasConnectedClient)), + canInheritConfiguredMainRoute), ); const hasDeliverableRoute = canInheritDeliverableRoute && @@ -314,6 +327,68 @@ function sanitizeChatHistoryContentBlock(block: unknown): { block: unknown; chan return { block: changed ? entry : block, changed }; } +/** + * Validate that a value is a finite number, returning undefined otherwise. + */ +function toFiniteNumber(x: unknown): number | undefined { + return typeof x === "number" && Number.isFinite(x) ? x : undefined; +} + +/** + * Sanitize usage metadata to ensure only finite numeric fields are included. + * Prevents UI crashes from malformed transcript JSON. 
+ */ +function sanitizeUsage(raw: unknown): Record | undefined { + if (!raw || typeof raw !== "object") { + return undefined; + } + const u = raw as Record; + const out: Record = {}; + + // Whitelist known usage fields and validate they're finite numbers + const knownFields = [ + "input", + "output", + "totalTokens", + "inputTokens", + "outputTokens", + "cacheRead", + "cacheWrite", + "cache_read_input_tokens", + "cache_creation_input_tokens", + ]; + + for (const k of knownFields) { + const n = toFiniteNumber(u[k]); + if (n !== undefined) { + out[k] = n; + } + } + + // Preserve nested usage.cost when present + if ("cost" in u && u.cost != null && typeof u.cost === "object") { + const sanitizedCost = sanitizeCost(u.cost); + if (sanitizedCost) { + (out as Record).cost = sanitizedCost; + } + } + + return Object.keys(out).length > 0 ? out : undefined; +} + +/** + * Sanitize cost metadata to ensure only finite numeric fields are included. + * Prevents UI crashes from calling .toFixed() on non-numbers. + */ +function sanitizeCost(raw: unknown): { total?: number } | undefined { + if (!raw || typeof raw !== "object") { + return undefined; + } + const c = raw as Record; + const total = toFiniteNumber(c.total); + return total !== undefined ? { total } : undefined; +} + function sanitizeChatHistoryMessage(message: unknown): { message: unknown; changed: boolean } { if (!message || typeof message !== "object") { return { message, changed: false }; @@ -325,13 +400,38 @@ function sanitizeChatHistoryMessage(message: unknown): { message: unknown; chang delete entry.details; changed = true; } - if ("usage" in entry) { - delete entry.usage; - changed = true; - } - if ("cost" in entry) { - delete entry.cost; - changed = true; + + // Keep usage/cost so the chat UI can render per-message token and cost badges. + // Only retain usage/cost on assistant messages and validate numeric fields to prevent UI crashes. 
+ if (entry.role !== "assistant") { + if ("usage" in entry) { + delete entry.usage; + changed = true; + } + if ("cost" in entry) { + delete entry.cost; + changed = true; + } + } else { + // Validate and sanitize usage/cost for assistant messages + if ("usage" in entry) { + const sanitized = sanitizeUsage(entry.usage); + if (sanitized) { + entry.usage = sanitized; + } else { + delete entry.usage; + } + changed = true; + } + if ("cost" in entry) { + const sanitized = sanitizeCost(entry.cost); + if (sanitized) { + entry.cost = sanitized; + } else { + delete entry.cost; + } + changed = true; + } } if (typeof entry.content === "string") { @@ -597,12 +697,12 @@ function appendAssistantTranscriptMessage(params: { function collectSessionAbortPartials(params: { chatAbortControllers: Map; chatRunBuffers: Map; - sessionKey: string; + runIds: ReadonlySet; abortOrigin: AbortOrigin; }): AbortedPartialSnapshot[] { const out: AbortedPartialSnapshot[] = []; for (const [runId, active] of params.chatAbortControllers) { - if (active.sessionKey !== params.sessionKey) { + if (!params.runIds.has(runId)) { continue; } const text = params.chatRunBuffers.get(runId); @@ -664,23 +764,104 @@ function createChatAbortOps(context: GatewayRequestContext): ChatAbortOps { }; } +function normalizeOptionalText(value?: string | null): string | undefined { + const trimmed = value?.trim(); + return trimmed || undefined; +} + +function resolveChatAbortRequester( + client: GatewayRequestHandlerOptions["client"], +): ChatAbortRequester { + const scopes = Array.isArray(client?.connect?.scopes) ? 
client.connect.scopes : []; + return { + connId: normalizeOptionalText(client?.connId), + deviceId: normalizeOptionalText(client?.connect?.device?.id), + isAdmin: scopes.includes(ADMIN_SCOPE), + }; +} + +function canRequesterAbortChatRun( + entry: ChatAbortControllerEntry, + requester: ChatAbortRequester, +): boolean { + if (requester.isAdmin) { + return true; + } + const ownerDeviceId = normalizeOptionalText(entry.ownerDeviceId); + const ownerConnId = normalizeOptionalText(entry.ownerConnId); + if (!ownerDeviceId && !ownerConnId) { + return true; + } + if (ownerDeviceId && requester.deviceId && ownerDeviceId === requester.deviceId) { + return true; + } + if (ownerConnId && requester.connId && ownerConnId === requester.connId) { + return true; + } + return false; +} + +function resolveAuthorizedRunIdsForSession(params: { + chatAbortControllers: Map; + sessionKey: string; + requester: ChatAbortRequester; +}) { + const authorizedRunIds: string[] = []; + let matchedSessionRuns = 0; + for (const [runId, active] of params.chatAbortControllers) { + if (active.sessionKey !== params.sessionKey) { + continue; + } + matchedSessionRuns += 1; + if (canRequesterAbortChatRun(active, params.requester)) { + authorizedRunIds.push(runId); + } + } + return { + matchedSessionRuns, + authorizedRunIds, + }; +} + function abortChatRunsForSessionKeyWithPartials(params: { context: GatewayRequestContext; ops: ChatAbortOps; sessionKey: string; abortOrigin: AbortOrigin; stopReason?: string; + requester: ChatAbortRequester; }) { + const { matchedSessionRuns, authorizedRunIds } = resolveAuthorizedRunIdsForSession({ + chatAbortControllers: params.context.chatAbortControllers, + sessionKey: params.sessionKey, + requester: params.requester, + }); + if (authorizedRunIds.length === 0) { + return { + aborted: false, + runIds: [], + unauthorized: matchedSessionRuns > 0, + }; + } + const authorizedRunIdSet = new Set(authorizedRunIds); const snapshots = collectSessionAbortPartials({ 
chatAbortControllers: params.context.chatAbortControllers, chatRunBuffers: params.context.chatRunBuffers, - sessionKey: params.sessionKey, + runIds: authorizedRunIdSet, abortOrigin: params.abortOrigin, }); - const res = abortChatRunsForSessionKey(params.ops, { - sessionKey: params.sessionKey, - stopReason: params.stopReason, - }); + const runIds: string[] = []; + for (const runId of authorizedRunIds) { + const res = abortChatRunById(params.ops, { + runId, + sessionKey: params.sessionKey, + stopReason: params.stopReason, + }); + if (res.aborted) { + runIds.push(runId); + } + } + const res = { aborted: runIds.length > 0, runIds, unauthorized: false }; if (res.aborted) { persistAbortedPartials({ context: params.context, @@ -802,7 +983,7 @@ export const chatHandlers: GatewayRequestHandlers = { verboseLevel, }); }, - "chat.abort": ({ params, respond, context }) => { + "chat.abort": ({ params, respond, context, client }) => { if (!validateChatAbortParams(params)) { respond( false, @@ -820,6 +1001,7 @@ export const chatHandlers: GatewayRequestHandlers = { }; const ops = createChatAbortOps(context); + const requester = resolveChatAbortRequester(client); if (!runId) { const res = abortChatRunsForSessionKeyWithPartials({ @@ -828,7 +1010,12 @@ export const chatHandlers: GatewayRequestHandlers = { sessionKey: rawSessionKey, abortOrigin: "rpc", stopReason: "rpc", + requester, }); + if (res.unauthorized) { + respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized")); + return; + } respond(true, { ok: true, aborted: res.aborted, runIds: res.runIds }); return; } @@ -846,6 +1033,10 @@ export const chatHandlers: GatewayRequestHandlers = { ); return; } + if (!canRequesterAbortChatRun(active, requester)) { + respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized")); + return; + } const partialText = context.chatRunBuffers.get(runId); const res = abortChatRunById(ops, { @@ -987,7 +1178,12 @@ export const chatHandlers: 
GatewayRequestHandlers = { sessionKey: rawSessionKey, abortOrigin: "stop-command", stopReason: "stop", + requester: resolveChatAbortRequester(client), }); + if (res.unauthorized) { + respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized")); + return; + } respond(true, { ok: true, aborted: res.aborted, runIds: res.runIds }); return; } @@ -1017,6 +1213,8 @@ export const chatHandlers: GatewayRequestHandlers = { sessionKey: rawSessionKey, startedAtMs: now, expiresAtMs: resolveChatRunExpiresAtMs({ now, timeoutMs }), + ownerConnId: normalizeOptionalText(client?.connId), + ownerDeviceId: normalizeOptionalText(client?.connect?.device?.id), }); const ackPayload = { runId: clientRunId, diff --git a/src/gateway/server-methods/config.ts b/src/gateway/server-methods/config.ts index 9b57a126e5f..1d3d1c85977 100644 --- a/src/gateway/server-methods/config.ts +++ b/src/gateway/server-methods/config.ts @@ -10,6 +10,7 @@ import { validateConfigObjectWithPlugins, writeConfigFile, } from "../../config/config.js"; +import { formatConfigIssueLines } from "../../config/issue-format.js"; import { applyLegacyMigrations } from "../../config/legacy.js"; import { applyMergePatch } from "../../config/merge-patch.js"; import { @@ -23,7 +24,7 @@ import { type ConfigSchemaResponse, } from "../../config/schema.js"; import { extractDeliveryInfo } from "../../config/sessions.js"; -import type { OpenClawConfig } from "../../config/types.openclaw.js"; +import type { ConfigValidationIssue, OpenClawConfig } from "../../config/types.openclaw.js"; import { formatDoctorNonInteractiveHint, type RestartSentinelPayload, @@ -54,6 +55,8 @@ import { parseRestartRequestParams } from "./restart-request.js"; import type { GatewayRequestHandlers, RespondFn } from "./types.js"; import { assertValidParams } from "./validation.js"; +const MAX_CONFIG_ISSUES_IN_ERROR_MESSAGE = 3; + function requireConfigBaseHash( params: unknown, snapshot: Awaited>, @@ -158,7 +161,7 @@ function 
parseValidateConfigFromRawOrRespond( respond( false, undefined, - errorShape(ErrorCodes.INVALID_REQUEST, "invalid config", { + errorShape(ErrorCodes.INVALID_REQUEST, summarizeConfigValidationIssues(validated.issues), { details: { issues: validated.issues }, }), ); @@ -167,6 +170,20 @@ function parseValidateConfigFromRawOrRespond( return { config: validated.config, schema }; } +function summarizeConfigValidationIssues(issues: ReadonlyArray): string { + const trimmed = issues.slice(0, MAX_CONFIG_ISSUES_IN_ERROR_MESSAGE); + const lines = formatConfigIssueLines(trimmed, "", { normalizeRoot: true }) + .map((line) => line.trim()) + .filter(Boolean); + if (lines.length === 0) { + return "invalid config"; + } + const hiddenCount = Math.max(0, issues.length - lines.length); + return `invalid config: ${lines.join("; ")}${ + hiddenCount > 0 ? ` (+${hiddenCount} more issue${hiddenCount === 1 ? "" : "s"})` : "" + }`; +} + function resolveConfigRestartRequest(params: unknown): { sessionKey: string | undefined; note: string | undefined; @@ -398,7 +415,7 @@ export const configHandlers: GatewayRequestHandlers = { respond( false, undefined, - errorShape(ErrorCodes.INVALID_REQUEST, "invalid config", { + errorShape(ErrorCodes.INVALID_REQUEST, summarizeConfigValidationIssues(validated.issues), { details: { issues: validated.issues }, }), ); diff --git a/src/gateway/server-methods/exec-approval.ts b/src/gateway/server-methods/exec-approval.ts index 07dd8546c3f..81d479cbbd6 100644 --- a/src/gateway/server-methods/exec-approval.ts +++ b/src/gateway/server-methods/exec-approval.ts @@ -1,3 +1,4 @@ +import { sanitizeExecApprovalDisplayText } from "../../infra/exec-approval-command-display.js"; import type { ExecApprovalForwarder } from "../../infra/exec-approval-forwarder.js"; import { DEFAULT_EXEC_APPROVAL_TIMEOUT_MS, @@ -125,8 +126,11 @@ export function createExecApprovalHandlers( return; } const request = { - command: effectiveCommandText, - commandPreview: host === "node" ? 
undefined : approvalContext.commandPreview, + command: sanitizeExecApprovalDisplayText(effectiveCommandText), + commandPreview: + host === "node" || !approvalContext.commandPreview + ? undefined + : sanitizeExecApprovalDisplayText(approvalContext.commandPreview), commandArgv: host === "node" ? undefined : effectiveCommandArgv, envKeys: systemRunBinding?.envKeys?.length ? systemRunBinding.envKeys : undefined, systemRunBinding: systemRunBinding?.binding ?? null, diff --git a/src/gateway/server-methods/server-methods.test.ts b/src/gateway/server-methods/server-methods.test.ts index 51da6927f5e..424511370cd 100644 --- a/src/gateway/server-methods/server-methods.test.ts +++ b/src/gateway/server-methods/server-methods.test.ts @@ -641,6 +641,34 @@ describe("exec approval handlers", () => { ); }); + it("sanitizes invisible Unicode format chars in approval display text without changing node bindings", async () => { + const { handlers, broadcasts, respond, context } = createExecApprovalFixture(); + await requestExecApproval({ + handlers, + respond, + context, + params: { + timeoutMs: 10, + command: "bash safe\u200B.sh", + commandArgv: ["bash", "safe\u200B.sh"], + systemRunPlan: { + argv: ["bash", "safe\u200B.sh"], + cwd: "/real/cwd", + commandText: "bash safe\u200B.sh", + agentId: "main", + sessionKey: "agent:main:main", + }, + }, + }); + const requested = broadcasts.find((entry) => entry.event === "exec.approval.requested"); + expect(requested).toBeTruthy(); + const request = (requested?.payload as { request?: Record })?.request ?? 
{}; + expect(request["command"]).toBe("bash safe\\u{200B}.sh"); + expect((request["systemRunPlan"] as { commandText?: string }).commandText).toBe( + "bash safe\u200B.sh", + ); + }); + it("accepts resolve during broadcast", async () => { const manager = new ExecApprovalManager(); const handlers = createExecApprovalHandlers(manager); diff --git a/src/gateway/server.auth.browser-hardening.test.ts b/src/gateway/server.auth.browser-hardening.test.ts index e9550a8b1aa..c4060716bd4 100644 --- a/src/gateway/server.auth.browser-hardening.test.ts +++ b/src/gateway/server.auth.browser-hardening.test.ts @@ -12,6 +12,7 @@ import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-cha import { buildDeviceAuthPayload } from "./device-auth.js"; import { connectReq, + connectOk, installGatewayTestHooks, readConnectChallengeNonce, testState, @@ -27,6 +28,7 @@ const TEST_OPERATOR_CLIENT = { platform: "test", mode: GATEWAY_CLIENT_MODES.TEST, }; +const ALLOWED_BROWSER_ORIGIN = "https://control.example.com"; const originForPort = (port: number) => `http://127.0.0.1:${port}`; @@ -73,6 +75,127 @@ async function createSignedDevice(params: { } describe("gateway auth browser hardening", () => { + test("rejects trusted-proxy browser connects from origins outside the allowlist", async () => { + const { writeConfigFile } = await import("../config/config.js"); + await writeConfigFile({ + gateway: { + auth: { + mode: "trusted-proxy", + trustedProxy: { + userHeader: "x-forwarded-user", + requiredHeaders: ["x-forwarded-proto"], + }, + }, + trustedProxies: ["127.0.0.1"], + controlUi: { + allowedOrigins: [ALLOWED_BROWSER_ORIGIN], + }, + }, + }); + + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, { + origin: "https://evil.example", + "x-forwarded-for": "203.0.113.50", + "x-forwarded-proto": "https", + "x-forwarded-user": "operator@example.com", + }); + try { + const res = await connectReq(ws, { + client: TEST_OPERATOR_CLIENT, + device: null, + }); + 
expect(res.ok).toBe(false); + expect(res.error?.message ?? "").toContain("origin not allowed"); + } finally { + ws.close(); + } + }); + }); + + test("accepts trusted-proxy browser connects from allowed origins", async () => { + const { writeConfigFile } = await import("../config/config.js"); + await writeConfigFile({ + gateway: { + auth: { + mode: "trusted-proxy", + trustedProxy: { + userHeader: "x-forwarded-user", + requiredHeaders: ["x-forwarded-proto"], + }, + }, + trustedProxies: ["127.0.0.1"], + controlUi: { + allowedOrigins: [ALLOWED_BROWSER_ORIGIN], + }, + }, + }); + + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, { + origin: ALLOWED_BROWSER_ORIGIN, + "x-forwarded-for": "203.0.113.50", + "x-forwarded-proto": "https", + "x-forwarded-user": "operator@example.com", + }); + try { + const payload = await connectOk(ws, { + client: TEST_OPERATOR_CLIENT, + device: null, + }); + expect(payload.type).toBe("hello-ok"); + } finally { + ws.close(); + } + }); + }); + + test.each([ + { + name: "rejects disallowed origins", + origin: "https://evil.example", + ok: false, + expectedMessage: "origin not allowed", + }, + { + name: "accepts allowed origins", + origin: ALLOWED_BROWSER_ORIGIN, + ok: true, + }, + ])( + "keeps non-proxy browser-origin behavior unchanged: $name", + async ({ origin, ok, expectedMessage }) => { + const { writeConfigFile } = await import("../config/config.js"); + testState.gatewayAuth = { mode: "token", token: "secret" }; + await writeConfigFile({ + gateway: { + controlUi: { + allowedOrigins: [ALLOWED_BROWSER_ORIGIN], + }, + }, + }); + + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, { origin }); + try { + const res = await connectReq(ws, { + token: "secret", + client: TEST_OPERATOR_CLIENT, + device: null, + }); + expect(res.ok).toBe(ok); + if (ok) { + expect((res.payload as { type?: string } | undefined)?.type).toBe("hello-ok"); + } else { + expect(res.error?.message ?? 
"").toContain(expectedMessage ?? ""); + } + } finally { + ws.close(); + } + }); + }, + ); + test("rejects non-local browser origins for non-control-ui clients", async () => { testState.gatewayAuth = { mode: "token", token: "secret" }; await withGatewayServer(async ({ port }) => { diff --git a/src/gateway/server.chat.gateway-server-chat-b.test.ts b/src/gateway/server.chat.gateway-server-chat-b.test.ts index 2e76e1a5de1..ca1e2c09402 100644 --- a/src/gateway/server.chat.gateway-server-chat-b.test.ts +++ b/src/gateway/server.chat.gateway-server-chat-b.test.ts @@ -273,6 +273,37 @@ describe("gateway server chat", () => { }); }); + test("chat.history preserves usage and cost metadata for assistant messages", async () => { + await withGatewayChatHarness(async ({ ws, createSessionDir }) => { + await connectOk(ws); + + const sessionDir = await createSessionDir(); + await writeMainSessionStore(); + + await writeMainSessionTranscript(sessionDir, [ + JSON.stringify({ + message: { + role: "assistant", + timestamp: Date.now(), + content: [{ type: "text", text: "hello" }], + usage: { input: 12, output: 5, totalTokens: 17 }, + cost: { total: 0.0123 }, + details: { debug: true }, + }, + }), + ]); + + const messages = await fetchHistoryMessages(ws); + expect(messages).toHaveLength(1); + expect(messages[0]).toMatchObject({ + role: "assistant", + usage: { input: 12, output: 5, totalTokens: 17 }, + cost: { total: 0.0123 }, + }); + expect(messages[0]).not.toHaveProperty("details"); + }); + }); + test("chat.history strips inline directives from displayed message text", async () => { await withGatewayChatHarness(async ({ ws, createSessionDir }) => { await connectOk(ws); diff --git a/src/gateway/server.config-patch.test.ts b/src/gateway/server.config-patch.test.ts index 1f2d465b4da..67efe9b79be 100644 --- a/src/gateway/server.config-patch.test.ts +++ b/src/gateway/server.config-patch.test.ts @@ -72,6 +72,38 @@ describe("gateway config methods", () => { 
expect(res.payload?.config).toBeTruthy(); }); + it("returns config.set validation details in the top-level error message", async () => { + const current = await rpcReq<{ + hash?: string; + }>(requireWs(), "config.get", {}); + expect(current.ok).toBe(true); + expect(typeof current.payload?.hash).toBe("string"); + + const res = await rpcReq<{ + ok?: boolean; + error?: { + message?: string; + }; + }>(requireWs(), "config.set", { + raw: JSON.stringify({ gateway: { bind: 123 } }), + baseHash: current.payload?.hash, + }); + const error = res.error as + | { + message?: string; + details?: { + issues?: Array<{ path?: string; message?: string }>; + }; + } + | undefined; + + expect(res.ok).toBe(false); + expect(error?.message ?? "").toContain("invalid config:"); + expect(error?.message ?? "").toContain("gateway.bind"); + expect(error?.message ?? "").toContain("allowed:"); + expect(error?.details?.issues?.[0]?.path).toBe("gateway.bind"); + }); + it("returns a path-scoped config schema lookup", async () => { const res = await rpcReq<{ path: string; diff --git a/src/gateway/server/ws-connection/message-handler.ts b/src/gateway/server/ws-connection/message-handler.ts index 83d1b5f12a3..0897b51e937 100644 --- a/src/gateway/server/ws-connection/message-handler.ts +++ b/src/gateway/server/ws-connection/message-handler.ts @@ -114,7 +114,7 @@ function resolveHandshakeBrowserSecurityContext(params: { ); return { hasBrowserOriginHeader, - enforceOriginCheckForAnyClient: hasBrowserOriginHeader && !params.hasProxyHeaders, + enforceOriginCheckForAnyClient: hasBrowserOriginHeader, rateLimitClientIp: hasBrowserOriginHeader && isLoopbackAddress(params.clientIp) ? BROWSER_ORIGIN_LOOPBACK_RATE_LIMIT_IP diff --git a/src/gateway/session-utils.ts b/src/gateway/session-utils.ts index 969c60c378c..e16777f4f2c 100644 --- a/src/gateway/session-utils.ts +++ b/src/gateway/session-utils.ts @@ -810,6 +810,7 @@ export function listSessionsFromStore(params: { const model = resolvedModel.model ?? 
DEFAULT_MODEL; return { key, + spawnedBy: entry?.spawnedBy, entry, kind: classifySessionKey(key, entry), label: entry?.label, diff --git a/src/gateway/session-utils.types.ts b/src/gateway/session-utils.types.ts index 711a1997f22..80873b0000c 100644 --- a/src/gateway/session-utils.types.ts +++ b/src/gateway/session-utils.types.ts @@ -15,6 +15,7 @@ export type GatewaySessionsDefaults = { export type GatewaySessionRow = { key: string; + spawnedBy?: string; kind: "direct" | "group" | "global" | "unknown"; label?: string; displayName?: string; diff --git a/src/gateway/sessions-patch.test.ts b/src/gateway/sessions-patch.test.ts index 2249c7f5c77..79e332f23ba 100644 --- a/src/gateway/sessions-patch.test.ts +++ b/src/gateway/sessions-patch.test.ts @@ -265,6 +265,19 @@ describe("gateway sessions patch", () => { expect(entry.spawnedBy).toBe("agent:main:main"); }); + test("sets spawnedWorkspaceDir for subagent sessions", async () => { + const entry = expectPatchOk( + await runPatch({ + storeKey: "agent:main:subagent:child", + patch: { + key: "agent:main:subagent:child", + spawnedWorkspaceDir: "/tmp/subagent-workspace", + }, + }), + ); + expect(entry.spawnedWorkspaceDir).toBe("/tmp/subagent-workspace"); + }); + test("sets spawnDepth for ACP sessions", async () => { const entry = expectPatchOk( await runPatch({ @@ -282,6 +295,13 @@ describe("gateway sessions patch", () => { expectPatchError(result, "spawnDepth is only supported"); }); + test("rejects spawnedWorkspaceDir on non-subagent sessions", async () => { + const result = await runPatch({ + patch: { key: MAIN_SESSION_KEY, spawnedWorkspaceDir: "/tmp/nope" }, + }); + expectPatchError(result, "spawnedWorkspaceDir is only supported"); + }); + test("normalizes exec/send/group patches", async () => { const entry = expectPatchOk( await runPatch({ diff --git a/src/gateway/sessions-patch.ts b/src/gateway/sessions-patch.ts index 1bf79ba4edf..66010e4745c 100644 --- a/src/gateway/sessions-patch.ts +++ b/src/gateway/sessions-patch.ts 
@@ -128,6 +128,27 @@ export async function applySessionsPatchToStore(params: { } } + if ("spawnedWorkspaceDir" in patch) { + const raw = patch.spawnedWorkspaceDir; + if (raw === null) { + if (existing?.spawnedWorkspaceDir) { + return invalid("spawnedWorkspaceDir cannot be cleared once set"); + } + } else if (raw !== undefined) { + if (!supportsSpawnLineage(storeKey)) { + return invalid("spawnedWorkspaceDir is only supported for subagent:* or acp:* sessions"); + } + const trimmed = String(raw).trim(); + if (!trimmed) { + return invalid("invalid spawnedWorkspaceDir: empty"); + } + if (existing?.spawnedWorkspaceDir && existing.spawnedWorkspaceDir !== trimmed) { + return invalid("spawnedWorkspaceDir cannot be changed once set"); + } + next.spawnedWorkspaceDir = trimmed; + } + } + if ("spawnDepth" in patch) { const raw = patch.spawnDepth; if (raw === null) { diff --git a/src/gateway/tools-invoke-http.test.ts b/src/gateway/tools-invoke-http.test.ts index 66a68bf5d9f..36b05c00d50 100644 --- a/src/gateway/tools-invoke-http.test.ts +++ b/src/gateway/tools-invoke-http.test.ts @@ -1,8 +1,22 @@ import { createServer, type IncomingMessage, type ServerResponse } from "node:http"; import type { AddressInfo } from "node:net"; import { afterAll, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import type { runBeforeToolCallHook as runBeforeToolCallHookType } from "../agents/pi-tools.before-tool-call.js"; + +type RunBeforeToolCallHook = typeof runBeforeToolCallHookType; +type RunBeforeToolCallHookArgs = Parameters[0]; +type RunBeforeToolCallHookResult = Awaited>; const TEST_GATEWAY_TOKEN = "test-gateway-token-1234567890"; +const hookMocks = vi.hoisted(() => ({ + resolveToolLoopDetectionConfig: vi.fn(() => ({ warnAt: 3 })), + runBeforeToolCallHook: vi.fn( + async (args: RunBeforeToolCallHookArgs): Promise => ({ + blocked: false, + params: args.params, + }), + ), +})); let cfg: Record = {}; let lastCreateOpenClawToolsContext: Record | undefined; @@ -152,6 +166,14 @@ 
vi.mock("../agents/openclaw-tools.js", () => { }; }); +vi.mock("../agents/pi-tools.js", () => ({ + resolveToolLoopDetectionConfig: hookMocks.resolveToolLoopDetectionConfig, +})); + +vi.mock("../agents/pi-tools.before-tool-call.js", () => ({ + runBeforeToolCallHook: hookMocks.runBeforeToolCallHook, +})); + const { handleToolsInvokeHttpRequest } = await import("./tools-invoke-http.js"); let pluginHttpHandlers: Array<(req: IncomingMessage, res: ServerResponse) => Promise> = []; @@ -206,6 +228,15 @@ beforeEach(() => { pluginHttpHandlers = []; cfg = {}; lastCreateOpenClawToolsContext = undefined; + hookMocks.resolveToolLoopDetectionConfig.mockClear(); + hookMocks.resolveToolLoopDetectionConfig.mockImplementation(() => ({ warnAt: 3 })); + hookMocks.runBeforeToolCallHook.mockClear(); + hookMocks.runBeforeToolCallHook.mockImplementation( + async (args: RunBeforeToolCallHookArgs): Promise => ({ + blocked: false, + params: args.params, + }), + ); }); const resolveGatewayToken = (): string => TEST_GATEWAY_TOKEN; @@ -336,6 +367,56 @@ describe("POST /tools/invoke", () => { expect(body.ok).toBe(true); expect(body).toHaveProperty("result"); expect(lastCreateOpenClawToolsContext?.allowMediaInvokeCommands).toBe(true); + expect(hookMocks.runBeforeToolCallHook).toHaveBeenCalledWith( + expect.objectContaining({ + toolName: "agents_list", + ctx: expect.objectContaining({ + agentId: "main", + sessionKey: "agent:main:main", + loopDetection: { warnAt: 3 }, + }), + }), + ); + }); + + it("blocks tool execution when before_tool_call rejects the invoke", async () => { + setMainAllowedTools({ allow: ["tools_invoke_test"] }); + hookMocks.runBeforeToolCallHook.mockResolvedValueOnce({ + blocked: true, + reason: "blocked by test hook", + }); + + const res = await invokeToolAuthed({ + tool: "tools_invoke_test", + args: { mode: "ok" }, + sessionKey: "main", + }); + + expect(res.status).toBe(403); + await expect(res.json()).resolves.toMatchObject({ + ok: false, + error: { + type: "tool_call_blocked", 
+ message: "blocked by test hook", + }, + }); + }); + + it("uses before_tool_call adjusted params for HTTP tool execution", async () => { + setMainAllowedTools({ allow: ["tools_invoke_test"] }); + hookMocks.runBeforeToolCallHook.mockImplementationOnce(async () => ({ + blocked: false, + params: { mode: "rewritten" }, + })); + + const res = await invokeToolAuthed({ + tool: "tools_invoke_test", + args: { mode: "input" }, + sessionKey: "main", + }); + + const body = await expectOkInvokeResponse(res); + expect(body.result).toMatchObject({ ok: true }); }); it("supports tools.alsoAllow in profile and implicit modes", async () => { diff --git a/src/gateway/tools-invoke-http.ts b/src/gateway/tools-invoke-http.ts index 88cea7b3845..0cccafce999 100644 --- a/src/gateway/tools-invoke-http.ts +++ b/src/gateway/tools-invoke-http.ts @@ -1,5 +1,7 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import { createOpenClawTools } from "../agents/openclaw-tools.js"; +import { runBeforeToolCallHook } from "../agents/pi-tools.before-tool-call.js"; +import { resolveToolLoopDetectionConfig } from "../agents/pi-tools.js"; import { resolveEffectiveToolPolicy, resolveGroupToolPolicy, @@ -311,14 +313,32 @@ export async function handleToolsInvokeHttpRequest( } try { + const toolCallId = `http-${Date.now()}`; const toolArgs = mergeActionIntoArgsIfSupported({ // oxlint-disable-next-line typescript/no-explicit-any toolSchema: (tool as any).parameters, action, args, }); + const hookResult = await runBeforeToolCallHook({ + toolName, + params: toolArgs, + toolCallId, + ctx: { + agentId, + sessionKey, + loopDetection: resolveToolLoopDetectionConfig({ cfg, agentId }), + }, + }); + if (hookResult.blocked) { + sendJson(res, 403, { + ok: false, + error: { type: "tool_call_blocked", message: hookResult.reason }, + }); + return true; + } // oxlint-disable-next-line typescript/no-explicit-any - const result = await (tool as any).execute?.(`http-${Date.now()}`, toolArgs); + const result = 
await (tool as any).execute?.(toolCallId, hookResult.params); sendJson(res, 200, { ok: true, result }); } catch (err) { const inputStatus = resolveToolInputErrorStatus(err); diff --git a/src/imessage/monitor/inbound-processing.test.ts b/src/imessage/monitor/inbound-processing.test.ts index fab878a4cc7..b18012b9f1f 100644 --- a/src/imessage/monitor/inbound-processing.test.ts +++ b/src/imessage/monitor/inbound-processing.test.ts @@ -1,9 +1,11 @@ import { describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../../config/config.js"; +import { sanitizeTerminalText } from "../../terminal/safe-text.js"; import { describeIMessageEchoDropLog, resolveIMessageInboundDecision, } from "./inbound-processing.js"; +import { createSelfChatCache } from "./self-chat-cache.js"; describe("resolveIMessageInboundDecision echo detection", () => { const cfg = {} as OpenClawConfig; @@ -46,6 +48,324 @@ describe("resolveIMessageInboundDecision echo detection", () => { }), ); }); + + it("drops reflected self-chat duplicates after seeing the from-me copy", () => { + const selfChatCache = createSelfChatCache(); + const createdAt = "2026-03-02T20:58:10.649Z"; + + expect( + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9641, + sender: "+15555550123", + text: "Do you want to report this issue?", + created_at: createdAt, + is_from_me: true, + is_group: false, + }, + opts: undefined, + messageText: "Do you want to report this issue?", + bodyText: "Do you want to report this issue?", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }), + ).toEqual({ kind: "drop", reason: "from me" }); + + expect( + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9642, + sender: "+15555550123", + text: "Do you want to report this issue?", + created_at: 
createdAt, + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: "Do you want to report this issue?", + bodyText: "Do you want to report this issue?", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }), + ).toEqual({ kind: "drop", reason: "self-chat echo" }); + }); + + it("does not drop same-text messages when created_at differs", () => { + const selfChatCache = createSelfChatCache(); + + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9641, + sender: "+15555550123", + text: "ok", + created_at: "2026-03-02T20:58:10.649Z", + is_from_me: true, + is_group: false, + }, + opts: undefined, + messageText: "ok", + bodyText: "ok", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }); + + const decision = resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9642, + sender: "+15555550123", + text: "ok", + created_at: "2026-03-02T20:58:11.649Z", + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: "ok", + bodyText: "ok", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }); + + expect(decision.kind).toBe("dispatch"); + }); + + it("keeps self-chat cache scoped to configured group threads", () => { + const selfChatCache = createSelfChatCache(); + const groupedCfg = { + channels: { + imessage: { + groups: { + "123": {}, + "456": {}, + }, + }, + }, + } as OpenClawConfig; + const createdAt = "2026-03-02T20:58:10.649Z"; + + expect( + resolveIMessageInboundDecision({ 
+ cfg: groupedCfg, + accountId: "default", + message: { + id: 9701, + chat_id: 123, + sender: "+15555550123", + text: "same text", + created_at: createdAt, + is_from_me: true, + is_group: false, + }, + opts: undefined, + messageText: "same text", + bodyText: "same text", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }), + ).toEqual({ kind: "drop", reason: "from me" }); + + const decision = resolveIMessageInboundDecision({ + cfg: groupedCfg, + accountId: "default", + message: { + id: 9702, + chat_id: 456, + sender: "+15555550123", + text: "same text", + created_at: createdAt, + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: "same text", + bodyText: "same text", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }); + + expect(decision.kind).toBe("dispatch"); + }); + + it("does not drop other participants in the same group thread", () => { + const selfChatCache = createSelfChatCache(); + const createdAt = "2026-03-02T20:58:10.649Z"; + + expect( + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9751, + chat_id: 123, + sender: "+15555550123", + text: "same text", + created_at: createdAt, + is_from_me: true, + is_group: true, + }, + opts: undefined, + messageText: "same text", + bodyText: "same text", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }), + ).toEqual({ kind: "drop", reason: "from me" }); + + const decision = resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + 
id: 9752, + chat_id: 123, + sender: "+15555550999", + text: "same text", + created_at: createdAt, + is_from_me: false, + is_group: true, + }, + opts: undefined, + messageText: "same text", + bodyText: "same text", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }); + + expect(decision.kind).toBe("dispatch"); + }); + + it("sanitizes reflected duplicate previews before logging", () => { + const selfChatCache = createSelfChatCache(); + const logVerbose = vi.fn(); + const createdAt = "2026-03-02T20:58:10.649Z"; + const bodyText = "line-1\nline-2\t\u001b[31mred"; + + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9801, + sender: "+15555550123", + text: bodyText, + created_at: createdAt, + is_from_me: true, + is_group: false, + }, + opts: undefined, + messageText: bodyText, + bodyText, + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose, + }); + + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9802, + sender: "+15555550123", + text: bodyText, + created_at: createdAt, + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: bodyText, + bodyText, + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose, + }); + + expect(logVerbose).toHaveBeenCalledWith( + `imessage: dropping self-chat reflected duplicate: "${sanitizeTerminalText(bodyText)}"`, + ); + }); }); describe("describeIMessageEchoDropLog", () => { diff --git a/src/imessage/monitor/inbound-processing.ts b/src/imessage/monitor/inbound-processing.ts index 
d042f1f1a0f..b3fc10c1e7b 100644 --- a/src/imessage/monitor/inbound-processing.ts +++ b/src/imessage/monitor/inbound-processing.ts @@ -24,6 +24,7 @@ import { DM_GROUP_ACCESS_REASON, resolveDmGroupAccessWithLists, } from "../../security/dm-policy-shared.js"; +import { sanitizeTerminalText } from "../../terminal/safe-text.js"; import { truncateUtf16Safe } from "../../utils.js"; import { formatIMessageChatTarget, @@ -31,6 +32,7 @@ import { normalizeIMessageHandle, } from "../targets.js"; import { detectReflectedContent } from "./reflection-guard.js"; +import type { SelfChatCache } from "./self-chat-cache.js"; import type { MonitorIMessageOpts, IMessagePayload } from "./types.js"; type IMessageReplyContext = { @@ -101,6 +103,7 @@ export function resolveIMessageInboundDecision(params: { historyLimit: number; groupHistories: Map; echoCache?: { has: (scope: string, lookup: { text?: string; messageId?: string }) => boolean }; + selfChatCache?: SelfChatCache; logVerbose?: (msg: string) => void; }): IMessageInboundDecision { const senderRaw = params.message.sender ?? ""; @@ -109,13 +112,10 @@ export function resolveIMessageInboundDecision(params: { return { kind: "drop", reason: "missing sender" }; } const senderNormalized = normalizeIMessageHandle(sender); - if (params.message.is_from_me) { - return { kind: "drop", reason: "from me" }; - } - const chatId = params.message.chat_id ?? undefined; const chatGuid = params.message.chat_guid ?? undefined; const chatIdentifier = params.message.chat_identifier ?? undefined; + const createdAt = params.message.created_at ? Date.parse(params.message.created_at) : undefined; const groupIdCandidate = chatId !== undefined ? 
String(chatId) : undefined; const groupListPolicy = groupIdCandidate @@ -138,6 +138,18 @@ export function resolveIMessageInboundDecision(params: { groupIdCandidate && groupListPolicy.allowlistEnabled && groupListPolicy.groupConfig, ); const isGroup = Boolean(params.message.is_group) || treatAsGroupByConfig; + const selfChatLookup = { + accountId: params.accountId, + isGroup, + chatId, + sender, + text: params.bodyText, + createdAt, + }; + if (params.message.is_from_me) { + params.selfChatCache?.remember(selfChatLookup); + return { kind: "drop", reason: "from me" }; + } if (isGroup && !chatId) { return { kind: "drop", reason: "group without chat_id" }; } @@ -215,6 +227,17 @@ export function resolveIMessageInboundDecision(params: { return { kind: "drop", reason: "empty body" }; } + if ( + params.selfChatCache?.has({ + ...selfChatLookup, + text: bodyText, + }) + ) { + const preview = sanitizeTerminalText(truncateUtf16Safe(bodyText, 50)); + params.logVerbose?.(`imessage: dropping self-chat reflected duplicate: "${preview}"`); + return { kind: "drop", reason: "self-chat echo" }; + } + // Echo detection: check if the received message matches a recently sent message. // Scope by conversation so same text in different chats is not conflated. const inboundMessageId = params.message.id != null ? String(params.message.id) : undefined; @@ -250,7 +273,6 @@ export function resolveIMessageInboundDecision(params: { } const replyContext = describeReplyContext(params.message); - const createdAt = params.message.created_at ? Date.parse(params.message.created_at) : undefined; const historyKey = isGroup ? String(chatId ?? chatGuid ?? chatIdentifier ?? 
"unknown") : undefined; diff --git a/src/imessage/monitor/monitor-provider.ts b/src/imessage/monitor/monitor-provider.ts index 1ea35b60d95..1324529cbff 100644 --- a/src/imessage/monitor/monitor-provider.ts +++ b/src/imessage/monitor/monitor-provider.ts @@ -53,6 +53,7 @@ import { import { createLoopRateLimiter } from "./loop-rate-limiter.js"; import { parseIMessageNotification } from "./parse-notification.js"; import { normalizeAllowList, resolveRuntime } from "./runtime.js"; +import { createSelfChatCache } from "./self-chat-cache.js"; import type { IMessagePayload, MonitorIMessageOpts } from "./types.js"; /** @@ -99,6 +100,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P ); const groupHistories = new Map(); const sentMessageCache = createSentMessageCache(); + const selfChatCache = createSelfChatCache(); const loopRateLimiter = createLoopRateLimiter(); const textLimit = resolveTextChunkLimit(cfg, "imessage", accountInfo.accountId); const allowFrom = normalizeAllowList(opts.allowFrom ?? imessageCfg.allowFrom); @@ -252,6 +254,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P historyLimit, groupHistories, echoCache: sentMessageCache, + selfChatCache, logVerbose, }); @@ -267,6 +270,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P // are normal and should not escalate. 
const isLoopDrop = decision.reason === "echo" || + decision.reason === "self-chat echo" || decision.reason === "reflected assistant content" || decision.reason === "from me"; if (isLoopDrop) { diff --git a/src/imessage/monitor/self-chat-cache.test.ts b/src/imessage/monitor/self-chat-cache.test.ts new file mode 100644 index 00000000000..cf3a245ba30 --- /dev/null +++ b/src/imessage/monitor/self-chat-cache.test.ts @@ -0,0 +1,76 @@ +import { describe, expect, it, vi } from "vitest"; +import { createSelfChatCache } from "./self-chat-cache.js"; + +describe("createSelfChatCache", () => { + const directLookup = { + accountId: "default", + sender: "+15555550123", + isGroup: false, + } as const; + + it("matches repeated lookups for the same scope, timestamp, and text", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const cache = createSelfChatCache(); + cache.remember({ + ...directLookup, + text: " hello\r\nworld ", + createdAt: 123, + }); + + expect( + cache.has({ + ...directLookup, + text: "hello\nworld", + createdAt: 123, + }), + ).toBe(true); + }); + + it("expires entries after the ttl window", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const cache = createSelfChatCache(); + cache.remember({ ...directLookup, text: "hello", createdAt: 123 }); + + vi.advanceTimersByTime(11_001); + + expect(cache.has({ ...directLookup, text: "hello", createdAt: 123 })).toBe(false); + }); + + it("evicts older entries when the cache exceeds its cap", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const cache = createSelfChatCache(); + for (let i = 0; i < 513; i += 1) { + cache.remember({ + ...directLookup, + text: `message-${i}`, + createdAt: i, + }); + vi.advanceTimersByTime(1_001); + } + + expect(cache.has({ ...directLookup, text: "message-0", createdAt: 0 })).toBe(false); + expect(cache.has({ ...directLookup, text: "message-512", createdAt: 512 })).toBe(true); + 
}); + + it("does not collide long texts that differ only in the middle", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const cache = createSelfChatCache(); + const prefix = "a".repeat(256); + const suffix = "b".repeat(256); + const longTextA = `${prefix}${"x".repeat(300)}${suffix}`; + const longTextB = `${prefix}${"y".repeat(300)}${suffix}`; + + cache.remember({ ...directLookup, text: longTextA, createdAt: 123 }); + + expect(cache.has({ ...directLookup, text: longTextA, createdAt: 123 })).toBe(true); + expect(cache.has({ ...directLookup, text: longTextB, createdAt: 123 })).toBe(false); + }); +}); diff --git a/src/imessage/monitor/self-chat-cache.ts b/src/imessage/monitor/self-chat-cache.ts new file mode 100644 index 00000000000..a2c4c31ccd9 --- /dev/null +++ b/src/imessage/monitor/self-chat-cache.ts @@ -0,0 +1,103 @@ +import { createHash } from "node:crypto"; +import { formatIMessageChatTarget } from "../targets.js"; + +type SelfChatCacheKeyParts = { + accountId: string; + sender: string; + isGroup: boolean; + chatId?: number; +}; + +export type SelfChatLookup = SelfChatCacheKeyParts & { + text?: string; + createdAt?: number; +}; + +export type SelfChatCache = { + remember: (lookup: SelfChatLookup) => void; + has: (lookup: SelfChatLookup) => boolean; +}; + +const SELF_CHAT_TTL_MS = 10_000; +const MAX_SELF_CHAT_CACHE_ENTRIES = 512; +const CLEANUP_MIN_INTERVAL_MS = 1_000; + +function normalizeText(text: string | undefined): string | null { + if (!text) { + return null; + } + const normalized = text.replace(/\r\n?/g, "\n").trim(); + return normalized ? 
normalized : null; +} + +function isUsableTimestamp(createdAt: number | undefined): createdAt is number { + return typeof createdAt === "number" && Number.isFinite(createdAt); +} + +function digestText(text: string): string { + return createHash("sha256").update(text).digest("hex"); +} + +function buildScope(parts: SelfChatCacheKeyParts): string { + if (!parts.isGroup) { + return `${parts.accountId}:imessage:${parts.sender}`; + } + const chatTarget = formatIMessageChatTarget(parts.chatId) || "chat_id:unknown"; + return `${parts.accountId}:${chatTarget}:imessage:${parts.sender}`; +} + +class DefaultSelfChatCache implements SelfChatCache { + private cache = new Map(); + private lastCleanupAt = 0; + + private buildKey(lookup: SelfChatLookup): string | null { + const text = normalizeText(lookup.text); + if (!text || !isUsableTimestamp(lookup.createdAt)) { + return null; + } + return `${buildScope(lookup)}:${lookup.createdAt}:${digestText(text)}`; + } + + remember(lookup: SelfChatLookup): void { + const key = this.buildKey(lookup); + if (!key) { + return; + } + this.cache.set(key, Date.now()); + this.maybeCleanup(); + } + + has(lookup: SelfChatLookup): boolean { + this.maybeCleanup(); + const key = this.buildKey(lookup); + if (!key) { + return false; + } + const timestamp = this.cache.get(key); + return typeof timestamp === "number" && Date.now() - timestamp <= SELF_CHAT_TTL_MS; + } + + private maybeCleanup(): void { + const now = Date.now(); + if (now - this.lastCleanupAt < CLEANUP_MIN_INTERVAL_MS) { + return; + } + this.lastCleanupAt = now; + for (const [key, timestamp] of this.cache.entries()) { + if (now - timestamp > SELF_CHAT_TTL_MS) { + this.cache.delete(key); + } + } + while (this.cache.size > MAX_SELF_CHAT_CACHE_ENTRIES) { + const oldestKey = this.cache.keys().next().value; + if (typeof oldestKey !== "string") { + break; + } + this.cache.delete(oldestKey); + } + } +} + +export function createSelfChatCache(): SelfChatCache { + return new DefaultSelfChatCache(); 
+} diff --git a/src/infra/device-pairing.test.ts b/src/infra/device-pairing.test.ts index c76b44b323d..17f03df089a 100644 --- a/src/infra/device-pairing.test.ts +++ b/src/infra/device-pairing.test.ts @@ -1,16 +1,19 @@ -import { mkdtemp } from "node:fs/promises"; +import { mkdtemp, readFile, writeFile } from "node:fs/promises"; import { tmpdir } from "node:os"; import { join } from "node:path"; import { describe, expect, test } from "vitest"; import { approveDevicePairing, clearDevicePairing, + ensureDeviceToken, getPairedDevice, removePairedDevice, requestDevicePairing, rotateDeviceToken, verifyDeviceToken, + type PairedDevice, } from "./device-pairing.js"; +import { resolvePairingPaths } from "./pairing-files.js"; async function setupPairedOperatorDevice(baseDir: string, scopes: string[]) { const request = await requestDevicePairing( @@ -51,6 +54,43 @@ function requireToken(token: string | undefined): string { return token; } +async function overwritePairedOperatorTokenScopes(baseDir: string, scopes: string[]) { + const { pairedPath } = resolvePairingPaths(baseDir, "devices"); + const pairedByDeviceId = JSON.parse(await readFile(pairedPath, "utf8")) as Record< + string, + PairedDevice + >; + const device = pairedByDeviceId["device-1"]; + expect(device?.tokens?.operator).toBeDefined(); + if (!device?.tokens?.operator) { + throw new Error("expected paired operator token"); + } + device.tokens.operator.scopes = scopes; + await writeFile(pairedPath, JSON.stringify(pairedByDeviceId, null, 2)); +} + +async function mutatePairedOperatorDevice(baseDir: string, mutate: (device: PairedDevice) => void) { + const { pairedPath } = resolvePairingPaths(baseDir, "devices"); + const pairedByDeviceId = JSON.parse(await readFile(pairedPath, "utf8")) as Record< + string, + PairedDevice + >; + const device = pairedByDeviceId["device-1"]; + expect(device).toBeDefined(); + if (!device) { + throw new Error("expected paired operator device"); + } + mutate(device); + await 
writeFile(pairedPath, JSON.stringify(pairedByDeviceId, null, 2)); +} + +async function clearPairedOperatorApprovalBaseline(baseDir: string) { + await mutatePairedOperatorDevice(baseDir, (device) => { + delete device.approvedScopes; + delete device.scopes; + }); +} + describe("device pairing tokens", () => { test("reuses existing pending requests for the same device", async () => { const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); @@ -180,6 +220,26 @@ describe("device pairing tokens", () => { expect(after?.approvedScopes).toEqual(["operator.read"]); }); + test("rejects scope escalation when ensuring a token and leaves state unchanged", async () => { + const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); + await setupPairedOperatorDevice(baseDir, ["operator.read"]); + const before = await getPairedDevice("device-1", baseDir); + + const ensured = await ensureDeviceToken({ + deviceId: "device-1", + role: "operator", + scopes: ["operator.admin"], + baseDir, + }); + expect(ensured).toBeNull(); + + const after = await getPairedDevice("device-1", baseDir); + expect(after?.tokens?.operator?.token).toEqual(before?.tokens?.operator?.token); + expect(after?.tokens?.operator?.scopes).toEqual(["operator.read"]); + expect(after?.scopes).toEqual(["operator.read"]); + expect(after?.approvedScopes).toEqual(["operator.read"]); + }); + test("verifies token and rejects mismatches", async () => { const { baseDir, token } = await setupOperatorToken(["operator.read"]); @@ -199,6 +259,32 @@ describe("device pairing tokens", () => { expect(mismatch.reason).toBe("token-mismatch"); }); + test("rejects persisted tokens whose scopes exceed the approved scope baseline", async () => { + const { baseDir, token } = await setupOperatorToken(["operator.read"]); + await overwritePairedOperatorTokenScopes(baseDir, ["operator.admin"]); + + await expect( + verifyOperatorToken({ + baseDir, + token, + scopes: ["operator.admin"], + }), + ).resolves.toEqual({ 
ok: false, reason: "scope-mismatch" }); + }); + + test("fails closed when the paired device approval baseline is missing during verification", async () => { + const { baseDir, token } = await setupOperatorToken(["operator.read"]); + await clearPairedOperatorApprovalBaseline(baseDir); + + await expect( + verifyOperatorToken({ + baseDir, + token, + scopes: ["operator.read"], + }), + ).resolves.toEqual({ ok: false, reason: "scope-mismatch" }); + }); + test("accepts operator.read/operator.write requests with an operator.admin token scope", async () => { const { baseDir, token } = await setupOperatorToken(["operator.admin"]); @@ -217,6 +303,57 @@ describe("device pairing tokens", () => { expect(writeOk.ok).toBe(true); }); + test("accepts custom operator scopes under an operator.admin approval baseline", async () => { + const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); + await setupPairedOperatorDevice(baseDir, ["operator.admin"]); + + const rotated = await rotateDeviceToken({ + deviceId: "device-1", + role: "operator", + scopes: ["operator.talk.secrets"], + baseDir, + }); + expect(rotated?.scopes).toEqual(["operator.talk.secrets"]); + + await expect( + verifyOperatorToken({ + baseDir, + token: requireToken(rotated?.token), + scopes: ["operator.talk.secrets"], + }), + ).resolves.toEqual({ ok: true }); + }); + + test("fails closed when the paired device approval baseline is missing during ensure", async () => { + const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); + await setupPairedOperatorDevice(baseDir, ["operator.admin"]); + await clearPairedOperatorApprovalBaseline(baseDir); + + await expect( + ensureDeviceToken({ + deviceId: "device-1", + role: "operator", + scopes: ["operator.admin"], + baseDir, + }), + ).resolves.toBeNull(); + }); + + test("fails closed when the paired device approval baseline is missing during rotation", async () => { + const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); + 
await setupPairedOperatorDevice(baseDir, ["operator.admin"]); + await clearPairedOperatorApprovalBaseline(baseDir); + + await expect( + rotateDeviceToken({ + deviceId: "device-1", + role: "operator", + scopes: ["operator.admin"], + baseDir, + }), + ).resolves.toBeNull(); + }); + test("treats multibyte same-length token input as mismatch without throwing", async () => { const { baseDir, token } = await setupOperatorToken(["operator.read"]); const multibyteToken = "é".repeat(token.length); diff --git a/src/infra/device-pairing.ts b/src/infra/device-pairing.ts index 591a9d70888..5bd2909a56e 100644 --- a/src/infra/device-pairing.ts +++ b/src/infra/device-pairing.ts @@ -181,44 +181,6 @@ function mergePendingDevicePairingRequest( }; } -function scopesAllow(requested: string[], allowed: string[]): boolean { - if (requested.length === 0) { - return true; - } - if (allowed.length === 0) { - return false; - } - const allowedSet = new Set(allowed); - return requested.every((scope) => allowedSet.has(scope)); -} - -const DEVICE_SCOPE_IMPLICATIONS: Readonly> = { - "operator.admin": ["operator.read", "operator.write", "operator.approvals", "operator.pairing"], - "operator.write": ["operator.read"], -}; - -function expandScopeImplications(scopes: string[]): string[] { - const expanded = new Set(scopes); - const queue = [...scopes]; - while (queue.length > 0) { - const scope = queue.pop(); - if (!scope) { - continue; - } - for (const impliedScope of DEVICE_SCOPE_IMPLICATIONS[scope] ?? 
[]) { - if (!expanded.has(impliedScope)) { - expanded.add(impliedScope); - queue.push(impliedScope); - } - } - } - return [...expanded]; -} - -function scopesAllowWithImplications(requested: string[], allowed: string[]): boolean { - return scopesAllow(expandScopeImplications(requested), expandScopeImplications(allowed)); -} - function newToken() { return generatePairingToken(); } @@ -252,6 +214,29 @@ function buildDeviceAuthToken(params: { }; } +function resolveApprovedDeviceScopeBaseline(device: PairedDevice): string[] | null { + const baseline = device.approvedScopes ?? device.scopes; + if (!Array.isArray(baseline)) { + return null; + } + return normalizeDeviceAuthScopes(baseline); +} + +function scopesWithinApprovedDeviceBaseline(params: { + role: string; + scopes: readonly string[]; + approvedScopes: readonly string[] | null; +}): boolean { + if (!params.approvedScopes) { + return false; + } + return roleScopesAllow({ + role: params.role, + requestedScopes: params.scopes, + allowedScopes: params.approvedScopes, + }); +} + export async function listDevicePairing(baseDir?: string): Promise { const state = await loadState(baseDir); const pending = Object.values(state.pendingById).toSorted((a, b) => b.ts - a.ts); @@ -494,6 +479,16 @@ export async function verifyDeviceToken(params: { if (!verifyPairingToken(params.token, entry.token)) { return { ok: false, reason: "token-mismatch" }; } + const approvedScopes = resolveApprovedDeviceScopeBaseline(device); + if ( + !scopesWithinApprovedDeviceBaseline({ + role, + scopes: entry.scopes, + approvedScopes, + }) + ) { + return { ok: false, reason: "scope-mismatch" }; + } const requestedScopes = normalizeDeviceAuthScopes(params.scopes); if (!roleScopesAllow({ role, requestedScopes, allowedScopes: entry.scopes })) { return { ok: false, reason: "scope-mismatch" }; @@ -525,8 +520,26 @@ export async function ensureDeviceToken(params: { return null; } const { device, role, tokens, existing } = context; + const approvedScopes = 
resolveApprovedDeviceScopeBaseline(device); + if ( + !scopesWithinApprovedDeviceBaseline({ + role, + scopes: requestedScopes, + approvedScopes, + }) + ) { + return null; + } if (existing && !existing.revokedAtMs) { - if (roleScopesAllow({ role, requestedScopes, allowedScopes: existing.scopes })) { + const existingWithinApproved = scopesWithinApprovedDeviceBaseline({ + role, + scopes: existing.scopes, + approvedScopes, + }); + if ( + existingWithinApproved && + roleScopesAllow({ role, requestedScopes, allowedScopes: existing.scopes }) + ) { return existing; } } @@ -589,10 +602,14 @@ export async function rotateDeviceToken(params: { const requestedScopes = normalizeDeviceAuthScopes( params.scopes ?? existing?.scopes ?? device.scopes, ); - const approvedScopes = normalizeDeviceAuthScopes( - device.approvedScopes ?? device.scopes ?? existing?.scopes, - ); - if (!scopesAllowWithImplications(requestedScopes, approvedScopes)) { + const approvedScopes = resolveApprovedDeviceScopeBaseline(device); + if ( + !scopesWithinApprovedDeviceBaseline({ + role, + scopes: requestedScopes, + approvedScopes, + }) + ) { return null; } const now = Date.now(); diff --git a/src/infra/exec-allowlist-pattern.test.ts b/src/infra/exec-allowlist-pattern.test.ts new file mode 100644 index 00000000000..1ac34112311 --- /dev/null +++ b/src/infra/exec-allowlist-pattern.test.ts @@ -0,0 +1,24 @@ +import { describe, expect, it } from "vitest"; +import { matchesExecAllowlistPattern } from "./exec-allowlist-pattern.js"; + +describe("matchesExecAllowlistPattern", () => { + it("does not let ? 
cross path separators", () => { + expect(matchesExecAllowlistPattern("/tmp/a?b", "/tmp/a/b")).toBe(false); + expect(matchesExecAllowlistPattern("/tmp/a?b", "/tmp/acb")).toBe(true); + }); + + it("keeps ** matching across path separators", () => { + expect(matchesExecAllowlistPattern("/tmp/**/tool", "/tmp/a/b/tool")).toBe(true); + }); + + it.runIf(process.platform !== "win32")("preserves case sensitivity on POSIX", () => { + expect(matchesExecAllowlistPattern("/tmp/Allowed-Tool", "/tmp/allowed-tool")).toBe(false); + expect(matchesExecAllowlistPattern("/tmp/Allowed-Tool", "/tmp/Allowed-Tool")).toBe(true); + }); + + it.runIf(process.platform === "win32")("preserves case-insensitive matching on Windows", () => { + expect(matchesExecAllowlistPattern("C:/Tools/Allowed-Tool", "c:/tools/allowed-tool")).toBe( + true, + ); + }); +}); diff --git a/src/infra/exec-allowlist-pattern.ts b/src/infra/exec-allowlist-pattern.ts index df05a2ae1d9..96e93b6f797 100644 --- a/src/infra/exec-allowlist-pattern.ts +++ b/src/infra/exec-allowlist-pattern.ts @@ -9,7 +9,7 @@ function normalizeMatchTarget(value: string): string { const stripped = value.replace(/^\\\\[?.]\\/, ""); return stripped.replace(/\\/g, "/").toLowerCase(); } - return value.replace(/\\\\/g, "/").toLowerCase(); + return value.replace(/\\\\/g, "/"); } function tryRealpath(value: string): string | null { @@ -25,7 +25,8 @@ function escapeRegExpLiteral(input: string): string { } function compileGlobRegex(pattern: string): RegExp { - const cached = globRegexCache.get(pattern); + const cacheKey = `${process.platform}:${pattern}`; + const cached = globRegexCache.get(cacheKey); if (cached) { return cached; } @@ -46,7 +47,7 @@ function compileGlobRegex(pattern: string): RegExp { continue; } if (ch === "?") { - regex += "."; + regex += "[^/]"; i += 1; continue; } @@ -55,11 +56,11 @@ function compileGlobRegex(pattern: string): RegExp { } regex += "$"; - const compiled = new RegExp(regex, "i"); + const compiled = new RegExp(regex, 
process.platform === "win32" ? "i" : ""); if (globRegexCache.size >= GLOB_REGEX_CACHE_LIMIT) { globRegexCache.clear(); } - globRegexCache.set(pattern, compiled); + globRegexCache.set(cacheKey, compiled); return compiled; } diff --git a/src/infra/exec-approval-command-display.ts b/src/infra/exec-approval-command-display.ts index b5b00625ef2..9ab62e55669 100644 --- a/src/infra/exec-approval-command-display.ts +++ b/src/infra/exec-approval-command-display.ts @@ -1,8 +1,22 @@ import type { ExecApprovalRequestPayload } from "./exec-approvals.js"; +const UNICODE_FORMAT_CHAR_REGEX = /\p{Cf}/gu; + +function formatCodePointEscape(char: string): string { + return `\\u{${char.codePointAt(0)?.toString(16).toUpperCase() ?? "FFFD"}}`; +} + +export function sanitizeExecApprovalDisplayText(commandText: string): string { + return commandText.replace(UNICODE_FORMAT_CHAR_REGEX, formatCodePointEscape); +} + function normalizePreview(commandText: string, commandPreview?: string | null): string | null { - const preview = commandPreview?.trim() ?? ""; - if (!preview || preview === commandText) { + const previewRaw = commandPreview?.trim() ?? ""; + if (!previewRaw) { + return null; + } + const preview = sanitizeExecApprovalDisplayText(previewRaw); + if (preview === commandText) { return null; } return preview; @@ -12,17 +26,15 @@ export function resolveExecApprovalCommandDisplay(request: ExecApprovalRequestPa commandText: string; commandPreview: string | null; } { - if (request.host === "node" && request.systemRunPlan) { - return { - commandText: request.systemRunPlan.commandText, - commandPreview: normalizePreview( - request.systemRunPlan.commandText, - request.systemRunPlan.commandPreview, - ), - }; - } + const commandTextSource = + request.command || + (request.host === "node" && request.systemRunPlan ? request.systemRunPlan.commandText : ""); + const commandText = sanitizeExecApprovalDisplayText(commandTextSource); + const previewSource = + request.commandPreview ?? 
+ (request.host === "node" ? (request.systemRunPlan?.commandPreview ?? null) : null); return { - commandText: request.command, - commandPreview: normalizePreview(request.command, request.commandPreview), + commandText, + commandPreview: normalizePreview(commandText, previewSource), }; } diff --git a/src/infra/exec-approval-forwarder.test.ts b/src/infra/exec-approval-forwarder.test.ts index 8ae1b53cc57..ca4d81e012e 100644 --- a/src/infra/exec-approval-forwarder.test.ts +++ b/src/infra/exec-approval-forwarder.test.ts @@ -294,6 +294,24 @@ describe("exec approval forwarder", () => { expect(text).toContain("Reply with: /approve allow-once|allow-always|deny"); }); + it("renders invisible Unicode format chars as visible escapes", async () => { + vi.useFakeTimers(); + const { deliver, forwarder } = createForwarder({ cfg: TARGETS_CFG }); + + await expect( + forwarder.handleRequested({ + ...baseRequest, + request: { + ...baseRequest.request, + command: "bash safe\u200B.sh", + }, + }), + ).resolves.toBe(true); + await Promise.resolve(); + + expect(getFirstDeliveryText(deliver)).toContain("Command: `bash safe\\u{200B}.sh`"); + }); + it("formats complex commands as fenced code blocks", async () => { vi.useFakeTimers(); const { deliver, forwarder } = createForwarder({ cfg: TARGETS_CFG }); diff --git a/src/infra/git-commit.test.ts b/src/infra/git-commit.test.ts index d00c50fbf6f..c0ddb136e85 100644 --- a/src/infra/git-commit.test.ts +++ b/src/infra/git-commit.test.ts @@ -198,7 +198,7 @@ describe("git commit resolution", () => { await fs.mkdir(path.join(packageRoot, "dist"), { recursive: true }); await fs.writeFile( path.join(packageRoot, "package.json"), - JSON.stringify({ name: "openclaw", version: "2026.3.9" }), + JSON.stringify({ name: "openclaw", version: "2026.3.10" }), "utf-8", ); const moduleUrl = pathToFileURL(path.join(packageRoot, "dist", "entry.js")).href; diff --git a/src/infra/host-env-security-policy.json b/src/infra/host-env-security-policy.json index 
8b8f3cf3333..9e3ad27581e 100644 --- a/src/infra/host-env-security-policy.json +++ b/src/infra/host-env-security-policy.json @@ -11,6 +11,7 @@ "BASH_ENV", "ENV", "GIT_EXTERNAL_DIFF", + "GIT_EXEC_PATH", "SHELL", "SHELLOPTS", "PS4", diff --git a/src/infra/host-env-security.test.ts b/src/infra/host-env-security.test.ts index 4e7bcdb9ed9..08f1a3d65fb 100644 --- a/src/infra/host-env-security.test.ts +++ b/src/infra/host-env-security.test.ts @@ -18,6 +18,7 @@ describe("isDangerousHostEnvVarName", () => { expect(isDangerousHostEnvVarName("bash_env")).toBe(true); expect(isDangerousHostEnvVarName("SHELL")).toBe(true); expect(isDangerousHostEnvVarName("GIT_EXTERNAL_DIFF")).toBe(true); + expect(isDangerousHostEnvVarName("git_exec_path")).toBe(true); expect(isDangerousHostEnvVarName("SHELLOPTS")).toBe(true); expect(isDangerousHostEnvVarName("ps4")).toBe(true); expect(isDangerousHostEnvVarName("DYLD_INSERT_LIBRARIES")).toBe(true); @@ -60,6 +61,7 @@ describe("sanitizeHostExecEnv", () => { ZDOTDIR: "/tmp/evil-zdotdir", BASH_ENV: "/tmp/pwn.sh", GIT_SSH_COMMAND: "touch /tmp/pwned", + GIT_EXEC_PATH: "/tmp/git-exec-path", EDITOR: "/tmp/editor", NPM_CONFIG_USERCONFIG: "/tmp/npmrc", GIT_CONFIG_GLOBAL: "/tmp/gitconfig", @@ -73,6 +75,7 @@ describe("sanitizeHostExecEnv", () => { expect(env.OPENCLAW_CLI).toBe(OPENCLAW_CLI_ENV_VALUE); expect(env.BASH_ENV).toBeUndefined(); expect(env.GIT_SSH_COMMAND).toBeUndefined(); + expect(env.GIT_EXEC_PATH).toBeUndefined(); expect(env.EDITOR).toBeUndefined(); expect(env.NPM_CONFIG_USERCONFIG).toBeUndefined(); expect(env.GIT_CONFIG_GLOBAL).toBeUndefined(); @@ -211,6 +214,65 @@ describe("shell wrapper exploit regression", () => { }); describe("git env exploit regression", () => { + it("blocks inherited GIT_EXEC_PATH so git cannot execute helper payloads", async () => { + if (process.platform === "win32") { + return; + } + const gitPath = "/usr/bin/git"; + if (!fs.existsSync(gitPath)) { + return; + } + + const helperDir = fs.mkdtempSync( + 
path.join(os.tmpdir(), `openclaw-git-exec-path-${process.pid}-${Date.now()}-`), + ); + const helperPath = path.join(helperDir, "git-remote-https"); + const marker = path.join( + os.tmpdir(), + `openclaw-git-exec-path-marker-${process.pid}-${Date.now()}`, + ); + try { + try { + fs.unlinkSync(marker); + } catch { + // no-op + } + fs.writeFileSync(helperPath, `#!/bin/sh\ntouch ${JSON.stringify(marker)}\nexit 1\n`, "utf8"); + fs.chmodSync(helperPath, 0o755); + + const target = "https://127.0.0.1:1/does-not-matter"; + const unsafeEnv = { + PATH: process.env.PATH ?? "/usr/bin:/bin", + GIT_EXEC_PATH: helperDir, + GIT_TERMINAL_PROMPT: "0", + }; + + await new Promise((resolve) => { + const child = spawn(gitPath, ["ls-remote", target], { env: unsafeEnv, stdio: "ignore" }); + child.once("error", () => resolve()); + child.once("close", () => resolve()); + }); + + expect(fs.existsSync(marker)).toBe(true); + fs.unlinkSync(marker); + + const safeEnv = sanitizeHostExecEnv({ + baseEnv: unsafeEnv, + }); + + await new Promise((resolve) => { + const child = spawn(gitPath, ["ls-remote", target], { env: safeEnv, stdio: "ignore" }); + child.once("error", () => resolve()); + child.once("close", () => resolve()); + }); + + expect(fs.existsSync(marker)).toBe(false); + } finally { + fs.rmSync(helperDir, { recursive: true, force: true }); + fs.rmSync(marker, { force: true }); + } + }); + it("blocks GIT_SSH_COMMAND override so git cannot execute helper payloads", async () => { if (process.platform === "win32") { return; diff --git a/src/infra/process-respawn.test.ts b/src/infra/process-respawn.test.ts index 7b9a9df1252..bacf4e1b24b 100644 --- a/src/infra/process-respawn.test.ts +++ b/src/infra/process-respawn.test.ts @@ -4,6 +4,7 @@ import { SUPERVISOR_HINT_ENV_VARS } from "./supervisor-markers.js"; const spawnMock = vi.hoisted(() => vi.fn()); const triggerOpenClawRestartMock = vi.hoisted(() => vi.fn()); +const scheduleDetachedLaunchdRestartHandoffMock = vi.hoisted(() => vi.fn()); 
vi.mock("node:child_process", () => ({ spawn: (...args: unknown[]) => spawnMock(...args), @@ -11,6 +12,10 @@ vi.mock("node:child_process", () => ({ vi.mock("./restart.js", () => ({ triggerOpenClawRestart: (...args: unknown[]) => triggerOpenClawRestartMock(...args), })); +vi.mock("../daemon/launchd-restart-handoff.js", () => ({ + scheduleDetachedLaunchdRestartHandoff: (...args: unknown[]) => + scheduleDetachedLaunchdRestartHandoffMock(...args), +})); import { restartGatewayProcessWithFreshPid } from "./process-respawn.js"; @@ -35,6 +40,8 @@ afterEach(() => { process.execArgv = [...originalExecArgv]; spawnMock.mockClear(); triggerOpenClawRestartMock.mockClear(); + scheduleDetachedLaunchdRestartHandoffMock.mockReset(); + scheduleDetachedLaunchdRestartHandoffMock.mockReturnValue({ ok: true, pid: 8123 }); if (originalPlatformDescriptor) { Object.defineProperty(process, "platform", originalPlatformDescriptor); } @@ -54,6 +61,11 @@ function expectLaunchdSupervisedWithoutKickstart(params?: { launchJobLabel?: str process.env.OPENCLAW_LAUNCHD_LABEL = "ai.openclaw.gateway"; const result = restartGatewayProcessWithFreshPid(); expect(result.mode).toBe("supervised"); + expect(scheduleDetachedLaunchdRestartHandoffMock).toHaveBeenCalledWith({ + env: process.env, + mode: "start-after-exit", + waitForPid: process.pid, + }); expect(triggerOpenClawRestartMock).not.toHaveBeenCalled(); expect(spawnMock).not.toHaveBeenCalled(); } @@ -72,6 +84,12 @@ describe("restartGatewayProcessWithFreshPid", () => { process.env.LAUNCH_JOB_LABEL = "ai.openclaw.gateway"; const result = restartGatewayProcessWithFreshPid(); expect(result.mode).toBe("supervised"); + expect(result.detail).toContain("launchd restart handoff"); + expect(scheduleDetachedLaunchdRestartHandoffMock).toHaveBeenCalledWith({ + env: process.env, + mode: "start-after-exit", + waitForPid: process.pid, + }); expect(triggerOpenClawRestartMock).not.toHaveBeenCalled(); expect(spawnMock).not.toHaveBeenCalled(); }); @@ -96,6 +114,25 @@ 
describe("restartGatewayProcessWithFreshPid", () => { expect(triggerOpenClawRestartMock).not.toHaveBeenCalled(); }); + it("falls back to plain supervised exit when launchd handoff scheduling fails", () => { + clearSupervisorHints(); + setPlatform("darwin"); + process.env.XPC_SERVICE_NAME = "ai.openclaw.gateway"; + scheduleDetachedLaunchdRestartHandoffMock.mockReturnValue({ + ok: false, + detail: "spawn failed", + }); + + const result = restartGatewayProcessWithFreshPid(); + + expect(result).toEqual({ + mode: "supervised", + detail: "launchd exit fallback (spawn failed)", + }); + expect(triggerOpenClawRestartMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + }); + it("does not schedule kickstart on non-darwin platforms", () => { setPlatform("linux"); process.env.INVOCATION_ID = "abc123"; diff --git a/src/infra/process-respawn.ts b/src/infra/process-respawn.ts index 8bf1503b18f..473319f86fb 100644 --- a/src/infra/process-respawn.ts +++ b/src/infra/process-respawn.ts @@ -1,4 +1,5 @@ import { spawn } from "node:child_process"; +import { scheduleDetachedLaunchdRestartHandoff } from "../daemon/launchd-restart-handoff.js"; import { triggerOpenClawRestart } from "./restart.js"; import { detectRespawnSupervisor } from "./supervisor-markers.js"; @@ -30,10 +31,25 @@ export function restartGatewayProcessWithFreshPid(): GatewayRespawnResult { } const supervisor = detectRespawnSupervisor(process.env); if (supervisor) { - // launchd: exit(0) is sufficient — KeepAlive=true restarts the service. - // Self-issued `kickstart -k` races with launchd's bootout state machine - // and can leave the LaunchAgent permanently unloaded. - // See: https://github.com/openclaw/openclaw/issues/39760 + // Hand off launchd restarts to a detached helper before exiting so config + // reloads and SIGUSR1-driven restarts do not depend on exit/respawn timing. 
+ if (supervisor === "launchd") { + const handoff = scheduleDetachedLaunchdRestartHandoff({ + env: process.env, + mode: "start-after-exit", + waitForPid: process.pid, + }); + if (!handoff.ok) { + return { + mode: "supervised", + detail: `launchd exit fallback (${handoff.detail ?? "restart handoff failed"})`, + }; + } + return { + mode: "supervised", + detail: `launchd restart handoff pid ${handoff.pid ?? "unknown"}`, + }; + } if (supervisor === "schtasks") { const restart = triggerOpenClawRestart(); if (!restart.ok) { diff --git a/src/install-sh-version.test.ts b/src/install-sh-version.test.ts index 824a5366efd..12336b803d6 100644 --- a/src/install-sh-version.test.ts +++ b/src/install-sh-version.test.ts @@ -73,10 +73,10 @@ describe("install.sh version resolution", () => { it.runIf(process.platform !== "win32")( "extracts the semantic version from decorated CLI output", () => { - const fixture = withFakeCli("OpenClaw 2026.3.9 (abcdef0)"); + const fixture = withFakeCli("OpenClaw 2026.3.10 (abcdef0)"); tempRoots.push(fixture.root); - expect(resolveVersionFromInstaller(fixture.cliPath)).toBe("2026.3.9"); + expect(resolveVersionFromInstaller(fixture.cliPath)).toBe("2026.3.10"); }, ); @@ -93,7 +93,7 @@ describe("install.sh version resolution", () => { it.runIf(process.platform !== "win32")( "does not source version helpers from cwd when installer runs via stdin", () => { - const fixture = withFakeCli("OpenClaw 2026.3.9 (abcdef0)"); + const fixture = withFakeCli("OpenClaw 2026.3.10 (abcdef0)"); tempRoots.push(fixture.root); const hostileCwd = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-install-stdin-")); @@ -115,7 +115,7 @@ extract_openclaw_semver() { "utf-8", ); - expect(resolveVersionFromInstallerViaStdin(fixture.cliPath, hostileCwd)).toBe("2026.3.9"); + expect(resolveVersionFromInstallerViaStdin(fixture.cliPath, hostileCwd)).toBe("2026.3.10"); }, ); }); diff --git a/src/media/mime.ts b/src/media/mime.ts index e551350c057..4c9b6bf1db3 100644 --- a/src/media/mime.ts 
+++ b/src/media/mime.ts @@ -12,6 +12,10 @@ const EXT_BY_MIME: Record = { "image/gif": ".gif", "audio/ogg": ".ogg", "audio/mpeg": ".mp3", + "audio/wav": ".wav", + "audio/flac": ".flac", + "audio/aac": ".aac", + "audio/opus": ".opus", "audio/x-m4a": ".m4a", "audio/mp4": ".m4a", "video/mp4": ".mp4", diff --git a/src/memory/batch-gemini.test.ts b/src/memory/batch-gemini.test.ts new file mode 100644 index 00000000000..0cbada7293b --- /dev/null +++ b/src/memory/batch-gemini.test.ts @@ -0,0 +1,102 @@ +import { afterEach, beforeAll, describe, expect, it, vi } from "vitest"; +import type { GeminiEmbeddingClient } from "./embeddings-gemini.js"; + +function magnitude(values: number[]) { + return Math.sqrt(values.reduce((sum, value) => sum + value * value, 0)); +} + +describe("runGeminiEmbeddingBatches", () => { + let runGeminiEmbeddingBatches: typeof import("./batch-gemini.js").runGeminiEmbeddingBatches; + + beforeAll(async () => { + ({ runGeminiEmbeddingBatches } = await import("./batch-gemini.js")); + }); + + afterEach(() => { + vi.resetAllMocks(); + vi.unstubAllGlobals(); + }); + + const mockClient: GeminiEmbeddingClient = { + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + headers: {}, + model: "gemini-embedding-2-preview", + modelPath: "models/gemini-embedding-2-preview", + apiKeys: ["test-key"], + outputDimensionality: 1536, + }; + + it("includes outputDimensionality in batch upload requests", async () => { + const fetchMock = vi.fn(async (input: RequestInfo | URL, init?: RequestInit) => { + const url = + typeof input === "string" ? input : input instanceof URL ? 
input.toString() : input.url; + if (url.includes("/upload/v1beta/files?uploadType=multipart")) { + const body = init?.body; + if (!(body instanceof Blob)) { + throw new Error("expected multipart blob body"); + } + const text = await body.text(); + expect(text).toContain('"taskType":"RETRIEVAL_DOCUMENT"'); + expect(text).toContain('"outputDimensionality":1536'); + return new Response(JSON.stringify({ name: "files/file-123" }), { + status: 200, + headers: { "Content-Type": "application/json" }, + }); + } + if (url.endsWith(":asyncBatchEmbedContent")) { + return new Response( + JSON.stringify({ + name: "batches/batch-1", + state: "COMPLETED", + outputConfig: { file: "files/output-1" }, + }), + { + status: 200, + headers: { "Content-Type": "application/json" }, + }, + ); + } + if (url.endsWith("/files/output-1:download")) { + return new Response( + JSON.stringify({ + key: "req-1", + response: { embedding: { values: [3, 4] } }, + }), + { + status: 200, + headers: { "Content-Type": "application/jsonl" }, + }, + ); + } + throw new Error(`unexpected fetch ${url}`); + }); + + vi.stubGlobal("fetch", fetchMock); + + const results = await runGeminiEmbeddingBatches({ + gemini: mockClient, + agentId: "main", + requests: [ + { + custom_id: "req-1", + request: { + content: { parts: [{ text: "hello world" }] }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 1536, + }, + }, + ], + wait: true, + pollIntervalMs: 1, + timeoutMs: 1000, + concurrency: 1, + }); + + const embedding = results.get("req-1"); + expect(embedding).toBeDefined(); + expect(embedding?.[0]).toBeCloseTo(0.6, 5); + expect(embedding?.[1]).toBeCloseTo(0.8, 5); + expect(magnitude(embedding ?? 
[])).toBeCloseTo(1, 5); + expect(fetchMock).toHaveBeenCalledTimes(3); + }); +}); diff --git a/src/memory/batch-gemini.ts b/src/memory/batch-gemini.ts index 998f283b676..4bdc9fa055e 100644 --- a/src/memory/batch-gemini.ts +++ b/src/memory/batch-gemini.ts @@ -4,15 +4,15 @@ import { type EmbeddingBatchExecutionParams, } from "./batch-runner.js"; import { buildBatchHeaders, normalizeBatchBaseUrl } from "./batch-utils.js"; +import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js"; import { debugEmbeddingsLog } from "./embeddings-debug.js"; -import type { GeminiEmbeddingClient } from "./embeddings-gemini.js"; +import type { GeminiEmbeddingClient, GeminiTextEmbeddingRequest } from "./embeddings-gemini.js"; import { hashText } from "./internal.js"; import { withRemoteHttpResponse } from "./remote-http.js"; export type GeminiBatchRequest = { custom_id: string; - content: { parts: Array<{ text: string }> }; - taskType: "RETRIEVAL_DOCUMENT" | "RETRIEVAL_QUERY"; + request: GeminiTextEmbeddingRequest; }; export type GeminiBatchStatus = { @@ -82,10 +82,7 @@ async function submitGeminiBatch(params: { .map((request) => JSON.stringify({ key: request.custom_id, - request: { - content: request.content, - task_type: request.taskType, - }, + request: request.request, }), ) .join("\n"); @@ -350,7 +347,9 @@ export async function runGeminiEmbeddingBatches( errors.push(`${customId}: ${line.response.error.message}`); continue; } - const embedding = line.embedding?.values ?? line.response?.embedding?.values ?? []; + const embedding = sanitizeAndNormalizeEmbedding( + line.embedding?.values ?? line.response?.embedding?.values ?? 
[], + ); if (embedding.length === 0) { errors.push(`${customId}: empty embedding`); continue; diff --git a/src/memory/embedding-chunk-limits.ts b/src/memory/embedding-chunk-limits.ts index 033b30a84a3..5c8cf9020f3 100644 --- a/src/memory/embedding-chunk-limits.ts +++ b/src/memory/embedding-chunk-limits.ts @@ -1,4 +1,5 @@ import { estimateUtf8Bytes, splitTextToUtf8ByteLimit } from "./embedding-input-limits.js"; +import { hasNonTextEmbeddingParts } from "./embedding-inputs.js"; import { resolveEmbeddingMaxInputTokens } from "./embedding-model-limits.js"; import type { EmbeddingProvider } from "./embeddings.js"; import { hashText, type MemoryChunk } from "./internal.js"; @@ -16,6 +17,10 @@ export function enforceEmbeddingMaxInputTokens( const out: MemoryChunk[] = []; for (const chunk of chunks) { + if (hasNonTextEmbeddingParts(chunk.embeddingInput)) { + out.push(chunk); + continue; + } if (estimateUtf8Bytes(chunk.text) <= maxInputTokens) { out.push(chunk); continue; @@ -27,6 +32,7 @@ export function enforceEmbeddingMaxInputTokens( endLine: chunk.endLine, text, hash: hashText(text), + embeddingInput: { text }, }); } } diff --git a/src/memory/embedding-input-limits.ts b/src/memory/embedding-input-limits.ts index dad83bb7aa7..4eadf1bf48d 100644 --- a/src/memory/embedding-input-limits.ts +++ b/src/memory/embedding-input-limits.ts @@ -1,3 +1,5 @@ +import type { EmbeddingInput } from "./embedding-inputs.js"; + // Helpers for enforcing embedding model input size limits. // // We use UTF-8 byte length as a conservative upper bound for tokenizer output. 
@@ -11,6 +13,22 @@ export function estimateUtf8Bytes(text: string): number { return Buffer.byteLength(text, "utf8"); } +export function estimateStructuredEmbeddingInputBytes(input: EmbeddingInput): number { + if (!input.parts?.length) { + return estimateUtf8Bytes(input.text); + } + let total = 0; + for (const part of input.parts) { + if (part.type === "text") { + total += estimateUtf8Bytes(part.text); + continue; + } + total += estimateUtf8Bytes(part.mimeType); + total += estimateUtf8Bytes(part.data); + } + return total; +} + export function splitTextToUtf8ByteLimit(text: string, maxUtf8Bytes: number): string[] { if (maxUtf8Bytes <= 0) { return [text]; diff --git a/src/memory/embedding-inputs.ts b/src/memory/embedding-inputs.ts new file mode 100644 index 00000000000..767a463f740 --- /dev/null +++ b/src/memory/embedding-inputs.ts @@ -0,0 +1,34 @@ +export type EmbeddingInputTextPart = { + type: "text"; + text: string; +}; + +export type EmbeddingInputInlineDataPart = { + type: "inline-data"; + mimeType: string; + data: string; +}; + +export type EmbeddingInputPart = EmbeddingInputTextPart | EmbeddingInputInlineDataPart; + +export type EmbeddingInput = { + text: string; + parts?: EmbeddingInputPart[]; +}; + +export function buildTextEmbeddingInput(text: string): EmbeddingInput { + return { text }; +} + +export function isInlineDataEmbeddingInputPart( + part: EmbeddingInputPart, +): part is EmbeddingInputInlineDataPart { + return part.type === "inline-data"; +} + +export function hasNonTextEmbeddingParts(input: EmbeddingInput | undefined): boolean { + if (!input?.parts?.length) { + return false; + } + return input.parts.some((part) => isInlineDataEmbeddingInputPart(part)); +} diff --git a/src/memory/embedding-model-limits.ts b/src/memory/embedding-model-limits.ts index b9960009606..0819686b905 100644 --- a/src/memory/embedding-model-limits.ts +++ b/src/memory/embedding-model-limits.ts @@ -8,6 +8,8 @@ const KNOWN_EMBEDDING_MAX_INPUT_TOKENS: Record = { 
"openai:text-embedding-3-large": 8192, "openai:text-embedding-ada-002": 8191, "gemini:text-embedding-004": 2048, + "gemini:gemini-embedding-001": 2048, + "gemini:gemini-embedding-2-preview": 8192, "voyage:voyage-3": 32000, "voyage:voyage-3-lite": 16000, "voyage:voyage-code-3": 32000, diff --git a/src/memory/embedding-vectors.ts b/src/memory/embedding-vectors.ts new file mode 100644 index 00000000000..d589f61390d --- /dev/null +++ b/src/memory/embedding-vectors.ts @@ -0,0 +1,8 @@ +export function sanitizeAndNormalizeEmbedding(vec: number[]): number[] { + const sanitized = vec.map((value) => (Number.isFinite(value) ? value : 0)); + const magnitude = Math.sqrt(sanitized.reduce((sum, value) => sum + value * value, 0)); + if (magnitude < 1e-10) { + return sanitized; + } + return sanitized.map((value) => value / magnitude); +} diff --git a/src/memory/embeddings-gemini.test.ts b/src/memory/embeddings-gemini.test.ts new file mode 100644 index 00000000000..f97cc6cb142 --- /dev/null +++ b/src/memory/embeddings-gemini.test.ts @@ -0,0 +1,609 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import * as authModule from "../agents/model-auth.js"; +import { + buildGeminiEmbeddingRequest, + buildGeminiTextEmbeddingRequest, + createGeminiEmbeddingProvider, + DEFAULT_GEMINI_EMBEDDING_MODEL, + GEMINI_EMBEDDING_2_MODELS, + isGeminiEmbedding2Model, + resolveGeminiOutputDimensionality, +} from "./embeddings-gemini.js"; + +vi.mock("../agents/model-auth.js", async () => { + const { createModelAuthMockModule } = await import("../test-utils/model-auth-mock.js"); + return createModelAuthMockModule(); +}); + +const createGeminiFetchMock = (embeddingValues = [1, 2, 3]) => + vi.fn(async (_input?: unknown, _init?: unknown) => ({ + ok: true, + status: 200, + json: async () => ({ embedding: { values: embeddingValues } }), + })); + +const createGeminiBatchFetchMock = (count: number, embeddingValues = [1, 2, 3]) => + vi.fn(async (_input?: unknown, _init?: unknown) => ({ + ok: true, 
+ status: 200, + json: async () => ({ + embeddings: Array.from({ length: count }, () => ({ values: embeddingValues })), + }), + })); + +function readFirstFetchRequest(fetchMock: { mock: { calls: unknown[][] } }) { + const [url, init] = fetchMock.mock.calls[0] ?? []; + return { url, init: init as RequestInit | undefined }; +} + +function parseFetchBody(fetchMock: { mock: { calls: unknown[][] } }, callIndex = 0) { + const init = fetchMock.mock.calls[callIndex]?.[1] as RequestInit | undefined; + return JSON.parse((init?.body as string) ?? "{}") as Record; +} + +function magnitude(values: number[]) { + return Math.sqrt(values.reduce((sum, value) => sum + value * value, 0)); +} + +afterEach(() => { + vi.resetAllMocks(); + vi.unstubAllGlobals(); +}); + +function mockResolvedProviderKey(apiKey = "test-key") { + vi.mocked(authModule.resolveApiKeyForProvider).mockResolvedValue({ + apiKey, + mode: "api-key", + source: "test", + }); +} + +describe("buildGeminiTextEmbeddingRequest", () => { + it("builds a text embedding request with optional model and dimensions", () => { + expect( + buildGeminiTextEmbeddingRequest({ + text: "hello", + taskType: "RETRIEVAL_DOCUMENT", + modelPath: "models/gemini-embedding-2-preview", + outputDimensionality: 1536, + }), + ).toEqual({ + model: "models/gemini-embedding-2-preview", + content: { parts: [{ text: "hello" }] }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 1536, + }); + }); +}); + +describe("buildGeminiEmbeddingRequest", () => { + it("builds a multimodal request from structured input parts", () => { + expect( + buildGeminiEmbeddingRequest({ + input: { + text: "Image file: diagram.png", + parts: [ + { type: "text", text: "Image file: diagram.png" }, + { type: "inline-data", mimeType: "image/png", data: "abc123" }, + ], + }, + taskType: "RETRIEVAL_DOCUMENT", + modelPath: "models/gemini-embedding-2-preview", + outputDimensionality: 1536, + }), + ).toEqual({ + model: "models/gemini-embedding-2-preview", + content: { + parts: [ 
+ { text: "Image file: diagram.png" }, + { inlineData: { mimeType: "image/png", data: "abc123" } }, + ], + }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 1536, + }); + }); +}); + +// ---------- Model detection ---------- + +describe("isGeminiEmbedding2Model", () => { + it("returns true for gemini-embedding-2-preview", () => { + expect(isGeminiEmbedding2Model("gemini-embedding-2-preview")).toBe(true); + }); + + it("returns false for gemini-embedding-001", () => { + expect(isGeminiEmbedding2Model("gemini-embedding-001")).toBe(false); + }); + + it("returns false for text-embedding-004", () => { + expect(isGeminiEmbedding2Model("text-embedding-004")).toBe(false); + }); +}); + +describe("GEMINI_EMBEDDING_2_MODELS", () => { + it("contains gemini-embedding-2-preview", () => { + expect(GEMINI_EMBEDDING_2_MODELS.has("gemini-embedding-2-preview")).toBe(true); + }); +}); + +// ---------- Dimension resolution ---------- + +describe("resolveGeminiOutputDimensionality", () => { + it("returns undefined for non-v2 models", () => { + expect(resolveGeminiOutputDimensionality("gemini-embedding-001")).toBeUndefined(); + expect(resolveGeminiOutputDimensionality("text-embedding-004")).toBeUndefined(); + }); + + it("returns 3072 by default for v2 models", () => { + expect(resolveGeminiOutputDimensionality("gemini-embedding-2-preview")).toBe(3072); + }); + + it("accepts valid dimension values", () => { + expect(resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 768)).toBe(768); + expect(resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 1536)).toBe(1536); + expect(resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 3072)).toBe(3072); + }); + + it("throws for invalid dimension values", () => { + expect(() => resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 512)).toThrow( + /Invalid outputDimensionality 512/, + ); + expect(() => resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 1024)).toThrow( + /Valid values: 
768, 1536, 3072/, + ); + }); +}); + +// ---------- Provider: gemini-embedding-001 (backward compat) ---------- + +describe("gemini-embedding-001 provider (backward compat)", () => { + it("does NOT include outputDimensionality in embedQuery", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-001", + fallback: "none", + }); + + await provider.embedQuery("test query"); + + const body = parseFetchBody(fetchMock); + expect(body).not.toHaveProperty("outputDimensionality"); + expect(body.taskType).toBe("RETRIEVAL_QUERY"); + expect(body.content).toEqual({ parts: [{ text: "test query" }] }); + }); + + it("does NOT include outputDimensionality in embedBatch", async () => { + const fetchMock = createGeminiBatchFetchMock(2); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-001", + fallback: "none", + }); + + await provider.embedBatch(["text1", "text2"]); + + const body = parseFetchBody(fetchMock); + expect(body).not.toHaveProperty("outputDimensionality"); + }); +}); + +// ---------- Provider: gemini-embedding-2-preview ---------- + +describe("gemini-embedding-2-preview provider", () => { + it("includes outputDimensionality in embedQuery request", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test query"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(3072); + expect(body.taskType).toBe("RETRIEVAL_QUERY"); 
+ expect(body.content).toEqual({ parts: [{ text: "test query" }] }); + }); + + it("normalizes embedQuery response vectors", async () => { + const fetchMock = createGeminiFetchMock([3, 4]); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const embedding = await provider.embedQuery("test query"); + + expect(embedding[0]).toBeCloseTo(0.6, 5); + expect(embedding[1]).toBeCloseTo(0.8, 5); + expect(magnitude(embedding)).toBeCloseTo(1, 5); + }); + + it("includes outputDimensionality in embedBatch request", async () => { + const fetchMock = createGeminiBatchFetchMock(2); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedBatch(["text1", "text2"]); + + const body = parseFetchBody(fetchMock); + expect(body.requests).toEqual([ + { + model: "models/gemini-embedding-2-preview", + content: { parts: [{ text: "text1" }] }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 3072, + }, + { + model: "models/gemini-embedding-2-preview", + content: { parts: [{ text: "text2" }] }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 3072, + }, + ]); + }); + + it("normalizes embedBatch response vectors", async () => { + const fetchMock = createGeminiBatchFetchMock(2, [3, 4]); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const embeddings = await provider.embedBatch(["text1", "text2"]); + + expect(embeddings).toHaveLength(2); + for (const embedding of embeddings) { + 
expect(embedding[0]).toBeCloseTo(0.6, 5); + expect(embedding[1]).toBeCloseTo(0.8, 5); + expect(magnitude(embedding)).toBeCloseTo(1, 5); + } + }); + + it("respects custom outputDimensionality", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + outputDimensionality: 768, + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(768); + }); + + it("sanitizes and normalizes embedQuery responses", async () => { + const fetchMock = createGeminiFetchMock([3, 4, Number.NaN]); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await expect(provider.embedQuery("test")).resolves.toEqual([0.6, 0.8, 0]); + }); + + it("uses custom outputDimensionality for each embedBatch request", async () => { + const fetchMock = createGeminiBatchFetchMock(2); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + outputDimensionality: 768, + }); + + await provider.embedBatch(["text1", "text2"]); + + const body = parseFetchBody(fetchMock); + expect(body.requests).toEqual([ + expect.objectContaining({ outputDimensionality: 768 }), + expect.objectContaining({ outputDimensionality: 768 }), + ]); + }); + + it("sanitizes and normalizes structured batch responses", async () => { + const fetchMock = createGeminiBatchFetchMock(1, [0, Number.POSITIVE_INFINITY, 5]); + vi.stubGlobal("fetch", fetchMock); + 
mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await expect( + provider.embedBatchInputs?.([ + { + text: "Image file: diagram.png", + parts: [ + { type: "text", text: "Image file: diagram.png" }, + { type: "inline-data", mimeType: "image/png", data: "img" }, + ], + }, + ]), + ).resolves.toEqual([[0, 0, 1]]); + }); + + it("supports multimodal embedBatchInputs requests", async () => { + const fetchMock = createGeminiBatchFetchMock(2); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + expect(provider.embedBatchInputs).toBeDefined(); + await provider.embedBatchInputs?.([ + { + text: "Image file: diagram.png", + parts: [ + { type: "text", text: "Image file: diagram.png" }, + { type: "inline-data", mimeType: "image/png", data: "img" }, + ], + }, + { + text: "Audio file: note.wav", + parts: [ + { type: "text", text: "Audio file: note.wav" }, + { type: "inline-data", mimeType: "audio/wav", data: "aud" }, + ], + }, + ]); + + const body = parseFetchBody(fetchMock); + expect(body.requests).toEqual([ + { + model: "models/gemini-embedding-2-preview", + content: { + parts: [ + { text: "Image file: diagram.png" }, + { inlineData: { mimeType: "image/png", data: "img" } }, + ], + }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 3072, + }, + { + model: "models/gemini-embedding-2-preview", + content: { + parts: [ + { text: "Audio file: note.wav" }, + { inlineData: { mimeType: "audio/wav", data: "aud" } }, + ], + }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 3072, + }, + ]); + }); + + it("throws for invalid outputDimensionality", async () => { + mockResolvedProviderKey(); + + await expect( + 
createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + outputDimensionality: 512, + }), + ).rejects.toThrow(/Invalid outputDimensionality 512/); + }); + + it("sanitizes non-finite values before normalization", async () => { + const fetchMock = createGeminiFetchMock([ + 1, + Number.NaN, + Number.POSITIVE_INFINITY, + Number.NEGATIVE_INFINITY, + ]); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const embedding = await provider.embedQuery("test"); + + expect(embedding).toEqual([1, 0, 0, 0]); + }); + + it("uses correct endpoint URL", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test"); + + const { url } = readFirstFetchRequest(fetchMock); + expect(url).toBe( + "https://generativelanguage.googleapis.com/v1beta/models/gemini-embedding-2-preview:embedContent", + ); + }); + + it("allows taskType override via options", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + taskType: "SEMANTIC_SIMILARITY", + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.taskType).toBe("SEMANTIC_SIMILARITY"); + }); +}); + +// ---------- Model normalization ---------- + +describe("gemini model normalization", () => { + it("handles models/ prefix for v2 
model", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "models/gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(3072); + }); + + it("handles gemini/ prefix for v2 model", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini/gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(3072); + }); + + it("handles google/ prefix for v2 model", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "google/gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(3072); + }); + + it("defaults to gemini-embedding-001 when model is empty", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider, client } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "", + fallback: "none", + }); + + expect(client.model).toBe(DEFAULT_GEMINI_EMBEDDING_MODEL); + expect(provider.model).toBe(DEFAULT_GEMINI_EMBEDDING_MODEL); + }); + + it("returns empty array for blank query text", async () => { + mockResolvedProviderKey(); + + 
const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const result = await provider.embedQuery(" "); + expect(result).toEqual([]); + }); + + it("returns empty array for empty batch", async () => { + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const result = await provider.embedBatch([]); + expect(result).toEqual([]); + }); +}); diff --git a/src/memory/embeddings-gemini.ts b/src/memory/embeddings-gemini.ts index 1d5cc5876ea..ab028241ed8 100644 --- a/src/memory/embeddings-gemini.ts +++ b/src/memory/embeddings-gemini.ts @@ -5,6 +5,8 @@ import { import { requireApiKey, resolveApiKeyForProvider } from "../agents/model-auth.js"; import { parseGeminiAuth } from "../infra/gemini-auth.js"; import type { SsrFPolicy } from "../infra/net/ssrf.js"; +import type { EmbeddingInput } from "./embedding-inputs.js"; +import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js"; import { debugEmbeddingsLog } from "./embeddings-debug.js"; import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js"; import { buildRemoteBaseUrlPolicy, withRemoteHttpResponse } from "./remote-http.js"; @@ -17,6 +19,7 @@ export type GeminiEmbeddingClient = { model: string; modelPath: string; apiKeys: string[]; + outputDimensionality?: number; }; const DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta"; @@ -24,6 +27,111 @@ export const DEFAULT_GEMINI_EMBEDDING_MODEL = "gemini-embedding-001"; const GEMINI_MAX_INPUT_TOKENS: Record = { "text-embedding-004": 2048, }; + +// --- gemini-embedding-2-preview support --- + +export const GEMINI_EMBEDDING_2_MODELS = new Set([ + "gemini-embedding-2-preview", + // Add the GA model name here once released. 
+]); + +const GEMINI_EMBEDDING_2_DEFAULT_DIMENSIONS = 3072; +const GEMINI_EMBEDDING_2_VALID_DIMENSIONS = [768, 1536, 3072] as const; + +export type GeminiTaskType = + | "RETRIEVAL_QUERY" + | "RETRIEVAL_DOCUMENT" + | "SEMANTIC_SIMILARITY" + | "CLASSIFICATION" + | "CLUSTERING" + | "QUESTION_ANSWERING" + | "FACT_VERIFICATION"; + +export type GeminiTextPart = { text: string }; +export type GeminiInlinePart = { + inlineData: { mimeType: string; data: string }; +}; +export type GeminiPart = GeminiTextPart | GeminiInlinePart; +export type GeminiEmbeddingRequest = { + content: { parts: GeminiPart[] }; + taskType: GeminiTaskType; + outputDimensionality?: number; + model?: string; +}; +export type GeminiTextEmbeddingRequest = GeminiEmbeddingRequest; + +/** Builds the text-only Gemini embedding request shape used across direct and batch APIs. */ +export function buildGeminiTextEmbeddingRequest(params: { + text: string; + taskType: GeminiTaskType; + outputDimensionality?: number; + modelPath?: string; +}): GeminiTextEmbeddingRequest { + return buildGeminiEmbeddingRequest({ + input: { text: params.text }, + taskType: params.taskType, + outputDimensionality: params.outputDimensionality, + modelPath: params.modelPath, + }); +} + +export function buildGeminiEmbeddingRequest(params: { + input: EmbeddingInput; + taskType: GeminiTaskType; + outputDimensionality?: number; + modelPath?: string; +}): GeminiEmbeddingRequest { + const request: GeminiEmbeddingRequest = { + content: { + parts: params.input.parts?.map((part) => + part.type === "text" + ? ({ text: part.text } satisfies GeminiTextPart) + : ({ + inlineData: { mimeType: part.mimeType, data: part.data }, + } satisfies GeminiInlinePart), + ) ?? 
[{ text: params.input.text }], + }, + taskType: params.taskType, + }; + if (params.modelPath) { + request.model = params.modelPath; + } + if (params.outputDimensionality != null) { + request.outputDimensionality = params.outputDimensionality; + } + return request; +} + +/** + * Returns true if the given model name is a gemini-embedding-2 variant that + * supports `outputDimensionality` and extended task types. + */ +export function isGeminiEmbedding2Model(model: string): boolean { + return GEMINI_EMBEDDING_2_MODELS.has(model); +} + +/** + * Validate and return the `outputDimensionality` for gemini-embedding-2 models. + * Returns `undefined` for older models (they don't support the param). + */ +export function resolveGeminiOutputDimensionality( + model: string, + requested?: number, +): number | undefined { + if (!isGeminiEmbedding2Model(model)) { + return undefined; + } + if (requested == null) { + return GEMINI_EMBEDDING_2_DEFAULT_DIMENSIONS; + } + const valid: readonly number[] = GEMINI_EMBEDDING_2_VALID_DIMENSIONS; + if (!valid.includes(requested)) { + throw new Error( + `Invalid outputDimensionality ${requested} for ${model}. 
Valid values: ${valid.join(", ")}`, + ); + } + return requested; +} function resolveRemoteApiKey(remoteApiKey: unknown): string | undefined { const trimmed = resolveMemorySecretInputString({ value: remoteApiKey, @@ -38,7 +146,7 @@ function resolveRemoteApiKey(remoteApiKey: unknown): string | undefined { return trimmed; } -function normalizeGeminiModel(model: string): string { +export function normalizeGeminiModel(model: string): string { const trimmed = model.trim(); if (!trimmed) { return DEFAULT_GEMINI_EMBEDDING_MODEL; @@ -53,6 +161,46 @@ function normalizeGeminiModel(model: string): string { return withoutPrefix; } +async function fetchGeminiEmbeddingPayload(params: { + client: GeminiEmbeddingClient; + endpoint: string; + body: unknown; +}): Promise<{ + embedding?: { values?: number[] }; + embeddings?: Array<{ values?: number[] }>; +}> { + return await executeWithApiKeyRotation({ + provider: "google", + apiKeys: params.client.apiKeys, + execute: async (apiKey) => { + const authHeaders = parseGeminiAuth(apiKey); + const headers = { + ...authHeaders.headers, + ...params.client.headers, + }; + return await withRemoteHttpResponse({ + url: params.endpoint, + ssrfPolicy: params.client.ssrfPolicy, + init: { + method: "POST", + headers, + body: JSON.stringify(params.body), + }, + onResponse: async (res) => { + if (!res.ok) { + const text = await res.text(); + throw new Error(`gemini embeddings failed: ${res.status} ${text}`); + } + return (await res.json()) as { + embedding?: { values?: number[] }; + embeddings?: Array<{ values?: number[] }>; + }; + }, + }); + }, + }); +} + function normalizeGeminiBaseUrl(raw: string): string { const trimmed = raw.replace(/\/+$/, ""); const openAiIndex = trimmed.indexOf("/openai"); @@ -73,70 +221,53 @@ export async function createGeminiEmbeddingProvider( const baseUrl = client.baseUrl.replace(/\/$/, ""); const embedUrl = `${baseUrl}/${client.modelPath}:embedContent`; const batchUrl = `${baseUrl}/${client.modelPath}:batchEmbedContents`; 
- - const fetchWithGeminiAuth = async (apiKey: string, endpoint: string, body: unknown) => { - const authHeaders = parseGeminiAuth(apiKey); - const headers = { - ...authHeaders.headers, - ...client.headers, - }; - const payload = await withRemoteHttpResponse({ - url: endpoint, - ssrfPolicy: client.ssrfPolicy, - init: { - method: "POST", - headers, - body: JSON.stringify(body), - }, - onResponse: async (res) => { - if (!res.ok) { - const text = await res.text(); - throw new Error(`gemini embeddings failed: ${res.status} ${text}`); - } - return (await res.json()) as { - embedding?: { values?: number[] }; - embeddings?: Array<{ values?: number[] }>; - }; - }, - }); - return payload; - }; + const isV2 = isGeminiEmbedding2Model(client.model); + const outputDimensionality = client.outputDimensionality; const embedQuery = async (text: string): Promise => { if (!text.trim()) { return []; } - const payload = await executeWithApiKeyRotation({ - provider: "google", - apiKeys: client.apiKeys, - execute: (apiKey) => - fetchWithGeminiAuth(apiKey, embedUrl, { - content: { parts: [{ text }] }, - taskType: "RETRIEVAL_QUERY", - }), + const payload = await fetchGeminiEmbeddingPayload({ + client, + endpoint: embedUrl, + body: buildGeminiTextEmbeddingRequest({ + text, + taskType: options.taskType ?? "RETRIEVAL_QUERY", + outputDimensionality: isV2 ? outputDimensionality : undefined, + }), }); - return payload.embedding?.values ?? []; + return sanitizeAndNormalizeEmbedding(payload.embedding?.values ?? []); + }; + + const embedBatchInputs = async (inputs: EmbeddingInput[]): Promise => { + if (inputs.length === 0) { + return []; + } + const payload = await fetchGeminiEmbeddingPayload({ + client, + endpoint: batchUrl, + body: { + requests: inputs.map((input) => + buildGeminiEmbeddingRequest({ + input, + modelPath: client.modelPath, + taskType: options.taskType ?? "RETRIEVAL_DOCUMENT", + outputDimensionality: isV2 ? 
outputDimensionality : undefined, + }), + ), + }, + }); + const embeddings = Array.isArray(payload.embeddings) ? payload.embeddings : []; + return inputs.map((_, index) => sanitizeAndNormalizeEmbedding(embeddings[index]?.values ?? [])); }; const embedBatch = async (texts: string[]): Promise => { - if (texts.length === 0) { - return []; - } - const requests = texts.map((text) => ({ - model: client.modelPath, - content: { parts: [{ text }] }, - taskType: "RETRIEVAL_DOCUMENT", - })); - const payload = await executeWithApiKeyRotation({ - provider: "google", - apiKeys: client.apiKeys, - execute: (apiKey) => - fetchWithGeminiAuth(apiKey, batchUrl, { - requests, - }), - }); - const embeddings = Array.isArray(payload.embeddings) ? payload.embeddings : []; - return texts.map((_, index) => embeddings[index]?.values ?? []); + return await embedBatchInputs( + texts.map((text) => ({ + text, + })), + ); }; return { @@ -146,6 +277,7 @@ export async function createGeminiEmbeddingProvider( maxInputTokens: GEMINI_MAX_INPUT_TOKENS[client.model], embedQuery, embedBatch, + embedBatchInputs, }, client, }; @@ -183,13 +315,18 @@ export async function resolveGeminiEmbeddingClient( }); const model = normalizeGeminiModel(options.model); const modelPath = buildGeminiModelPath(model); + const outputDimensionality = resolveGeminiOutputDimensionality( + model, + options.outputDimensionality, + ); debugEmbeddingsLog("memory embeddings: gemini client", { rawBaseUrl, baseUrl, model, modelPath, + outputDimensionality, embedEndpoint: `${baseUrl}/${modelPath}:embedContent`, batchEndpoint: `${baseUrl}/${modelPath}:batchEmbedContents`, }); - return { baseUrl, headers, ssrfPolicy, model, modelPath, apiKeys }; + return { baseUrl, headers, ssrfPolicy, model, modelPath, apiKeys, outputDimensionality }; } diff --git a/src/memory/embeddings-ollama.ts b/src/memory/embeddings-ollama.ts index 7ccdff6560d..7bd2bcf7428 100644 --- a/src/memory/embeddings-ollama.ts +++ b/src/memory/embeddings-ollama.ts @@ -3,6 +3,7 
@@ import { resolveOllamaApiBase } from "../agents/ollama-models.js"; import { formatErrorMessage } from "../infra/errors.js"; import type { SsrFPolicy } from "../infra/net/ssrf.js"; import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js"; +import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js"; import { normalizeEmbeddingModelWithPrefixes } from "./embeddings-model-normalize.js"; import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js"; import { buildRemoteBaseUrlPolicy, withRemoteHttpResponse } from "./remote-http.js"; @@ -19,15 +20,6 @@ type OllamaEmbeddingClientConfig = Omit; export const DEFAULT_OLLAMA_EMBEDDING_MODEL = "nomic-embed-text"; -function sanitizeAndNormalizeEmbedding(vec: number[]): number[] { - const sanitized = vec.map((value) => (Number.isFinite(value) ? value : 0)); - const magnitude = Math.sqrt(sanitized.reduce((sum, value) => sum + value * value, 0)); - if (magnitude < 1e-10) { - return sanitized; - } - return sanitized.map((value) => value / magnitude); -} - function normalizeOllamaModel(model: string): string { return normalizeEmbeddingModelWithPrefixes({ model, diff --git a/src/memory/embeddings.ts b/src/memory/embeddings.ts index ca6b4046e2c..f9cc76eb19d 100644 --- a/src/memory/embeddings.ts +++ b/src/memory/embeddings.ts @@ -4,7 +4,13 @@ import type { OpenClawConfig } from "../config/config.js"; import type { SecretInput } from "../config/types.secrets.js"; import { formatErrorMessage } from "../infra/errors.js"; import { resolveUserPath } from "../utils.js"; -import { createGeminiEmbeddingProvider, type GeminiEmbeddingClient } from "./embeddings-gemini.js"; +import type { EmbeddingInput } from "./embedding-inputs.js"; +import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js"; +import { + createGeminiEmbeddingProvider, + type GeminiEmbeddingClient, + type GeminiTaskType, +} from "./embeddings-gemini.js"; import { createMistralEmbeddingProvider, type 
MistralEmbeddingClient, @@ -14,15 +20,6 @@ import { createOpenAiEmbeddingProvider, type OpenAiEmbeddingClient } from "./emb import { createVoyageEmbeddingProvider, type VoyageEmbeddingClient } from "./embeddings-voyage.js"; import { importNodeLlamaCpp } from "./node-llama.js"; -function sanitizeAndNormalizeEmbedding(vec: number[]): number[] { - const sanitized = vec.map((value) => (Number.isFinite(value) ? value : 0)); - const magnitude = Math.sqrt(sanitized.reduce((sum, value) => sum + value * value, 0)); - if (magnitude < 1e-10) { - return sanitized; - } - return sanitized.map((value) => value / magnitude); -} - export type { GeminiEmbeddingClient } from "./embeddings-gemini.js"; export type { MistralEmbeddingClient } from "./embeddings-mistral.js"; export type { OpenAiEmbeddingClient } from "./embeddings-openai.js"; @@ -35,6 +32,7 @@ export type EmbeddingProvider = { maxInputTokens?: number; embedQuery: (text: string) => Promise; embedBatch: (texts: string[]) => Promise; + embedBatchInputs?: (inputs: EmbeddingInput[]) => Promise; }; export type EmbeddingProviderId = "openai" | "local" | "gemini" | "voyage" | "mistral" | "ollama"; @@ -74,6 +72,10 @@ export type EmbeddingProviderOptions = { modelPath?: string; modelCacheDir?: string; }; + /** Gemini embedding-2: output vector dimensions (768, 1536, or 3072). */ + outputDimensionality?: number; + /** Gemini: override the default task type sent with embedding requests. 
*/ + taskType?: GeminiTaskType; }; export const DEFAULT_LOCAL_MODEL = diff --git a/src/memory/index.test.ts b/src/memory/index.test.ts index 43ebcca58c2..23371056b18 100644 --- a/src/memory/index.test.ts +++ b/src/memory/index.test.ts @@ -1,3 +1,4 @@ +import { randomUUID } from "node:crypto"; import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; @@ -6,27 +7,86 @@ import { getMemorySearchManager, type MemoryIndexManager } from "./index.js"; import "./test-runtime-mocks.js"; let embedBatchCalls = 0; +let embedBatchInputCalls = 0; +let providerCalls: Array<{ provider?: string; model?: string; outputDimensionality?: number }> = []; vi.mock("./embeddings.js", () => { const embedText = (text: string) => { const lower = text.toLowerCase(); const alpha = lower.split("alpha").length - 1; const beta = lower.split("beta").length - 1; - return [alpha, beta]; + const image = lower.split("image").length - 1; + const audio = lower.split("audio").length - 1; + return [alpha, beta, image, audio]; }; return { - createEmbeddingProvider: async (options: { model?: string }) => ({ - requestedProvider: "openai", - provider: { - id: "mock", - model: options.model ?? "mock-embed", - embedQuery: async (text: string) => embedText(text), - embedBatch: async (texts: string[]) => { - embedBatchCalls += 1; - return texts.map(embedText); + createEmbeddingProvider: async (options: { + provider?: string; + model?: string; + outputDimensionality?: number; + }) => { + providerCalls.push({ + provider: options.provider, + model: options.model, + outputDimensionality: options.outputDimensionality, + }); + const providerId = options.provider === "gemini" ? "gemini" : "mock"; + const model = options.model ?? "mock-embed"; + return { + requestedProvider: options.provider ?? 
"openai", + provider: { + id: providerId, + model, + embedQuery: async (text: string) => embedText(text), + embedBatch: async (texts: string[]) => { + embedBatchCalls += 1; + return texts.map(embedText); + }, + ...(providerId === "gemini" + ? { + embedBatchInputs: async ( + inputs: Array<{ + text: string; + parts?: Array< + | { type: "text"; text: string } + | { type: "inline-data"; mimeType: string; data: string } + >; + }>, + ) => { + embedBatchInputCalls += 1; + return inputs.map((input) => { + const inlineData = input.parts?.find((part) => part.type === "inline-data"); + if (inlineData?.type === "inline-data" && inlineData.data.length > 9000) { + throw new Error("payload too large"); + } + const mimeType = + inlineData?.type === "inline-data" ? inlineData.mimeType : undefined; + if (mimeType?.startsWith("image/")) { + return [0, 0, 1, 0]; + } + if (mimeType?.startsWith("audio/")) { + return [0, 0, 0, 1]; + } + return embedText(input.text); + }); + }, + } + : {}), }, - }, - }), + ...(providerId === "gemini" + ? 
{ + gemini: { + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + headers: {}, + model, + modelPath: `models/${model}`, + apiKeys: ["test-key"], + outputDimensionality: options.outputDimensionality, + }, + } + : {}), + }; + }, }; }); @@ -38,6 +98,7 @@ describe("memory index", () => { let indexVectorPath = ""; let indexMainPath = ""; let indexExtraPath = ""; + let indexMultimodalPath = ""; let indexStatusPath = ""; let indexSourceChangePath = ""; let indexModelPath = ""; @@ -71,6 +132,7 @@ describe("memory index", () => { indexMainPath = path.join(workspaceDir, "index-main.sqlite"); indexVectorPath = path.join(workspaceDir, "index-vector.sqlite"); indexExtraPath = path.join(workspaceDir, "index-extra.sqlite"); + indexMultimodalPath = path.join(workspaceDir, "index-multimodal.sqlite"); indexStatusPath = path.join(workspaceDir, "index-status.sqlite"); indexSourceChangePath = path.join(workspaceDir, "index-source-change.sqlite"); indexModelPath = path.join(workspaceDir, "index-model-change.sqlite"); @@ -93,6 +155,8 @@ describe("memory index", () => { // Keep atomic reindex tests on the safe path. vi.stubEnv("OPENCLAW_TEST_MEMORY_UNSAFE_REINDEX", "1"); embedBatchCalls = 0; + embedBatchInputCalls = 0; + providerCalls = []; // Keep the workspace stable to allow manager reuse across tests. await fs.mkdir(memoryDir, { recursive: true }); @@ -119,7 +183,14 @@ describe("memory index", () => { extraPaths?: string[]; sources?: Array<"memory" | "sessions">; sessionMemory?: boolean; + provider?: "openai" | "gemini"; model?: string; + outputDimensionality?: number; + multimodal?: { + enabled?: boolean; + modalities?: Array<"image" | "audio" | "all">; + maxFileBytes?: number; + }; vectorEnabled?: boolean; cacheEnabled?: boolean; minScore?: number; @@ -130,8 +201,9 @@ describe("memory index", () => { defaults: { workspace: workspaceDir, memorySearch: { - provider: "openai", + provider: params.provider ?? "openai", model: params.model ?? 
"mock-embed", + outputDimensionality: params.outputDimensionality, store: { path: params.storePath, vector: { enabled: params.vectorEnabled ?? false } }, // Perf: keep test indexes to a single chunk to reduce sqlite work. chunking: { tokens: 4000, overlap: 0 }, @@ -142,6 +214,7 @@ describe("memory index", () => { }, cache: params.cacheEnabled ? { enabled: true } : undefined, extraPaths: params.extraPaths, + multimodal: params.multimodal, sources: params.sources, experimental: { sessionMemory: params.sessionMemory ?? false }, }, @@ -217,6 +290,103 @@ describe("memory index", () => { ); }); + it("indexes multimodal image and audio files from extra paths with Gemini structured inputs", async () => { + const mediaDir = path.join(workspaceDir, "media-memory"); + await fs.mkdir(mediaDir, { recursive: true }); + await fs.writeFile(path.join(mediaDir, "diagram.png"), Buffer.from("png")); + await fs.writeFile(path.join(mediaDir, "meeting.wav"), Buffer.from("wav")); + + const cfg = createCfg({ + storePath: indexMultimodalPath, + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + multimodal: { enabled: true, modalities: ["image", "audio"] }, + }); + const manager = await getPersistentManager(cfg); + await manager.sync({ reason: "test" }); + + expect(embedBatchInputCalls).toBeGreaterThan(0); + + const imageResults = await manager.search("image"); + expect(imageResults.some((result) => result.path.endsWith("diagram.png"))).toBe(true); + + const audioResults = await manager.search("audio"); + expect(audioResults.some((result) => result.path.endsWith("meeting.wav"))).toBe(true); + }); + + it("skips oversized multimodal inputs without aborting sync", async () => { + const mediaDir = path.join(workspaceDir, "media-oversize"); + await fs.mkdir(mediaDir, { recursive: true }); + await fs.writeFile(path.join(mediaDir, "huge.png"), Buffer.alloc(7000, 1)); + + const cfg = createCfg({ + storePath: path.join(workspaceDir, 
`index-oversize-${randomUUID()}.sqlite`), + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + multimodal: { enabled: true, modalities: ["image"] }, + }); + const manager = requireManager(await getMemorySearchManager({ cfg, agentId: "main" })); + await manager.sync({ reason: "test" }); + + expect(embedBatchInputCalls).toBeGreaterThan(0); + const imageResults = await manager.search("image"); + expect(imageResults.some((result) => result.path.endsWith("huge.png"))).toBe(false); + + const alphaResults = await manager.search("alpha"); + expect(alphaResults.some((result) => result.path.endsWith("memory/2026-01-12.md"))).toBe(true); + + await manager.close?.(); + }); + + it("reindexes a multimodal file after a transient mid-sync disappearance", async () => { + const mediaDir = path.join(workspaceDir, "media-race"); + const imagePath = path.join(mediaDir, "diagram.png"); + await fs.mkdir(mediaDir, { recursive: true }); + await fs.writeFile(imagePath, Buffer.from("png")); + + const cfg = createCfg({ + storePath: path.join(workspaceDir, `index-race-${randomUUID()}.sqlite`), + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + multimodal: { enabled: true, modalities: ["image"] }, + }); + const manager = requireManager(await getMemorySearchManager({ cfg, agentId: "main" })); + const realReadFile = fs.readFile.bind(fs); + let imageReads = 0; + const readSpy = vi.spyOn(fs, "readFile").mockImplementation(async (...args) => { + const [targetPath] = args; + if (typeof targetPath === "string" && targetPath === imagePath) { + imageReads += 1; + if (imageReads === 2) { + const err = Object.assign( + new Error(`ENOENT: no such file or directory, open '${imagePath}'`), + { + code: "ENOENT", + }, + ) as NodeJS.ErrnoException; + throw err; + } + } + return await realReadFile(...args); + }); + + await manager.sync({ reason: "test" }); + readSpy.mockRestore(); + + const callsAfterFirstSync = embedBatchInputCalls; + 
(manager as unknown as { dirty: boolean }).dirty = true; + await manager.sync({ reason: "test" }); + + expect(embedBatchInputCalls).toBeGreaterThan(callsAfterFirstSync); + const results = await manager.search("image"); + expect(results.some((result) => result.path.endsWith("diagram.png"))).toBe(true); + + await manager.close?.(); + }); + it("keeps dirty false in status-only manager after prior indexing", async () => { const cfg = createCfg({ storePath: indexStatusPath }); @@ -342,6 +512,143 @@ describe("memory index", () => { await secondManager.close?.(); }); + it("passes Gemini outputDimensionality from config into the provider", async () => { + const cfg = createCfg({ + storePath: indexMainPath, + provider: "gemini", + model: "gemini-embedding-2-preview", + outputDimensionality: 1536, + }); + + const result = await getMemorySearchManager({ cfg, agentId: "main" }); + const manager = requireManager(result); + + expect( + providerCalls.some( + (call) => + call.provider === "gemini" && + call.model === "gemini-embedding-2-preview" && + call.outputDimensionality === 1536, + ), + ).toBe(true); + await manager.close?.(); + }); + + it("reindexes when Gemini outputDimensionality changes", async () => { + const base = createCfg({ + storePath: indexModelPath, + provider: "gemini", + model: "gemini-embedding-2-preview", + outputDimensionality: 3072, + }); + const baseAgents = base.agents!; + const baseDefaults = baseAgents.defaults!; + const baseMemorySearch = baseDefaults.memorySearch!; + + const first = await getMemorySearchManager({ cfg: base, agentId: "main" }); + const firstManager = requireManager(first); + await firstManager.sync?.({ reason: "test" }); + const callsAfterFirstSync = embedBatchCalls; + await firstManager.close?.(); + + const second = await getMemorySearchManager({ + cfg: { + ...base, + agents: { + ...baseAgents, + defaults: { + ...baseDefaults, + memorySearch: { + ...baseMemorySearch, + outputDimensionality: 768, + }, + }, + }, + }, + agentId: "main", 
+ }); + const secondManager = requireManager(second); + await secondManager.sync?.({ reason: "test" }); + expect(embedBatchCalls).toBeGreaterThan(callsAfterFirstSync); + await secondManager.close?.(); + }); + + it("reindexes when extraPaths change", async () => { + const storePath = path.join(workspaceDir, `index-scope-extra-${randomUUID()}.sqlite`); + const firstExtraDir = path.join(workspaceDir, "scope-extra-a"); + const secondExtraDir = path.join(workspaceDir, "scope-extra-b"); + await fs.rm(firstExtraDir, { recursive: true, force: true }); + await fs.rm(secondExtraDir, { recursive: true, force: true }); + await fs.mkdir(firstExtraDir, { recursive: true }); + await fs.mkdir(secondExtraDir, { recursive: true }); + await fs.writeFile(path.join(firstExtraDir, "a.md"), "alpha only"); + await fs.writeFile(path.join(secondExtraDir, "b.md"), "beta only"); + + const first = await getMemorySearchManager({ + cfg: createCfg({ + storePath, + extraPaths: [firstExtraDir], + }), + agentId: "main", + }); + const firstManager = requireManager(first); + await firstManager.sync?.({ reason: "test" }); + await firstManager.close?.(); + + const second = await getMemorySearchManager({ + cfg: createCfg({ + storePath, + extraPaths: [secondExtraDir], + }), + agentId: "main", + }); + const secondManager = requireManager(second); + await secondManager.sync?.({ reason: "test" }); + const results = await secondManager.search("beta"); + expect(results.some((result) => result.path.endsWith("scope-extra-b/b.md"))).toBe(true); + expect(results.some((result) => result.path.endsWith("scope-extra-a/a.md"))).toBe(false); + await secondManager.close?.(); + }); + + it("reindexes when multimodal settings change", async () => { + const storePath = path.join(workspaceDir, `index-scope-multimodal-${randomUUID()}.sqlite`); + const mediaDir = path.join(workspaceDir, "scope-media"); + await fs.rm(mediaDir, { recursive: true, force: true }); + await fs.mkdir(mediaDir, { recursive: true }); + await 
fs.writeFile(path.join(mediaDir, "diagram.png"), Buffer.from("png")); + + const first = await getMemorySearchManager({ + cfg: createCfg({ + storePath, + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + }), + agentId: "main", + }); + const firstManager = requireManager(first); + await firstManager.sync?.({ reason: "test" }); + const multimodalCallsAfterFirstSync = embedBatchInputCalls; + await firstManager.close?.(); + + const second = await getMemorySearchManager({ + cfg: createCfg({ + storePath, + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + multimodal: { enabled: true, modalities: ["image"] }, + }), + agentId: "main", + }); + const secondManager = requireManager(second); + await secondManager.sync?.({ reason: "test" }); + expect(embedBatchInputCalls).toBeGreaterThan(multimodalCallsAfterFirstSync); + const results = await secondManager.search("image"); + expect(results.some((result) => result.path.endsWith("scope-media/diagram.png"))).toBe(true); + await secondManager.close?.(); + }); + it("reuses cached embeddings on forced reindex", async () => { const cfg = createCfg({ storePath: indexMainPath, cacheEnabled: true }); const manager = await getPersistentManager(cfg); diff --git a/src/memory/internal.test.ts b/src/memory/internal.test.ts index 0f17843a88d..d18120b413a 100644 --- a/src/memory/internal.test.ts +++ b/src/memory/internal.test.ts @@ -3,12 +3,17 @@ import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it } from "vitest"; import { + buildMultimodalChunkForIndexing, buildFileEntry, chunkMarkdown, listMemoryFiles, normalizeExtraMemoryPaths, remapChunkLines, } from "./internal.js"; +import { + DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES, + type MemoryMultimodalSettings, +} from "./multimodal.js"; function setupTempDirLifecycle(prefix: string): () => string { let tmpDir = ""; @@ -38,6 +43,11 @@ describe("normalizeExtraMemoryPaths", 
() => { describe("listMemoryFiles", () => { const getTmpDir = setupTempDirLifecycle("memory-test-"); + const multimodal: MemoryMultimodalSettings = { + enabled: true, + modalities: ["image", "audio"], + maxFileBytes: DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES, + }; it("includes files from additional paths (directory)", async () => { const tmpDir = getTmpDir(); @@ -131,10 +141,29 @@ describe("listMemoryFiles", () => { const memoryMatches = files.filter((file) => file.endsWith("MEMORY.md")); expect(memoryMatches).toHaveLength(1); }); + + it("includes image and audio files from extra paths when multimodal is enabled", async () => { + const tmpDir = getTmpDir(); + const extraDir = path.join(tmpDir, "media"); + await fs.mkdir(extraDir, { recursive: true }); + await fs.writeFile(path.join(extraDir, "diagram.png"), Buffer.from("png")); + await fs.writeFile(path.join(extraDir, "note.wav"), Buffer.from("wav")); + await fs.writeFile(path.join(extraDir, "ignore.bin"), Buffer.from("bin")); + + const files = await listMemoryFiles(tmpDir, [extraDir], multimodal); + expect(files.some((file) => file.endsWith("diagram.png"))).toBe(true); + expect(files.some((file) => file.endsWith("note.wav"))).toBe(true); + expect(files.some((file) => file.endsWith("ignore.bin"))).toBe(false); + }); }); describe("buildFileEntry", () => { const getTmpDir = setupTempDirLifecycle("memory-build-entry-"); + const multimodal: MemoryMultimodalSettings = { + enabled: true, + modalities: ["image", "audio"], + maxFileBytes: DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES, + }; it("returns null when the file disappears before reading", async () => { const tmpDir = getTmpDir(); @@ -154,6 +183,59 @@ describe("buildFileEntry", () => { expect(entry?.path).toBe("note.md"); expect(entry?.size).toBeGreaterThan(0); }); + + it("returns multimodal metadata for eligible image files", async () => { + const tmpDir = getTmpDir(); + const target = path.join(tmpDir, "diagram.png"); + await fs.writeFile(target, Buffer.from("png")); + 
+ const entry = await buildFileEntry(target, tmpDir, multimodal); + + expect(entry).toMatchObject({ + path: "diagram.png", + kind: "multimodal", + modality: "image", + mimeType: "image/png", + contentText: "Image file: diagram.png", + }); + }); + + it("builds a multimodal chunk lazily for indexing", async () => { + const tmpDir = getTmpDir(); + const target = path.join(tmpDir, "diagram.png"); + await fs.writeFile(target, Buffer.from("png")); + + const entry = await buildFileEntry(target, tmpDir, multimodal); + const built = await buildMultimodalChunkForIndexing(entry!); + + expect(built?.chunk.embeddingInput?.parts).toEqual([ + { type: "text", text: "Image file: diagram.png" }, + expect.objectContaining({ type: "inline-data", mimeType: "image/png" }), + ]); + expect(built?.structuredInputBytes).toBeGreaterThan(0); + }); + + it("skips lazy multimodal indexing when the file grows after discovery", async () => { + const tmpDir = getTmpDir(); + const target = path.join(tmpDir, "diagram.png"); + await fs.writeFile(target, Buffer.from("png")); + + const entry = await buildFileEntry(target, tmpDir, multimodal); + await fs.writeFile(target, Buffer.alloc(entry!.size + 32, 1)); + + await expect(buildMultimodalChunkForIndexing(entry!)).resolves.toBeNull(); + }); + + it("skips lazy multimodal indexing when file bytes change after discovery", async () => { + const tmpDir = getTmpDir(); + const target = path.join(tmpDir, "diagram.png"); + await fs.writeFile(target, Buffer.from("png")); + + const entry = await buildFileEntry(target, tmpDir, multimodal); + await fs.writeFile(target, Buffer.from("gif")); + + await expect(buildMultimodalChunkForIndexing(entry!)).resolves.toBeNull(); + }); }); describe("chunkMarkdown", () => { diff --git a/src/memory/internal.ts b/src/memory/internal.ts index d39e355d2c0..d1d7e9c2e96 100644 --- a/src/memory/internal.ts +++ b/src/memory/internal.ts @@ -2,8 +2,17 @@ import crypto from "node:crypto"; import fsSync from "node:fs"; import fs from 
"node:fs/promises"; import path from "node:path"; +import { detectMime } from "../media/mime.js"; import { runTasksWithConcurrency } from "../utils/run-with-concurrency.js"; +import { estimateStructuredEmbeddingInputBytes } from "./embedding-input-limits.js"; +import { buildTextEmbeddingInput, type EmbeddingInput } from "./embedding-inputs.js"; import { isFileMissingError } from "./fs-utils.js"; +import { + buildMemoryMultimodalLabel, + classifyMemoryMultimodalPath, + type MemoryMultimodalModality, + type MemoryMultimodalSettings, +} from "./multimodal.js"; export type MemoryFileEntry = { path: string; @@ -11,6 +20,11 @@ export type MemoryFileEntry = { mtimeMs: number; size: number; hash: string; + dataHash?: string; + kind?: "markdown" | "multimodal"; + contentText?: string; + modality?: MemoryMultimodalModality; + mimeType?: string; }; export type MemoryChunk = { @@ -18,6 +32,18 @@ export type MemoryChunk = { endLine: number; text: string; hash: string; + embeddingInput?: EmbeddingInput; +}; + +export type MultimodalMemoryChunk = { + chunk: MemoryChunk; + structuredInputBytes: number; +}; + +const DISABLED_MULTIMODAL_SETTINGS: MemoryMultimodalSettings = { + enabled: false, + modalities: [], + maxFileBytes: 0, }; export function ensureDir(dir: string): string { @@ -56,7 +82,16 @@ export function isMemoryPath(relPath: string): boolean { return normalized.startsWith("memory/"); } -async function walkDir(dir: string, files: string[]) { +function isAllowedMemoryFilePath(filePath: string, multimodal?: MemoryMultimodalSettings): boolean { + if (filePath.endsWith(".md")) { + return true; + } + return ( + classifyMemoryMultimodalPath(filePath, multimodal ?? 
DISABLED_MULTIMODAL_SETTINGS) !== null + ); +} + +async function walkDir(dir: string, files: string[], multimodal?: MemoryMultimodalSettings) { const entries = await fs.readdir(dir, { withFileTypes: true }); for (const entry of entries) { const full = path.join(dir, entry.name); @@ -64,13 +99,13 @@ async function walkDir(dir: string, files: string[]) { continue; } if (entry.isDirectory()) { - await walkDir(full, files); + await walkDir(full, files, multimodal); continue; } if (!entry.isFile()) { continue; } - if (!entry.name.endsWith(".md")) { + if (!isAllowedMemoryFilePath(full, multimodal)) { continue; } files.push(full); @@ -80,6 +115,7 @@ async function walkDir(dir: string, files: string[]) { export async function listMemoryFiles( workspaceDir: string, extraPaths?: string[], + multimodal?: MemoryMultimodalSettings, ): Promise { const result: string[] = []; const memoryFile = path.join(workspaceDir, "MEMORY.md"); @@ -117,10 +153,10 @@ export async function listMemoryFiles( continue; } if (stat.isDirectory()) { - await walkDir(inputPath, result); + await walkDir(inputPath, result, multimodal); continue; } - if (stat.isFile() && inputPath.endsWith(".md")) { + if (stat.isFile() && isAllowedMemoryFilePath(inputPath, multimodal)) { result.push(inputPath); } } catch {} @@ -152,6 +188,7 @@ export function hashText(value: string): string { export async function buildFileEntry( absPath: string, workspaceDir: string, + multimodal?: MemoryMultimodalSettings, ): Promise { let stat; try { @@ -162,6 +199,49 @@ export async function buildFileEntry( } throw err; } + const normalizedPath = path.relative(workspaceDir, absPath).replace(/\\/g, "/"); + const multimodalSettings = multimodal ?? 
DISABLED_MULTIMODAL_SETTINGS; + const modality = classifyMemoryMultimodalPath(absPath, multimodalSettings); + if (modality) { + if (stat.size > multimodalSettings.maxFileBytes) { + return null; + } + let buffer: Buffer; + try { + buffer = await fs.readFile(absPath); + } catch (err) { + if (isFileMissingError(err)) { + return null; + } + throw err; + } + const mimeType = await detectMime({ buffer: buffer.subarray(0, 512), filePath: absPath }); + if (!mimeType || !mimeType.startsWith(`${modality}/`)) { + return null; + } + const contentText = buildMemoryMultimodalLabel(modality, normalizedPath); + const dataHash = crypto.createHash("sha256").update(buffer).digest("hex"); + const chunkHash = hashText( + JSON.stringify({ + path: normalizedPath, + contentText, + mimeType, + dataHash, + }), + ); + return { + path: normalizedPath, + absPath, + mtimeMs: stat.mtimeMs, + size: stat.size, + hash: chunkHash, + dataHash, + kind: "multimodal", + contentText, + modality, + mimeType, + }; + } let content: string; try { content = await fs.readFile(absPath, "utf-8"); @@ -173,11 +253,81 @@ export async function buildFileEntry( } const hash = hashText(content); return { - path: path.relative(workspaceDir, absPath).replace(/\\/g, "/"), + path: normalizedPath, absPath, mtimeMs: stat.mtimeMs, size: stat.size, hash, + kind: "markdown", + }; +} + +async function loadMultimodalEmbeddingInput( + entry: Pick< + MemoryFileEntry, + "absPath" | "contentText" | "mimeType" | "kind" | "size" | "dataHash" + >, +): Promise { + if (entry.kind !== "multimodal" || !entry.contentText || !entry.mimeType) { + return null; + } + let stat; + try { + stat = await fs.stat(entry.absPath); + } catch (err) { + if (isFileMissingError(err)) { + return null; + } + throw err; + } + if (stat.size !== entry.size) { + return null; + } + let buffer: Buffer; + try { + buffer = await fs.readFile(entry.absPath); + } catch (err) { + if (isFileMissingError(err)) { + return null; + } + throw err; + } + const dataHash = 
crypto.createHash("sha256").update(buffer).digest("hex"); + if (entry.dataHash && entry.dataHash !== dataHash) { + return null; + } + return { + text: entry.contentText, + parts: [ + { type: "text", text: entry.contentText }, + { + type: "inline-data", + mimeType: entry.mimeType, + data: buffer.toString("base64"), + }, + ], + }; +} + +export async function buildMultimodalChunkForIndexing( + entry: Pick< + MemoryFileEntry, + "absPath" | "contentText" | "mimeType" | "kind" | "hash" | "size" | "dataHash" + >, +): Promise { + const embeddingInput = await loadMultimodalEmbeddingInput(entry); + if (!embeddingInput) { + return null; + } + return { + chunk: { + startLine: 1, + endLine: 1, + text: entry.contentText ?? embeddingInput.text, + hash: entry.hash, + embeddingInput, + }, + structuredInputBytes: estimateStructuredEmbeddingInputBytes(embeddingInput), }; } @@ -213,6 +363,7 @@ export function chunkMarkdown( endLine, text, hash: hashText(text), + embeddingInput: buildTextEmbeddingInput(text), }); }; diff --git a/src/memory/manager-embedding-ops.ts b/src/memory/manager-embedding-ops.ts index 965058c8a3b..49171d809cb 100644 --- a/src/memory/manager-embedding-ops.ts +++ b/src/memory/manager-embedding-ops.ts @@ -8,8 +8,14 @@ import { } from "./batch-openai.js"; import { type VoyageBatchRequest, runVoyageEmbeddingBatches } from "./batch-voyage.js"; import { enforceEmbeddingMaxInputTokens } from "./embedding-chunk-limits.js"; -import { estimateUtf8Bytes } from "./embedding-input-limits.js"; import { + estimateStructuredEmbeddingInputBytes, + estimateUtf8Bytes, +} from "./embedding-input-limits.js"; +import { type EmbeddingInput, hasNonTextEmbeddingParts } from "./embedding-inputs.js"; +import { buildGeminiEmbeddingRequest } from "./embeddings-gemini.js"; +import { + buildMultimodalChunkForIndexing, chunkMarkdown, hashText, parseEmbedding, @@ -52,7 +58,9 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { let currentTokens = 0; for (const chunk 
of chunks) { - const estimate = estimateUtf8Bytes(chunk.text); + const estimate = chunk.embeddingInput + ? estimateStructuredEmbeddingInputBytes(chunk.embeddingInput) + : estimateUtf8Bytes(chunk.text); const wouldExceed = current.length > 0 && currentTokens + estimate > EMBEDDING_BATCH_MAX_TOKENS; if (wouldExceed) { @@ -187,9 +195,22 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { const missingChunks = missing.map((m) => m.chunk); const batches = this.buildEmbeddingBatches(missingChunks); const toCache: Array<{ hash: string; embedding: number[] }> = []; + const provider = this.provider; + if (!provider) { + throw new Error("Cannot embed batch in FTS-only mode (no embedding provider)"); + } let cursor = 0; for (const batch of batches) { - const batchEmbeddings = await this.embedBatchWithRetry(batch.map((chunk) => chunk.text)); + const inputs = batch.map((chunk) => chunk.embeddingInput ?? { text: chunk.text }); + const hasStructuredInputs = inputs.some((input) => hasNonTextEmbeddingParts(input)); + if (hasStructuredInputs && !provider.embedBatchInputs) { + throw new Error( + `Embedding provider "${provider.id}" does not support multimodal memory inputs.`, + ); + } + const batchEmbeddings = hasStructuredInputs + ? await this.embedBatchInputsWithRetry(inputs) + : await this.embedBatchWithRetry(batch.map((chunk) => chunk.text)); for (let i = 0; i < batch.length; i += 1) { const item = missing[cursor + i]; const embedding = batchEmbeddings[i] ?? 
[]; @@ -236,6 +257,7 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { provider: "gemini", baseUrl: this.gemini.baseUrl, model: this.gemini.model, + outputDimensionality: this.gemini.outputDimensionality, headers: entries, }), ); @@ -474,6 +496,9 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { source: MemorySource, ): Promise { const gemini = this.gemini; + if (chunks.some((chunk) => hasNonTextEmbeddingParts(chunk.embeddingInput))) { + return await this.embedChunksInBatches(chunks); + } return await this.embedChunksWithProviderBatch({ chunks, entry, @@ -481,8 +506,12 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { provider: "gemini", enabled: Boolean(gemini), buildRequest: (chunk) => ({ - content: { parts: [{ text: chunk.text }] }, - taskType: "RETRIEVAL_DOCUMENT", + request: buildGeminiEmbeddingRequest({ + input: chunk.embeddingInput ?? { text: chunk.text }, + taskType: "RETRIEVAL_DOCUMENT", + modelPath: this.gemini?.modelPath, + outputDimensionality: this.gemini?.outputDimensionality, + }), }), runBatch: async (runnerOptions) => await runGeminiEmbeddingBatches({ @@ -531,6 +560,45 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { } } + protected async embedBatchInputsWithRetry(inputs: EmbeddingInput[]): Promise { + if (inputs.length === 0) { + return []; + } + if (!this.provider?.embedBatchInputs) { + return await this.embedBatchWithRetry(inputs.map((input) => input.text)); + } + let attempt = 0; + let delayMs = EMBEDDING_RETRY_BASE_DELAY_MS; + while (true) { + try { + const timeoutMs = this.resolveEmbeddingTimeout("batch"); + log.debug("memory embeddings: structured batch start", { + provider: this.provider.id, + items: inputs.length, + timeoutMs, + }); + return await this.withTimeout( + this.provider.embedBatchInputs(inputs), + timeoutMs, + `memory embeddings batch timed out after ${Math.round(timeoutMs / 1000)}s`, + ); + } catch 
(err) { + const message = err instanceof Error ? err.message : String(err); + if (!this.isRetryableEmbeddingError(message) || attempt >= EMBEDDING_RETRY_MAX_ATTEMPTS) { + throw err; + } + const waitMs = Math.min( + EMBEDDING_RETRY_MAX_DELAY_MS, + Math.round(delayMs * (1 + Math.random() * 0.2)), + ); + log.warn(`memory embeddings rate limited; retrying structured batch in ${waitMs}ms`); + await new Promise((resolve) => setTimeout(resolve, waitMs)); + delayMs *= 2; + attempt += 1; + } + } + } + private isRetryableEmbeddingError(message: string): boolean { return /(rate[_ ]limit|too many requests|429|resource has been exhausted|5\d\d|cloudflare|tokens per day)/i.test( message, @@ -690,6 +758,49 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { return this.batch.enabled ? this.batch.concurrency : EMBEDDING_INDEX_CONCURRENCY; } + private clearIndexedFileData(pathname: string, source: MemorySource): void { + if (this.vector.enabled) { + try { + this.db + .prepare( + `DELETE FROM ${VECTOR_TABLE} WHERE id IN (SELECT id FROM chunks WHERE path = ? AND source = ?)`, + ) + .run(pathname, source); + } catch {} + } + if (this.fts.enabled && this.fts.available && this.provider) { + try { + this.db + .prepare(`DELETE FROM ${FTS_TABLE} WHERE path = ? AND source = ? AND model = ?`) + .run(pathname, source, this.provider.model); + } catch {} + } + this.db.prepare(`DELETE FROM chunks WHERE path = ? AND source = ?`).run(pathname, source); + } + + private upsertFileRecord(entry: MemoryFileEntry | SessionFileEntry, source: MemorySource): void { + this.db + .prepare( + `INSERT INTO files (path, source, hash, mtime, size) VALUES (?, ?, ?, ?, ?) 
+ ON CONFLICT(path) DO UPDATE SET + source=excluded.source, + hash=excluded.hash, + mtime=excluded.mtime, + size=excluded.size`, + ) + .run(entry.path, source, entry.hash, entry.mtimeMs, entry.size); + } + + private deleteFileRecord(pathname: string, source: MemorySource): void { + this.db.prepare(`DELETE FROM files WHERE path = ? AND source = ?`).run(pathname, source); + } + + private isStructuredInputTooLargeError(message: string): boolean { + return /(413|payload too large|request too large|input too large|too many tokens|input limit|request size)/i.test( + message, + ); + } + protected async indexFile( entry: MemoryFileEntry | SessionFileEntry, options: { source: MemorySource; content?: string }, @@ -703,42 +814,59 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { return; } - const content = options.content ?? (await fs.readFile(entry.absPath, "utf-8")); - const chunks = enforceEmbeddingMaxInputTokens( - this.provider, - chunkMarkdown(content, this.settings.chunking).filter( - (chunk) => chunk.text.trim().length > 0, - ), - EMBEDDING_BATCH_MAX_TOKENS, - ); - if (options.source === "sessions" && "lineMap" in entry) { - remapChunkLines(chunks, entry.lineMap); + let chunks: MemoryChunk[]; + let structuredInputBytes: number | undefined; + if ("kind" in entry && entry.kind === "multimodal") { + const multimodalChunk = await buildMultimodalChunkForIndexing(entry); + if (!multimodalChunk) { + this.clearIndexedFileData(entry.path, options.source); + this.deleteFileRecord(entry.path, options.source); + return; + } + structuredInputBytes = multimodalChunk.structuredInputBytes; + chunks = [multimodalChunk.chunk]; + } else { + const content = options.content ?? 
(await fs.readFile(entry.absPath, "utf-8")); + chunks = enforceEmbeddingMaxInputTokens( + this.provider, + chunkMarkdown(content, this.settings.chunking).filter( + (chunk) => chunk.text.trim().length > 0, + ), + EMBEDDING_BATCH_MAX_TOKENS, + ); + if (options.source === "sessions" && "lineMap" in entry) { + remapChunkLines(chunks, entry.lineMap); + } + } + let embeddings: number[][]; + try { + embeddings = this.batch.enabled + ? await this.embedChunksWithBatch(chunks, entry, options.source) + : await this.embedChunksInBatches(chunks); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + if ( + "kind" in entry && + entry.kind === "multimodal" && + this.isStructuredInputTooLargeError(message) + ) { + log.warn("memory embeddings: skipping multimodal file rejected as too large", { + path: entry.path, + bytes: structuredInputBytes, + provider: this.provider.id, + model: this.provider.model, + error: message, + }); + this.clearIndexedFileData(entry.path, options.source); + this.upsertFileRecord(entry, options.source); + return; + } + throw err; } - const embeddings = this.batch.enabled - ? await this.embedChunksWithBatch(chunks, entry, options.source) - : await this.embedChunksInBatches(chunks); const sample = embeddings.find((embedding) => embedding.length > 0); const vectorReady = sample ? await this.ensureVectorReady(sample.length) : false; const now = Date.now(); - if (vectorReady) { - try { - this.db - .prepare( - `DELETE FROM ${VECTOR_TABLE} WHERE id IN (SELECT id FROM chunks WHERE path = ? AND source = ?)`, - ) - .run(entry.path, options.source); - } catch {} - } - if (this.fts.enabled && this.fts.available) { - try { - this.db - .prepare(`DELETE FROM ${FTS_TABLE} WHERE path = ? AND source = ? AND model = ?`) - .run(entry.path, options.source, this.provider.model); - } catch {} - } - this.db - .prepare(`DELETE FROM chunks WHERE path = ? 
AND source = ?`) - .run(entry.path, options.source); + this.clearIndexedFileData(entry.path, options.source); for (let i = 0; i < chunks.length; i++) { const chunk = chunks[i]; const embedding = embeddings[i] ?? []; @@ -793,15 +921,6 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { ); } } - this.db - .prepare( - `INSERT INTO files (path, source, hash, mtime, size) VALUES (?, ?, ?, ?, ?) - ON CONFLICT(path) DO UPDATE SET - source=excluded.source, - hash=excluded.hash, - mtime=excluded.mtime, - size=excluded.size`, - ) - .run(entry.path, options.source, entry.hash, entry.mtimeMs, entry.size); + this.upsertFileRecord(entry, options.source); } } diff --git a/src/memory/manager-sync-ops.ts b/src/memory/manager-sync-ops.ts index 1fe91599b34..6fd3e6bb9c0 100644 --- a/src/memory/manager-sync-ops.ts +++ b/src/memory/manager-sync-ops.ts @@ -29,12 +29,18 @@ import { isFileMissingError } from "./fs-utils.js"; import { buildFileEntry, ensureDir, + hashText, listMemoryFiles, normalizeExtraMemoryPaths, runWithConcurrency, } from "./internal.js"; import { type MemoryFileEntry } from "./internal.js"; import { ensureMemoryIndexSchema } from "./memory-schema.js"; +import { + buildCaseInsensitiveExtensionGlob, + classifyMemoryMultimodalPath, + getMemoryMultimodalExtensions, +} from "./multimodal.js"; import type { SessionFileEntry } from "./session-files.js"; import { buildSessionEntry, @@ -50,6 +56,7 @@ type MemoryIndexMeta = { provider: string; providerKey?: string; sources?: MemorySource[]; + scopeHash?: string; chunkTokens: number; chunkOverlap: number; vectorDims?: number; @@ -383,9 +390,22 @@ export abstract class MemoryManagerSyncOps { } if (stat.isDirectory()) { watchPaths.add(path.join(entry, "**", "*.md")); + if (this.settings.multimodal.enabled) { + for (const modality of this.settings.multimodal.modalities) { + for (const extension of getMemoryMultimodalExtensions(modality)) { + watchPaths.add( + path.join(entry, "**", 
buildCaseInsensitiveExtensionGlob(extension)), + ); + } + } + } continue; } - if (stat.isFile() && entry.toLowerCase().endsWith(".md")) { + if ( + stat.isFile() && + (entry.toLowerCase().endsWith(".md") || + classifyMemoryMultimodalPath(entry, this.settings.multimodal) !== null) + ) { watchPaths.add(entry); } } catch { @@ -649,9 +669,19 @@ export abstract class MemoryManagerSyncOps { return; } - const files = await listMemoryFiles(this.workspaceDir, this.settings.extraPaths); + const files = await listMemoryFiles( + this.workspaceDir, + this.settings.extraPaths, + this.settings.multimodal, + ); const fileEntries = ( - await Promise.all(files.map(async (file) => buildFileEntry(file, this.workspaceDir))) + await runWithConcurrency( + files.map( + (file) => async () => + await buildFileEntry(file, this.workspaceDir, this.settings.multimodal), + ), + this.getIndexConcurrency(), + ) ).filter((entry): entry is MemoryFileEntry => entry !== null); log.debug("memory sync: indexing memory files", { files: fileEntries.length, @@ -868,6 +898,7 @@ export abstract class MemoryManagerSyncOps { const vectorReady = await this.ensureVectorReady(); const meta = this.readMeta(); const configuredSources = this.resolveConfiguredSourcesForMeta(); + const configuredScopeHash = this.resolveConfiguredScopeHash(); const needsFullReindex = params?.force || !meta || @@ -875,6 +906,7 @@ export abstract class MemoryManagerSyncOps { (this.provider && meta.provider !== this.provider.id) || meta.providerKey !== this.providerKey || this.metaSourcesDiffer(meta, configuredSources) || + meta.scopeHash !== configuredScopeHash || meta.chunkTokens !== this.settings.chunking.tokens || meta.chunkOverlap !== this.settings.chunking.overlap || (vectorReady && !meta?.vectorDims); @@ -996,6 +1028,7 @@ export abstract class MemoryManagerSyncOps { provider: fallback, remote: this.settings.remote, model: fallbackModel, + outputDimensionality: this.settings.outputDimensionality, fallback: "none", local: 
this.settings.local, }); @@ -1087,6 +1120,7 @@ export abstract class MemoryManagerSyncOps { provider: this.provider?.id ?? "none", providerKey: this.providerKey!, sources: this.resolveConfiguredSourcesForMeta(), + scopeHash: this.resolveConfiguredScopeHash(), chunkTokens: this.settings.chunking.tokens, chunkOverlap: this.settings.chunking.overlap, }; @@ -1158,6 +1192,7 @@ export abstract class MemoryManagerSyncOps { provider: this.provider?.id ?? "none", providerKey: this.providerKey!, sources: this.resolveConfiguredSourcesForMeta(), + scopeHash: this.resolveConfiguredScopeHash(), chunkTokens: this.settings.chunking.tokens, chunkOverlap: this.settings.chunking.overlap, }; @@ -1235,6 +1270,22 @@ export abstract class MemoryManagerSyncOps { return normalized.length > 0 ? normalized : ["memory"]; } + private resolveConfiguredScopeHash(): string { + const extraPaths = normalizeExtraMemoryPaths(this.workspaceDir, this.settings.extraPaths) + .map((value) => value.replace(/\\/g, "/")) + .toSorted(); + return hashText( + JSON.stringify({ + extraPaths, + multimodal: { + enabled: this.settings.multimodal.enabled, + modalities: [...this.settings.multimodal.modalities].toSorted(), + maxFileBytes: this.settings.multimodal.maxFileBytes, + }, + }), + ); + } + private metaSourcesDiffer(meta: MemoryIndexMeta, configuredSources: MemorySource[]): boolean { const metaSources = this.normalizeMetaSources(meta); if (metaSources.length !== configuredSources.length) { diff --git a/src/memory/manager.ts b/src/memory/manager.ts index 9b1ff74e54c..e79f83c570a 100644 --- a/src/memory/manager.ts +++ b/src/memory/manager.ts @@ -157,6 +157,7 @@ export class MemoryIndexManager extends MemoryManagerEmbeddingOps implements Mem provider: settings.provider, remote: settings.remote, model: settings.model, + outputDimensionality: settings.outputDimensionality, fallback: settings.fallback, local: settings.local, }); diff --git a/src/memory/manager.watcher-config.test.ts 
b/src/memory/manager.watcher-config.test.ts index 77221df34b6..43682183676 100644 --- a/src/memory/manager.watcher-config.test.ts +++ b/src/memory/manager.watcher-config.test.ts @@ -106,4 +106,50 @@ describe("memory watcher config", () => { expect(ignored?.(path.join(workspaceDir, "memory", ".venv", "lib", "python.md"))).toBe(true); expect(ignored?.(path.join(workspaceDir, "memory", "project", "notes.md"))).toBe(false); }); + + it("watches multimodal extensions with case-insensitive globs", async () => { + workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-watch-")); + extraDir = path.join(workspaceDir, "extra"); + await fs.mkdir(path.join(workspaceDir, "memory"), { recursive: true }); + await fs.mkdir(extraDir, { recursive: true }); + await fs.writeFile(path.join(extraDir, "PHOTO.PNG"), "png"); + + const cfg = { + agents: { + defaults: { + workspace: workspaceDir, + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + store: { path: path.join(workspaceDir, "index.sqlite"), vector: { enabled: false } }, + sync: { watch: true, watchDebounceMs: 25, onSessionStart: false, onSearch: false }, + query: { minScore: 0, hybrid: { enabled: false } }, + extraPaths: [extraDir], + multimodal: { enabled: true, modalities: ["image", "audio"] }, + }, + }, + list: [{ id: "main", default: true }], + }, + } as OpenClawConfig; + + const result = await getMemorySearchManager({ cfg, agentId: "main" }); + expect(result.manager).not.toBeNull(); + if (!result.manager) { + throw new Error("manager missing"); + } + manager = result.manager as unknown as MemoryIndexManager; + + expect(watchMock).toHaveBeenCalledTimes(1); + const [watchedPaths] = watchMock.mock.calls[0] as unknown as [ + string[], + Record, + ]; + expect(watchedPaths).toEqual( + expect.arrayContaining([ + path.join(extraDir, "**", "*.[pP][nN][gG]"), + path.join(extraDir, "**", "*.[wW][aA][vV]"), + ]), + ); + }); }); diff --git a/src/memory/multimodal.ts 
b/src/memory/multimodal.ts new file mode 100644 index 00000000000..df72ed8c495 --- /dev/null +++ b/src/memory/multimodal.ts @@ -0,0 +1,118 @@ +const MEMORY_MULTIMODAL_SPECS = { + image: { + labelPrefix: "Image file", + extensions: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".heic", ".heif"], + }, + audio: { + labelPrefix: "Audio file", + extensions: [".mp3", ".wav", ".ogg", ".opus", ".m4a", ".aac", ".flac"], + }, +} as const; + +export type MemoryMultimodalModality = keyof typeof MEMORY_MULTIMODAL_SPECS; +export const MEMORY_MULTIMODAL_MODALITIES = Object.keys( + MEMORY_MULTIMODAL_SPECS, +) as MemoryMultimodalModality[]; +export type MemoryMultimodalSelection = MemoryMultimodalModality | "all"; + +export type MemoryMultimodalSettings = { + enabled: boolean; + modalities: MemoryMultimodalModality[]; + maxFileBytes: number; +}; + +export const DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES = 10 * 1024 * 1024; + +export function normalizeMemoryMultimodalModalities( + raw: MemoryMultimodalSelection[] | undefined, +): MemoryMultimodalModality[] { + if (raw === undefined || raw.includes("all")) { + return [...MEMORY_MULTIMODAL_MODALITIES]; + } + const normalized = new Set(); + for (const value of raw) { + if (value === "image" || value === "audio") { + normalized.add(value); + } + } + return Array.from(normalized); +} + +export function normalizeMemoryMultimodalSettings(raw: { + enabled?: boolean; + modalities?: MemoryMultimodalSelection[]; + maxFileBytes?: number; +}): MemoryMultimodalSettings { + const enabled = raw.enabled === true; + const maxFileBytes = + typeof raw.maxFileBytes === "number" && Number.isFinite(raw.maxFileBytes) + ? Math.max(1, Math.floor(raw.maxFileBytes)) + : DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES; + return { + enabled, + modalities: enabled ? 
normalizeMemoryMultimodalModalities(raw.modalities) : [], + maxFileBytes, + }; +} + +export function isMemoryMultimodalEnabled(settings: MemoryMultimodalSettings): boolean { + return settings.enabled && settings.modalities.length > 0; +} + +export function getMemoryMultimodalExtensions( + modality: MemoryMultimodalModality, +): readonly string[] { + return MEMORY_MULTIMODAL_SPECS[modality].extensions; +} + +export function buildMemoryMultimodalLabel( + modality: MemoryMultimodalModality, + normalizedPath: string, +): string { + return `${MEMORY_MULTIMODAL_SPECS[modality].labelPrefix}: ${normalizedPath}`; +} + +export function buildCaseInsensitiveExtensionGlob(extension: string): string { + const normalized = extension.trim().replace(/^\./, "").toLowerCase(); + if (!normalized) { + return "*"; + } + const parts = Array.from(normalized, (char) => `[${char.toLowerCase()}${char.toUpperCase()}]`); + return `*.${parts.join("")}`; +} + +export function classifyMemoryMultimodalPath( + filePath: string, + settings: MemoryMultimodalSettings, +): MemoryMultimodalModality | null { + if (!isMemoryMultimodalEnabled(settings)) { + return null; + } + const lower = filePath.trim().toLowerCase(); + for (const modality of settings.modalities) { + for (const extension of getMemoryMultimodalExtensions(modality)) { + if (lower.endsWith(extension)) { + return modality; + } + } + } + return null; +} + +export function normalizeGeminiEmbeddingModelForMemory(model: string): string { + const trimmed = model.trim(); + if (!trimmed) { + return ""; + } + return trimmed.replace(/^models\//, "").replace(/^(gemini|google)\//, ""); +} + +export function supportsMemoryMultimodalEmbeddings(params: { + provider: string; + model: string; +}): boolean { + if (params.provider !== "gemini") { + return false; + } + return normalizeGeminiEmbeddingModelForMemory(params.model) === "gemini-embedding-2-preview"; +} diff --git a/src/node-host/invoke-system-run-plan.test.ts 
b/src/node-host/invoke-system-run-plan.test.ts index c192509197e..3e1736000aa 100644 --- a/src/node-host/invoke-system-run-plan.test.ts +++ b/src/node-host/invoke-system-run-plan.test.ts @@ -246,6 +246,38 @@ describe("hardenApprovedExecutionPaths", () => { initialBody: 'console.log("SAFE");\n', expectedArgvIndex: 1, }, + { + name: "tsx direct file", + binName: "tsx", + argv: ["tsx", "./run.ts"], + scriptName: "run.ts", + initialBody: 'console.log("SAFE");\n', + expectedArgvIndex: 1, + }, + { + name: "jiti direct file", + binName: "jiti", + argv: ["jiti", "./run.ts"], + scriptName: "run.ts", + initialBody: 'console.log("SAFE");\n', + expectedArgvIndex: 1, + }, + { + name: "ts-node direct file", + binName: "ts-node", + argv: ["ts-node", "./run.ts"], + scriptName: "run.ts", + initialBody: 'console.log("SAFE");\n', + expectedArgvIndex: 1, + }, + { + name: "vite-node direct file", + binName: "vite-node", + argv: ["vite-node", "./run.ts"], + scriptName: "run.ts", + initialBody: 'console.log("SAFE");\n', + expectedArgvIndex: 1, + }, { name: "bun direct file", binName: "bun", @@ -387,4 +419,26 @@ describe("hardenApprovedExecutionPaths", () => { }, }); }); + + it("rejects tsx eval invocations that do not bind a concrete file", () => { + withFakeRuntimeBin({ + binName: "tsx", + run: () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-tsx-eval-")); + try { + const prepared = buildSystemRunApprovalPlan({ + command: ["tsx", "--eval", "console.log('SAFE')"], + cwd: tmp, + }); + expect(prepared).toEqual({ + ok: false, + message: + "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", + }); + } finally { + fs.rmSync(tmp, { recursive: true, force: true }); + } + }, + }); + }); }); diff --git a/src/node-host/invoke-system-run-plan.ts b/src/node-host/invoke-system-run-plan.ts index 606d50e7653..1b46312c3a1 100644 --- a/src/node-host/invoke-system-run-plan.ts +++ b/src/node-host/invoke-system-run-plan.ts @@ -33,6 +33,15 @@ const 
MUTABLE_ARGV1_INTERPRETER_PATTERNS = [ /^ruby$/, ] as const; +const GENERIC_MUTABLE_SCRIPT_RUNNERS = new Set([ + "esno", + "jiti", + "ts-node", + "ts-node-esm", + "tsx", + "vite-node", +]); + const BUN_SUBCOMMANDS = new Set([ "add", "audit", @@ -409,6 +418,10 @@ function resolveDenoRunScriptOperandIndex(params: { }); } +function isMutableScriptRunner(executable: string): boolean { + return GENERIC_MUTABLE_SCRIPT_RUNNERS.has(executable) || isInterpreterLikeSafeBin(executable); +} + function resolveMutableFileOperandIndex(argv: string[], cwd: string | undefined): number | null { const unwrapped = unwrapArgvForMutableOperand(argv); const executable = normalizeExecutableToken(unwrapped.argv[0] ?? ""); @@ -443,7 +456,7 @@ function resolveMutableFileOperandIndex(argv: string[], cwd: string | undefined) return unwrapped.baseIndex + denoIndex; } } - if (!isInterpreterLikeSafeBin(executable)) { + if (!isMutableScriptRunner(executable)) { return null; } const genericIndex = resolveGenericInterpreterScriptOperandIndex({ @@ -468,10 +481,10 @@ function requiresStableInterpreterApprovalBindingWithShellCommand(params: { if ((POSIX_SHELL_WRAPPERS as ReadonlySet).has(executable)) { return false; } - return isInterpreterLikeSafeBin(executable); + return isMutableScriptRunner(executable); } -function resolveMutableFileOperandSnapshotSync(params: { +export function resolveMutableFileOperandSnapshotSync(params: { argv: string[]; cwd: string | undefined; shellCommand: string | null; diff --git a/src/node-host/invoke-system-run.test.ts b/src/node-host/invoke-system-run.test.ts index c4e5bc345f6..d183f9087c3 100644 --- a/src/node-host/invoke-system-run.test.ts +++ b/src/node-host/invoke-system-run.test.ts @@ -109,27 +109,50 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { }; } - function createRuntimeScriptOperandFixture(params: { tmp: string; runtime: "bun" | "deno" }): { + function createRuntimeScriptOperandFixture(params: { + tmp: string; + runtime: "bun" | "deno" 
| "jiti" | "tsx"; + }): { command: string[]; scriptPath: string; initialBody: string; changedBody: string; } { const scriptPath = path.join(params.tmp, "run.ts"); - if (params.runtime === "bun") { - return { - command: ["bun", "run", "./run.ts"], - scriptPath, - initialBody: 'console.log("SAFE");\n', - changedBody: 'console.log("PWNED");\n', - }; + const initialBody = 'console.log("SAFE");\n'; + const changedBody = 'console.log("PWNED");\n'; + switch (params.runtime) { + case "bun": + return { + command: ["bun", "run", "./run.ts"], + scriptPath, + initialBody, + changedBody, + }; + case "deno": + return { + command: ["deno", "run", "-A", "--allow-read", "--", "./run.ts"], + scriptPath, + initialBody, + changedBody, + }; + case "jiti": + return { + command: ["jiti", "./run.ts"], + scriptPath, + initialBody, + changedBody, + }; + case "tsx": + return { + command: ["tsx", "./run.ts"], + scriptPath, + initialBody, + changedBody, + }; } - return { - command: ["deno", "run", "-A", "--allow-read", "--", "./run.ts"], - scriptPath, - initialBody: 'console.log("SAFE");\n', - changedBody: 'console.log("PWNED");\n', - }; + const unsupportedRuntime: never = params.runtime; + throw new Error(`unsupported runtime fixture: ${String(unsupportedRuntime)}`); } function buildNestedEnvShellCommand(params: { depth: number; payload: string }): string[] { @@ -223,7 +246,7 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { } async function withFakeRuntimeOnPath(params: { - runtime: "bun" | "deno"; + runtime: "bun" | "deno" | "jiti" | "tsx"; run: () => Promise; }): Promise { const tmp = fs.mkdtempSync(path.join(os.tmpdir(), `openclaw-${params.runtime}-path-`)); @@ -842,7 +865,7 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { } }); - for (const runtime of ["bun", "deno"] as const) { + for (const runtime of ["bun", "deno", "tsx", "jiti"] as const) { it(`denies approval-based execution when a ${runtime} script operand changes after approval`, async () 
=> { await withFakeRuntimeOnPath({ runtime, @@ -926,6 +949,50 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { }); } + it("denies approval-based execution when tsx is missing a required mutable script binding", async () => { + await withFakeRuntimeOnPath({ + runtime: "tsx", + run: async () => { + const tmp = fs.mkdtempSync( + path.join(os.tmpdir(), "openclaw-approval-tsx-missing-binding-"), + ); + const fixture = createRuntimeScriptOperandFixture({ tmp, runtime: "tsx" }); + fs.writeFileSync(fixture.scriptPath, fixture.initialBody); + try { + const prepared = buildSystemRunApprovalPlan({ + command: fixture.command, + cwd: tmp, + }); + expect(prepared.ok).toBe(true); + if (!prepared.ok) { + throw new Error("unreachable"); + } + + const planWithoutBinding = { ...prepared.plan }; + delete planWithoutBinding.mutableFileOperand; + const { runCommand, sendInvokeResult } = await runSystemInvoke({ + preferMacAppExecHost: false, + command: prepared.plan.argv, + rawCommand: prepared.plan.commandText, + systemRunPlan: planWithoutBinding, + cwd: prepared.plan.cwd ?? 
tmp, + approved: true, + security: "full", + ask: "off", + }); + + expect(runCommand).not.toHaveBeenCalled(); + expectInvokeErrorMessage(sendInvokeResult, { + message: "SYSTEM_RUN_DENIED: approval missing script operand binding", + exact: true, + }); + } finally { + fs.rmSync(tmp, { recursive: true, force: true }); + } + }, + }); + }); + it("denies ./sh wrapper spoof in allowlist on-miss mode before execution", async () => { const marker = path.join(os.tmpdir(), `openclaw-wrapper-spoof-${process.pid}-${Date.now()}`); const runCommand = vi.fn(async () => { diff --git a/src/node-host/invoke-system-run.ts b/src/node-host/invoke-system-run.ts index 3ed2a30d188..3730e3b2824 100644 --- a/src/node-host/invoke-system-run.ts +++ b/src/node-host/invoke-system-run.ts @@ -29,6 +29,7 @@ import { hardenApprovedExecutionPaths, revalidateApprovedCwdSnapshot, revalidateApprovedMutableFileOperand, + resolveMutableFileOperandSnapshotSync, type ApprovedCwdSnapshot, } from "./invoke-system-run-plan.js"; import type { @@ -98,6 +99,8 @@ type SystemRunPolicyPhase = SystemRunParsePhase & { const safeBinTrustedDirWarningCache = new Set(); const APPROVAL_CWD_DRIFT_DENIED_MESSAGE = "SYSTEM_RUN_DENIED: approval cwd changed before execution"; +const APPROVAL_SCRIPT_OPERAND_BINDING_DENIED_MESSAGE = + "SYSTEM_RUN_DENIED: approval missing script operand binding"; const APPROVAL_SCRIPT_OPERAND_DRIFT_DENIED_MESSAGE = "SYSTEM_RUN_DENIED: approval script operand changed before execution"; @@ -385,6 +388,29 @@ async function executeSystemRunPhase( }); return; } + const expectedMutableFileOperand = phase.approvalPlan + ? 
resolveMutableFileOperandSnapshotSync({ + argv: phase.argv, + cwd: phase.cwd, + shellCommand: phase.shellPayload, + }) + : null; + if (expectedMutableFileOperand && !expectedMutableFileOperand.ok) { + logWarn(`security: system.run approval script binding blocked (runId=${phase.runId})`); + await sendSystemRunDenied(opts, phase.execution, { + reason: "approval-required", + message: expectedMutableFileOperand.message, + }); + return; + } + if (expectedMutableFileOperand?.snapshot && !phase.approvalPlan?.mutableFileOperand) { + logWarn(`security: system.run approval script binding missing (runId=${phase.runId})`); + await sendSystemRunDenied(opts, phase.execution, { + reason: "approval-required", + message: APPROVAL_SCRIPT_OPERAND_BINDING_DENIED_MESSAGE, + }); + return; + } if ( phase.approvalPlan?.mutableFileOperand && !revalidateApprovedMutableFileOperand({ diff --git a/src/plugin-sdk/llm-task.ts b/src/plugin-sdk/llm-task.ts index 164a28f0440..c69e82f36f7 100644 --- a/src/plugin-sdk/llm-task.ts +++ b/src/plugin-sdk/llm-task.ts @@ -2,4 +2,10 @@ // Keep this list additive and scoped to symbols used under extensions/llm-task. 
export { resolvePreferredOpenClawTmpDir } from "../infra/tmp-openclaw-dir.js"; +export { + formatThinkingLevels, + formatXHighModelHint, + normalizeThinkLevel, + supportsXHighThinking, +} from "../auto-reply/thinking.js"; export type { AnyAgentTool, OpenClawPluginApi } from "../plugins/types.js"; diff --git a/src/plugin-sdk/voice-call.ts b/src/plugin-sdk/voice-call.ts index da8a1f12613..c50b979a145 100644 --- a/src/plugin-sdk/voice-call.ts +++ b/src/plugin-sdk/voice-call.ts @@ -7,6 +7,7 @@ export { TtsModeSchema, TtsProviderSchema, } from "../config/zod-schema.core.js"; +export { resolveOpenAITtsInstructions } from "../tts/tts-core.js"; export type { GatewayRequestHandlerOptions } from "../gateway/server-methods/types.js"; export { isRequestBodyLimitError, diff --git a/src/plugins/bundled-dir.ts b/src/plugins/bundled-dir.ts index 4837ae59dc9..09f28bcdc19 100644 --- a/src/plugins/bundled-dir.ts +++ b/src/plugins/bundled-dir.ts @@ -2,8 +2,8 @@ import fs from "node:fs"; import path from "node:path"; import { fileURLToPath } from "node:url"; -export function resolveBundledPluginsDir(): string | undefined { - const override = process.env.OPENCLAW_BUNDLED_PLUGINS_DIR?.trim(); +export function resolveBundledPluginsDir(env: NodeJS.ProcessEnv = process.env): string | undefined { + const override = env.OPENCLAW_BUNDLED_PLUGINS_DIR?.trim(); if (override) { return override; } diff --git a/src/plugins/discovery.test.ts b/src/plugins/discovery.test.ts index aa33803c2ab..00430037b86 100644 --- a/src/plugins/discovery.test.ts +++ b/src/plugins/discovery.test.ts @@ -3,7 +3,6 @@ import fs from "node:fs"; import os from "node:os"; import path from "node:path"; import { afterEach, describe, expect, it } from "vitest"; -import { withEnvAsync } from "../test-utils/env.js"; import { clearPluginDiscoveryCache, discoverOpenClawPlugins } from "./discovery.js"; const tempDirs: string[] = []; @@ -15,24 +14,20 @@ function makeTempDir() { return dir; } -async function withStateDir(stateDir: 
string, fn: () => Promise) { - return await withEnvAsync( - { - OPENCLAW_STATE_DIR: stateDir, - CLAWDBOT_STATE_DIR: undefined, - OPENCLAW_BUNDLED_PLUGINS_DIR: "/nonexistent/bundled/plugins", - }, - fn, - ); +function buildDiscoveryEnv(stateDir: string): NodeJS.ProcessEnv { + return { + ...process.env, + OPENCLAW_STATE_DIR: stateDir, + CLAWDBOT_STATE_DIR: undefined, + OPENCLAW_BUNDLED_PLUGINS_DIR: "/nonexistent/bundled/plugins", + }; } async function discoverWithStateDir( stateDir: string, params: Parameters[0], ) { - return await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins(params); - }); + return discoverOpenClawPlugins({ ...params, env: buildDiscoveryEnv(stateDir) }); } function writePluginPackageManifest(params: { @@ -80,9 +75,7 @@ describe("discoverOpenClawPlugins", () => { fs.mkdirSync(workspaceExt, { recursive: true }); fs.writeFileSync(path.join(workspaceExt, "beta.ts"), "export default function () {}", "utf-8"); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({ workspaceDir }); - }); + const { candidates } = await discoverWithStateDir(stateDir, { workspaceDir }); const ids = candidates.map((c) => c.idHint); expect(ids).toContain("alpha"); @@ -110,9 +103,7 @@ describe("discoverOpenClawPlugins", () => { fs.mkdirSync(liveDir, { recursive: true }); fs.writeFileSync(path.join(liveDir, "index.ts"), "export default function () {}", "utf-8"); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates } = await discoverWithStateDir(stateDir, {}); const ids = candidates.map((candidate) => candidate.idHint); expect(ids).toContain("live"); @@ -142,9 +133,7 @@ describe("discoverOpenClawPlugins", () => { "utf-8", ); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates } = await discoverWithStateDir(stateDir, {}); const ids = candidates.map((c) 
=> c.idHint); expect(ids).toContain("pack/one"); @@ -167,9 +156,7 @@ describe("discoverOpenClawPlugins", () => { "utf-8", ); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates } = await discoverWithStateDir(stateDir, {}); const ids = candidates.map((c) => c.idHint); expect(ids).toContain("voice-call"); @@ -187,9 +174,7 @@ describe("discoverOpenClawPlugins", () => { }); fs.writeFileSync(path.join(packDir, "index.js"), "module.exports = {}", "utf-8"); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({ extraPaths: [packDir] }); - }); + const { candidates } = await discoverWithStateDir(stateDir, { extraPaths: [packDir] }); const ids = candidates.map((c) => c.idHint); expect(ids).toContain("demo-plugin-dir"); @@ -266,9 +251,7 @@ describe("discoverOpenClawPlugins", () => { extensions: ["./escape.ts"], }); - const { candidates, diagnostics } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates, diagnostics } = await discoverWithStateDir(stateDir, {}); expect(candidates.some((candidate) => candidate.idHint === "pack")).toBe(false); expectEscapesPackageDiagnostic(diagnostics); @@ -303,9 +286,7 @@ describe("discoverOpenClawPlugins", () => { throw err; } - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates } = await discoverWithStateDir(stateDir, {}); expect(candidates.some((candidate) => candidate.idHint === "pack")).toBe(false); }); @@ -318,9 +299,7 @@ describe("discoverOpenClawPlugins", () => { fs.writeFileSync(pluginPath, "export default function () {}", "utf-8"); fs.chmodSync(pluginPath, 0o777); - const result = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const result = await discoverWithStateDir(stateDir, {}); expect(result.candidates).toHaveLength(0); 
expect(result.diagnostics.some((diag) => diag.message.includes("world-writable path"))).toBe( @@ -328,6 +307,35 @@ describe("discoverOpenClawPlugins", () => { ); }); + it.runIf(process.platform !== "win32")( + "repairs world-writable bundled plugin dirs before loading them", + async () => { + const stateDir = makeTempDir(); + const bundledDir = path.join(stateDir, "bundled"); + const packDir = path.join(bundledDir, "demo-pack"); + fs.mkdirSync(packDir, { recursive: true }); + fs.writeFileSync(path.join(packDir, "index.ts"), "export default function () {}", "utf-8"); + fs.chmodSync(packDir, 0o777); + + const result = discoverOpenClawPlugins({ + env: { + ...process.env, + OPENCLAW_STATE_DIR: stateDir, + CLAWDBOT_STATE_DIR: undefined, + OPENCLAW_BUNDLED_PLUGINS_DIR: bundledDir, + }, + }); + + expect(result.candidates.some((candidate) => candidate.idHint === "demo-pack")).toBe(true); + expect( + result.diagnostics.some( + (diag) => diag.source === packDir && diag.message.includes("world-writable path"), + ), + ).toBe(false); + expect(fs.statSync(packDir).mode & 0o777).toBe(0o755); + }, + ); + it.runIf(process.platform !== "win32" && typeof process.getuid === "function")( "blocks suspicious ownership when uid mismatch is detected", async () => { @@ -341,9 +349,7 @@ describe("discoverOpenClawPlugins", () => { ); const actualUid = (process as NodeJS.Process & { getuid: () => number }).getuid(); - const result = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({ ownershipUid: actualUid + 1 }); - }); + const result = await discoverWithStateDir(stateDir, { ownershipUid: actualUid + 1 }); const shouldBlockForMismatch = actualUid !== 0; expect(result.candidates).toHaveLength(shouldBlockForMismatch ? 
0 : 1); expect(result.diagnostics.some((diag) => diag.message.includes("suspicious ownership"))).toBe( @@ -359,32 +365,32 @@ describe("discoverOpenClawPlugins", () => { const pluginPath = path.join(globalExt, "cached.ts"); fs.writeFileSync(pluginPath, "export default function () {}", "utf-8"); - const first = await withEnvAsync( - { + const first = discoverOpenClawPlugins({ + env: { + ...buildDiscoveryEnv(stateDir), OPENCLAW_PLUGIN_DISCOVERY_CACHE_MS: "5000", }, - async () => withStateDir(stateDir, async () => discoverOpenClawPlugins({})), - ); + }); expect(first.candidates.some((candidate) => candidate.idHint === "cached")).toBe(true); fs.rmSync(pluginPath, { force: true }); - const second = await withEnvAsync( - { + const second = discoverOpenClawPlugins({ + env: { + ...buildDiscoveryEnv(stateDir), OPENCLAW_PLUGIN_DISCOVERY_CACHE_MS: "5000", }, - async () => withStateDir(stateDir, async () => discoverOpenClawPlugins({})), - ); + }); expect(second.candidates.some((candidate) => candidate.idHint === "cached")).toBe(true); clearPluginDiscoveryCache(); - const third = await withEnvAsync( - { + const third = discoverOpenClawPlugins({ + env: { + ...buildDiscoveryEnv(stateDir), OPENCLAW_PLUGIN_DISCOVERY_CACHE_MS: "5000", }, - async () => withStateDir(stateDir, async () => discoverOpenClawPlugins({})), - ); + }); expect(third.candidates.some((candidate) => candidate.idHint === "cached")).toBe(false); }); }); diff --git a/src/plugins/discovery.ts b/src/plugins/discovery.ts index c03b0fe01bf..686c1f7fd86 100644 --- a/src/plugins/discovery.ts +++ b/src/plugins/discovery.ts @@ -69,10 +69,11 @@ function buildDiscoveryCacheKey(params: { workspaceDir?: string; extraPaths?: string[]; ownershipUid?: number | null; + env: NodeJS.ProcessEnv; }): string { const workspaceKey = params.workspaceDir ? resolveUserPath(params.workspaceDir) : ""; - const configExtensionsRoot = path.join(resolveConfigDir(), "extensions"); - const bundledRoot = resolveBundledPluginsDir() ?? 
""; + const configExtensionsRoot = path.join(resolveConfigDir(params.env), "extensions"); + const bundledRoot = resolveBundledPluginsDir(params.env) ?? ""; const normalizedExtraPaths = (params.extraPaths ?? []) .filter((entry): entry is string => typeof entry === "string") .map((entry) => entry.trim()) @@ -153,7 +154,7 @@ function checkPathStatAndPermissions(params: { continue; } seen.add(normalized); - const stat = safeStatSync(targetPath); + let stat = safeStatSync(targetPath); if (!stat) { return { reason: "path_stat_failed", @@ -162,7 +163,28 @@ function checkPathStatAndPermissions(params: { targetPath, }; } - const modeBits = stat.mode & 0o777; + let modeBits = stat.mode & 0o777; + if ((modeBits & 0o002) !== 0 && params.origin === "bundled") { + // npm/global installs can create package-managed extension dirs without + // directory entries in the tarball, which may widen them to 0777. + // Tighten bundled dirs in place before applying the normal safety gate. + try { + fs.chmodSync(targetPath, modeBits & ~0o022); + const repairedStat = safeStatSync(targetPath); + if (!repairedStat) { + return { + reason: "path_stat_failed", + sourcePath: params.source, + rootPath: params.rootDir, + targetPath, + }; + } + stat = repairedStat; + modeBits = repairedStat.mode & 0o777; + } catch { + // Fall through to the normal block path below when repair is not possible. 
+ } + } if ((modeBits & 0o002) !== 0) { return { reason: "path_world_writable", @@ -628,6 +650,7 @@ export function discoverOpenClawPlugins(params: { workspaceDir: params.workspaceDir, extraPaths: params.extraPaths, ownershipUid: params.ownershipUid, + env, }); if (cacheEnabled) { const cached = discoveryCache.get(cacheKey); @@ -676,7 +699,7 @@ export function discoverOpenClawPlugins(params: { } } - const bundledDir = resolveBundledPluginsDir(); + const bundledDir = resolveBundledPluginsDir(env); if (bundledDir) { discoverInDirectory({ dir: bundledDir, @@ -690,7 +713,7 @@ export function discoverOpenClawPlugins(params: { // Keep auto-discovered global extensions behind bundled plugins. // Users can still intentionally override via plugins.load.paths (origin=config). - const globalDir = path.join(resolveConfigDir(), "extensions"); + const globalDir = path.join(resolveConfigDir(env), "extensions"); discoverInDirectory({ dir: globalDir, origin: "global", diff --git a/src/plugins/manifest-registry.ts b/src/plugins/manifest-registry.ts index d392144f925..eb6702d54b1 100644 --- a/src/plugins/manifest-registry.ts +++ b/src/plugins/manifest-registry.ts @@ -1,6 +1,8 @@ import fs from "node:fs"; +import path from "node:path"; import type { OpenClawConfig } from "../config/config.js"; -import { resolveUserPath } from "../utils.js"; +import { resolveConfigDir, resolveUserPath } from "../utils.js"; +import { resolveBundledPluginsDir } from "./bundled-dir.js"; import { normalizePluginsConfig, type NormalizedPluginsConfig } from "./config-state.js"; import { discoverOpenClawPlugins, type PluginCandidate } from "./discovery.js"; import { loadPluginManifest, type PluginManifest } from "./manifest.js"; @@ -79,8 +81,11 @@ function shouldUseManifestCache(env: NodeJS.ProcessEnv): boolean { function buildCacheKey(params: { workspaceDir?: string; plugins: NormalizedPluginsConfig; + env: NodeJS.ProcessEnv; }): string { const workspaceKey = params.workspaceDir ? 
resolveUserPath(params.workspaceDir) : ""; + const configExtensionsRoot = path.join(resolveConfigDir(params.env), "extensions"); + const bundledRoot = resolveBundledPluginsDir(params.env) ?? ""; // The manifest registry only depends on where plugins are discovered from (workspace + load paths). // It does not depend on allow/deny/entries enable-state, so exclude those for higher cache hit rates. const loadPaths = params.plugins.loadPaths @@ -88,7 +93,7 @@ function buildCacheKey(params: { .map((p) => p.trim()) .filter(Boolean) .toSorted(); - return `${workspaceKey}::${JSON.stringify(loadPaths)}`; + return `${workspaceKey}::${configExtensionsRoot}::${bundledRoot}::${JSON.stringify(loadPaths)}`; } function safeStatMtimeMs(filePath: string): number | null { @@ -142,8 +147,8 @@ export function loadPluginManifestRegistry(params: { }): PluginManifestRegistry { const config = params.config ?? {}; const normalized = normalizePluginsConfig(config.plugins); - const cacheKey = buildCacheKey({ workspaceDir: params.workspaceDir, plugins: normalized }); const env = params.env ?? 
process.env; + const cacheKey = buildCacheKey({ workspaceDir: params.workspaceDir, plugins: normalized, env }); const cacheEnabled = params.cache !== false && shouldUseManifestCache(env); if (cacheEnabled) { const cached = registryCache.get(cacheKey); @@ -160,6 +165,7 @@ export function loadPluginManifestRegistry(params: { : discoverOpenClawPlugins({ workspaceDir: params.workspaceDir, extraPaths: normalized.loadPaths, + env, }); const diagnostics: PluginDiagnostic[] = [...discovery.diagnostics]; const candidates: PluginCandidate[] = discovery.candidates; diff --git a/src/telegram/bot-message-dispatch.test.ts b/src/telegram/bot-message-dispatch.test.ts index 4f5e2484d50..17ec8ac21a9 100644 --- a/src/telegram/bot-message-dispatch.test.ts +++ b/src/telegram/bot-message-dispatch.test.ts @@ -1031,6 +1031,81 @@ describe("dispatchTelegramMessage draft streaming", () => { expect(deliverReplies).not.toHaveBeenCalled(); }); + it("clears the active preview when a later final falls back after archived retain", async () => { + let answerMessageId: number | undefined; + let answerDraftParams: + | { + onSupersededPreview?: (preview: { messageId: number; textSnapshot: string }) => void; + } + | undefined; + const answerDraftStream = { + update: vi.fn().mockImplementation((text: string) => { + if (text.includes("Message B")) { + answerMessageId = 1002; + } + }), + flush: vi.fn().mockResolvedValue(undefined), + messageId: vi.fn().mockImplementation(() => answerMessageId), + clear: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockResolvedValue(undefined), + forceNewMessage: vi.fn().mockImplementation(() => { + answerMessageId = undefined; + }), + }; + const reasoningDraftStream = createDraftStream(); + createTelegramDraftStream + .mockImplementationOnce((params) => { + answerDraftParams = params as typeof answerDraftParams; + return answerDraftStream; + }) + .mockImplementationOnce(() => reasoningDraftStream); + dispatchReplyWithBufferedBlockDispatcher.mockImplementation( + 
async ({ dispatcherOptions, replyOptions }) => { + await replyOptions?.onPartialReply?.({ text: "Message A partial" }); + await replyOptions?.onAssistantMessageStart?.(); + await replyOptions?.onPartialReply?.({ text: "Message B partial" }); + answerDraftParams?.onSupersededPreview?.({ + messageId: 1001, + textSnapshot: "Message A partial", + }); + + await dispatcherOptions.deliver({ text: "Message A final" }, { kind: "final" }); + await dispatcherOptions.deliver({ text: "Message B final" }, { kind: "final" }); + return { queuedFinal: true }; + }, + ); + deliverReplies.mockResolvedValue({ delivered: true }); + const preConnectErr = new Error("connect ECONNREFUSED 149.154.167.220:443"); + (preConnectErr as NodeJS.ErrnoException).code = "ECONNREFUSED"; + editMessageTelegram + .mockRejectedValueOnce(new Error("400: Bad Request: message to edit not found")) + .mockRejectedValueOnce(preConnectErr); + + await dispatchWithContext({ context: createContext(), streamMode: "partial" }); + + expect(editMessageTelegram).toHaveBeenNthCalledWith( + 1, + 123, + 1001, + "Message A final", + expect.any(Object), + ); + expect(editMessageTelegram).toHaveBeenNthCalledWith( + 2, + 123, + 1002, + "Message B final", + expect.any(Object), + ); + const finalTextSentViaDeliverReplies = deliverReplies.mock.calls.some((call: unknown[]) => + (call[0] as { replies?: Array<{ text?: string }> })?.replies?.some( + (r: { text?: string }) => r.text === "Message B final", + ), + ); + expect(finalTextSentViaDeliverReplies).toBe(true); + expect(answerDraftStream.clear).toHaveBeenCalledTimes(1); + }); + it.each(["partial", "block"] as const)( "keeps finalized text preview when the next assistant message is media-only (%s mode)", async (streamMode) => { diff --git a/src/telegram/bot.fetch-abort.test.ts b/src/telegram/bot.fetch-abort.test.ts index 471654686f7..0d9bd53643b 100644 --- a/src/telegram/bot.fetch-abort.test.ts +++ b/src/telegram/bot.fetch-abort.test.ts @@ -1,10 +1,10 @@ import { describe, 
expect, it, vi } from "vitest"; import { botCtorSpy } from "./bot.create-telegram-bot.test-harness.js"; import { createTelegramBot } from "./bot.js"; +import { getTelegramNetworkErrorOrigin } from "./network-errors.js"; describe("createTelegramBot fetch abort", () => { it("aborts wrapped client fetch when fetchAbortSignal aborts", async () => { - const originalFetch = globalThis.fetch; const shutdown = new AbortController(); const fetchSpy = vi.fn( (_input: RequestInfo | URL, init?: RequestInit) => @@ -13,22 +13,78 @@ describe("createTelegramBot fetch abort", () => { signal.addEventListener("abort", () => resolve(signal), { once: true }); }), ); - globalThis.fetch = fetchSpy as unknown as typeof fetch; - try { - botCtorSpy.mockClear(); - createTelegramBot({ token: "tok", fetchAbortSignal: shutdown.signal }); - const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) - ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; - expect(clientFetch).toBeTypeOf("function"); + botCtorSpy.mockClear(); + createTelegramBot({ + token: "tok", + fetchAbortSignal: shutdown.signal, + proxyFetch: fetchSpy as unknown as typeof fetch, + }); + const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) + ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; + expect(clientFetch).toBeTypeOf("function"); - const observedSignalPromise = clientFetch("https://example.test"); - shutdown.abort(new Error("shutdown")); - const observedSignal = (await observedSignalPromise) as AbortSignal; + const observedSignalPromise = clientFetch("https://example.test"); + shutdown.abort(new Error("shutdown")); + const observedSignal = (await observedSignalPromise) as AbortSignal; - expect(observedSignal).toBeInstanceOf(AbortSignal); - expect(observedSignal.aborted).toBe(true); - } finally { - globalThis.fetch = originalFetch; - } + expect(observedSignal).toBeInstanceOf(AbortSignal); + 
expect(observedSignal.aborted).toBe(true); + }); + + it("tags wrapped Telegram fetch failures with the Bot API method", async () => { + const shutdown = new AbortController(); + const fetchError = Object.assign(new TypeError("fetch failed"), { + cause: Object.assign(new Error("connect timeout"), { + code: "UND_ERR_CONNECT_TIMEOUT", + }), + }); + const fetchSpy = vi.fn(async () => { + throw fetchError; + }); + botCtorSpy.mockClear(); + createTelegramBot({ + token: "tok", + fetchAbortSignal: shutdown.signal, + proxyFetch: fetchSpy as unknown as typeof fetch, + }); + const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) + ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; + expect(clientFetch).toBeTypeOf("function"); + + await expect(clientFetch("https://api.telegram.org/bot123456:ABC/getUpdates")).rejects.toBe( + fetchError, + ); + expect(getTelegramNetworkErrorOrigin(fetchError)).toEqual({ + method: "getupdates", + url: "https://api.telegram.org/bot123456:ABC/getUpdates", + }); + }); + + it("preserves the original fetch error when tagging cannot attach metadata", async () => { + const shutdown = new AbortController(); + const frozenError = Object.freeze( + Object.assign(new TypeError("fetch failed"), { + cause: Object.assign(new Error("connect timeout"), { + code: "UND_ERR_CONNECT_TIMEOUT", + }), + }), + ); + const fetchSpy = vi.fn(async () => { + throw frozenError; + }); + botCtorSpy.mockClear(); + createTelegramBot({ + token: "tok", + fetchAbortSignal: shutdown.signal, + proxyFetch: fetchSpy as unknown as typeof fetch, + }); + const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) + ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; + expect(clientFetch).toBeTypeOf("function"); + + await expect(clientFetch("https://api.telegram.org/bot123456:ABC/getUpdates")).rejects.toBe( + frozenError, + ); + 
expect(getTelegramNetworkErrorOrigin(frozenError)).toBeNull(); }); }); diff --git a/src/telegram/bot.ts b/src/telegram/bot.ts index 48d0c745b42..b0c288efcea 100644 --- a/src/telegram/bot.ts +++ b/src/telegram/bot.ts @@ -39,6 +39,7 @@ import { } from "./bot-updates.js"; import { buildTelegramGroupPeerId, resolveTelegramStreamMode } from "./bot/helpers.js"; import { resolveTelegramFetch } from "./fetch.js"; +import { tagTelegramNetworkError } from "./network-errors.js"; import { createTelegramSendChatActionHandler } from "./sendchataction-401-backoff.js"; import { getTelegramSequentialKey } from "./sequential-key.js"; import { createTelegramThreadBindingManager } from "./thread-bindings.js"; @@ -68,6 +69,34 @@ export type TelegramBotOptions = { export { getTelegramSequentialKey }; +function readRequestUrl(input: RequestInfo | URL): string | null { + if (typeof input === "string") { + return input; + } + if (input instanceof URL) { + return input.toString(); + } + if (typeof input === "object" && input !== null && "url" in input) { + const url = (input as { url?: unknown }).url; + return typeof url === "string" ? url : null; + } + return null; +} + +function extractTelegramApiMethod(input: RequestInfo | URL): string | null { + const url = readRequestUrl(input); + if (!url) { + return null; + } + try { + const pathname = new URL(url).pathname; + const segments = pathname.split("/").filter(Boolean); + return segments.length > 0 ? (segments.at(-1) ?? null) : null; + } catch { + return null; + } +} + export function createTelegramBot(opts: TelegramBotOptions) { const runtime: RuntimeEnv = opts.runtime ?? createNonExitingRuntime(); const cfg = opts.config ?? 
loadConfig(); @@ -147,6 +176,23 @@ export function createTelegramBot(opts: TelegramBotOptions) { }); }) as unknown as NonNullable; } + if (finalFetch) { + const baseFetch = finalFetch; + finalFetch = ((input: RequestInfo | URL, init?: RequestInit) => { + return Promise.resolve(baseFetch(input, init)).catch((err: unknown) => { + try { + tagTelegramNetworkError(err, { + method: extractTelegramApiMethod(input), + url: readRequestUrl(input), + }); + } catch { + // Tagging is best-effort; preserve the original fetch failure if the + // error object cannot accept extra metadata. + } + throw err; + }); + }) as unknown as NonNullable; + } const timeoutSeconds = typeof telegramCfg?.timeoutSeconds === "number" && Number.isFinite(telegramCfg.timeoutSeconds) diff --git a/src/telegram/lane-delivery-text-deliverer.ts b/src/telegram/lane-delivery-text-deliverer.ts index 56e0d974240..000087cc692 100644 --- a/src/telegram/lane-delivery-text-deliverer.ts +++ b/src/telegram/lane-delivery-text-deliverer.ts @@ -464,6 +464,12 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) { !hasMedia && text.length > 0 && text.length <= params.draftMaxChars && !payload.isError; if (infoKind === "final") { + // Transient previews must decide cleanup retention per final attempt. + // Completed previews intentionally stay retained so later extra payloads + // do not clear the already-finalized message. 
+ if (params.activePreviewLifecycleByLane[laneName] === "transient") { + params.retainPreviewOnCleanupByLane[laneName] = false; + } if (laneName === "answer") { const archivedResult = await consumeArchivedAnswerPreviewForFinal({ lane, diff --git a/src/telegram/monitor.test.ts b/src/telegram/monitor.test.ts index bd9a35fc97c..d7ebef73373 100644 --- a/src/telegram/monitor.test.ts +++ b/src/telegram/monitor.test.ts @@ -1,5 +1,6 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { monitorTelegramProvider } from "./monitor.js"; +import { tagTelegramNetworkError } from "./network-errors.js"; type MockCtx = { message: { @@ -102,6 +103,15 @@ function makeRecoverableFetchError() { }); } +function makeTaggedPollingFetchError() { + const err = makeRecoverableFetchError(); + tagTelegramNetworkError(err, { + method: "getUpdates", + url: "https://api.telegram.org/bot123456:ABC/getUpdates", + }); + return err; +} + const createAbortTask = ( abort: AbortController, beforeAbort?: () => void, @@ -398,6 +408,20 @@ describe("monitorTelegramProvider (grammY)", () => { expect(createdBotStops[0]).toHaveBeenCalledTimes(1); }); + it("clears bounded cleanup timers after a clean stop", async () => { + vi.useFakeTimers(); + try { + const abort = new AbortController(); + mockRunOnceAndAbort(abort); + + await monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); + + expect(vi.getTimerCount()).toBe(0); + } finally { + vi.useRealTimers(); + } + }); + it("surfaces non-recoverable errors", async () => { runSpy.mockImplementationOnce(() => makeRunnerStub({ @@ -439,7 +463,7 @@ describe("monitorTelegramProvider (grammY)", () => { const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); await vi.waitFor(() => expect(runSpy).toHaveBeenCalledTimes(1)); - expect(emitUnhandledRejection(new TypeError("fetch failed"))).toBe(true); + expect(emitUnhandledRejection(makeTaggedPollingFetchError())).toBe(true); await monitor; 
expect(stop.mock.calls.length).toBeGreaterThanOrEqual(1); @@ -482,13 +506,54 @@ describe("monitorTelegramProvider (grammY)", () => { expect(firstSignal).toBeInstanceOf(AbortSignal); expect((firstSignal as AbortSignal).aborted).toBe(false); - expect(emitUnhandledRejection(new TypeError("fetch failed"))).toBe(true); + expect(emitUnhandledRejection(makeTaggedPollingFetchError())).toBe(true); await monitor; expect((firstSignal as AbortSignal).aborted).toBe(true); expect(stop).toHaveBeenCalled(); }); + it("ignores unrelated process-level network errors while telegram polling is active", async () => { + const abort = new AbortController(); + let running = true; + let releaseTask: (() => void) | undefined; + const stop = vi.fn(async () => { + running = false; + releaseTask?.(); + }); + + runSpy.mockImplementationOnce(() => + makeRunnerStub({ + task: () => + new Promise((resolve) => { + releaseTask = resolve; + }), + stop, + isRunning: () => running, + }), + ); + + const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); + await vi.waitFor(() => expect(runSpy).toHaveBeenCalledTimes(1)); + + const slackDnsError = Object.assign( + new Error("A request error occurred: getaddrinfo ENOTFOUND slack.com"), + { + code: "ENOTFOUND", + hostname: "slack.com", + }, + ); + expect(emitUnhandledRejection(slackDnsError)).toBe(false); + + abort.abort(); + await monitor; + + expect(stop).toHaveBeenCalledTimes(1); + expect(computeBackoff).not.toHaveBeenCalled(); + expect(sleepWithAbort).not.toHaveBeenCalled(); + expect(runSpy).toHaveBeenCalledTimes(1); + }); + it("passes configured webhookHost to webhook listener", async () => { await monitorTelegramProvider({ token: "tok", diff --git a/src/telegram/monitor.ts b/src/telegram/monitor.ts index 7131876e6f1..f7704f62dea 100644 --- a/src/telegram/monitor.ts +++ b/src/telegram/monitor.ts @@ -9,7 +9,10 @@ import type { RuntimeEnv } from "../runtime.js"; import { resolveTelegramAccount } from "./accounts.js"; import { 
resolveTelegramAllowedUpdates } from "./allowed-updates.js"; import { TelegramExecApprovalHandler } from "./exec-approvals-handler.js"; -import { isRecoverableTelegramNetworkError } from "./network-errors.js"; +import { + isRecoverableTelegramNetworkError, + isTelegramPollingNetworkError, +} from "./network-errors.js"; import { TelegramPollingSession } from "./polling-session.js"; import { makeProxyFetch } from "./proxy.js"; import { readTelegramUpdateOffset, writeTelegramUpdateOffset } from "./update-offset-store.js"; @@ -78,13 +81,14 @@ export async function monitorTelegramProvider(opts: MonitorTelegramOpts = {}) { const unregisterHandler = registerUnhandledRejectionHandler((err) => { const isNetworkError = isRecoverableTelegramNetworkError(err, { context: "polling" }); - if (isGrammyHttpError(err) && isNetworkError) { + const isTelegramPollingError = isTelegramPollingNetworkError(err); + if (isGrammyHttpError(err) && isNetworkError && isTelegramPollingError) { log(`[telegram] Suppressed network error: ${formatErrorMessage(err)}`); return true; } const activeRunner = pollingSession?.activeRunner; - if (isNetworkError && activeRunner && activeRunner.isRunning()) { + if (isNetworkError && isTelegramPollingError && activeRunner && activeRunner.isRunning()) { pollingSession?.markForceRestarted(); pollingSession?.abortActiveFetch(); void activeRunner.stop().catch(() => {}); diff --git a/src/telegram/network-errors.test.ts b/src/telegram/network-errors.test.ts index 6624b8f63a0..56106a292b8 100644 --- a/src/telegram/network-errors.test.ts +++ b/src/telegram/network-errors.test.ts @@ -1,12 +1,37 @@ import { describe, expect, it } from "vitest"; import { + getTelegramNetworkErrorOrigin, isRecoverableTelegramNetworkError, isSafeToRetrySendError, isTelegramClientRejection, + isTelegramPollingNetworkError, isTelegramServerError, + tagTelegramNetworkError, } from "./network-errors.js"; describe("isRecoverableTelegramNetworkError", () => { + it("tracks Telegram polling origin 
separately from generic network matching", () => { + const slackDnsError = Object.assign( + new Error("A request error occurred: getaddrinfo ENOTFOUND slack.com"), + { + code: "ENOTFOUND", + hostname: "slack.com", + }, + ); + expect(isRecoverableTelegramNetworkError(slackDnsError)).toBe(true); + expect(isTelegramPollingNetworkError(slackDnsError)).toBe(false); + + tagTelegramNetworkError(slackDnsError, { + method: "getUpdates", + url: "https://api.telegram.org/bot123456:ABC/getUpdates", + }); + expect(getTelegramNetworkErrorOrigin(slackDnsError)).toEqual({ + method: "getupdates", + url: "https://api.telegram.org/bot123456:ABC/getUpdates", + }); + expect(isTelegramPollingNetworkError(slackDnsError)).toBe(true); + }); + it("detects recoverable error codes", () => { const err = Object.assign(new Error("timeout"), { code: "ETIMEDOUT" }); expect(isRecoverableTelegramNetworkError(err)).toBe(true); diff --git a/src/telegram/network-errors.ts b/src/telegram/network-errors.ts index 66da37c4dd4..08e5d2dc2c0 100644 --- a/src/telegram/network-errors.ts +++ b/src/telegram/network-errors.ts @@ -5,6 +5,8 @@ import { readErrorName, } from "../infra/errors.js"; +const TELEGRAM_NETWORK_ORIGIN = Symbol("openclaw.telegram.network-origin"); + const RECOVERABLE_ERROR_CODES = new Set([ "ECONNRESET", "ECONNREFUSED", @@ -101,6 +103,51 @@ function getErrorCode(err: unknown): string | undefined { } export type TelegramNetworkErrorContext = "polling" | "send" | "webhook" | "unknown"; +export type TelegramNetworkErrorOrigin = { + method?: string | null; + url?: string | null; +}; + +function normalizeTelegramNetworkMethod(method?: string | null): string | null { + const trimmed = method?.trim(); + if (!trimmed) { + return null; + } + return trimmed.toLowerCase(); +} + +export function tagTelegramNetworkError(err: unknown, origin: TelegramNetworkErrorOrigin): void { + if (!err || typeof err !== "object") { + return; + } + Object.defineProperty(err, TELEGRAM_NETWORK_ORIGIN, { + value: { + 
method: normalizeTelegramNetworkMethod(origin.method), + url: typeof origin.url === "string" && origin.url.trim() ? origin.url : null, + } satisfies TelegramNetworkErrorOrigin, + configurable: true, + }); +} + +export function getTelegramNetworkErrorOrigin(err: unknown): TelegramNetworkErrorOrigin | null { + for (const candidate of collectTelegramErrorCandidates(err)) { + if (!candidate || typeof candidate !== "object") { + continue; + } + const origin = (candidate as Record)[TELEGRAM_NETWORK_ORIGIN]; + if (!origin || typeof origin !== "object") { + continue; + } + const method = "method" in origin && typeof origin.method === "string" ? origin.method : null; + const url = "url" in origin && typeof origin.url === "string" ? origin.url : null; + return { method, url }; + } + return null; +} + +export function isTelegramPollingNetworkError(err: unknown): boolean { + return getTelegramNetworkErrorOrigin(err)?.method === "getupdates"; +} /** * Returns true if the error is safe to retry for a non-idempotent Telegram send operation diff --git a/src/telegram/polling-session.ts b/src/telegram/polling-session.ts index 784c8b2d759..3a78747e41f 100644 --- a/src/telegram/polling-session.ts +++ b/src/telegram/polling-session.ts @@ -15,6 +15,24 @@ const TELEGRAM_POLL_RESTART_POLICY = { const POLL_STALL_THRESHOLD_MS = 90_000; const POLL_WATCHDOG_INTERVAL_MS = 30_000; +const POLL_STOP_GRACE_MS = 15_000; + +const waitForGracefulStop = async (stop: () => Promise) => { + let timer: ReturnType | undefined; + try { + await Promise.race([ + stop(), + new Promise((resolve) => { + timer = setTimeout(resolve, POLL_STOP_GRACE_MS); + timer.unref?.(); + }), + ]); + } finally { + if (timer) { + clearTimeout(timer); + } + } +}; type TelegramBot = ReturnType; @@ -176,6 +194,11 @@ export class TelegramPollingSession { const fetchAbortController = this.#activeFetchAbort; let stopPromise: Promise | undefined; let stalledRestart = false; + let forceCycleTimer: ReturnType | undefined; + let 
forceCycleResolve: (() => void) | undefined; + const forceCyclePromise = new Promise((resolve) => { + forceCycleResolve = resolve; + }); const stopRunner = () => { fetchAbortController?.abort(); stopPromise ??= Promise.resolve(runner.stop()) @@ -209,12 +232,24 @@ export class TelegramPollingSession { `[telegram] Polling stall detected (no getUpdates for ${formatDurationPrecise(elapsed)}); forcing restart.`, ); void stopRunner(); + void stopBot(); + if (!forceCycleTimer) { + forceCycleTimer = setTimeout(() => { + if (this.opts.abortSignal?.aborted) { + return; + } + this.opts.log( + `[telegram] Polling runner stop timed out after ${formatDurationPrecise(POLL_STOP_GRACE_MS)}; forcing restart cycle.`, + ); + forceCycleResolve?.(); + }, POLL_STOP_GRACE_MS); + } } }, POLL_WATCHDOG_INTERVAL_MS); this.opts.abortSignal?.addEventListener("abort", stopOnAbort, { once: true }); try { - await runner.task(); + await Promise.race([runner.task(), forceCyclePromise]); if (this.opts.abortSignal?.aborted) { return "exit"; } @@ -249,9 +284,12 @@ export class TelegramPollingSession { return shouldRestart ? 
"continue" : "exit"; } finally { clearInterval(watchdog); + if (forceCycleTimer) { + clearTimeout(forceCycleTimer); + } this.opts.abortSignal?.removeEventListener("abort", stopOnAbort); - await stopRunner(); - await stopBot(); + await waitForGracefulStop(stopRunner); + await waitForGracefulStop(stopBot); this.#activeRunner = undefined; if (this.#activeFetchAbort === fetchAbortController) { this.#activeFetchAbort = undefined; diff --git a/src/terminal/table.test.ts b/src/terminal/table.test.ts index 9c6d53eaece..bad2fe48cf2 100644 --- a/src/terminal/table.test.ts +++ b/src/terminal/table.test.ts @@ -1,9 +1,18 @@ -import { describe, expect, it } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { visibleWidth } from "./ansi.js"; import { wrapNoteMessage } from "./note.js"; import { renderTable } from "./table.js"; describe("renderTable", () => { + const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + + afterEach(() => { + vi.unstubAllEnvs(); + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", originalPlatformDescriptor); + } + }); + it("prefers shrinking flex columns to avoid wrapping non-flex labels", () => { const out = renderTable({ width: 40, @@ -170,6 +179,42 @@ describe("renderTable", () => { expect(out).toContain("before"); expect(out).toContain("after"); }); + + it("falls back to ASCII borders on legacy Windows consoles", () => { + Object.defineProperty(process, "platform", { value: "win32", configurable: true }); + vi.stubEnv("WT_SESSION", ""); + vi.stubEnv("TERM_PROGRAM", ""); + vi.stubEnv("TERM", "vt100"); + + const out = renderTable({ + columns: [ + { key: "A", header: "A", minWidth: 6 }, + { key: "B", header: "B", minWidth: 10, flex: true }, + ], + rows: [{ A: "row", B: "value" }], + }); + + expect(out).toContain("+"); + expect(out).not.toContain("┌"); + }); + + it("keeps unicode borders on modern Windows terminals", () => { + 
Object.defineProperty(process, "platform", { value: "win32", configurable: true }); + vi.stubEnv("WT_SESSION", "1"); + vi.stubEnv("TERM", ""); + vi.stubEnv("TERM_PROGRAM", ""); + + const out = renderTable({ + columns: [ + { key: "A", header: "A", minWidth: 6 }, + { key: "B", header: "B", minWidth: 10, flex: true }, + ], + rows: [{ A: "row", B: "value" }], + }); + + expect(out).toContain("┌"); + expect(out).not.toContain("+"); + }); }); describe("wrapNoteMessage", () => { diff --git a/src/terminal/table.ts b/src/terminal/table.ts index a1fbb9f570b..7c55ba7f2dd 100644 --- a/src/terminal/table.ts +++ b/src/terminal/table.ts @@ -20,6 +20,26 @@ export type RenderTableOptions = { border?: "unicode" | "ascii" | "none"; }; +function resolveDefaultBorder( + platform: NodeJS.Platform, + env: NodeJS.ProcessEnv, +): "unicode" | "ascii" { + if (platform !== "win32") { + return "unicode"; + } + + const term = env.TERM ?? ""; + const termProgram = env.TERM_PROGRAM ?? ""; + const isModernTerminal = + Boolean(env.WT_SESSION) || + term.includes("xterm") || + term.includes("cygwin") || + term.includes("msys") || + termProgram === "vscode"; + + return isModernTerminal ? "unicode" : "ascii"; +} + function repeat(ch: string, n: number): string { if (n <= 0) { return ""; @@ -267,7 +287,7 @@ export function renderTable(opts: RenderTableOptions): string { } return next; }); - const border = opts.border ?? "unicode"; + const border = opts.border ?? resolveDefaultBorder(process.platform, process.env); if (border === "none") { const columns = opts.columns; const header = columns.map((c) => c.header).join(" | "); diff --git a/src/tts/tts-core.ts b/src/tts/tts-core.ts index 08f80c3d60c..279fc3cc1ed 100644 --- a/src/tts/tts-core.ts +++ b/src/tts/tts-core.ts @@ -43,6 +43,11 @@ function normalizeOpenAITtsBaseUrl(baseUrl?: string): string { return trimmed.replace(/\/+$/, ""); } +function trimToUndefined(value?: string): string | undefined { + const trimmed = value?.trim(); + return trimmed ? 
trimmed : undefined; +} + function requireInRange(value: number, min: number, max: number, label: string): void { if (!Number.isFinite(value) || value < min || value > max) { throw new Error(`${label} must be between ${min} and ${max}`); @@ -383,6 +388,14 @@ export function isValidOpenAIModel(model: string, baseUrl?: string): boolean { return OPENAI_TTS_MODELS.includes(model as (typeof OPENAI_TTS_MODELS)[number]); } +export function resolveOpenAITtsInstructions( + model: string, + instructions?: string, +): string | undefined { + const next = trimToUndefined(instructions); + return next && model.includes("gpt-4o-mini-tts") ? next : undefined; +} + export function isValidOpenAIVoice(voice: string, baseUrl?: string): voice is OpenAiTtsVoice { // Allow any voice when using custom endpoint (e.g., Kokoro Chinese voices) if (isCustomOpenAIEndpoint(baseUrl)) { @@ -619,10 +632,14 @@ export async function openaiTTS(params: { baseUrl: string; model: string; voice: string; + speed?: number; + instructions?: string; responseFormat: "mp3" | "opus" | "pcm"; timeoutMs: number; }): Promise { - const { text, apiKey, baseUrl, model, voice, responseFormat, timeoutMs } = params; + const { text, apiKey, baseUrl, model, voice, speed, instructions, responseFormat, timeoutMs } = + params; + const effectiveInstructions = resolveOpenAITtsInstructions(model, instructions); if (!isValidOpenAIModel(model, baseUrl)) { throw new Error(`Invalid model: ${model}`); @@ -646,6 +663,8 @@ export async function openaiTTS(params: { input: text, voice, response_format: responseFormat, + ...(speed != null && { speed }), + ...(effectiveInstructions != null && { instructions: effectiveInstructions }), }), signal: controller.signal, }); diff --git a/src/tts/tts.test.ts b/src/tts/tts.test.ts index f3b5d8ce0ee..d11190a21d4 100644 --- a/src/tts/tts.test.ts +++ b/src/tts/tts.test.ts @@ -7,15 +7,16 @@ import type { OpenClawConfig } from "../config/config.js"; import { withEnv } from "../test-utils/env.js"; import 
* as tts from "./tts.js"; -vi.mock("@mariozechner/pi-ai", () => ({ - completeSimple: vi.fn(), -})); - -vi.mock("@mariozechner/pi-ai/oauth", () => ({ - // Some auth helpers import oauth provider metadata at module load time. - getOAuthProviders: () => [], - getOAuthApiKey: vi.fn(async () => null), -})); +vi.mock("@mariozechner/pi-ai", async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + completeSimple: vi.fn(), + // Some auth helpers import oauth provider metadata at module load time. + getOAuthProviders: () => [], + getOAuthApiKey: vi.fn(async () => null), + }; +}); vi.mock("../agents/pi-embedded-runner/model.js", () => ({ resolveModel: vi.fn((provider: string, modelId: string) => ({ @@ -57,6 +58,7 @@ const { OPENAI_TTS_MODELS, OPENAI_TTS_VOICES, parseTtsDirectives, + resolveOpenAITtsInstructions, resolveModelOverridePolicy, summarizeText, resolveOutputFormat, @@ -169,6 +171,20 @@ describe("tts", () => { }); }); + describe("resolveOpenAITtsInstructions", () => { + it("keeps instructions only for gpt-4o-mini-tts variants", () => { + expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts", " Speak warmly ")).toBe( + "Speak warmly", + ); + expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts-2025-12-15", "Speak warmly")).toBe( + "Speak warmly", + ); + expect(resolveOpenAITtsInstructions("tts-1", "Speak warmly")).toBeUndefined(); + expect(resolveOpenAITtsInstructions("tts-1-hd", "Speak warmly")).toBeUndefined(); + expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts", " ")).toBeUndefined(); + }); + }); + describe("resolveOutputFormat", () => { it("selects opus for voice-bubble channels (telegram/feishu/whatsapp) and mp3 for others", () => { const cases = [ @@ -557,6 +573,84 @@ describe("tts", () => { }); }); + describe("textToSpeechTelephony – openai instructions", () => { + const withMockedTelephonyFetch = async ( + run: (fetchMock: ReturnType) => Promise, + ) => { + const originalFetch = globalThis.fetch; + const fetchMock 
= vi.fn(async () => ({ + ok: true, + arrayBuffer: async () => new ArrayBuffer(2), + })); + globalThis.fetch = fetchMock as unknown as typeof fetch; + try { + await run(fetchMock); + } finally { + globalThis.fetch = originalFetch; + } + }; + + it("omits instructions for unsupported speech models", async () => { + const cfg: OpenClawConfig = { + messages: { + tts: { + provider: "openai", + openai: { + apiKey: "test-key", + model: "tts-1", + voice: "alloy", + instructions: "Speak warmly", + }, + }, + }, + }; + + await withMockedTelephonyFetch(async (fetchMock) => { + const result = await tts.textToSpeechTelephony({ + text: "Hello there, friendly caller.", + cfg, + }); + + expect(result.success).toBe(true); + expect(fetchMock).toHaveBeenCalledTimes(1); + const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]; + expect(typeof init.body).toBe("string"); + const body = JSON.parse(init.body as string) as Record; + expect(body.instructions).toBeUndefined(); + }); + }); + + it("includes instructions for gpt-4o-mini-tts", async () => { + const cfg: OpenClawConfig = { + messages: { + tts: { + provider: "openai", + openai: { + apiKey: "test-key", + model: "gpt-4o-mini-tts", + voice: "alloy", + instructions: "Speak warmly", + }, + }, + }, + }; + + await withMockedTelephonyFetch(async (fetchMock) => { + const result = await tts.textToSpeechTelephony({ + text: "Hello there, friendly caller.", + cfg, + }); + + expect(result.success).toBe(true); + expect(fetchMock).toHaveBeenCalledTimes(1); + const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]; + expect(typeof init.body).toBe("string"); + const body = JSON.parse(init.body as string) as Record; + expect(body.instructions).toBe("Speak warmly"); + }); + }); + }); + describe("maybeApplyTtsToPayload", () => { const baseCfg: OpenClawConfig = { agents: { defaults: { model: { primary: "openai/gpt-4o-mini" } } }, diff --git a/src/tts/tts.ts b/src/tts/tts.ts index f76000029f6..5cd306f13a9 100644 --- a/src/tts/tts.ts 
+++ b/src/tts/tts.ts @@ -37,6 +37,7 @@ import { isValidVoiceId, OPENAI_TTS_MODELS, OPENAI_TTS_VOICES, + resolveOpenAITtsInstructions, openaiTTS, parseTtsDirectives, scheduleCleanup, @@ -117,6 +118,8 @@ export type ResolvedTtsConfig = { baseUrl: string; model: string; voice: string; + speed?: number; + instructions?: string; }; edge: { enabled: boolean; @@ -304,6 +307,8 @@ export function resolveTtsConfig(cfg: OpenClawConfig): ResolvedTtsConfig { ).replace(/\/+$/, ""), model: raw.openai?.model ?? DEFAULT_OPENAI_MODEL, voice: raw.openai?.voice ?? DEFAULT_OPENAI_VOICE, + speed: raw.openai?.speed, + instructions: raw.openai?.instructions?.trim() || undefined, }, edge: { enabled: raw.edge?.enabled ?? true, @@ -692,6 +697,8 @@ export async function textToSpeech(params: { baseUrl: config.openai.baseUrl, model: openaiModelOverride ?? config.openai.model, voice: openaiVoiceOverride ?? config.openai.voice, + speed: config.openai.speed, + instructions: config.openai.instructions, responseFormat: output.openai, timeoutMs: config.timeoutMs, }); @@ -789,6 +796,8 @@ export async function textToSpeechTelephony(params: { baseUrl: config.openai.baseUrl, model: config.openai.model, voice: config.openai.voice, + speed: config.openai.speed, + instructions: config.openai.instructions, responseFormat: output.format, timeoutMs: config.timeoutMs, }); @@ -961,6 +970,7 @@ export const _test = { isValidOpenAIModel, OPENAI_TTS_MODELS, OPENAI_TTS_VOICES, + resolveOpenAITtsInstructions, parseTtsDirectives, resolveModelOverridePolicy, summarizeText, diff --git a/src/tui/components/chat-log.test.ts b/src/tui/components/chat-log.test.ts index 02607568b1d..b81740a2e8c 100644 --- a/src/tui/components/chat-log.test.ts +++ b/src/tui/components/chat-log.test.ts @@ -29,6 +29,17 @@ describe("ChatLog", () => { expect(rendered).toContain("recreated"); }); + it("does not append duplicate assistant components when a run is started twice", () => { + const chatLog = new ChatLog(40); + 
chatLog.startAssistant("first", "run-dup"); + chatLog.startAssistant("second", "run-dup"); + + const rendered = chatLog.render(120).join("\n"); + expect(rendered).toContain("second"); + expect(rendered).not.toContain("first"); + expect(chatLog.children.length).toBe(1); + }); + it("drops stale tool references when old components are pruned", () => { const chatLog = new ChatLog(20); chatLog.startTool("tool-1", "read_file", { path: "a.txt" }); diff --git a/src/tui/components/chat-log.ts b/src/tui/components/chat-log.ts index 4ddf1d5b1de..76ac7d93654 100644 --- a/src/tui/components/chat-log.ts +++ b/src/tui/components/chat-log.ts @@ -65,8 +65,14 @@ export class ChatLog extends Container { } startAssistant(text: string, runId?: string) { + const effectiveRunId = this.resolveRunId(runId); + const existing = this.streamingRuns.get(effectiveRunId); + if (existing) { + existing.setText(text); + return existing; + } const component = new AssistantMessageComponent(text); - this.streamingRuns.set(this.resolveRunId(runId), component); + this.streamingRuns.set(effectiveRunId, component); this.append(component); return component; } diff --git a/src/tui/tui-status-summary.ts b/src/tui/tui-status-summary.ts index 64fc00adad6..dcbcd00329d 100644 --- a/src/tui/tui-status-summary.ts +++ b/src/tui/tui-status-summary.ts @@ -6,6 +6,9 @@ import type { GatewayStatusSummary } from "./tui-types.js"; export function formatStatusSummary(summary: GatewayStatusSummary) { const lines: string[] = []; lines.push("Gateway status"); + if (summary.runtimeVersion) { + lines.push(`Version: ${summary.runtimeVersion}`); + } if (!summary.linkChannel) { lines.push("Link channel: unknown"); diff --git a/src/tui/tui-types.ts b/src/tui/tui-types.ts index 087d7958950..e0af351d462 100644 --- a/src/tui/tui-types.ts +++ b/src/tui/tui-types.ts @@ -49,6 +49,7 @@ export type AgentSummary = { }; export type GatewayStatusSummary = { + runtimeVersion?: string | null; linkChannel?: { id?: string; label?: string; diff 
--git a/src/utils.test.ts b/src/utils.test.ts index ec9a0f4a1a1..0f4823c4019 100644 --- a/src/utils.test.ts +++ b/src/utils.test.ts @@ -8,7 +8,6 @@ import { ensureDir, jidToE164, normalizeE164, - normalizePath, resolveConfigDir, resolveHomeDir, resolveJidToE164, @@ -17,7 +16,6 @@ import { shortenHomePath, sleep, toWhatsappJid, - withWhatsAppPrefix, } from "./utils.js"; function withTempDirSync(prefix: string, run: (dir: string) => T): T { @@ -29,26 +27,6 @@ function withTempDirSync(prefix: string, run: (dir: string) => T): T { } } -describe("normalizePath", () => { - it("adds leading slash when missing", () => { - expect(normalizePath("foo")).toBe("/foo"); - }); - - it("keeps existing slash", () => { - expect(normalizePath("/bar")).toBe("/bar"); - }); -}); - -describe("withWhatsAppPrefix", () => { - it("adds whatsapp prefix", () => { - expect(withWhatsAppPrefix("+1555")).toBe("whatsapp:+1555"); - }); - - it("leaves prefixed intact", () => { - expect(withWhatsAppPrefix("whatsapp:+1555")).toBe("whatsapp:+1555"); - }); -}); - describe("ensureDir", () => { it("creates nested directory", async () => { await withTempDirSync("openclaw-test-", async (tmp) => { diff --git a/src/utils.ts b/src/utils.ts index 55efabb1ba2..cb044d05b69 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -73,17 +73,6 @@ export function assertWebChannel(input: string): asserts input is WebChannel { } } -export function normalizePath(p: string): string { - if (!p.startsWith("/")) { - return `/${p}`; - } - return p; -} - -export function withWhatsAppPrefix(number: string): string { - return number.startsWith("whatsapp:") ? 
number : `whatsapp:${number}`; -} - export function normalizeE164(number: string): string { const withoutPrefix = number.replace(/^whatsapp:/, "").trim(); const digits = withoutPrefix.replace(/[^\d+]/g, ""); diff --git a/src/web/outbound.test.ts b/src/web/outbound.test.ts index e494392d750..506d7816630 100644 --- a/src/web/outbound.test.ts +++ b/src/web/outbound.test.ts @@ -48,6 +48,34 @@ describe("web outbound", () => { expect(sendMessage).toHaveBeenCalledWith("+1555", "hi", undefined, undefined); }); + it("trims leading whitespace before sending text and captions", async () => { + await sendMessageWhatsApp("+1555", "\n \thello", { verbose: false }); + expect(sendMessage).toHaveBeenLastCalledWith("+1555", "hello", undefined, undefined); + + const buf = Buffer.from("img"); + loadWebMediaMock.mockResolvedValueOnce({ + buffer: buf, + contentType: "image/jpeg", + kind: "image", + }); + await sendMessageWhatsApp("+1555", "\n \tcaption", { + verbose: false, + mediaUrl: "/tmp/pic.jpg", + }); + expect(sendMessage).toHaveBeenLastCalledWith("+1555", "caption", buf, "image/jpeg"); + }); + + it("skips whitespace-only text sends without media", async () => { + const result = await sendMessageWhatsApp("+1555", "\n \t", { verbose: false }); + + expect(result).toEqual({ + messageId: "", + toJid: "1555@s.whatsapp.net", + }); + expect(sendComposingTo).not.toHaveBeenCalled(); + expect(sendMessage).not.toHaveBeenCalled(); + }); + it("throws a helpful error when no active listener exists", async () => { setActiveWebListener(null); await expect( diff --git a/src/web/outbound.ts b/src/web/outbound.ts index 43136c6f779..1fcaa807c37 100644 --- a/src/web/outbound.ts +++ b/src/web/outbound.ts @@ -26,7 +26,11 @@ export async function sendMessageWhatsApp( accountId?: string; }, ): Promise<{ messageId: string; toJid: string }> { - let text = body; + let text = body.trimStart(); + const jid = toWhatsappJid(to); + if (!text && !options.mediaUrl) { + return { messageId: "", toJid: jid }; + } 
const correlationId = generateSecureUuid(); const startedAt = Date.now(); const { listener: active, accountId: resolvedAccountId } = requireActiveWebListener( @@ -51,7 +55,6 @@ export async function sendMessageWhatsApp( to: redactedTo, }); try { - const jid = toWhatsappJid(to); const redactedJid = redactIdentifier(jid); let mediaBuffer: Buffer | undefined; let mediaType: string | undefined; diff --git a/src/wizard/onboarding.finalize.test.ts b/src/wizard/onboarding.finalize.test.ts index 314d22d8ca3..0fa67d16a8f 100644 --- a/src/wizard/onboarding.finalize.test.ts +++ b/src/wizard/onboarding.finalize.test.ts @@ -13,6 +13,13 @@ const buildGatewayInstallPlan = vi.hoisted(() => })), ); const gatewayServiceInstall = vi.hoisted(() => vi.fn(async () => {})); +const gatewayServiceRestart = vi.hoisted(() => + vi.fn<() => Promise<{ outcome: "completed" } | { outcome: "scheduled" }>>(async () => ({ + outcome: "completed", + })), +); +const gatewayServiceUninstall = vi.hoisted(() => vi.fn(async () => {})); +const gatewayServiceIsLoaded = vi.hoisted(() => vi.fn(async () => false)); const resolveGatewayInstallToken = vi.hoisted(() => vi.fn(async () => ({ token: undefined, @@ -56,14 +63,18 @@ vi.mock("../commands/health.js", () => ({ healthCommand: vi.fn(async () => {}), })); -vi.mock("../daemon/service.js", () => ({ - resolveGatewayService: vi.fn(() => ({ - isLoaded: vi.fn(async () => false), - restart: vi.fn(async () => {}), - uninstall: vi.fn(async () => {}), - install: gatewayServiceInstall, - })), -})); +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: vi.fn(() => ({ + isLoaded: gatewayServiceIsLoaded, + restart: gatewayServiceRestart, + uninstall: gatewayServiceUninstall, + install: gatewayServiceInstall, + })), + }; +}); vi.mock("../daemon/systemd.js", async (importOriginal) => { const actual = await importOriginal(); @@ -113,6 +124,11 @@ 
describe("finalizeOnboardingWizard", () => { setupOnboardingShellCompletion.mockClear(); buildGatewayInstallPlan.mockClear(); gatewayServiceInstall.mockClear(); + gatewayServiceIsLoaded.mockReset(); + gatewayServiceIsLoaded.mockResolvedValue(false); + gatewayServiceRestart.mockReset(); + gatewayServiceRestart.mockResolvedValue({ outcome: "completed" }); + gatewayServiceUninstall.mockReset(); resolveGatewayInstallToken.mockClear(); isSystemdUserServiceAvailable.mockReset(); isSystemdUserServiceAvailable.mockResolvedValue(true); @@ -244,4 +260,51 @@ describe("finalizeOnboardingWizard", () => { expectFirstOnboardingInstallPlanCallOmitsToken(); expect(gatewayServiceInstall).toHaveBeenCalledTimes(1); }); + + it("stops after a scheduled restart instead of reinstalling the service", async () => { + const progressUpdate = vi.fn(); + const progressStop = vi.fn(); + gatewayServiceIsLoaded.mockResolvedValue(true); + gatewayServiceRestart.mockResolvedValueOnce({ outcome: "scheduled" }); + const prompter = buildWizardPrompter({ + select: vi.fn(async (params: { message: string }) => { + if (params.message === "Gateway service already installed") { + return "restart"; + } + return "later"; + }) as never, + confirm: vi.fn(async () => false), + progress: vi.fn(() => ({ update: progressUpdate, stop: progressStop })), + }); + + await finalizeOnboardingWizard({ + flow: "advanced", + opts: { + acceptRisk: true, + authChoice: "skip", + installDaemon: true, + skipHealth: true, + skipUi: true, + }, + baseConfig: {}, + nextConfig: {}, + workspaceDir: "/tmp", + settings: { + port: 18789, + bind: "loopback", + authMode: "token", + gatewayToken: undefined, + tailscaleMode: "off", + tailscaleResetOnExit: false, + }, + prompter, + runtime: createRuntime(), + }); + + expect(gatewayServiceRestart).toHaveBeenCalledTimes(1); + expect(gatewayServiceInstall).not.toHaveBeenCalled(); + expect(gatewayServiceUninstall).not.toHaveBeenCalled(); + expect(progressUpdate).toHaveBeenCalledWith("Restarting 
Gateway service…"); + expect(progressStop).toHaveBeenCalledWith("Gateway service restart scheduled."); + }); }); diff --git a/src/wizard/onboarding.finalize.ts b/src/wizard/onboarding.finalize.ts index fdb1143933c..b218e160ed5 100644 --- a/src/wizard/onboarding.finalize.ts +++ b/src/wizard/onboarding.finalize.ts @@ -23,7 +23,7 @@ import { } from "../commands/onboard-helpers.js"; import type { OnboardOptions } from "../commands/onboard-types.js"; import type { OpenClawConfig } from "../config/config.js"; -import { resolveGatewayService } from "../daemon/service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "../daemon/service.js"; import { isSystemdUserServiceAvailable } from "../daemon/systemd.js"; import { ensureControlUiAssetsBuilt } from "../infra/control-ui-assets.js"; import type { RuntimeEnv } from "../runtime.js"; @@ -53,14 +53,16 @@ export async function finalizeOnboardingWizard( const withWizardProgress = async ( label: string, - options: { doneMessage?: string }, + options: { doneMessage?: string | (() => string | undefined) }, work: (progress: { update: (message: string) => void }) => Promise, ): Promise => { const progress = prompter.progress(label); try { return await work(progress); } finally { - progress.stop(options.doneMessage); + progress.stop( + typeof options.doneMessage === "function" ? options.doneMessage() : options.doneMessage, + ); } }; @@ -128,6 +130,7 @@ export async function finalizeOnboardingWizard( } const service = resolveGatewayService(); const loaded = await service.isLoaded({ env: process.env }); + let restartWasScheduled = false; if (loaded) { const action = await prompter.select({ message: "Gateway service already installed", @@ -138,15 +141,19 @@ export async function finalizeOnboardingWizard( ], }); if (action === "restart") { + let restartDoneMessage = "Gateway service restarted."; await withWizardProgress( "Gateway service", - { doneMessage: "Gateway service restarted." 
}, + { doneMessage: () => restartDoneMessage }, async (progress) => { progress.update("Restarting Gateway service…"); - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); + const restartStatus = describeGatewayServiceRestart("Gateway", restartResult); + restartDoneMessage = restartStatus.progressMessage; + restartWasScheduled = restartStatus.scheduled; }, ); } else if (action === "reinstall") { @@ -161,7 +168,10 @@ export async function finalizeOnboardingWizard( } } - if (!loaded || (loaded && !(await service.isLoaded({ env: process.env })))) { + if ( + !loaded || + (!restartWasScheduled && loaded && !(await service.isLoaded({ env: process.env }))) + ) { const progress = prompter.progress("Gateway service"); let installError: string | null = null; try { diff --git a/test/openclaw-npm-release-check.test.ts b/test/openclaw-npm-release-check.test.ts index 7bd1c98d92d..50f4cb7a5ab 100644 --- a/test/openclaw-npm-release-check.test.ts +++ b/test/openclaw-npm-release-check.test.ts @@ -8,30 +8,30 @@ import { describe("parseReleaseVersion", () => { it("parses stable CalVer releases", () => { - expect(parseReleaseVersion("2026.3.9")).toMatchObject({ - version: "2026.3.9", + expect(parseReleaseVersion("2026.3.10")).toMatchObject({ + version: "2026.3.10", channel: "stable", year: 2026, month: 3, - day: 9, + day: 10, }); }); it("parses beta CalVer releases", () => { - expect(parseReleaseVersion("2026.3.9-beta.2")).toMatchObject({ - version: "2026.3.9-beta.2", + expect(parseReleaseVersion("2026.3.10-beta.2")).toMatchObject({ + version: "2026.3.10-beta.2", channel: "beta", year: 2026, month: 3, - day: 9, + day: 10, betaNumber: 2, }); }); it("rejects legacy and malformed release formats", () => { - expect(parseReleaseVersion("2026.3.9-1")).toBeNull(); + expect(parseReleaseVersion("2026.3.10-1")).toBeNull(); expect(parseReleaseVersion("2026.03.09")).toBeNull(); - expect(parseReleaseVersion("v2026.3.9")).toBeNull(); 
+ expect(parseReleaseVersion("v2026.3.10")).toBeNull(); expect(parseReleaseVersion("2026.2.30")).toBeNull(); expect(parseReleaseVersion("2.0.0-beta2")).toBeNull(); }); @@ -49,8 +49,8 @@ describe("collectReleaseTagErrors", () => { it("accepts versions within the two-day CalVer window", () => { expect( collectReleaseTagErrors({ - packageVersion: "2026.3.9", - releaseTag: "v2026.3.9", + packageVersion: "2026.3.10", + releaseTag: "v2026.3.10", now: new Date("2026-03-11T12:00:00Z"), }), ).toEqual([]); @@ -59,9 +59,9 @@ describe("collectReleaseTagErrors", () => { it("rejects versions outside the two-day CalVer window", () => { expect( collectReleaseTagErrors({ - packageVersion: "2026.3.9", - releaseTag: "v2026.3.9", - now: new Date("2026-03-12T00:00:00Z"), + packageVersion: "2026.3.10", + releaseTag: "v2026.3.10", + now: new Date("2026-03-13T00:00:00Z"), }), ).toContainEqual(expect.stringContaining("must be within 2 days")); }); @@ -69,9 +69,9 @@ describe("collectReleaseTagErrors", () => { it("rejects tags that do not match the current release format", () => { expect( collectReleaseTagErrors({ - packageVersion: "2026.3.9", - releaseTag: "v2026.3.9-1", - now: new Date("2026-03-09T00:00:00Z"), + packageVersion: "2026.3.10", + releaseTag: "v2026.3.10-1", + now: new Date("2026-03-10T00:00:00Z"), }), ).toContainEqual(expect.stringContaining("must match vYYYY.M.D or vYYYY.M.D-beta.N")); }); diff --git a/test/setup.ts b/test/setup.ts index f232e5fc2d0..a6f902cb90f 100644 --- a/test/setup.ts +++ b/test/setup.ts @@ -1,5 +1,15 @@ import { afterAll, afterEach, beforeAll, vi } from "vitest"; +vi.mock("@mariozechner/pi-ai", async (importOriginal) => { + const original = await importOriginal<typeof import("@mariozechner/pi-ai")>(); + return { + ...original, + getOAuthApiKey: () => undefined, + getOAuthProviders: () => [], + loginOpenAICodex: vi.fn(), + }; +}); + vi.mock("@mariozechner/pi-ai/oauth", () => ({ getOAuthApiKey: () => undefined, getOAuthProviders: () => [], diff --git a/ui/package.json b/ui/package.json index 
b1f548f2869..1944c788cae 100644 --- a/ui/package.json +++ b/ui/package.json @@ -12,7 +12,7 @@ "@lit-labs/signals": "^0.2.0", "@lit/context": "^1.1.6", "@noble/ed25519": "3.0.0", - "dompurify": "^3.3.2", + "dompurify": "^3.3.3", "lit": "^3.3.2", "marked": "^17.0.4", "signal-polyfill": "^0.2.2", diff --git a/ui/src/i18n/locales/en.ts b/ui/src/i18n/locales/en.ts index c4a83017c19..cd273965829 100644 --- a/ui/src/i18n/locales/en.ts +++ b/ui/src/i18n/locales/en.ts @@ -12,7 +12,9 @@ export const en: TranslationMap = { disabled: "Disabled", na: "n/a", docs: "Docs", + theme: "Theme", resources: "Resources", + search: "Search", }, nav: { chat: "Chat", @@ -21,6 +23,7 @@ export const en: TranslationMap = { settings: "Settings", expand: "Expand sidebar", collapse: "Collapse sidebar", + resize: "Resize sidebar", }, tabs: { agents: "Agents", @@ -34,23 +37,33 @@ export const en: TranslationMap = { nodes: "Nodes", chat: "Chat", config: "Config", + communications: "Communications", + appearance: "Appearance", + automation: "Automation", + infrastructure: "Infrastructure", + aiAgents: "AI & Agents", debug: "Debug", logs: "Logs", }, subtitles: { - agents: "Manage agent workspaces, tools, and identities.", - overview: "Gateway status, entry points, and a fast health read.", - channels: "Manage channels and settings.", - instances: "Presence beacons from connected clients and nodes.", - sessions: "Inspect active sessions and adjust per-session defaults.", - usage: "Monitor API usage and costs.", - cron: "Schedule wakeups and recurring agent runs.", - skills: "Manage skill availability and API key injection.", - nodes: "Paired devices, capabilities, and command exposure.", - chat: "Direct gateway chat session for quick interventions.", - config: "Edit ~/.openclaw/openclaw.json safely.", - debug: "Gateway snapshots, events, and manual RPC calls.", - logs: "Live tail of the gateway file logs.", + agents: "Workspaces, tools, identities.", + overview: "Status, entry points, health.", + 
channels: "Channels and settings.", + instances: "Connected clients and nodes.", + sessions: "Active sessions and defaults.", + usage: "API usage and costs.", + cron: "Wakeups and recurring runs.", + skills: "Skills and API keys.", + nodes: "Paired devices and commands.", + chat: "Gateway chat for quick interventions.", + config: "Edit openclaw.json.", + communications: "Channels, messages, and audio settings.", + appearance: "Theme, UI, and setup wizard settings.", + automation: "Commands, hooks, cron, and plugins.", + infrastructure: "Gateway, web, browser, and media settings.", + aiAgents: "Agents, models, skills, tools, memory, session.", + debug: "Snapshots, events, RPC.", + logs: "Live gateway logs.", }, overview: { access: { @@ -105,6 +118,47 @@ export const en: TranslationMap = { hint: "This page is HTTP, so the browser blocks device identity. Use HTTPS (Tailscale Serve) or open {url} on the gateway host.", stayHttp: "If you must stay on HTTP, set {config} (token-only).", }, + connection: { + title: "How to connect", + step1: "Start the gateway on your host machine:", + step2: "Get a tokenized dashboard URL:", + step3: "Paste the WebSocket URL and token above, or open the tokenized URL directly.", + step4: "Or generate a reusable token:", + docsHint: "For remote access, Tailscale Serve is recommended. 
", + docsLink: "Read the docs →", + }, + cards: { + cost: "Cost", + skills: "Skills", + recentSessions: "Recent Sessions", + }, + attention: { + title: "Attention", + }, + eventLog: { + title: "Event Log", + }, + logTail: { + title: "Gateway Logs", + }, + quickActions: { + newSession: "New Session", + automation: "Automation", + refreshAll: "Refresh All", + terminal: "Terminal", + }, + streamMode: { + active: "Stream mode — values redacted", + disable: "Disable", + }, + palette: { + placeholder: "Type a command…", + noResults: "No results", + }, + }, + login: { + subtitle: "Gateway Dashboard", + passwordPlaceholder: "optional", // pragma: allowlist secret }, chat: { disconnected: "Disconnected from gateway.", diff --git a/ui/src/i18n/locales/pt-BR.ts b/ui/src/i18n/locales/pt-BR.ts index d763ca04217..f656793e78b 100644 --- a/ui/src/i18n/locales/pt-BR.ts +++ b/ui/src/i18n/locales/pt-BR.ts @@ -12,7 +12,9 @@ export const pt_BR: TranslationMap = { disabled: "Desativado", na: "n/a", docs: "Docs", + theme: "Tema", resources: "Recursos", + search: "Pesquisar", }, nav: { chat: "Chat", @@ -21,6 +23,7 @@ export const pt_BR: TranslationMap = { settings: "Configurações", expand: "Expandir barra lateral", collapse: "Recolher barra lateral", + resize: "Redimensionar barra lateral", }, tabs: { agents: "Agentes", @@ -34,23 +37,33 @@ export const pt_BR: TranslationMap = { nodes: "Nós", chat: "Chat", config: "Config", + communications: "Comunicações", + appearance: "Aparência e Configuração", + automation: "Automação", + infrastructure: "Infraestrutura", + aiAgents: "IA e Agentes", debug: "Debug", logs: "Logs", }, subtitles: { - agents: "Gerenciar espaços de trabalho, ferramentas e identidades de agentes.", - overview: "Status do gateway, pontos de entrada e leitura rápida de saúde.", - channels: "Gerenciar canais e configurações.", - instances: "Beacons de presença de clientes e nós conectados.", - sessions: "Inspecionar sessões ativas e ajustar padrões por sessão.", - usage: 
"Monitorar uso e custos da API.", - cron: "Agendar despertares e execuções recorrentes de agentes.", - skills: "Gerenciar disponibilidade de habilidades e injeção de chaves de API.", - nodes: "Dispositivos pareados, capacidades e exposição de comandos.", - chat: "Sessão de chat direta com o gateway para intervenções rápidas.", - config: "Editar ~/.openclaw/openclaw.json com segurança.", - debug: "Snapshots do gateway, eventos e chamadas RPC manuais.", - logs: "Acompanhamento ao vivo dos logs de arquivo do gateway.", + agents: "Espaços, ferramentas, identidades.", + overview: "Status, entrada, saúde.", + channels: "Canais e configurações.", + instances: "Clientes e nós conectados.", + sessions: "Sessões ativas e padrões.", + usage: "Uso e custos da API.", + cron: "Despertares e execuções.", + skills: "Habilidades e chaves API.", + nodes: "Dispositivos e comandos.", + chat: "Chat do gateway para intervenções rápidas.", + config: "Editar openclaw.json.", + communications: "Configurações de canais, mensagens e áudio.", + appearance: "Configurações de tema, UI e assistente de configuração.", + automation: "Configurações de comandos, hooks, cron e plugins.", + infrastructure: "Configurações de gateway, web, browser e mídia.", + aiAgents: "Configurações de agentes, modelos, habilidades, ferramentas, memória e sessão.", + debug: "Snapshots, eventos, RPC.", + logs: "Logs ao vivo do gateway.", }, overview: { access: { @@ -107,6 +120,47 @@ export const pt_BR: TranslationMap = { hint: "Esta página é HTTP, então o navegador bloqueia a identidade do dispositivo. 
Use HTTPS (Tailscale Serve) ou abra {url} no host do gateway.", stayHttp: "Se você precisar permanecer em HTTP, defina {config} (apenas token).", }, + connection: { + title: "Como conectar", + step1: "Inicie o gateway na sua máquina host:", + step2: "Obtenha uma URL do painel com token:", + step3: "Cole a URL do WebSocket e o token acima, ou abra a URL com token diretamente.", + step4: "Ou gere um token reutilizável:", + docsHint: "Para acesso remoto, recomendamos o Tailscale Serve. ", + docsLink: "Leia a documentação →", + }, + cards: { + cost: "Custo", + skills: "Habilidades", + recentSessions: "Sessões Recentes", + }, + attention: { + title: "Atenção", + }, + eventLog: { + title: "Log de Eventos", + }, + logTail: { + title: "Logs do Gateway", + }, + quickActions: { + newSession: "Nova Sessão", + automation: "Automação", + refreshAll: "Atualizar Tudo", + terminal: "Terminal", + }, + streamMode: { + active: "Modo stream — valores ocultos", + disable: "Desativar", + }, + palette: { + placeholder: "Digite um comando…", + noResults: "Sem resultados", + }, + }, + login: { + subtitle: "Painel do Gateway", + passwordPlaceholder: "opcional", // pragma: allowlist secret }, chat: { disconnected: "Desconectado do gateway.", diff --git a/ui/src/i18n/locales/zh-CN.ts b/ui/src/i18n/locales/zh-CN.ts index 2cf8ca35ec2..ef3cd77ae17 100644 --- a/ui/src/i18n/locales/zh-CN.ts +++ b/ui/src/i18n/locales/zh-CN.ts @@ -12,7 +12,9 @@ export const zh_CN: TranslationMap = { disabled: "已禁用", na: "不适用", docs: "文档", + theme: "主题", resources: "资源", + search: "搜索", }, nav: { chat: "聊天", @@ -21,6 +23,7 @@ export const zh_CN: TranslationMap = { settings: "设置", expand: "展开侧边栏", collapse: "折叠侧边栏", + resize: "调整侧边栏大小", }, tabs: { agents: "代理", @@ -34,23 +37,33 @@ export const zh_CN: TranslationMap = { nodes: "节点", chat: "聊天", config: "配置", + communications: "通信", + appearance: "外观与设置", + automation: "自动化", + infrastructure: "基础设施", + aiAgents: "AI 与代理", debug: "调试", logs: "日志", }, subtitles: { - 
agents: "管理代理工作区、工具和身份。", - overview: "网关状态、入口点和快速健康读取。", - channels: "管理频道和设置。", - instances: "来自已连接客户端和节点的在线信号。", - sessions: "检查活动会话并调整每个会话的默认设置。", - usage: "监控 API 使用情况和成本。", - cron: "安排唤醒和重复的代理运行。", - skills: "管理技能可用性和 API 密钥注入。", - nodes: "配对设备、功能和命令公开。", - chat: "用于快速干预的直接网关聊天会话。", - config: "安全地编辑 ~/.openclaw/openclaw.json。", - debug: "网关快照、事件和手动 RPC 调用。", - logs: "网关文件日志的实时追踪。", + agents: "工作区、工具、身份。", + overview: "状态、入口点、健康。", + channels: "频道和设置。", + instances: "已连接客户端和节点。", + sessions: "活动会话和默认设置。", + usage: "API 使用情况和成本。", + cron: "唤醒和重复运行。", + skills: "技能和 API 密钥。", + nodes: "配对设备和命令。", + chat: "网关聊天,快速干预。", + config: "编辑 openclaw.json。", + communications: "频道、消息和音频设置。", + appearance: "主题、界面和设置向导设置。", + automation: "命令、钩子、定时任务和插件设置。", + infrastructure: "网关、Web、浏览器和媒体设置。", + aiAgents: "代理、模型、技能、工具、记忆和会话设置。", + debug: "快照、事件、RPC。", + logs: "实时网关日志。", }, overview: { access: { @@ -104,6 +117,47 @@ export const zh_CN: TranslationMap = { hint: "此页面为 HTTP,因此浏览器阻止设备标识。请使用 HTTPS (Tailscale Serve) 或在网关主机上打开 {url}。", stayHttp: "如果您必须保持 HTTP,请设置 {config} (仅限令牌)。", }, + connection: { + title: "如何连接", + step1: "在主机上启动网关:", + step2: "获取带令牌的仪表盘 URL:", + step3: "将 WebSocket URL 和令牌粘贴到上方,或直接打开带令牌的 URL。", + step4: "或生成可重复使用的令牌:", + docsHint: "如需远程访问,建议使用 Tailscale Serve。", + docsLink: "查看文档 →", + }, + cards: { + cost: "费用", + skills: "技能", + recentSessions: "最近会话", + }, + attention: { + title: "注意事项", + }, + eventLog: { + title: "事件日志", + }, + logTail: { + title: "网关日志", + }, + quickActions: { + newSession: "新建会话", + automation: "自动化", + refreshAll: "全部刷新", + terminal: "终端", + }, + streamMode: { + active: "流模式 — 数据已隐藏", + disable: "禁用", + }, + palette: { + placeholder: "输入命令…", + noResults: "无结果", + }, + }, + login: { + subtitle: "网关仪表盘", + passwordPlaceholder: "可选", }, chat: { disconnected: "已断开与网关的连接。", diff --git a/ui/src/i18n/locales/zh-TW.ts b/ui/src/i18n/locales/zh-TW.ts index 6fb48680e75..580f8a3de92 100644 --- a/ui/src/i18n/locales/zh-TW.ts +++ 
b/ui/src/i18n/locales/zh-TW.ts @@ -12,7 +12,9 @@ export const zh_TW: TranslationMap = { disabled: "已禁用", na: "不適用", docs: "文檔", + theme: "主題", resources: "資源", + search: "搜尋", }, nav: { chat: "聊天", @@ -21,6 +23,7 @@ export const zh_TW: TranslationMap = { settings: "設置", expand: "展開側邊欄", collapse: "折疊側邊欄", + resize: "調整側邊欄大小", }, tabs: { agents: "代理", @@ -34,23 +37,33 @@ export const zh_TW: TranslationMap = { nodes: "節點", chat: "聊天", config: "配置", + communications: "通訊", + appearance: "外觀與設置", + automation: "自動化", + infrastructure: "基礎設施", + aiAgents: "AI 與代理", debug: "調試", logs: "日誌", }, subtitles: { - agents: "管理代理工作區、工具和身份。", - overview: "網關狀態、入口點和快速健康讀取。", - channels: "管理頻道和設置。", - instances: "來自已連接客戶端和節點的在線信號。", - sessions: "檢查活動會話並調整每個會話的默認設置。", - usage: "監控 API 使用情況和成本。", - cron: "安排喚醒和重複的代理運行。", - skills: "管理技能可用性和 API 密鑰注入。", - nodes: "配對設備、功能和命令公開。", - chat: "用於快速干預的直接網關聊天會話。", - config: "安全地編輯 ~/.openclaw/openclaw.json。", - debug: "網關快照、事件和手動 RPC 調用。", - logs: "網關文件日志的實時追蹤。", + agents: "工作區、工具、身份。", + overview: "狀態、入口點、健康。", + channels: "頻道和設置。", + instances: "已連接客戶端和節點。", + sessions: "活動會話和默認設置。", + usage: "API 使用情況和成本。", + cron: "喚醒和重複運行。", + skills: "技能和 API 密鑰。", + nodes: "配對設備和命令。", + chat: "網關聊天,快速干預。", + config: "編輯 openclaw.json。", + communications: "頻道、消息和音頻設置。", + appearance: "主題、界面和設置向導設置。", + automation: "命令、鉤子、定時任務和插件設置。", + infrastructure: "網關、Web、瀏覽器和媒體設置。", + aiAgents: "代理、模型、技能、工具、記憶和會話設置。", + debug: "快照、事件、RPC。", + logs: "實時網關日誌。", }, overview: { access: { @@ -104,6 +117,47 @@ export const zh_TW: TranslationMap = { hint: "此頁面為 HTTP,因此瀏覽器阻止設備標識。請使用 HTTPS (Tailscale Serve) 或在網關主機上打開 {url}。", stayHttp: "如果您必須保持 HTTP,請設置 {config} (僅限令牌)。", }, + connection: { + title: "如何連接", + step1: "在主機上啟動閘道:", + step2: "取得帶令牌的儀表板 URL:", + step3: "將 WebSocket URL 和令牌貼到上方,或直接開啟帶令牌的 URL。", + step4: "或產生可重複使用的令牌:", + docsHint: "如需遠端存取,建議使用 Tailscale Serve。", + docsLink: "查看文件 →", + }, + cards: { + cost: "費用", + skills: "技能", + recentSessions: "最近會話", + }, + 
attention: { + title: "注意事項", + }, + eventLog: { + title: "事件日誌", + }, + logTail: { + title: "閘道日誌", + }, + quickActions: { + newSession: "新建會話", + automation: "自動化", + refreshAll: "全部刷新", + terminal: "終端", + }, + streamMode: { + active: "串流模式 — 數據已隱藏", + disable: "禁用", + }, + palette: { + placeholder: "輸入指令…", + noResults: "無結果", + }, + }, + login: { + subtitle: "閘道儀表板", + passwordPlaceholder: "可選", }, chat: { disconnected: "已斷開與網關的連接。", diff --git a/ui/src/i18n/test/translate.test.ts b/ui/src/i18n/test/translate.test.ts index 178fd12b1e3..d373d3a47c9 100644 --- a/ui/src/i18n/test/translate.test.ts +++ b/ui/src/i18n/test/translate.test.ts @@ -1,56 +1,100 @@ -import { describe, it, expect, beforeEach, vi } from "vitest"; -import { i18n, t } from "../lib/translate.ts"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { pt_BR } from "../locales/pt-BR.ts"; +import { zh_CN } from "../locales/zh-CN.ts"; +import { zh_TW } from "../locales/zh-TW.ts"; + +type TranslateModule = typeof import("../lib/translate.ts"); + +function createStorageMock(): Storage { + const store = new Map(); + return { + get length() { + return store.size; + }, + clear() { + store.clear(); + }, + getItem(key: string) { + return store.get(key) ?? null; + }, + key(index: number) { + return Array.from(store.keys())[index] ?? 
null; + }, + removeItem(key: string) { + store.delete(key); + }, + setItem(key: string, value: string) { + store.set(key, String(value)); + }, + }; +} describe("i18n", () => { + let translate: TranslateModule; + beforeEach(async () => { + vi.resetModules(); + vi.stubGlobal("localStorage", createStorageMock()); + vi.stubGlobal("navigator", { language: "en-US" } as Navigator); + translate = await import("../lib/translate.ts"); localStorage.clear(); // Reset to English - await i18n.setLocale("en"); + await translate.i18n.setLocale("en"); + }); + + afterEach(() => { + vi.unstubAllGlobals(); }); it("should return the key if translation is missing", () => { - expect(t("non.existent.key")).toBe("non.existent.key"); + expect(translate.t("non.existent.key")).toBe("non.existent.key"); }); it("should return the correct English translation", () => { - expect(t("common.health")).toBe("Health"); + expect(translate.t("common.health")).toBe("Health"); }); it("should replace parameters correctly", () => { - expect(t("overview.stats.cronNext", { time: "10:00" })).toBe("Next wake 10:00"); + expect(translate.t("overview.stats.cronNext", { time: "10:00" })).toBe("Next wake 10:00"); }); it("should fallback to English if key is missing in another locale", async () => { // We haven't registered other locales in the test environment yet, // but the logic should fallback to 'en' map which is always there. - await i18n.setLocale("zh-CN"); + await translate.i18n.setLocale("zh-CN"); // Since we don't mock the import, it might fail to load zh-CN, // but let's assume it falls back to English for now. 
- expect(t("common.health")).toBeDefined(); + expect(translate.t("common.health")).toBeDefined(); }); it("loads translations even when setting the same locale again", async () => { - const internal = i18n as unknown as { + const internal = translate.i18n as unknown as { locale: string; translations: Record<string, unknown>; }; internal.locale = "zh-CN"; delete internal.translations["zh-CN"]; - await i18n.setLocale("zh-CN"); - expect(t("common.health")).toBe("健康状况"); + await translate.i18n.setLocale("zh-CN"); + expect(translate.t("common.health")).toBe("健康状况"); }); it("loads saved non-English locale on startup", async () => { - localStorage.setItem("openclaw.i18n.locale", "zh-CN"); vi.resetModules(); + vi.stubGlobal("localStorage", createStorageMock()); + vi.stubGlobal("navigator", { language: "en-US" } as Navigator); + localStorage.setItem("openclaw.i18n.locale", "zh-CN"); const fresh = await import("../lib/translate.ts"); - - for (let index = 0; index < 5 && fresh.i18n.getLocale() !== "zh-CN"; index += 1) { - await Promise.resolve(); - } - + await vi.waitFor(() => { + expect(fresh.i18n.getLocale()).toBe("zh-CN"); + }); expect(fresh.i18n.getLocale()).toBe("zh-CN"); expect(fresh.t("common.health")).toBe("健康状况"); }); + + it("keeps the version label available in shipped locales", () => { + expect((pt_BR.common as { version?: string }).version).toBeTruthy(); + expect((zh_CN.common as { version?: string }).version).toBeTruthy(); + expect((zh_TW.common as { version?: string }).version).toBeTruthy(); + }); }); diff --git a/ui/src/ui/app-render.helpers.ts b/ui/src/ui/app-render.helpers.ts index 68dfbe5e76d..0678706cd04 100644 --- a/ui/src/ui/app-render.helpers.ts +++ b/ui/src/ui/app-render.helpers.ts @@ -490,7 +490,7 @@ function countHiddenCronSessions(sessionKey: string, sessions: SessionsListResul const THEME_ORDER: ThemeMode[] = ["system", "light", "dark"]; export function renderThemeToggle(state: AppViewState) { - const index = Math.max(0, THEME_ORDER.indexOf(state.theme)); + const 
index = Math.max(0, THEME_ORDER.indexOf(state.themeMode)); const applyTheme = (next: ThemeMode) => (event: MouseEvent) => { const element = event.currentTarget as HTMLElement; const context: ThemeTransitionContext = { element }; @@ -498,7 +498,7 @@ export function renderThemeToggle(state: AppViewState) { context.pointerClientX = event.clientX; context.pointerClientY = event.clientY; } - state.setTheme(next, context); + state.setThemeMode(next, context); }; return html` @@ -506,27 +506,27 @@ export function renderThemeToggle(state: AppViewState) {