diff --git a/CHANGELOG.md b/CHANGELOG.md index 959a2bd0e08..cf7a60071bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,16 +4,51 @@ Docs: https://docs.openclaw.ai ## Unreleased +### Security + +- Security/exec approvals: escape invisible Unicode format characters in approval prompts so zero-width command text renders as visible `\u{...}` escapes instead of spoofing the reviewed command. (`GHSA-pcqg-f7rg-xfvv`)(#43687) Thanks @EkiXu and @vincentkoc. +- Security/device pairing: cap issued and verified device-token scopes to each paired device's approved scope baseline so stale or overbroad tokens cannot exceed approved access. (`GHSA-2pwv-x786-56f8`)(#43686) Thanks @tdjackey and @vincentkoc. +- Security/proxy attachments: restore the shared media-store size cap for persisted browser proxy files so oversized payloads are rejected instead of overriding the intended 5 MB limit. (`GHSA-6rph-mmhp-h7h9`)(#43684) Thanks @tdjackey and @vincentkoc. +- Security/host env: block inherited `GIT_EXEC_PATH` from sanitized host exec environments so Git helper resolution cannot be steered by host environment state. (`GHSA-jf5v-pqgw-gm5m`)(#43685) Thanks @zpbrent and @vincentkoc. +- Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via `session_status`. (`GHSA-wcxr-59v9-rxr8`)(#43754) Thanks @tdjackey and @vincentkoc. +- Models/secrets: enforce source-managed SecretRef markers in generated `models.json` so runtime-resolved provider secrets are not persisted when runtime projection is skipped. (#43759) Thanks @joshavant. +- Security/exec allowlist: preserve POSIX case sensitivity and keep `?` within a single path segment so exact-looking allowlist patterns no longer overmatch executables across case or directory boundaries. 
(`GHSA-f8r2-vg7x-gh8m`)(#43798) Thanks @zpbrent and @vincentkoc. + ### Changes -- Gateway/node pending work: add narrow in-memory pending-work queue primitives (`node.pending.enqueue` / `node.pending.drain`) and wake-helper reuse as a foundation for dormant-node work delivery. (#41409) Thanks @mbelinky. -- Git/runtime state: ignore the gateway-generated `.dev-state` file so local runtime state does not show up as untracked repo noise. (#41848) Thanks @smysle. -- ACP/sessions_spawn: add optional `resumeSessionId` for `runtime: "acp"` so spawned ACP sessions can resume an existing ACPX/Codex conversation instead of always starting fresh. (#41847) Thanks @pejmanjohn. -- Exec/child commands: mark child command environments with `OPENCLAW_CLI` so subprocesses can detect when they were launched from the OpenClaw CLI. (#41411) Thanks @vincentkoc. +### Fixes + +- Cron/proactive delivery: keep isolated direct cron sends out of the write-ahead resend queue so transient-send retries do not replay duplicate proactive messages after restart. (#40646) Thanks @openperf and @vincentkoc. +- TUI/chat log: reuse the active assistant message component for the same streaming run so `openclaw tui` no longer renders duplicate assistant replies. (#35364) Thanks @lisitan. +- macOS/Reminders: add the missing `NSRemindersUsageDescription` to the bundled app so `apple-reminders` can trigger the system permission prompt from OpenClaw.app. (#8559) Thanks @dinakars777. +- iMessage/self-chat echo dedupe: drop reflected duplicate copies only when a matching `is_from_me` event was just seen for the same chat, text, and `created_at`, preventing self-chat loops without broad text-only suppression. Related to #32166. (#38440) Thanks @vincentkoc. +- Mattermost/block streaming: fix duplicate message delivery (one threaded, one top-level) when block streaming is active by excluding `replyToId` from the block reply dedup key and adding an explicit `threading` dock to the Mattermost plugin. 
(#41362) Thanks @mathiasnagler and @vincentkoc. +- BlueBubbles/self-chat echo dedupe: drop reflected duplicate webhook copies only when a matching `fromMe` event was just seen for the same chat, body, and timestamp, preventing self-chat loops without broad webhook suppression. Related to #32166. (#38442) Thanks @vincentkoc. +- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz. + +## 2026.3.11 + +### Security + +- Gateway/WebSocket: enforce browser origin validation for all browser-originated connections regardless of whether proxy headers are present, closing a cross-site WebSocket hijacking path in `trusted-proxy` mode that could grant untrusted origins `operator.admin` access. (GHSA-5wcw-8jjv-m286) + +### Changes + +- OpenRouter/models: add temporary Hunter Alpha and Healer Alpha entries to the built-in catalog so OpenRouter users can try the new free stealth models during their roughly one-week availability window. (#43642) Thanks @ping-Toven. - iOS/Home canvas: add a bundled welcome screen with a live agent overview that refreshes on connect, reconnect, and foreground return, and move the compact connection pill off the top-left canvas overlay. (#42456) Thanks @ngutman. - iOS/Home canvas: replace floating controls with a docked toolbar, make the bundled home scaffold adapt to smaller phones, and open chat in the resolved main session instead of a synthetic `ios` session. (#42456) Thanks @ngutman. -- Discord/auto threads: add `autoArchiveDuration` channel config for auto-created threads so Discord thread archiving can stay at 1 hour, 1 day, 3 days, or 1 week instead of always using the 1-hour default. (#35065) Thanks @davidguttman. 
- macOS/chat UI: add a chat model picker, persist explicit thinking-level selections across relaunch, and harden provider-aware session model sync for the shared chat composer. (#42314) Thanks @ImLukeF. +- Onboarding/Ollama: add first-class Ollama setup with Local or Cloud + Local modes, browser-based cloud sign-in, curated model suggestions, and cloud-model handling that skips unnecessary local pulls. (#41529) Thanks @BruceMacD. +- OpenCode/onboarding: add new OpenCode Go provider, treat Zen and Go as one OpenCode setup in the wizard/docs while keeping the runtime providers split, store one shared OpenCode key for both profiles, and stop overriding the built-in `opencode-go` catalog routing. (#42313) Thanks @ImLukeF and @vincentkoc. +- Memory: add opt-in multimodal image and audio indexing for `memorySearch.extraPaths` with Gemini `gemini-embedding-2-preview`, strict fallback gating, and scope-based reindexing. (#43460) Thanks @gumadeiras. +- Memory/Gemini: add `gemini-embedding-2-preview` memory-search support with configurable output dimensions and automatic reindexing when the configured dimensions change. (#42501) Thanks @BillChirico and @gumadeiras. +- macOS/onboarding: detect when remote gateways need a shared auth token, explain where to find it on the gateway host, and clarify when a successful check used paired-device auth instead. (#43100) Thanks @ngutman. +- Discord/auto threads: add `autoArchiveDuration` channel config for auto-created threads so Discord thread archiving can stay at 1 hour, 1 day, 3 days, or 1 week instead of always using the 1-hour default. (#35065) Thanks @davidguttman. +- iOS/TestFlight: add a local beta release flow with Fastlane prepare/archive/upload support, canonical beta bundle IDs, and watch-app archive fixes. (#42991) Thanks @ngutman. +- ACP/sessions_spawn: add optional `resumeSessionId` for `runtime: "acp"` so spawned ACP sessions can resume an existing ACPX/Codex conversation instead of always starting fresh. 
(#41847) Thanks @pejmanjohn. +- Gateway/node pending work: add narrow in-memory pending-work queue primitives (`node.pending.enqueue` / `node.pending.drain`) and wake-helper reuse as a foundation for dormant-node work delivery. (#41409) Thanks @mbelinky. +- Git/runtime state: ignore the gateway-generated `.dev-state` file so local runtime state does not show up as untracked repo noise. (#41848) Thanks @smysle. +- Exec/child commands: mark child command environments with `OPENCLAW_CLI` so subprocesses can detect when they were launched from the OpenClaw CLI. (#41411) Thanks @vincentkoc. ### Breaking @@ -22,74 +57,97 @@ Docs: https://docs.openclaw.ai ### Fixes - Agents/text sanitization: strip leaked model control tokens (`<|...|>` and full-width `<|...|>` variants) from user-facing assistant text, preventing GLM-5 and DeepSeek internal delimiters from reaching end users. (#42173) Thanks @imwyvern. -- Resolve web tool SecretRefs atomically at runtime. (#41599) Thanks @joshavant. -- Feishu/local image auto-convert: pass `mediaLocalRoots` through the `sendText` local-image shim so allowed local image paths upload as Feishu images again instead of falling back to raw path text. (#40623) Thanks @ayanesakura. -- ACP/ACPX plugin: bump the bundled `acpx` pin to `0.1.16` so plugin-local installs and strict version checks match the latest published CLI. (#41975) Thanks @dutifulbob. -- macOS/LaunchAgent install: tighten LaunchAgent directory and plist permissions during install so launchd bootstrap does not fail when the target home path or generated plist inherited group/world-writable modes. +- iOS/gateway foreground recovery: reconnect immediately on foreground return after stale background sockets are torn down, so the app no longer stays disconnected until a later wake path happens. (#41384) Thanks @mbelinky. 
- Gateway/Control UI: keep dashboard auth tokens in session-scoped browser storage so same-tab refreshes preserve remote token auth without restoring long-lived localStorage token persistence, while scoping tokens to the selected gateway URL and fragment-only bootstrap flow. (#40892) thanks @velvet-shark. +- Gateway/macOS launchd restarts: keep the LaunchAgent registered during explicit restarts, hand off self-restarts through a detached launchd helper, and recover config/hot reload restart paths without unloading the service. Fixes #43311, #43406, #43035, and #43049. +- macOS/LaunchAgent install: tighten LaunchAgent directory and plist permissions during install so launchd bootstrap does not fail when the target home path or generated plist inherited group/world-writable modes. +- Discord/reply chunking: resolve the effective `maxLinesPerMessage` config across live reply paths and preserve `chunkMode` in the fast send path so long Discord replies no longer split unexpectedly at the default 17-line limit. (#40133) Thanks @rbutera. +- Feishu/local image auto-convert: pass `mediaLocalRoots` through the `sendText` local-image shim so allowed local image paths upload as Feishu images again instead of falling back to raw path text. (#40623) Thanks @ayanesakura. +- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz. +- Telegram/outbound HTML sends: chunk long HTML-mode messages, preserve plain-text fallback and silent-delivery params across retries, and cut over to plain text when HTML chunk planning cannot safely preserve the full message. (#42240) Thanks @obviyus. 
+- Telegram/final preview delivery: split active preview lifecycle from cleanup retention so missing archived preview edits avoid duplicate fallback sends without clearing the live preview or blocking later in-place finalization. (#41662) Thanks @hougangdev. +- Telegram/final preview delivery followup: keep ambiguous missing-`message_id` finals only when a preview was already visible, while first-preview/no-id cases still fall back so Telegram users do not lose the final reply. (#41932) Thanks @hougangdev. +- Telegram/final preview cleanup follow-up: clear stale cleanup-retain state only for transient preview finals so archived-preview retains no longer leave a stale partial bubble beside a later fallback-sent final. (#41763) Thanks @obviyus. +- Telegram/poll restarts: scope process-level polling restarts to real Telegram `getUpdates` failures so unrelated network errors, such as Slack DNS misses, no longer bounce Telegram polling. (#43799) Thanks @obviyus. +- Gateway/auth: allow one trusted device-token retry on shared-token mismatch with recovery hints to prevent reconnect churn during token drift. (#42507) Thanks @joshavant. +- Gateway/config errors: surface up to three validation issues in top-level `config.set`, `config.patch`, and `config.apply` error messages while preserving structured issue details. (#42664) Thanks @huntharo. +- Agents/Azure OpenAI Responses: include the `azure-openai` provider in the Responses API store override so Azure OpenAI multi-turn cron jobs and embedded agent runs no longer fail with HTTP 400 "store is set to false". (#42934, fixes #42800) Thanks @ademczuk. +- Agents/error rendering: ignore stale assistant `errorMessage` fields on successful turns so background/tool-side failures no longer prepend synthetic billing errors over valid replies. (#40616) Thanks @ingyukoh. +- Agents/billing recovery: probe single-provider billing cooldowns on the existing throttle so topping up credits can recover without a manual gateway restart. 
(#41422) Thanks @altaywtf. +- Agents/fallback: treat HTTP 499 responses as transient in both raw-text and structured failover paths so Anthropic-style client-closed overload responses trigger model fallback reliably. (#41468) Thanks @zeroasterisk. +- Agents/fallback: recognize Venice `402 Insufficient USD or Diem balance` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#43205) Thanks @Squabble9. +- Agents/fallback: recognize Poe `402 You've used up your points!` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#42278) Thanks @CryUshio. +- Agents/failover: treat Gemini `MALFORMED_RESPONSE` stop reasons as retryable timeouts so preview-model enum drift falls back cleanly instead of crashing the run, without also reclassifying malformed function-call errors. (#42292) Thanks @jnMetaCode. +- Agents/cooldowns: default cooldown windows with no recorded failure history to `unknown` instead of `rate_limit`, avoiding false API rate-limit warnings while preserving cooldown recovery probes. (#42911) Thanks @VibhorGautam. +- Auth/cooldowns: reset expired auth-profile cooldown error counters before computing the next backoff so stale on-disk counters do not re-escalate into long cooldown loops after expiry. (#41028) Thanks @zerone0x. +- Agents/memory flush: forward `memoryFlushWritePath` through `runEmbeddedPiAgent` so memory-triggered flush turns keep the append-only write guard without aborting before tool setup. Follows up on #38574. (#41761) Thanks @frankekn. +- Agents/context pruning: prune image-only tool results during soft-trim, align context-pruning coverage with the new tool-result contract, and extend historical image cleanup to the same screenshot-heavy session path. (#43045) Thanks @MoerAI. 
+- Sessions/reset model recompute: clear stale runtime model, context-token, and system-prompt metadata before session resets recompute the replacement session, so resets pick up current defaults and explicit overrides instead of reusing old runtime model state. (#41173) Thanks @PonyX-lab. +- Channels/allowlists: remove stale matcher caching so same-array allowlist edits and wildcard replacements take effect immediately, with regression coverage for in-place mutation cases. +- Discord/Telegram outbound runtime config: thread runtime-resolved config through Discord and Telegram send paths so SecretRef-based credentials stay resolved during message delivery. (#42352) Thanks @joshavant. +- Tools/web search: treat Brave `llm-context` grounding snippets as plain strings so `web_search` no longer returns empty snippet arrays in LLM Context mode. (#41387) Thanks @zheliu2. +- Tools/web search: recover OpenRouter Perplexity citation extraction from `message.annotations` when chat-completions responses omit top-level citations. (#40881) Thanks @laurieluo. +- CLI/skills JSON: strip ANSI and C1 control bytes from `skills list --json`, `skills info --json`, and `skills check --json` so machine-readable output stays valid for terminals and skill metadata with embedded control characters. Fixes #27530. Related #27557. Thanks @Jimmy-xuzimo and @vincentkoc. +- CLI/tables: default shared tables to ASCII borders on legacy Windows consoles while keeping Unicode borders on modern Windows terminals, so commands like `openclaw skills` stop rendering mojibake under GBK/936 consoles. Fixes #40853. Related #41015. Thanks @ApacheBin and @vincentkoc. +- CLI/memory teardown: close cached memory search/index managers in the one-shot CLI shutdown path so watcher-backed memory caches no longer keep completed CLI runs alive after output finishes. (#40389) Thanks @Julbarth. 
+- Control UI/Sessions: restore single-column session table collapse on narrow viewport or container widths by moving the responsive table override next to the base grid rule and enabling inline-size container queries. (#12175) Thanks @benjipeng. +- Telegram/network env-proxy: apply configured transport policy to proxied HTTPS dispatchers as well as direct `NO_PROXY` bypasses, so resolver-scoped IPv4 fallback and network settings work consistently for env-proxied Telegram traffic. (#40740) Thanks @sircrumpet. +- Mattermost/Markdown formatting: preserve first-line indentation when stripping bot mentions so nested list items and indented code blocks keep their structure, and render Mattermost tables natively by default instead of fenced-code fallback. (#18655) thanks @echo931. +- Mattermost/plugin send actions: normalize direct `replyTo` fallback handling so threaded plugin sends trim blank IDs and reuse the correct reply target again. (#41176) Thanks @hnykda. +- MS Teams/allowlist resolution: use the General channel conversation ID as the resolved team key (with Graph GUID fallback) so Bot Framework runtime `channelData.team.id` matching works for team and team/channel allowlist entries. (#41838) Thanks @BradGroux. +- Signal/config schema: accept `channels.signal.accountUuid` in strict config validation so loop-protection configs no longer fail with an unrecognized-key error. (#35578) Thanks @ingyukoh. +- Telegram/config schema: accept `channels.telegram.actions.editMessage` and `createForumTopic` in strict config validation so existing Telegram action toggles no longer fail as unrecognized keys. (#35498) Thanks @ingyukoh. +- Telegram/docs: clarify that `channels.telegram.groups` allowlists chats while `groupAllowFrom` allowlists users inside those chats, and point invalid negative chat IDs at the right config key. (#42451) Thanks @altaywtf. 
+- Discord/config typing: expose channel-level `autoThread` on the canonical guild-channel config type so strict config loading matches the existing Discord schema and runtime behavior. (#35608) Thanks @ingyukoh. +- Models/capabilities: guard optional `model.input` capability checks. (#42096) Thanks @andyliu. +- Models/Alibaba Cloud Model Studio: wire `MODELSTUDIO_API_KEY` through shared env auth, implicit provider discovery, and shell-env fallback so onboarding works outside the wizard too. (#40634) Thanks @pomelo-nwu. +- Resolve web tool SecretRefs atomically at runtime. (#41599) Thanks @joshavant. - Secret files: harden CLI and channel credential file reads against path-swap races by requiring direct regular files for `*File` secret inputs and rejecting symlink-backed secret files. - Archive extraction: harden TAR and external `tar.bz2` installs against destination symlink and pre-existing child-symlink escapes by extracting into staging first and merging into the canonical destination with safe file opens. -- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz. -- Context engine/tests: add bundled-registry regression coverage for cross-chunk resolution, plugin-sdk re-exports, and concurrent chunk registration. (#40460) thanks @dsantoreis. -- Agents/embedded runner: bound compaction retry waiting and drain embedded runs during SIGUSR1 restart so session lanes recover instead of staying blocked behind compaction. (#40324) thanks @cgdusek. +- Secrets/SecretRef: reject exec SecretRef traversal ids across schema, runtime, and gateway. (#42370) Thanks @joshavant. +- Sandbox/fs bridge: pin staged writes to verified parent directories so temporary write files cannot materialize outside the allowed mount before atomic replace. Thanks @tdjackey. 
+- Gateway/auth: fail closed when local `gateway.auth.*` SecretRefs are configured but unavailable, instead of silently falling back to `gateway.remote.*` credentials in local mode. (#42672) Thanks @joshavant. +- Commands/config writes: enforce `configWrites` against both the originating account and the targeted account scope for `/config` and config-backed `/allowlist` edits, blocking sibling-account mutations while preserving gateway `operator.admin` flows. Thanks @tdjackey for reporting. +- Security/system.run: fail closed for approval-backed interpreter/runtime commands when OpenClaw cannot bind exactly one concrete local file operand, while extending best-effort direct-file binding to additional runtime forms. Thanks @tdjackey for reporting. +- Gateway/session reset auth: split conversation `/new` and `/reset` handling away from the admin-only `sessions.reset` control-plane RPC so write-scoped gateway callers can no longer reach the privileged reset path through `agent`. Thanks @tdjackey for reporting. +- Security/plugin runtime: stop unauthenticated plugin HTTP routes from inheriting synthetic admin gateway scopes when they call `runtime.subagent.*`, so admin-only methods like `sessions.delete` stay blocked without gateway auth. +- Security/nodes: treat the `nodes` agent tool as owner-only fallback policy so non-owner senders cannot reach paired-node approval or invoke paths through the shared tool set. +- Security/external content: treat whitespace-delimited `EXTERNAL UNTRUSTED CONTENT` boundary markers like underscore-delimited variants so prompt wrappers cannot bypass marker sanitization. (#35983) Thanks @urianpaul94. +- Telegram/exec approvals: reject `/approve` commands aimed at other bots, keep deterministic approval prompts visible when tool-result delivery fails, and stop resolved exact IDs from matching other pending approvals by prefix. (#37233) Thanks @huntharo. 
+- Subagents/authority: persist leaf vs orchestrator control scope at spawn time and route tool plus slash-command control through shared ownership checks, so leaf sessions cannot regain orchestration privileges after restore or flat-key lookups. Thanks @tdjackey. +- ACP/ACPX plugin: bump the bundled `acpx` pin to `0.1.16` so plugin-local installs and strict version checks match the latest published CLI. (#41975) Thanks @dutifulbob. - ACP/sessions.patch: allow `spawnedBy` and `spawnDepth` lineage fields on ACP session keys so `sessions_spawn` with `runtime: "acp"` no longer fails during child-session setup. Fixes #40971. (#40995) thanks @xaeon2026. - ACP/stop reason mapping: resolve gateway chat `state: "error"` completions as ACP `end_turn` instead of `refusal` so transient backend failures are not surfaced as deliberate refusals. (#41187) thanks @pejmanjohn. - ACP/setSessionMode: propagate gateway `sessions.patch` failures back to ACP clients so rejected mode changes no longer return silent success. (#41185) thanks @pejmanjohn. -- Agents/embedded logs: add structured, sanitized lifecycle and failover observation events so overload and provider failures are easier to tail and filter. (#41336) thanks @altaywtf. -- iOS/gateway foreground recovery: reconnect immediately on foreground return after stale background sockets are torn down, so the app no longer stays disconnected until a later wake path happens. (#41384) Thanks @mbelinky. -- Cron/subagent followup: do not misclassify empty or `NO_REPLY` cron responses as interim acknowledgements that need a rerun, so deliberately silent cron jobs are no longer retried. (#41383) thanks @jackal092927. -- Auth/cooldowns: reset expired auth-profile cooldown error counters before computing the next backoff so stale on-disk counters do not re-escalate into long cooldown loops after expiry. (#41028) thanks @zerone0x. 
-- Gateway/node pending drain followup: keep `hasMore` true when the deferred baseline status item still needs delivery, and avoid allocating empty pending-work state for drain-only nodes with no queued work. (#41429) Thanks @mbelinky. - ACP/bridge mode: reject unsupported per-session MCP server setup and propagate rejected session-mode changes so IDE clients see explicit bridge limitations instead of silent success. (#41424) Thanks @mbelinky. - ACP/session UX: replay stored user and assistant text on `loadSession`, expose Gateway-backed session controls and metadata, and emit approximate session usage updates so IDE clients restore context more faithfully. (#41425) Thanks @mbelinky. - ACP/tool streaming: enrich `tool_call` and `tool_call_update` events with best-effort text content and file-location hints so IDE clients can follow bridge tool activity more naturally. (#41442) Thanks @mbelinky. - ACP/runtime attachments: forward normalized inbound image attachments into ACP runtime turns so ACPX sessions can preserve image prompt content on the runtime path. (#41427) Thanks @mbelinky. - ACP/regressions: add gateway RPC coverage for ACP lineage patching, ACPX runtime coverage for image prompt serialization, and an operator smoke-test procedure for live ACP spawn verification. (#41456) Thanks @mbelinky. -- Agents/billing recovery: probe single-provider billing cooldowns on the existing throttle so topping up credits can recover without a manual gateway restart. (#41422) thanks @altaywtf. - ACP/follow-up hardening: make session restore and prompt completion degrade gracefully on transcript/update failures, enforce bounded tool-location traversal, and skip non-image ACPX turns the runtime cannot serialize. (#41464) Thanks @mbelinky. -- Agents/fallback observability: add structured, sanitized model-fallback decision and auth-profile failure-state events with correlated run IDs so cooldown probes and failover paths are easier to trace in logs. (#41337) thanks @altaywtf. 
-- Protocol/Swift model sync: regenerate pending node work Swift bindings after the landed `node.pending.*` schema additions so generated protocol artifacts are consistent again. (#41477) Thanks @mbelinky. -- Discord/reply chunking: resolve the effective `maxLinesPerMessage` config across live reply paths and preserve `chunkMode` in the fast send path so long Discord replies no longer split unexpectedly at the default 17-line limit. (#40133) thanks @rbutera. -- Logging/probe observations: suppress structured embedded and model-fallback probe warnings on the console without hiding error or fatal output. (#41338) thanks @altaywtf. -- Agents/fallback: treat HTTP 499 responses as transient in both raw-text and structured failover paths so Anthropic-style client-closed overload responses trigger model fallback reliably. (#41468) thanks @zeroasterisk. +- ACP/sessions_spawn: implicitly stream `mode="run"` ACP spawns to parent only for eligible subagent orchestrator sessions (heartbeat `target: "last"` with a usable session-local route), restoring parent progress relays without thread binding. (#42404) Thanks @davidguttman. +- ACP/main session aliases: canonicalize `main` before ACP session lookup so restarted ACP main sessions rehydrate instead of failing closed with `Session is not ACP-enabled: main`. (#43285, fixes #25692) - Plugins/context-engine model auth: expose `runtime.modelAuth` and plugin-sdk auth helpers so plugins can resolve provider/model API keys through the normal auth pipeline. (#41090) thanks @xinhuagu. -- CLI/memory teardown: close cached memory search/index managers in the one-shot CLI shutdown path so watcher-backed memory caches no longer keep completed CLI runs alive after output finishes. (#40389) thanks @Julbarth. -- Tools/web search: treat Brave `llm-context` grounding snippets as plain strings so `web_search` no longer returns empty snippet arrays in LLM Context mode. (#41387) thanks @zheliu2. 
-- Telegram/exec approvals: reject `/approve` commands aimed at other bots, keep deterministic approval prompts visible when tool-result delivery fails, and stop resolved exact IDs from matching other pending approvals by prefix. (#37233) Thanks @huntharo. -- Control UI/Sessions: restore single-column session table collapse on narrow viewport or container widths by moving the responsive table override next to the base grid rule and enabling inline-size container queries. (#12175) Thanks @benjipeng. -- Telegram/final preview delivery: split active preview lifecycle from cleanup retention so missing archived preview edits avoid duplicate fallback sends without clearing the live preview or blocking later in-place finalization. (#41662) thanks @hougangdev. +- Hooks/plugin context parity followup: pass `trigger` and `channelId` through embedded `llm_input`, `agent_end`, and `llm_output` hook contexts so plugins receive the same agent metadata across hook phases. (#42362) Thanks @zhoulf1006. +- Plugins/global hook runner: harden singleton state handling so shared global hook runner reuse does not leak or corrupt runner state across executions. (#40184) Thanks @vincentkoc. +- Context engine/tests: add bundled-registry regression coverage for cross-chunk resolution, plugin-sdk re-exports, and concurrent chunk registration. (#40460) thanks @dsantoreis. +- Agents/embedded runner: bound compaction retry waiting and drain embedded runs during SIGUSR1 restart so session lanes recover instead of staying blocked behind compaction. (#40324) thanks @cgdusek. +- Agents/embedded logs: add structured, sanitized lifecycle and failover observation events so overload and provider failures are easier to tail and filter. (#41336) thanks @altaywtf. +- Agents/embedded overload logs: include the failing model and provider in error-path console output, with lifecycle regression coverage for the rendered and sanitized `consoleMessage`. (#41236) thanks @jiarung. 
+- Agents/fallback observability: add structured, sanitized model-fallback decision and auth-profile failure-state events with correlated run IDs so cooldown probes and failover paths are easier to trace in logs. (#41337) Thanks @altaywtf. +- Logging/probe observations: suppress structured embedded and model-fallback probe warnings on the console without hiding error or fatal output. (#41338) Thanks @altaywtf. +- Agents/context-engine compaction: guard thrown engine-owned overflow compaction attempts and fire compaction hooks for `ownsCompaction` engines so overflow recovery no longer crashes and plugin subscribers still observe compact runs. (#41361) Thanks @davidrudduck. +- Gateway/node pending drain followup: keep `hasMore` true when the deferred baseline status item still needs delivery, and avoid allocating empty pending-work state for drain-only nodes with no queued work. (#41429) Thanks @mbelinky. +- Protocol/Swift model sync: regenerate pending node work Swift bindings after the landed `node.pending.*` schema additions so generated protocol artifacts are consistent again. (#41477) Thanks @mbelinky. +- Cron/subagent followup: do not misclassify empty or `NO_REPLY` cron responses as interim acknowledgements that need a rerun, so deliberately silent cron jobs are no longer retried. (#41383) Thanks @jackal092927. - Cron/state errors: record `lastErrorReason` in cron job state and keep the gateway schema aligned with the full failover-reason set, including regression coverage for protocol conformance. (#14382) thanks @futuremind2026. -- Tools/web search: recover OpenRouter Perplexity citation extraction from `message.annotations` when chat-completions responses omit top-level citations. (#40881) Thanks @laurieluo. -- Security/external content: treat whitespace-delimited `EXTERNAL UNTRUSTED CONTENT` boundary markers like underscore-delimited variants so prompt wrappers cannot bypass marker sanitization. (#35983) Thanks @urianpaul94. 
-- Telegram/network env-proxy: apply configured transport policy to proxied HTTPS dispatchers as well as direct `NO_PROXY` bypasses, so resolver-scoped IPv4 fallback and network settings work consistently for env-proxied Telegram traffic. (#40740) Thanks @sircrumpet. -- Agents/memory flush: forward `memoryFlushWritePath` through `runEmbeddedPiAgent` so memory-triggered flush turns keep the append-only write guard without aborting before tool setup. Follows up on #38574. (#41761) Thanks @frankekn. +- Browser/Browserbase 429 handling: surface stable no-retry rate-limit guidance without buffering discarded HTTP 429 response bodies from remote browser services. (#40491) thanks @mvanhorn. - CI/CodeQL Swift toolchain: select Xcode 26.1 before installing Swift build tools so the CodeQL Swift job uses Swift tools 6.2 on `macos-latest`. (#41787) thanks @BunsDev. - Sandbox/subagents: pass the real configured workspace through `sessions_spawn` inheritance when a parent agent runs in a copied-workspace sandbox, so child `/agent` mounts point at the configured workspace instead of the parent sandbox copy. (#40757) Thanks @dsantoreis. -- Mattermost/plugin send actions: normalize direct `replyTo` fallback handling so threaded plugin sends trim blank IDs and reuse the correct reply target again. (#41176) Thanks @hnykda. -- MS Teams/allowlist resolution: use the General channel conversation ID as the resolved team key (with Graph GUID fallback) so Bot Framework runtime `channelData.team.id` matching works for team and team/channel allowlist entries. (#41838) Thanks @BradGroux. -- Mattermost/Markdown formatting: preserve first-line indentation when stripping bot mentions so nested list items and indented code blocks keep their structure, and render Mattermost tables natively by default instead of fenced-code fallback. (#18655) thanks @echo931. 
- Agents/fallback cooldown probing: cap cooldown-bypass probing to one attempt per provider per fallback run so multi-model same-provider cooldown chains can continue to cross-provider fallbacks instead of repeatedly stalling on duplicate cooldown probes. (#41711) Thanks @cgdusek. - Telegram/direct delivery: bridge direct delivery sends to internal `message:sent` hooks so internal hook listeners observe successful Telegram deliveries. (#40185) Thanks @vincentkoc. -- Plugins/global hook runner: harden singleton state handling so shared global hook runner reuse does not leak or corrupt runner state across executions. (#40184) Thanks @vincentkoc. -- Agents/fallback: recognize Poe `402 You've used up your points!` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#42278) Thanks @CryUshio. -- Telegram/outbound HTML sends: chunk long HTML-mode messages, preserve plain-text fallback and silent-delivery params across retries, and cut over to plain text when HTML chunk planning cannot safely preserve the full message. (#42240) thanks @obviyus. -- Agents/embedded overload logs: include the failing model and provider in error-path console output, with lifecycle regression coverage for the rendered and sanitized `consoleMessage`. (#41236) thanks @jiarung. -- Agents/failover: treat Gemini `MALFORMED_RESPONSE` stop reasons as retryable timeouts so preview-model enum drift falls back cleanly instead of crashing the run, without also reclassifying malformed function-call errors. (#42292) Thanks @jnMetaCode. -- Discord/Telegram outbound runtime config: thread runtime-resolved config through Discord and Telegram send paths so SecretRef-based credentials stay resolved during message delivery. (#42352) Thanks @joshavant. -- Secrets/SecretRef: reject exec SecretRef traversal ids across schema, runtime, and gateway. (#42370) Thanks @joshavant. 
-- Telegram/docs: clarify that `channels.telegram.groups` allowlists chats while `groupAllowFrom` allowlists users inside those chats, and point invalid negative chat IDs at the right config key. (#42451) Thanks @altaywtf. -- Models/Alibaba Cloud Model Studio: wire `MODELSTUDIO_API_KEY` through shared env auth, implicit provider discovery, and shell-env fallback so onboarding works outside the wizard too. (#40634) Thanks @pomelo-nwu. -- Subagents/authority: persist leaf vs orchestrator control scope at spawn time and route tool plus slash-command control through shared ownership checks, so leaf sessions cannot regain orchestration privileges after restore or flat-key lookups. Thanks @tdjackey. -- ACP/sessions_spawn: implicitly stream `mode="run"` ACP spawns to parent only for eligible subagent orchestrator sessions (heartbeat `target: "last"` with a usable session-local route), restoring parent progress relays without thread binding. (#42404) Thanks @davidguttman. -- Sessions/reset model recompute: clear stale runtime model, context-token, and system-prompt metadata before session resets recompute the replacement session, so resets pick up current defaults and explicit overrides instead of reusing old runtime model state. (#41173) thanks @PonyX-lab. -- Browser/Browserbase 429 handling: surface stable no-retry rate-limit guidance without buffering discarded HTTP 429 response bodies from remote browser services. (#40491) thanks @mvanhorn. -- Gateway/auth: allow one trusted device-token retry on shared-token mismatch with recovery hints to prevent reconnect churn during token drift. (#42507) Thanks @joshavant. -- Channels/allowlists: remove stale matcher caching so same-array allowlist edits and wildcard replacements take effect immediately, with regression coverage for in-place mutation cases. 
-- Gateway/auth: fail closed when local `gateway.auth.*` SecretRefs are configured but unavailable, instead of silently falling back to `gateway.remote.*` credentials in local mode. (#42672) Thanks @joshavant. -- Sandbox/fs bridge: pin staged writes to verified parent directories so temporary write files cannot materialize outside the allowed mount before atomic replace. Thanks @tdjackey. -- Commands/config writes: enforce `configWrites` against both the originating account and the targeted account scope for `/config` and config-backed `/allowlist` edits, blocking sibling-account mutations while preserving gateway `operator.admin` flows. Thanks @tdjackey for reporting. -- Security/system.run: fail closed for approval-backed interpreter/runtime commands when OpenClaw cannot bind exactly one concrete local file operand, while extending best-effort direct-file binding to additional runtime forms. Thanks @tdjackey for reporting. -- Gateway/session reset auth: split conversation `/new` and `/reset` handling away from the admin-only `sessions.reset` control-plane RPC so write-scoped gateway callers can no longer reach the privileged reset path through `agent`. Thanks @tdjackey for reporting. +- Dependencies: refresh workspace dependencies except the pinned Carbon package, and harden ACP session-config writes against non-string SDK values so newer ACP clients fail fast instead of tripping type/runtime mismatches. +- Telegram/polling restarts: clear bounded cleanup timeout handles after `runner.stop()` and `bot.stop()` settle so stall recovery no longer leaves stray 15-second timers behind on clean shutdown. (#43188) thanks @kyohwang. ## 2026.3.8 @@ -154,12 +212,16 @@ Docs: https://docs.openclaw.ai - Cron/owner-only tools: pass trusted isolated cron runs into the embedded agent with owner context so `cron`/`gateway` tooling remains available after the owner-auth hardening narrowed direct-message ownership inference. 
- Browser/SSRF: block private-network intermediate redirect hops in strict browser navigation flows and fail closed when remote tab-open paths cannot inspect redirect chains. Thanks @zpbrent. - MS Teams/authz: keep `groupPolicy: "allowlist"` enforcing sender allowlists even when a team/channel route allowlist is configured, so route matches no longer widen group access to every sender in that route. Thanks @zpbrent. +- Security/Gateway: block `device.token.rotate` from minting operator scopes broader than the caller session already holds, closing the critical paired-device token privilege escalation reported as GHSA-4jpw-hj22-2xmc. - Security/system.run: bind approved `bun` and `deno run` script operands to on-disk file snapshots so post-approval script rewrites are denied before execution. - Skills/download installs: pin the validated per-skill tools root before writing downloaded archives, so rebinding the lexical tools path cannot redirect download writes outside the intended tools directory. Thanks @tdjackey. - Control UI/Debug: replace the Manual RPC free-text method field with a sorted dropdown sourced from gateway-advertised methods, and stack the form vertically for narrower layouts. (#14967) thanks @rixau. - Auth/profile resolution: log debug details when auto-discovered auth profiles fail during provider API-key resolution, so `--debug` output surfaces the real refresh/keychain/credential-store failure instead of only the generic missing-key message. (#41271) thanks @he-yufeng. - ACP/cancel scoping: scope `chat.abort` and shared-session ACP event routing by `runId` so one session cannot cancel or consume another session's run when they share the same gateway session key. (#41331) Thanks @pejmanjohn. - SecretRef/models: harden custom/provider secret persistence and reuse across models.json snapshots, merge behavior, runtime headers, and secret audits. (#42554) Thanks @joshavant. 
+- macOS/browser proxy: serialize non-GET browser proxy request bodies through `AnyCodable.foundationValue` so nested JSON bodies no longer crash the macOS app with `Invalid type in JSON write (__SwiftValue)`. (#43069) Thanks @Effet. +- CLI/skills tables: keep terminal table borders aligned for wide graphemes, use full reported terminal width, and switch a few ambiguous skill icons to Terminal-safe emoji so `openclaw skills` renders more consistently in Terminal.app and iTerm. Thanks @vincentkoc. +- Memory/Gemini: normalize returned Gemini embeddings across direct query, direct batch, and async batch paths so memory search uses consistent vector handling for Gemini too. (#43409) Thanks @gumadeiras. ## 2026.3.7 @@ -520,6 +582,7 @@ Docs: https://docs.openclaw.ai - Browser/config schema: accept `browser.profiles.*.driver: "openclaw"` while preserving legacy `"clawd"` compatibility in validated config. (#39374; based on #35621) Thanks @gambletan and @ingyukoh. - Memory flush/bootstrap file protection: restrict memory-flush runs to append-only `read`/`write` tools and route host-side memory appends through root-enforced safe file handles so flush turns cannot overwrite bootstrap files via `exec` or unsafe raw rewrites. (#38574) Thanks @frankekn. - Mattermost/DM media uploads: resolve bare 26-character Mattermost IDs user-first for direct messages so media sends no longer fail with `403 Forbidden` when targets are configured as unprefixed user IDs. (#29925) Thanks @teconomix. +- Voice-call/OpenAI TTS config parity: add missing `speed`, `instructions`, and `baseUrl` fields to the OpenAI TTS config schema and gate `instructions` to supported models so voice-call overrides validate and route cleanly through core TTS. (#39226) Thanks @ademczuk. ## 2026.3.2 @@ -1027,6 +1090,7 @@ Docs: https://docs.openclaw.ai - Browser/Navigate: resolve the correct `targetId` in navigate responses after renderer swaps. (#25326) Thanks @stone-jin and @vincentkoc. 
- FS/Sandbox workspace boundaries: add a dedicated `outside-workspace` safe-open error code for root-escape checks, and propagate specific outside-workspace messages across edit/browser/media consumers instead of generic not-found/invalid-path fallbacks. (#29715) Thanks @YuzuruS. - Diagnostics/Stuck session signal: add configurable stuck-session warning threshold via `diagnostics.stuckSessionWarnMs` (default 120000ms) to reduce false-positive warnings on long multi-tool turns. (#31032) +- Agents/error classification: check billing errors before context overflow heuristics in the agent runner catch block so spend-limit and quota errors show the billing-specific message instead of being misclassified as "Context overflow: prompt too large". (#40409) Thanks @ademczuk. ## 2026.2.26 @@ -3999,6 +4063,7 @@ Thanks @AlexMikhalev, @CoreyH, @John-Rood, @KrauseFx, @MaudeBot, @Nachx639, @Nic - Gateway/Daemon/Doctor: atomic config writes; repair gateway service entrypoint + install switches; non-interactive legacy migrations; systemd unit alignment + KillMode=process; node bridge keepalive/pings; Launch at Login persistence; bundle MoltbotKit resources + Swift 6.2 compat dylib; relay version check + remove smoke test; regen Swift GatewayModels + keep agent provider string; cron jobId alias + channel alias migration + main session key normalization; heartbeat Telegram accountId resolution; avoid WhatsApp fallback for internal runs; gateway listener error wording; serveBaseUrl param; honor gateway --dev; fix wide-area discovery updates; align agents.defaults schema; provider account metadata in daemon status; refresh Carbon patch for gateway fixes; restore doctor prompter initialValue handling. 
- Control UI/TUI: persist per-session verbose off + hide tool cards; logs tab opens at bottom; relative asset paths + landing cleanup; session labels lookup/persistence; stop pinning main session in recents; start logs at bottom; TUI status bar refresh + timeout handling + hide reasoning label when off. - Onboarding/Configure: QuickStart single-select provider picker; avoid Codex CLI false-expiry warnings; clarify WhatsApp owner prompt; fix Minimax hosted onboarding (agents.defaults + msteams heartbeat target); remove configure Control UI prompt; honor gateway --dev flag. +- Agent loop: guard overflow compaction throws and restore compaction hooks for engine-owned context engines. (#41361) — thanks @davidrudduck ### Maintenance diff --git a/apps/android/app/build.gradle.kts b/apps/android/app/build.gradle.kts index 3b52bcf50de..32306780c72 100644 --- a/apps/android/app/build.gradle.kts +++ b/apps/android/app/build.gradle.kts @@ -63,8 +63,8 @@ android { applicationId = "ai.openclaw.app" minSdk = 31 targetSdk = 36 - versionCode = 202603090 - versionName = "2026.3.9" + versionCode = 202603110 + versionName = "2026.3.11" ndk { // Support all major ABIs — native libs are tiny (~47 KB per ABI) abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64") diff --git a/apps/ios/ActivityWidget/Info.plist b/apps/ios/ActivityWidget/Info.plist index 4c2d89e1566..4c965121bf9 100644 --- a/apps/ios/ActivityWidget/Info.plist +++ b/apps/ios/ActivityWidget/Info.plist @@ -17,9 +17,9 @@ CFBundlePackageType XPC! CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) NSExtension NSExtensionPointIdentifier diff --git a/apps/ios/Config/Signing.xcconfig b/apps/ios/Config/Signing.xcconfig index 1285d2a38a4..4fef287a09d 100644 --- a/apps/ios/Config/Signing.xcconfig +++ b/apps/ios/Config/Signing.xcconfig @@ -1,10 +1,12 @@ // Shared iOS signing defaults for local development + CI. 
+#include "Version.xcconfig" + OPENCLAW_IOS_DEFAULT_TEAM = Y5PE65HELJ OPENCLAW_IOS_SELECTED_TEAM = $(OPENCLAW_IOS_DEFAULT_TEAM) -OPENCLAW_APP_BUNDLE_ID = ai.openclaw.ios -OPENCLAW_WATCH_APP_BUNDLE_ID = ai.openclaw.ios.watchkitapp -OPENCLAW_WATCH_EXTENSION_BUNDLE_ID = ai.openclaw.ios.watchkitapp.extension -OPENCLAW_ACTIVITY_WIDGET_BUNDLE_ID = ai.openclaw.ios.activitywidget +OPENCLAW_APP_BUNDLE_ID = ai.openclaw.client +OPENCLAW_WATCH_APP_BUNDLE_ID = ai.openclaw.client.watchkitapp +OPENCLAW_WATCH_EXTENSION_BUNDLE_ID = ai.openclaw.client.watchkitapp.extension +OPENCLAW_ACTIVITY_WIDGET_BUNDLE_ID = ai.openclaw.client.activitywidget // Local contributors can override this by running scripts/ios-configure-signing.sh. // Keep include after defaults: xcconfig is evaluated top-to-bottom. diff --git a/apps/ios/Config/Version.xcconfig b/apps/ios/Config/Version.xcconfig new file mode 100644 index 00000000000..db38e86df80 --- /dev/null +++ b/apps/ios/Config/Version.xcconfig @@ -0,0 +1,8 @@ +// Shared iOS version defaults. +// Generated overrides live in build/Version.xcconfig (git-ignored). + +OPENCLAW_GATEWAY_VERSION = 0.0.0 +OPENCLAW_MARKETING_VERSION = 0.0.0 +OPENCLAW_BUILD_VERSION = 0 + +#include? "../build/Version.xcconfig" diff --git a/apps/ios/README.md b/apps/ios/README.md index c7c501fcbff..6eb35a1d639 100644 --- a/apps/ios/README.md +++ b/apps/ios/README.md @@ -1,15 +1,12 @@ # OpenClaw iOS (Super Alpha) -NO TEST FLIGHT AVAILABLE AT THIS POINT - This iPhone app is super-alpha and internal-use only. It connects to an OpenClaw Gateway as a `role: node`. ## Distribution Status -NO TEST FLIGHT AVAILABLE AT THIS POINT - -- Current distribution: local/manual deploy from source via Xcode. -- App Store flow is not part of the current internal development path. +- Public distribution: not available. +- Internal beta distribution: local archive + TestFlight upload via Fastlane. +- Local/manual deploy from source via Xcode remains the default development path. 
## Super-Alpha Disclaimer @@ -50,6 +47,45 @@ Shortcut command (same flow + open project): pnpm ios:open ``` +## Local Beta Release Flow + +Prereqs: + +- Xcode 16+ +- `pnpm` +- `xcodegen` +- `fastlane` +- Apple account signed into Xcode for automatic signing/provisioning +- App Store Connect API key set up in Keychain via `scripts/ios-asc-keychain-setup.sh` when auto-resolving a beta build number or uploading to TestFlight + +Release behavior: + +- Local development keeps using unique per-developer bundle IDs from `scripts/ios-configure-signing.sh`. +- Beta release uses canonical `ai.openclaw.client*` bundle IDs through a temporary generated xcconfig in `apps/ios/build/BetaRelease.xcconfig`. +- The beta flow does not modify `apps/ios/.local-signing.xcconfig` or `apps/ios/LocalSigning.xcconfig`. +- Root `package.json.version` is the only version source for iOS. +- A root version like `2026.3.11-beta.1` becomes: + - `CFBundleShortVersionString = 2026.3.11` + - `CFBundleVersion = next TestFlight build number for 2026.3.11` + +Archive without upload: + +```bash +pnpm ios:beta:archive +``` + +Archive and upload to TestFlight: + +```bash +pnpm ios:beta +``` + +If you need to force a specific build number: + +```bash +pnpm ios:beta -- --build-number 7 +``` + ## APNs Expectations For Local/Manual Builds - The app calls `registerForRemoteNotifications()` at launch. diff --git a/apps/ios/ShareExtension/Info.plist b/apps/ios/ShareExtension/Info.plist index 90a7e09e0fc..9469daa08a8 100644 --- a/apps/ios/ShareExtension/Info.plist +++ b/apps/ios/ShareExtension/Info.plist @@ -17,9 +17,9 @@ CFBundlePackageType XPC! 
CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) NSExtension NSExtensionAttributes diff --git a/apps/ios/Signing.xcconfig b/apps/ios/Signing.xcconfig index 5966d6e2c2f..d6acc35dee8 100644 --- a/apps/ios/Signing.xcconfig +++ b/apps/ios/Signing.xcconfig @@ -2,6 +2,8 @@ // Auto-selected local team overrides live in .local-signing.xcconfig (git-ignored). // Manual local overrides can go in LocalSigning.xcconfig (git-ignored). +#include "Config/Version.xcconfig" + OPENCLAW_CODE_SIGN_STYLE = Manual OPENCLAW_DEVELOPMENT_TEAM = Y5PE65HELJ diff --git a/apps/ios/Sources/Info.plist b/apps/ios/Sources/Info.plist index 2f1f03d24a1..892d53e7ae9 100644 --- a/apps/ios/Sources/Info.plist +++ b/apps/ios/Sources/Info.plist @@ -23,7 +23,7 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleURLTypes @@ -36,7 +36,7 @@ CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) ITSAppUsesNonExemptEncryption NSAppTransportSecurity diff --git a/apps/ios/Sources/Model/NodeAppModel.swift b/apps/ios/Sources/Model/NodeAppModel.swift index babb6b449da..685b30f0887 100644 --- a/apps/ios/Sources/Model/NodeAppModel.swift +++ b/apps/ios/Sources/Model/NodeAppModel.swift @@ -2255,8 +2255,7 @@ extension NodeAppModel { from: payload) guard !decoded.actions.isEmpty else { return } self.pendingActionLogger.info( - "Pending actions pulled trigger=\(trigger, privacy: .public) " - + "count=\(decoded.actions.count, privacy: .public)") + "Pending actions pulled trigger=\(trigger, privacy: .public) count=\(decoded.actions.count, privacy: .public)") await self.applyPendingForegroundNodeActions(decoded.actions, trigger: trigger) } catch { // Best-effort only. 
@@ -2279,9 +2278,7 @@ extension NodeAppModel { paramsJSON: action.paramsJSON) let result = await self.handleInvoke(req) self.pendingActionLogger.info( - "Pending action replay trigger=\(trigger, privacy: .public) " - + "id=\(action.id, privacy: .public) command=\(action.command, privacy: .public) " - + "ok=\(result.ok, privacy: .public)") + "Pending action replay trigger=\(trigger, privacy: .public) id=\(action.id, privacy: .public) command=\(action.command, privacy: .public) ok=\(result.ok, privacy: .public)") guard result.ok else { return } let acked = await self.ackPendingForegroundNodeAction( id: action.id, @@ -2306,9 +2303,7 @@ extension NodeAppModel { return true } catch { self.pendingActionLogger.error( - "Pending action ack failed trigger=\(trigger, privacy: .public) " - + "id=\(id, privacy: .public) command=\(command, privacy: .public) " - + "error=\(String(describing: error), privacy: .public)") + "Pending action ack failed trigger=\(trigger, privacy: .public) id=\(id, privacy: .public) command=\(command, privacy: .public) error=\(String(describing: error), privacy: .public)") return false } } diff --git a/apps/ios/Tests/Info.plist b/apps/ios/Tests/Info.plist index 46e3fb97eb1..5bcf88ff5ad 100644 --- a/apps/ios/Tests/Info.plist +++ b/apps/ios/Tests/Info.plist @@ -17,8 +17,8 @@ CFBundlePackageType BNDL CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) diff --git a/apps/ios/WatchApp/Info.plist b/apps/ios/WatchApp/Info.plist index fa45d719b9c..3eea1e6ff09 100644 --- a/apps/ios/WatchApp/Info.plist +++ b/apps/ios/WatchApp/Info.plist @@ -17,9 +17,9 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) WKCompanionAppBundleIdentifier $(OPENCLAW_APP_BUNDLE_ID) WKWatchKitApp diff --git a/apps/ios/WatchExtension/Info.plist b/apps/ios/WatchExtension/Info.plist index 1d898d43757..87313064945 
100644 --- a/apps/ios/WatchExtension/Info.plist +++ b/apps/ios/WatchExtension/Info.plist @@ -15,9 +15,9 @@ CFBundleName $(PRODUCT_NAME) CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) NSExtension NSExtensionAttributes diff --git a/apps/ios/fastlane/Fastfile b/apps/ios/fastlane/Fastfile index 33e6bfa8adb..e7b286b4dd5 100644 --- a/apps/ios/fastlane/Fastfile +++ b/apps/ios/fastlane/Fastfile @@ -1,8 +1,11 @@ require "shellwords" require "open3" +require "json" default_platform(:ios) +BETA_APP_IDENTIFIER = "ai.openclaw.client" + def load_env_file(path) return unless File.exist?(path) @@ -84,6 +87,111 @@ def read_asc_key_content_from_keychain end end +def repo_root + File.expand_path("../../..", __dir__) +end + +def ios_root + File.expand_path("..", __dir__) +end + +def normalize_release_version(raw_value) + version = raw_value.to_s.strip.sub(/\Av/, "") + UI.user_error!("Missing root package.json version.") unless env_present?(version) + unless version.match?(/\A\d+\.\d+\.\d+(?:[.-]?beta[.-]\d+)?\z/i) + UI.user_error!("Invalid package.json version '#{raw_value}'. Expected 2026.3.11 or 2026.3.11-beta.1.") + end + + version +end + +def read_root_package_version + package_json_path = File.join(repo_root, "package.json") + UI.user_error!("Missing package.json at #{package_json_path}.") unless File.exist?(package_json_path) + + parsed = JSON.parse(File.read(package_json_path)) + normalize_release_version(parsed["version"]) +rescue JSON::ParserError => e + UI.user_error!("Invalid package.json at #{package_json_path}: #{e.message}") +end + +def short_release_version(version) + normalize_release_version(version).sub(/([.-]?beta[.-]\d+)\z/i, "") +end + +def shell_join(parts) + Shellwords.join(parts.compact) +end + +def resolve_beta_build_number(api_key:, version:) + explicit = ENV["IOS_BETA_BUILD_NUMBER"] + if env_present?(explicit) + UI.user_error!("Invalid IOS_BETA_BUILD_NUMBER '#{explicit}'. 
Expected digits only.") unless explicit.match?(/\A\d+\z/) + UI.message("Using explicit iOS beta build number #{explicit}.") + return explicit + end + + short_version = short_release_version(version) + latest_build = latest_testflight_build_number( + api_key: api_key, + app_identifier: BETA_APP_IDENTIFIER, + version: short_version, + initial_build_number: 0 + ) + next_build = latest_build.to_i + 1 + UI.message("Resolved iOS beta build number #{next_build} for #{short_version} (latest TestFlight build: #{latest_build}).") + next_build.to_s +end + +def beta_build_number_needs_asc_auth? + explicit = ENV["IOS_BETA_BUILD_NUMBER"] + !env_present?(explicit) +end + +def prepare_beta_release!(version:, build_number:) + script_path = File.join(repo_root, "scripts", "ios-beta-prepare.sh") + UI.message("Preparing iOS beta release #{version} (build #{build_number}).") + sh(shell_join(["bash", script_path, "--build-number", build_number])) + + beta_xcconfig = File.join(ios_root, "build", "BetaRelease.xcconfig") + UI.user_error!("Missing beta xcconfig at #{beta_xcconfig}.") unless File.exist?(beta_xcconfig) + + ENV["XCODE_XCCONFIG_FILE"] = beta_xcconfig + beta_xcconfig +end + +def build_beta_release(context) + version = context[:version] + output_directory = File.join("build", "beta") + archive_path = File.join(output_directory, "OpenClaw-#{version}.xcarchive") + + build_app( + project: "OpenClaw.xcodeproj", + scheme: "OpenClaw", + configuration: "Release", + export_method: "app-store", + clean: true, + skip_profile_detection: true, + build_path: "build", + archive_path: archive_path, + output_directory: output_directory, + output_name: "OpenClaw-#{version}.ipa", + xcargs: "-allowProvisioningUpdates", + export_xcargs: "-allowProvisioningUpdates", + export_options: { + signingStyle: "automatic" + } + ) + + { + archive_path: archive_path, + build_number: context[:build_number], + ipa_path: lane_context[SharedValues::IPA_OUTPUT_PATH], + short_version: context[:short_version], + 
version: version + } +end + platform :ios do private_lane :asc_api_key do load_env_file(File.join(__dir__, ".env")) @@ -132,38 +240,48 @@ platform :ios do api_key end - desc "Build + upload to TestFlight" + private_lane :prepare_beta_context do |options| + require_api_key = options[:require_api_key] == true + needs_api_key = require_api_key || beta_build_number_needs_asc_auth? + api_key = needs_api_key ? asc_api_key : nil + version = read_root_package_version + build_number = resolve_beta_build_number(api_key: api_key, version: version) + beta_xcconfig = prepare_beta_release!(version: version, build_number: build_number) + + { + api_key: api_key, + beta_xcconfig: beta_xcconfig, + build_number: build_number, + short_version: short_release_version(version), + version: version + } + end + + desc "Build a beta archive locally without uploading" + lane :beta_archive do + context = prepare_beta_context(require_api_key: false) + build = build_beta_release(context) + UI.success("Built iOS beta archive: version=#{build[:version]} short=#{build[:short_version]} build=#{build[:build_number]}") + build + ensure + ENV.delete("XCODE_XCCONFIG_FILE") + end + + desc "Build + upload a beta to TestFlight" lane :beta do - api_key = asc_api_key - - team_id = ENV["IOS_DEVELOPMENT_TEAM"] - if team_id.nil? || team_id.strip.empty? - helper_path = File.expand_path("../../../scripts/ios-team-id.sh", __dir__) - if File.exist?(helper_path) - # Keep CI/local compatibility where teams are present in keychain but not Xcode account metadata. - team_id = sh("IOS_ALLOW_KEYCHAIN_TEAM_FALLBACK=1 bash #{helper_path.shellescape}").strip - end - end - UI.user_error!("Missing IOS_DEVELOPMENT_TEAM (Apple Team ID). Add it to fastlane/.env or export it in your shell.") if team_id.nil? || team_id.strip.empty? 
- - build_app( - project: "OpenClaw.xcodeproj", - scheme: "OpenClaw", - export_method: "app-store", - clean: true, - skip_profile_detection: true, - xcargs: "DEVELOPMENT_TEAM=#{team_id} -allowProvisioningUpdates", - export_xcargs: "-allowProvisioningUpdates", - export_options: { - signingStyle: "automatic" - } - ) + context = prepare_beta_context(require_api_key: true) + build = build_beta_release(context) upload_to_testflight( - api_key: api_key, + api_key: context[:api_key], + ipa: build[:ipa_path], skip_waiting_for_build_processing: true, uses_non_exempt_encryption: false ) + + UI.success("Uploaded iOS beta: version=#{build[:version]} short=#{build[:short_version]} build=#{build[:build_number]}") + ensure + ENV.delete("XCODE_XCCONFIG_FILE") end desc "Upload App Store metadata (and optionally screenshots)" diff --git a/apps/ios/fastlane/SETUP.md b/apps/ios/fastlane/SETUP.md index 8dccf264b41..67d4fcc843a 100644 --- a/apps/ios/fastlane/SETUP.md +++ b/apps/ios/fastlane/SETUP.md @@ -32,9 +32,9 @@ ASC_KEYCHAIN_ACCOUNT=YOUR_MAC_USERNAME Optional app targeting variables (helpful if Fastlane cannot auto-resolve app by bundle): ```bash -ASC_APP_IDENTIFIER=ai.openclaw.ios +ASC_APP_IDENTIFIER=ai.openclaw.client # or -ASC_APP_ID=6760218713 +ASC_APP_ID=YOUR_APP_STORE_CONNECT_APP_ID ``` File-based fallback (CI/non-macOS): @@ -60,9 +60,37 @@ cd apps/ios fastlane ios auth_check ``` -Run: +ASC auth is only required when: + +- uploading to TestFlight +- auto-resolving the next build number from App Store Connect + +If you pass `--build-number` to `pnpm ios:beta:archive`, the local archive path does not need ASC auth. 
+ +Archive locally without upload: + +```bash +pnpm ios:beta:archive +``` + +Upload to TestFlight: + +```bash +pnpm ios:beta +``` + +Direct Fastlane entry point: ```bash cd apps/ios -fastlane beta +fastlane ios beta ``` + +Versioning rules: + +- Root `package.json.version` is the single source of truth for iOS +- Use `YYYY.M.D` for stable versions and `YYYY.M.D-beta.N` for beta versions +- Fastlane stamps `CFBundleShortVersionString` to `YYYY.M.D` +- Fastlane resolves `CFBundleVersion` as the next integer TestFlight build number for that short version +- The beta flow regenerates `apps/ios/OpenClaw.xcodeproj` from `apps/ios/project.yml` before archiving +- Local beta signing uses a temporary generated xcconfig and leaves local development signing overrides untouched diff --git a/apps/ios/fastlane/metadata/README.md b/apps/ios/fastlane/metadata/README.md index 74eb7df87d3..07e7824311f 100644 --- a/apps/ios/fastlane/metadata/README.md +++ b/apps/ios/fastlane/metadata/README.md @@ -6,7 +6,7 @@ This directory is used by `fastlane deliver` for App Store Connect text metadata ```bash cd apps/ios -ASC_APP_ID=6760218713 \ +ASC_APP_ID=YOUR_APP_STORE_CONNECT_APP_ID \ DELIVER_METADATA=1 fastlane ios metadata ``` diff --git a/apps/ios/project.yml b/apps/ios/project.yml index 0664db9c6be..91b2a8e46d1 100644 --- a/apps/ios/project.yml +++ b/apps/ios/project.yml @@ -107,8 +107,8 @@ targets: - CFBundleURLName: ai.openclaw.ios CFBundleURLSchemes: - openclaw - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" UILaunchScreen: {} UIApplicationSceneManifest: UIApplicationSupportsMultipleScenes: false @@ -168,8 +168,8 @@ targets: path: ShareExtension/Info.plist properties: CFBundleDisplayName: OpenClaw Share - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: 
"$(OPENCLAW_BUILD_VERSION)" NSExtension: NSExtensionPointIdentifier: com.apple.share-services NSExtensionPrincipalClass: "$(PRODUCT_MODULE_NAME).ShareViewController" @@ -205,8 +205,8 @@ targets: path: ActivityWidget/Info.plist properties: CFBundleDisplayName: OpenClaw Activity - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" NSSupportsLiveActivities: true NSExtension: NSExtensionPointIdentifier: com.apple.widgetkit-extension @@ -224,6 +224,7 @@ targets: Release: Config/Signing.xcconfig settings: base: + ASSETCATALOG_COMPILER_APPICON_NAME: AppIcon ENABLE_APPINTENTS_METADATA: NO ENABLE_APP_INTENTS_METADATA_GENERATION: NO PRODUCT_BUNDLE_IDENTIFIER: "$(OPENCLAW_WATCH_APP_BUNDLE_ID)" @@ -231,8 +232,8 @@ targets: path: WatchApp/Info.plist properties: CFBundleDisplayName: OpenClaw - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" WKCompanionAppBundleIdentifier: "$(OPENCLAW_APP_BUNDLE_ID)" WKWatchKitApp: true @@ -256,8 +257,8 @@ targets: path: WatchExtension/Info.plist properties: CFBundleDisplayName: OpenClaw - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" NSExtension: NSExtensionAttributes: WKAppBundleIdentifier: "$(OPENCLAW_WATCH_APP_BUNDLE_ID)" @@ -293,8 +294,8 @@ targets: path: Tests/Info.plist properties: CFBundleDisplayName: OpenClawTests - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" OpenClawLogicTests: type: bundle.unit-test @@ -319,5 +320,5 @@ targets: path: Tests/Info.plist properties: CFBundleDisplayName: OpenClawLogicTests - 
CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" diff --git a/apps/macos/Sources/OpenClaw/AppState.swift b/apps/macos/Sources/OpenClaw/AppState.swift index 5e8238ebe92..d503686ba57 100644 --- a/apps/macos/Sources/OpenClaw/AppState.swift +++ b/apps/macos/Sources/OpenClaw/AppState.swift @@ -600,30 +600,29 @@ final class AppState { private func syncGatewayConfigIfNeeded() { guard !self.isPreview, !self.isInitializing else { return } - let connectionMode = self.connectionMode - let remoteTarget = self.remoteTarget - let remoteIdentity = self.remoteIdentity - let remoteTransport = self.remoteTransport - let remoteUrl = self.remoteUrl - let remoteToken = self.remoteToken - let remoteTokenDirty = self.remoteTokenDirty - Task { @MainActor in - // Keep app-only connection settings local to avoid overwriting remote gateway config. - let synced = Self.syncedGatewayRoot( - currentRoot: OpenClawConfigFile.loadDict(), - connectionMode: connectionMode, - remoteTransport: remoteTransport, - remoteTarget: remoteTarget, - remoteIdentity: remoteIdentity, - remoteUrl: remoteUrl, - remoteToken: remoteToken, - remoteTokenDirty: remoteTokenDirty) - guard synced.changed else { return } - OpenClawConfigFile.saveDict(synced.root) + self.syncGatewayConfigNow() } } + @MainActor + func syncGatewayConfigNow() { + guard !self.isPreview, !self.isInitializing else { return } + + // Keep app-only connection settings local to avoid overwriting remote gateway config. 
+ let synced = Self.syncedGatewayRoot( + currentRoot: OpenClawConfigFile.loadDict(), + connectionMode: self.connectionMode, + remoteTransport: self.remoteTransport, + remoteTarget: self.remoteTarget, + remoteIdentity: self.remoteIdentity, + remoteUrl: self.remoteUrl, + remoteToken: self.remoteToken, + remoteTokenDirty: self.remoteTokenDirty) + guard synced.changed else { return } + OpenClawConfigFile.saveDict(synced.root) + } + func triggerVoiceEars(ttl: TimeInterval? = 5) { self.earBoostTask?.cancel() self.earBoostActive = true diff --git a/apps/macos/Sources/OpenClaw/ControlChannel.swift b/apps/macos/Sources/OpenClaw/ControlChannel.swift index aecf9539ef5..c4472f8f452 100644 --- a/apps/macos/Sources/OpenClaw/ControlChannel.swift +++ b/apps/macos/Sources/OpenClaw/ControlChannel.swift @@ -188,6 +188,10 @@ final class ControlChannel { return desc } + if let authIssue = RemoteGatewayAuthIssue(error: error) { + return authIssue.statusMessage + } + // If the gateway explicitly rejects the hello (e.g., auth/token mismatch), surface it. if let urlErr = error as? 
URLError, urlErr.code == .dataNotAllowed // used for WS close 1008 auth failures diff --git a/apps/macos/Sources/OpenClaw/GeneralSettings.swift b/apps/macos/Sources/OpenClaw/GeneralSettings.swift index b55ed439489..633879367ea 100644 --- a/apps/macos/Sources/OpenClaw/GeneralSettings.swift +++ b/apps/macos/Sources/OpenClaw/GeneralSettings.swift @@ -348,10 +348,18 @@ struct GeneralSettings: View { Text("Testing…") .font(.caption) .foregroundStyle(.secondary) - case .ok: - Label("Ready", systemImage: "checkmark.circle.fill") - .font(.caption) - .foregroundStyle(.green) + case let .ok(success): + VStack(alignment: .leading, spacing: 2) { + Label(success.title, systemImage: "checkmark.circle.fill") + .font(.caption) + .foregroundStyle(.green) + if let detail = success.detail { + Text(detail) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } case let .failed(message): Text(message) .font(.caption) @@ -518,7 +526,7 @@ struct GeneralSettings: View { private enum RemoteStatus: Equatable { case idle case checking - case ok + case ok(RemoteGatewayProbeSuccess) case failed(String) } @@ -558,114 +566,14 @@ extension GeneralSettings { @MainActor func testRemote() async { self.remoteStatus = .checking - let settings = CommandResolver.connectionSettings() - if self.state.remoteTransport == .direct { - let trimmedUrl = self.state.remoteUrl.trimmingCharacters(in: .whitespacesAndNewlines) - guard !trimmedUrl.isEmpty else { - self.remoteStatus = .failed("Set a gateway URL first") - return - } - guard Self.isValidWsUrl(trimmedUrl) else { - self.remoteStatus = .failed( - "Gateway URL must use wss:// for remote hosts (ws:// only for localhost)") - return - } - } else { - guard !settings.target.isEmpty else { - self.remoteStatus = .failed("Set an SSH target first") - return - } - - // Step 1: basic SSH reachability check - guard let sshCommand = Self.sshCheckCommand( - target: settings.target, - identity: settings.identity) - else { - 
self.remoteStatus = .failed("SSH target is invalid") - return - } - let sshResult = await ShellExecutor.run( - command: sshCommand, - cwd: nil, - env: nil, - timeout: 8) - - guard sshResult.ok else { - self.remoteStatus = .failed(self.formatSSHFailure(sshResult, target: settings.target)) - return - } + switch await RemoteGatewayProbe.run() { + case let .ready(success): + self.remoteStatus = .ok(success) + case let .authIssue(issue): + self.remoteStatus = .failed(issue.statusMessage) + case let .failed(message): + self.remoteStatus = .failed(message) } - - // Step 2: control channel health check - let originalMode = AppStateStore.shared.connectionMode - do { - try await ControlChannel.shared.configure(mode: .remote( - target: settings.target, - identity: settings.identity)) - let data = try await ControlChannel.shared.health(timeout: 10) - if decodeHealthSnapshot(from: data) != nil { - self.remoteStatus = .ok - } else { - self.remoteStatus = .failed("Control channel returned invalid health JSON") - } - } catch { - self.remoteStatus = .failed(error.localizedDescription) - } - - // Restore original mode if we temporarily switched - switch originalMode { - case .remote: - break - case .local: - try? await ControlChannel.shared.configure(mode: .local) - case .unconfigured: - await ControlChannel.shared.disconnect() - } - } - - private static func isValidWsUrl(_ raw: String) -> Bool { - GatewayRemoteConfig.normalizeGatewayUrl(raw) != nil - } - - private static func sshCheckCommand(target: String, identity: String) -> [String]? 
{ - guard let parsed = CommandResolver.parseSSHTarget(target) else { return nil } - let options = [ - "-o", "BatchMode=yes", - "-o", "ConnectTimeout=5", - "-o", "StrictHostKeyChecking=accept-new", - "-o", "UpdateHostKeys=yes", - ] - let args = CommandResolver.sshArguments( - target: parsed, - identity: identity, - options: options, - remoteCommand: ["echo", "ok"]) - return ["/usr/bin/ssh"] + args - } - - private func formatSSHFailure(_ response: Response, target: String) -> String { - let payload = response.payload.flatMap { String(data: $0, encoding: .utf8) } - let trimmed = payload? - .trimmingCharacters(in: .whitespacesAndNewlines) - .split(whereSeparator: \.isNewline) - .joined(separator: " ") - if let trimmed, - trimmed.localizedCaseInsensitiveContains("host key verification failed") - { - let host = CommandResolver.parseSSHTarget(target)?.host ?? target - return "SSH check failed: Host key verification failed. Remove the old key with " + - "`ssh-keygen -R \(host)` and try again." - } - if let trimmed, !trimmed.isEmpty { - if let message = response.message, message.hasPrefix("exit ") { - return "SSH check failed: \(trimmed) (\(message))" - } - return "SSH check failed: \(trimmed)" - } - if let message = response.message { - return "SSH check failed (\(message))" - } - return "SSH check failed" } private func revealLogs() { diff --git a/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift b/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift index 2981a60bbf7..932c9fc5e61 100644 --- a/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift +++ b/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift @@ -17,6 +17,7 @@ enum HostEnvSecurityPolicy { "BASH_ENV", "ENV", "GIT_EXTERNAL_DIFF", + "GIT_EXEC_PATH", "SHELL", "SHELLOPTS", "PS4", diff --git a/apps/macos/Sources/OpenClaw/NodeMode/MacNodeBrowserProxy.swift b/apps/macos/Sources/OpenClaw/NodeMode/MacNodeBrowserProxy.swift index 0da6510f608..367907f9fb7 100644 --- 
a/apps/macos/Sources/OpenClaw/NodeMode/MacNodeBrowserProxy.swift +++ b/apps/macos/Sources/OpenClaw/NodeMode/MacNodeBrowserProxy.swift @@ -146,8 +146,8 @@ actor MacNodeBrowserProxy { request.setValue(password, forHTTPHeaderField: "x-openclaw-password") } - if method != "GET", let body = params.body?.value { - request.httpBody = try JSONSerialization.data(withJSONObject: body, options: [.fragmentsAllowed]) + if method != "GET", let body = params.body { + request.httpBody = try JSONSerialization.data(withJSONObject: body.foundationValue, options: [.fragmentsAllowed]) request.setValue("application/json", forHTTPHeaderField: "Content-Type") } diff --git a/apps/macos/Sources/OpenClaw/Onboarding.swift b/apps/macos/Sources/OpenClaw/Onboarding.swift index 4eae7e092b0..ca183d35311 100644 --- a/apps/macos/Sources/OpenClaw/Onboarding.swift +++ b/apps/macos/Sources/OpenClaw/Onboarding.swift @@ -9,6 +9,13 @@ enum UIStrings { static let welcomeTitle = "Welcome to OpenClaw" } +enum RemoteOnboardingProbeState: Equatable { + case idle + case checking + case ok(RemoteGatewayProbeSuccess) + case failed(String) +} + @MainActor final class OnboardingController { static let shared = OnboardingController() @@ -72,6 +79,9 @@ struct OnboardingView: View { @State var didAutoKickoff = false @State var showAdvancedConnection = false @State var preferredGatewayID: String? + @State var remoteProbeState: RemoteOnboardingProbeState = .idle + @State var remoteAuthIssue: RemoteGatewayAuthIssue? 
+ @State var suppressRemoteProbeReset = false @State var gatewayDiscovery: GatewayDiscoveryModel @State var onboardingChatModel: OpenClawChatViewModel @State var onboardingSkillsModel = SkillsSettingsModel() diff --git a/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift b/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift index 8f4d16420bc..0beeb2bdc27 100644 --- a/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift +++ b/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift @@ -2,6 +2,7 @@ import AppKit import OpenClawChatUI import OpenClawDiscovery import OpenClawIPC +import OpenClawKit import SwiftUI extension OnboardingView { @@ -97,6 +98,11 @@ extension OnboardingView { self.gatewayDiscoverySection() + if self.shouldShowRemoteConnectionSection { + Divider().padding(.vertical, 4) + self.remoteConnectionSection() + } + self.connectionChoiceButton( title: "Configure later", subtitle: "Don’t start the Gateway yet.", @@ -109,6 +115,22 @@ extension OnboardingView { } } } + .onChange(of: self.state.connectionMode) { _, newValue in + guard Self.shouldResetRemoteProbeFeedback( + for: newValue, + suppressReset: self.suppressRemoteProbeReset) + else { return } + self.resetRemoteProbeFeedback() + } + .onChange(of: self.state.remoteTransport) { _, _ in + self.resetRemoteProbeFeedback() + } + .onChange(of: self.state.remoteTarget) { _, _ in + self.resetRemoteProbeFeedback() + } + .onChange(of: self.state.remoteUrl) { _, _ in + self.resetRemoteProbeFeedback() + } } private var localGatewaySubtitle: String { @@ -199,25 +221,6 @@ extension OnboardingView { .pickerStyle(.segmented) .frame(width: fieldWidth) } - GridRow { - Text("Gateway token") - .font(.callout.weight(.semibold)) - .frame(width: labelWidth, alignment: .leading) - SecureField("remote gateway auth token (gateway.remote.token)", text: self.$state.remoteToken) - .textFieldStyle(.roundedBorder) - .frame(width: fieldWidth) - } - if self.state.remoteTokenUnsupported { - GridRow { - Text("") - 
.frame(width: labelWidth, alignment: .leading) - Text( - "The current gateway.remote.token value is not plain text. OpenClaw for macOS cannot use it directly; enter a plaintext token here to replace it.") - .font(.caption) - .foregroundStyle(.orange) - .frame(width: fieldWidth, alignment: .leading) - } - } if self.state.remoteTransport == .direct { GridRow { Text("Gateway URL") @@ -289,6 +292,248 @@ extension OnboardingView { } } + private var shouldShowRemoteConnectionSection: Bool { + self.state.connectionMode == .remote || + self.showAdvancedConnection || + self.remoteProbeState != .idle || + self.remoteAuthIssue != nil || + Self.shouldShowRemoteTokenField( + showAdvancedConnection: self.showAdvancedConnection, + remoteToken: self.state.remoteToken, + remoteTokenUnsupported: self.state.remoteTokenUnsupported, + authIssue: self.remoteAuthIssue) + } + + private var shouldShowRemoteTokenField: Bool { + guard self.shouldShowRemoteConnectionSection else { return false } + return Self.shouldShowRemoteTokenField( + showAdvancedConnection: self.showAdvancedConnection, + remoteToken: self.state.remoteToken, + remoteTokenUnsupported: self.state.remoteTokenUnsupported, + authIssue: self.remoteAuthIssue) + } + + private var remoteProbePreflightMessage: String? { + switch self.state.remoteTransport { + case .direct: + let trimmedUrl = self.state.remoteUrl.trimmingCharacters(in: .whitespacesAndNewlines) + if trimmedUrl.isEmpty { + return "Select a nearby gateway or open Advanced to enter a gateway URL." + } + if GatewayRemoteConfig.normalizeGatewayUrl(trimmedUrl) == nil { + return "Gateway URL must use wss:// for remote hosts (ws:// only for localhost)." + } + return nil + case .ssh: + let trimmedTarget = self.state.remoteTarget.trimmingCharacters(in: .whitespacesAndNewlines) + if trimmedTarget.isEmpty { + return "Select a nearby gateway or open Advanced to enter an SSH target." 
+ } + return CommandResolver.sshTargetValidationMessage(trimmedTarget) + } + } + + private var canProbeRemoteConnection: Bool { + self.remoteProbePreflightMessage == nil && self.remoteProbeState != .checking + } + + @ViewBuilder + private func remoteConnectionSection() -> some View { + VStack(alignment: .leading, spacing: 10) { + HStack(alignment: .top, spacing: 12) { + VStack(alignment: .leading, spacing: 2) { + Text("Remote connection") + .font(.callout.weight(.semibold)) + Text("Checks the real remote websocket and auth handshake.") + .font(.caption) + .foregroundStyle(.secondary) + } + Spacer(minLength: 0) + Button { + Task { await self.probeRemoteConnection() } + } label: { + if self.remoteProbeState == .checking { + ProgressView() + .controlSize(.small) + .frame(minWidth: 120) + } else { + Text("Check connection") + .frame(minWidth: 120) + } + } + .buttonStyle(.borderedProminent) + .disabled(!self.canProbeRemoteConnection) + } + + if self.shouldShowRemoteTokenField { + self.remoteTokenField() + } + + if let message = self.remoteProbePreflightMessage, self.remoteProbeState != .checking { + Text(message) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + + self.remoteProbeStatusView() + + if let issue = self.remoteAuthIssue { + self.remoteAuthPromptView(issue: issue) + } + } + } + + private func remoteTokenField() -> some View { + VStack(alignment: .leading, spacing: 6) { + HStack(alignment: .center, spacing: 12) { + Text("Gateway token") + .font(.callout.weight(.semibold)) + .frame(width: 110, alignment: .leading) + SecureField("remote gateway auth token (gateway.remote.token)", text: self.$state.remoteToken) + .textFieldStyle(.roundedBorder) + .frame(maxWidth: 320) + } + Text("Used when the remote gateway requires token auth.") + .font(.caption) + .foregroundStyle(.secondary) + if self.state.remoteTokenUnsupported { + Text( + "The current gateway.remote.token value is not plain text. 
OpenClaw for macOS cannot use it directly; enter a plaintext token here to replace it.") + .font(.caption) + .foregroundStyle(.orange) + .fixedSize(horizontal: false, vertical: true) + } + } + } + + @ViewBuilder + private func remoteProbeStatusView() -> some View { + switch self.remoteProbeState { + case .idle: + EmptyView() + case .checking: + Text("Checking remote gateway…") + .font(.caption) + .foregroundStyle(.secondary) + case let .ok(success): + VStack(alignment: .leading, spacing: 2) { + Label(success.title, systemImage: "checkmark.circle.fill") + .font(.caption) + .foregroundStyle(.green) + if let detail = success.detail { + Text(detail) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + case let .failed(message): + if self.remoteAuthIssue == nil { + Text(message) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + } + + private func remoteAuthPromptView(issue: RemoteGatewayAuthIssue) -> some View { + let promptStyle = Self.remoteAuthPromptStyle(for: issue) + return HStack(alignment: .top, spacing: 10) { + Image(systemName: promptStyle.systemImage) + .font(.caption.weight(.semibold)) + .foregroundStyle(promptStyle.tint) + .frame(width: 16, alignment: .center) + .padding(.top, 1) + VStack(alignment: .leading, spacing: 4) { + Text(issue.title) + .font(.caption.weight(.semibold)) + Text(.init(issue.body)) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + if let footnote = issue.footnote { + Text(.init(footnote)) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + } + } + + @MainActor + private func probeRemoteConnection() async { + let originalMode = self.state.connectionMode + let shouldRestoreMode = originalMode != .remote + if shouldRestoreMode { + // Reuse the shared remote endpoint stack for probing without committing the user's mode choice. 
+ self.state.connectionMode = .remote + } + self.remoteProbeState = .checking + self.remoteAuthIssue = nil + defer { + if shouldRestoreMode { + self.suppressRemoteProbeReset = true + self.state.connectionMode = originalMode + self.suppressRemoteProbeReset = false + } + } + + switch await RemoteGatewayProbe.run() { + case let .ready(success): + self.remoteProbeState = .ok(success) + case let .authIssue(issue): + self.remoteAuthIssue = issue + self.remoteProbeState = .failed(issue.statusMessage) + case let .failed(message): + self.remoteProbeState = .failed(message) + } + } + + private func resetRemoteProbeFeedback() { + self.remoteProbeState = .idle + self.remoteAuthIssue = nil + } + + static func remoteAuthPromptStyle( + for issue: RemoteGatewayAuthIssue) + -> (systemImage: String, tint: Color) + { + switch issue { + case .tokenRequired: + return ("key.fill", .orange) + case .tokenMismatch: + return ("exclamationmark.triangle.fill", .orange) + case .gatewayTokenNotConfigured: + return ("wrench.and.screwdriver.fill", .orange) + case .passwordRequired: + return ("lock.slash.fill", .orange) + case .pairingRequired: + return ("link.badge.plus", .orange) + } + } + + static func shouldShowRemoteTokenField( + showAdvancedConnection: Bool, + remoteToken: String, + remoteTokenUnsupported: Bool, + authIssue: RemoteGatewayAuthIssue?) -> Bool + { + showAdvancedConnection || + remoteTokenUnsupported || + !remoteToken.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty || + authIssue?.showsTokenField == true + } + + static func shouldResetRemoteProbeFeedback( + for connectionMode: AppState.ConnectionMode, + suppressReset: Bool) -> Bool + { + !suppressReset && connectionMode != .remote + } + func gatewaySubtitle(for gateway: GatewayDiscoveryModel.DiscoveredGateway) -> String? { if self.state.remoteTransport == .direct { return GatewayDiscoveryHelpers.directUrl(for: gateway) ?? 
"Gateway pairing only" diff --git a/apps/macos/Sources/OpenClaw/RemoteGatewayProbe.swift b/apps/macos/Sources/OpenClaw/RemoteGatewayProbe.swift new file mode 100644 index 00000000000..f878d0f5e28 --- /dev/null +++ b/apps/macos/Sources/OpenClaw/RemoteGatewayProbe.swift @@ -0,0 +1,222 @@ +import Foundation +import OpenClawIPC +import OpenClawKit + +enum RemoteGatewayAuthIssue: Equatable { + case tokenRequired + case tokenMismatch + case gatewayTokenNotConfigured + case passwordRequired + case pairingRequired + + init?(error: Error) { + guard let authError = error as? GatewayConnectAuthError else { + return nil + } + switch authError.detail { + case .authTokenMissing: + self = .tokenRequired + case .authTokenMismatch: + self = .tokenMismatch + case .authTokenNotConfigured: + self = .gatewayTokenNotConfigured + case .authPasswordMissing, .authPasswordMismatch, .authPasswordNotConfigured: + self = .passwordRequired + case .pairingRequired: + self = .pairingRequired + default: + return nil + } + } + + var showsTokenField: Bool { + switch self { + case .tokenRequired, .tokenMismatch: + true + case .gatewayTokenNotConfigured, .passwordRequired, .pairingRequired: + false + } + } + + var title: String { + switch self { + case .tokenRequired: + "This gateway requires an auth token" + case .tokenMismatch: + "That token did not match the gateway" + case .gatewayTokenNotConfigured: + "This gateway host needs token setup" + case .passwordRequired: + "This gateway is using unsupported auth" + case .pairingRequired: + "This device needs pairing approval" + } + } + + var body: String { + switch self { + case .tokenRequired: + "Paste the token configured on the gateway host. On the gateway host, run `openclaw config get gateway.auth.token`. If the gateway uses an environment variable instead, use `OPENCLAW_GATEWAY_TOKEN`." + case .tokenMismatch: + "Check `gateway.auth.token` or `OPENCLAW_GATEWAY_TOKEN` on the gateway host and try again." 
+ case .gatewayTokenNotConfigured: + "This gateway is set to token auth, but no `gateway.auth.token` is configured on the gateway host. If the gateway uses an environment variable instead, set `OPENCLAW_GATEWAY_TOKEN` before starting the gateway." + case .passwordRequired: + "This onboarding flow does not support password auth yet. Reconfigure the gateway to use token auth, then retry." + case .pairingRequired: + "Approve this device from an already-paired OpenClaw client. In your OpenClaw chat, run `/pair approve`, then click **Check connection** again." + } + } + + var footnote: String? { + switch self { + case .tokenRequired, .gatewayTokenNotConfigured: + "No token yet? Generate one on the gateway host with `openclaw doctor --generate-gateway-token`, then set it as `gateway.auth.token`." + case .pairingRequired: + "If you do not have another paired OpenClaw client yet, approve the pending request on the gateway host with `openclaw devices approve`." + case .tokenMismatch, .passwordRequired: + nil + } + } + + var statusMessage: String { + switch self { + case .tokenRequired: + "This gateway requires an auth token from the gateway host." + case .tokenMismatch: + "Gateway token mismatch. Check gateway.auth.token or OPENCLAW_GATEWAY_TOKEN on the gateway host." + case .gatewayTokenNotConfigured: + "This gateway has token auth enabled, but no gateway.auth.token is configured on the host." + case .passwordRequired: + "This gateway uses password auth. Remote onboarding on macOS cannot collect gateway passwords yet." + case .pairingRequired: + "Pairing required. In an already-paired OpenClaw client, run /pair approve, then check the connection again." + } + } +} + +enum RemoteGatewayProbeResult: Equatable { + case ready(RemoteGatewayProbeSuccess) + case authIssue(RemoteGatewayAuthIssue) + case failed(String) +} + +struct RemoteGatewayProbeSuccess: Equatable { + let authSource: GatewayAuthSource? 
+ + var title: String { + switch self.authSource { + case .some(.deviceToken): + "Connected via paired device" + case .some(.sharedToken): + "Connected with gateway token" + case .some(.password): + "Connected with password" + case .some(GatewayAuthSource.none), nil: + "Remote gateway ready" + } + } + + var detail: String? { + switch self.authSource { + case .some(.deviceToken): + "This Mac used a stored device token. New or unpaired devices may still need the gateway token." + case .some(.sharedToken), .some(.password), .some(GatewayAuthSource.none), nil: + nil + } + } +} + +enum RemoteGatewayProbe { + @MainActor + static func run() async -> RemoteGatewayProbeResult { + AppStateStore.shared.syncGatewayConfigNow() + let settings = CommandResolver.connectionSettings() + let transport = AppStateStore.shared.remoteTransport + + if transport == .direct { + let trimmedUrl = AppStateStore.shared.remoteUrl.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmedUrl.isEmpty else { + return .failed("Set a gateway URL first") + } + guard self.isValidWsUrl(trimmedUrl) else { + return .failed("Gateway URL must use wss:// for remote hosts (ws:// only for localhost)") + } + } else { + let trimmedTarget = settings.target.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmedTarget.isEmpty else { + return .failed("Set an SSH target first") + } + if let validationMessage = CommandResolver.sshTargetValidationMessage(trimmedTarget) { + return .failed(validationMessage) + } + guard let sshCommand = self.sshCheckCommand(target: settings.target, identity: settings.identity) else { + return .failed("SSH target is invalid") + } + + let sshResult = await ShellExecutor.run( + command: sshCommand, + cwd: nil, + env: nil, + timeout: 8) + guard sshResult.ok else { + return .failed(self.formatSSHFailure(sshResult, target: settings.target)) + } + } + + do { + _ = try await GatewayConnection.shared.healthSnapshot(timeoutMs: 10_000) + let authSource = await 
GatewayConnection.shared.authSource() + return .ready(RemoteGatewayProbeSuccess(authSource: authSource)) + } catch { + if let authIssue = RemoteGatewayAuthIssue(error: error) { + return .authIssue(authIssue) + } + return .failed(error.localizedDescription) + } + } + + private static func isValidWsUrl(_ raw: String) -> Bool { + GatewayRemoteConfig.normalizeGatewayUrl(raw) != nil + } + + private static func sshCheckCommand(target: String, identity: String) -> [String]? { + guard let parsed = CommandResolver.parseSSHTarget(target) else { return nil } + let options = [ + "-o", "BatchMode=yes", + "-o", "ConnectTimeout=5", + "-o", "StrictHostKeyChecking=accept-new", + "-o", "UpdateHostKeys=yes", + ] + let args = CommandResolver.sshArguments( + target: parsed, + identity: identity, + options: options, + remoteCommand: ["echo", "ok"]) + return ["/usr/bin/ssh"] + args + } + + private static func formatSSHFailure(_ response: Response, target: String) -> String { + let payload = response.payload.flatMap { String(data: $0, encoding: .utf8) } + let trimmed = payload? + .trimmingCharacters(in: .whitespacesAndNewlines) + .split(whereSeparator: \.isNewline) + .joined(separator: " ") + if let trimmed, + trimmed.localizedCaseInsensitiveContains("host key verification failed") + { + let host = CommandResolver.parseSSHTarget(target)?.host ?? target + return "SSH check failed: Host key verification failed. Remove the old key with ssh-keygen -R \(host) and try again." 
+ } + if let trimmed, !trimmed.isEmpty { + if let message = response.message, message.hasPrefix("exit ") { + return "SSH check failed: \(trimmed) (\(message))" + } + return "SSH check failed: \(trimmed)" + } + if let message = response.message { + return "SSH check failed (\(message))" + } + return "SSH check failed" + } +} diff --git a/apps/macos/Sources/OpenClaw/Resources/Info.plist b/apps/macos/Sources/OpenClaw/Resources/Info.plist index 706fe7029c4..0bfd45cc97b 100644 --- a/apps/macos/Sources/OpenClaw/Resources/Info.plist +++ b/apps/macos/Sources/OpenClaw/Resources/Info.plist @@ -15,9 +15,9 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.3.9 + 2026.3.11 CFBundleVersion - 202603080 + 202603110 CFBundleIconFile OpenClaw CFBundleURLTypes @@ -59,6 +59,8 @@ OpenClaw uses speech recognition to detect your Voice Wake trigger phrase. NSAppleEventsUsageDescription OpenClaw needs Automation (AppleScript) permission to drive Terminal and other apps for agent actions. + NSRemindersUsageDescription + OpenClaw can access Reminders when requested by the agent for the apple-reminders skill. NSAppTransportSecurity diff --git a/apps/macos/Tests/OpenClawIPCTests/GatewayChannelConnectTests.swift b/apps/macos/Tests/OpenClawIPCTests/GatewayChannelConnectTests.swift index 8d37faa511e..9942f6e84ce 100644 --- a/apps/macos/Tests/OpenClawIPCTests/GatewayChannelConnectTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/GatewayChannelConnectTests.swift @@ -7,6 +7,11 @@ struct GatewayChannelConnectTests { private enum FakeResponse { case helloOk(delayMs: Int) case invalid(delayMs: Int) + case authFailed( + delayMs: Int, + detailCode: String, + canRetryWithDeviceToken: Bool, + recommendedNextStep: String?) 
} private func makeSession(response: FakeResponse) -> GatewayTestWebSocketSession { @@ -27,6 +32,14 @@ struct GatewayChannelConnectTests { case let .invalid(ms): delayMs = ms message = .string("not json") + case let .authFailed(ms, detailCode, canRetryWithDeviceToken, recommendedNextStep): + delayMs = ms + let id = task.snapshotConnectRequestID() ?? "connect" + message = .data(GatewayWebSocketTestSupport.connectAuthFailureData( + id: id, + detailCode: detailCode, + canRetryWithDeviceToken: canRetryWithDeviceToken, + recommendedNextStep: recommendedNextStep)) } try await Task.sleep(nanoseconds: UInt64(delayMs) * 1_000_000) return message @@ -71,4 +84,29 @@ struct GatewayChannelConnectTests { }()) #expect(session.snapshotMakeCount() == 1) } + + @Test func `connect surfaces structured auth failure`() async throws { + let session = self.makeSession(response: .authFailed( + delayMs: 0, + detailCode: GatewayConnectAuthDetailCode.authTokenMissing.rawValue, + canRetryWithDeviceToken: true, + recommendedNextStep: GatewayConnectRecoveryNextStep.updateAuthConfiguration.rawValue)) + let channel = try GatewayChannelActor( + url: #require(URL(string: "ws://example.invalid")), + token: nil, + session: WebSocketSessionBox(session: session)) + + do { + try await channel.connect() + Issue.record("expected GatewayConnectAuthError") + } catch let error as GatewayConnectAuthError { + #expect(error.detail == .authTokenMissing) + #expect(error.detailCode == GatewayConnectAuthDetailCode.authTokenMissing.rawValue) + #expect(error.canRetryWithDeviceToken) + #expect(error.recommendedNextStep == .updateAuthConfiguration) + #expect(error.recommendedNextStepCode == GatewayConnectRecoveryNextStep.updateAuthConfiguration.rawValue) + } catch { + Issue.record("unexpected error: \(error)") + } + } } diff --git a/apps/macos/Tests/OpenClawIPCTests/GatewayWebSocketTestSupport.swift b/apps/macos/Tests/OpenClawIPCTests/GatewayWebSocketTestSupport.swift index 8af4ccf6905..cf2b13de5ea 100644 --- 
a/apps/macos/Tests/OpenClawIPCTests/GatewayWebSocketTestSupport.swift +++ b/apps/macos/Tests/OpenClawIPCTests/GatewayWebSocketTestSupport.swift @@ -52,6 +52,40 @@ enum GatewayWebSocketTestSupport { return Data(json.utf8) } + static func connectAuthFailureData( + id: String, + detailCode: String, + message: String = "gateway auth rejected", + canRetryWithDeviceToken: Bool = false, + recommendedNextStep: String? = nil) -> Data + { + let recommendedNextStepJson: String + if let recommendedNextStep { + recommendedNextStepJson = """ + , + "recommendedNextStep": "\(recommendedNextStep)" + """ + } else { + recommendedNextStepJson = "" + } + let json = """ + { + "type": "res", + "id": "\(id)", + "ok": false, + "error": { + "message": "\(message)", + "details": { + "code": "\(detailCode)", + "canRetryWithDeviceToken": \(canRetryWithDeviceToken ? "true" : "false") + \(recommendedNextStepJson) + } + } + } + """ + return Data(json.utf8) + } + static func requestID(from message: URLSessionWebSocketTask.Message) -> String? { guard let obj = self.requestFrameObject(from: message) else { return nil } guard (obj["type"] as? String) == "req" else { diff --git a/apps/macos/Tests/OpenClawIPCTests/MacNodeBrowserProxyTests.swift b/apps/macos/Tests/OpenClawIPCTests/MacNodeBrowserProxyTests.swift index c000f6d4241..b341263b21f 100644 --- a/apps/macos/Tests/OpenClawIPCTests/MacNodeBrowserProxyTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/MacNodeBrowserProxyTests.swift @@ -38,4 +38,49 @@ struct MacNodeBrowserProxyTests { #expect(tabs.count == 1) #expect(tabs[0]["id"] as? String == "tab-1") } + + // Regression test: nested POST bodies must serialize without __SwiftValue crashes. + @Test func postRequestSerializesNestedBodyWithoutCrash() async throws { + actor BodyCapture { + private var body: Data? + + func set(_ body: Data?) { + self.body = body + } + + func get() -> Data? 
{ + self.body + } + } + + let capturedBody = BodyCapture() + let proxy = MacNodeBrowserProxy( + endpointProvider: { + MacNodeBrowserProxy.Endpoint( + baseURL: URL(string: "http://127.0.0.1:18791")!, + token: nil, + password: nil) + }, + performRequest: { request in + await capturedBody.set(request.httpBody) + let url = try #require(request.url) + let response = try #require( + HTTPURLResponse( + url: url, + statusCode: 200, + httpVersion: nil, + headerFields: nil)) + return (Data(#"{"ok":true}"#.utf8), response) + }) + + _ = try await proxy.request( + paramsJSON: #"{"method":"POST","path":"/action","body":{"nested":{"key":"val"},"arr":[1,2]}}"#) + + let bodyData = try #require(await capturedBody.get()) + let parsed = try #require(JSONSerialization.jsonObject(with: bodyData) as? [String: Any]) + let nested = try #require(parsed["nested"] as? [String: Any]) + #expect(nested["key"] as? String == "val") + let arr = try #require(parsed["arr"] as? [Any]) + #expect(arr.count == 2) + } } diff --git a/apps/macos/Tests/OpenClawIPCTests/OnboardingRemoteAuthPromptTests.swift b/apps/macos/Tests/OpenClawIPCTests/OnboardingRemoteAuthPromptTests.swift new file mode 100644 index 00000000000..d33cff562f9 --- /dev/null +++ b/apps/macos/Tests/OpenClawIPCTests/OnboardingRemoteAuthPromptTests.swift @@ -0,0 +1,126 @@ +import OpenClawKit +import Testing +@testable import OpenClaw + +@MainActor +struct OnboardingRemoteAuthPromptTests { + @Test func `auth detail codes map to remote auth issues`() { + let tokenMissing = GatewayConnectAuthError( + message: "token missing", + detailCode: GatewayConnectAuthDetailCode.authTokenMissing.rawValue, + canRetryWithDeviceToken: false) + let tokenMismatch = GatewayConnectAuthError( + message: "token mismatch", + detailCode: GatewayConnectAuthDetailCode.authTokenMismatch.rawValue, + canRetryWithDeviceToken: false) + let tokenNotConfigured = GatewayConnectAuthError( + message: "token not configured", + detailCode: 
GatewayConnectAuthDetailCode.authTokenNotConfigured.rawValue, + canRetryWithDeviceToken: false) + let passwordMissing = GatewayConnectAuthError( + message: "password missing", + detailCode: GatewayConnectAuthDetailCode.authPasswordMissing.rawValue, + canRetryWithDeviceToken: false) + let pairingRequired = GatewayConnectAuthError( + message: "pairing required", + detailCode: GatewayConnectAuthDetailCode.pairingRequired.rawValue, + canRetryWithDeviceToken: false) + let unknown = GatewayConnectAuthError( + message: "other", + detailCode: "SOMETHING_ELSE", + canRetryWithDeviceToken: false) + + #expect(RemoteGatewayAuthIssue(error: tokenMissing) == .tokenRequired) + #expect(RemoteGatewayAuthIssue(error: tokenMismatch) == .tokenMismatch) + #expect(RemoteGatewayAuthIssue(error: tokenNotConfigured) == .gatewayTokenNotConfigured) + #expect(RemoteGatewayAuthIssue(error: passwordMissing) == .passwordRequired) + #expect(RemoteGatewayAuthIssue(error: pairingRequired) == .pairingRequired) + #expect(RemoteGatewayAuthIssue(error: unknown) == nil) + } + + @Test func `password detail family maps to password required issue`() { + let mismatch = GatewayConnectAuthError( + message: "password mismatch", + detailCode: GatewayConnectAuthDetailCode.authPasswordMismatch.rawValue, + canRetryWithDeviceToken: false) + let notConfigured = GatewayConnectAuthError( + message: "password not configured", + detailCode: GatewayConnectAuthDetailCode.authPasswordNotConfigured.rawValue, + canRetryWithDeviceToken: false) + + #expect(RemoteGatewayAuthIssue(error: mismatch) == .passwordRequired) + #expect(RemoteGatewayAuthIssue(error: notConfigured) == .passwordRequired) + } + + @Test func `token field visibility follows onboarding rules`() { + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: nil) == false) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: true, + remoteToken: 
"", + remoteTokenUnsupported: false, + authIssue: nil)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "secret", + remoteTokenUnsupported: false, + authIssue: nil)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: true, + authIssue: nil)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .tokenRequired)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .tokenMismatch)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .gatewayTokenNotConfigured) == false) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .pairingRequired) == false) + } + + @Test func `pairing required copy points users to pair approve`() { + let issue = RemoteGatewayAuthIssue.pairingRequired + + #expect(issue.title == "This device needs pairing approval") + #expect(issue.body.contains("`/pair approve`")) + #expect(issue.statusMessage.contains("/pair approve")) + #expect(issue.footnote?.contains("`openclaw devices approve`") == true) + } + + @Test func `paired device success copy explains auth source`() { + let pairedDevice = RemoteGatewayProbeSuccess(authSource: .deviceToken) + let sharedToken = RemoteGatewayProbeSuccess(authSource: .sharedToken) + let noAuth = RemoteGatewayProbeSuccess(authSource: GatewayAuthSource.none) + + #expect(pairedDevice.title == "Connected via paired device") + #expect(pairedDevice.detail == "This Mac used a stored device token. 
New or unpaired devices may still need the gateway token.") + #expect(sharedToken.title == "Connected with gateway token") + #expect(sharedToken.detail == nil) + #expect(noAuth.title == "Remote gateway ready") + #expect(noAuth.detail == nil) + } + + @Test func `transient probe mode restore does not clear probe feedback`() { + #expect(OnboardingView.shouldResetRemoteProbeFeedback(for: .local, suppressReset: false)) + #expect(OnboardingView.shouldResetRemoteProbeFeedback(for: .unconfigured, suppressReset: false)) + #expect(OnboardingView.shouldResetRemoteProbeFeedback(for: .remote, suppressReset: false) == false) + #expect(OnboardingView.shouldResetRemoteProbeFeedback(for: .local, suppressReset: true) == false) + } +} diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift index f822e32044e..4848043980b 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift @@ -132,38 +132,17 @@ private let defaultOperatorConnectScopes: [String] = [ ] private enum GatewayConnectErrorCodes { - static let authTokenMismatch = "AUTH_TOKEN_MISMATCH" - static let authDeviceTokenMismatch = "AUTH_DEVICE_TOKEN_MISMATCH" - static let authTokenMissing = "AUTH_TOKEN_MISSING" - static let authPasswordMissing = "AUTH_PASSWORD_MISSING" - static let authPasswordMismatch = "AUTH_PASSWORD_MISMATCH" - static let authRateLimited = "AUTH_RATE_LIMITED" - static let pairingRequired = "PAIRING_REQUIRED" - static let controlUiDeviceIdentityRequired = "CONTROL_UI_DEVICE_IDENTITY_REQUIRED" - static let deviceIdentityRequired = "DEVICE_IDENTITY_REQUIRED" -} - -private struct GatewayConnectAuthError: LocalizedError { - let message: String - let detailCode: String? - let canRetryWithDeviceToken: Bool - - var errorDescription: String? 
{ self.message } - - var isNonRecoverable: Bool { - switch self.detailCode { - case GatewayConnectErrorCodes.authTokenMissing, - GatewayConnectErrorCodes.authPasswordMissing, - GatewayConnectErrorCodes.authPasswordMismatch, - GatewayConnectErrorCodes.authRateLimited, - GatewayConnectErrorCodes.pairingRequired, - GatewayConnectErrorCodes.controlUiDeviceIdentityRequired, - GatewayConnectErrorCodes.deviceIdentityRequired: - return true - default: - return false - } - } + static let authTokenMismatch = GatewayConnectAuthDetailCode.authTokenMismatch.rawValue + static let authDeviceTokenMismatch = GatewayConnectAuthDetailCode.authDeviceTokenMismatch.rawValue + static let authTokenMissing = GatewayConnectAuthDetailCode.authTokenMissing.rawValue + static let authTokenNotConfigured = GatewayConnectAuthDetailCode.authTokenNotConfigured.rawValue + static let authPasswordMissing = GatewayConnectAuthDetailCode.authPasswordMissing.rawValue + static let authPasswordMismatch = GatewayConnectAuthDetailCode.authPasswordMismatch.rawValue + static let authPasswordNotConfigured = GatewayConnectAuthDetailCode.authPasswordNotConfigured.rawValue + static let authRateLimited = GatewayConnectAuthDetailCode.authRateLimited.rawValue + static let pairingRequired = GatewayConnectAuthDetailCode.pairingRequired.rawValue + static let controlUiDeviceIdentityRequired = GatewayConnectAuthDetailCode.controlUiDeviceIdentityRequired.rawValue + static let deviceIdentityRequired = GatewayConnectAuthDetailCode.deviceIdentityRequired.rawValue } public actor GatewayChannelActor { @@ -278,8 +257,7 @@ public actor GatewayChannelActor { if self.shouldPauseReconnectAfterAuthFailure(error) { self.reconnectPausedForAuthFailure = true self.logger.error( - "gateway watchdog reconnect paused for non-recoverable auth failure " + - "\(error.localizedDescription, privacy: .public)" + "gateway watchdog reconnect paused for non-recoverable auth failure \(error.localizedDescription, privacy: .public)" ) continue } @@ 
-522,10 +500,12 @@ public actor GatewayChannelActor { let details = res.error?["details"]?.value as? [String: ProtoAnyCodable] let detailCode = details?["code"]?.value as? String let canRetryWithDeviceToken = details?["canRetryWithDeviceToken"]?.value as? Bool ?? false + let recommendedNextStep = details?["recommendedNextStep"]?.value as? String throw GatewayConnectAuthError( message: msg, - detailCode: detailCode, - canRetryWithDeviceToken: canRetryWithDeviceToken) + detailCodeRaw: detailCode, + canRetryWithDeviceToken: canRetryWithDeviceToken, + recommendedNextStepRaw: recommendedNextStep) } guard let payload = res.payload else { throw NSError( @@ -710,8 +690,7 @@ public actor GatewayChannelActor { if self.shouldPauseReconnectAfterAuthFailure(error) { self.reconnectPausedForAuthFailure = true self.logger.error( - "gateway reconnect paused for non-recoverable auth failure " + - "\(error.localizedDescription, privacy: .public)" + "gateway reconnect paused for non-recoverable auth failure \(error.localizedDescription, privacy: .public)" ) return } @@ -743,7 +722,7 @@ public actor GatewayChannelActor { return false } return authError.canRetryWithDeviceToken || - authError.detailCode == GatewayConnectErrorCodes.authTokenMismatch + authError.detail == .authTokenMismatch } private func shouldPauseReconnectAfterAuthFailure(_ error: Error) -> Bool { @@ -753,7 +732,7 @@ public actor GatewayChannelActor { if authError.isNonRecoverable { return true } - if authError.detailCode == GatewayConnectErrorCodes.authTokenMismatch && + if authError.detail == .authTokenMismatch && self.deviceTokenRetryBudgetUsed && !self.pendingDeviceTokenRetry { return true @@ -765,7 +744,7 @@ public actor GatewayChannelActor { guard let authError = error as? 
GatewayConnectAuthError else { return false } - return authError.detailCode == GatewayConnectErrorCodes.authDeviceTokenMismatch + return authError.detail == .authDeviceTokenMismatch } private func isTrustedDeviceRetryEndpoint() -> Bool { @@ -867,6 +846,9 @@ public actor GatewayChannelActor { // Wrap low-level URLSession/WebSocket errors with context so UI can surface them. private func wrap(_ error: Error, context: String) -> Error { + if error is GatewayConnectAuthError || error is GatewayResponseError || error is GatewayDecodingError { + return error + } if let urlError = error as? URLError { let desc = urlError.localizedDescription.isEmpty ? "cancelled" : urlError.localizedDescription return NSError( @@ -910,8 +892,7 @@ public actor GatewayChannelActor { return (id: id, data: data) } catch { self.logger.error( - "gateway \(kind) encode failed \(method, privacy: .public) " + - "error=\(error.localizedDescription, privacy: .public)") + "gateway \(kind) encode failed \(method, privacy: .public) error=\(error.localizedDescription, privacy: .public)") throw error } } diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayErrors.swift b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayErrors.swift index 6ca81dec445..3b1d97059a3 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayErrors.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayErrors.swift @@ -1,6 +1,112 @@ import OpenClawProtocol import Foundation +public enum GatewayConnectAuthDetailCode: String, Sendable { + case authRequired = "AUTH_REQUIRED" + case authUnauthorized = "AUTH_UNAUTHORIZED" + case authTokenMismatch = "AUTH_TOKEN_MISMATCH" + case authDeviceTokenMismatch = "AUTH_DEVICE_TOKEN_MISMATCH" + case authTokenMissing = "AUTH_TOKEN_MISSING" + case authTokenNotConfigured = "AUTH_TOKEN_NOT_CONFIGURED" + case authPasswordMissing = "AUTH_PASSWORD_MISSING" + case authPasswordMismatch = "AUTH_PASSWORD_MISMATCH" + case authPasswordNotConfigured = 
"AUTH_PASSWORD_NOT_CONFIGURED" + case authRateLimited = "AUTH_RATE_LIMITED" + case authTailscaleIdentityMissing = "AUTH_TAILSCALE_IDENTITY_MISSING" + case authTailscaleProxyMissing = "AUTH_TAILSCALE_PROXY_MISSING" + case authTailscaleWhoisFailed = "AUTH_TAILSCALE_WHOIS_FAILED" + case authTailscaleIdentityMismatch = "AUTH_TAILSCALE_IDENTITY_MISMATCH" + case pairingRequired = "PAIRING_REQUIRED" + case controlUiDeviceIdentityRequired = "CONTROL_UI_DEVICE_IDENTITY_REQUIRED" + case deviceIdentityRequired = "DEVICE_IDENTITY_REQUIRED" + case deviceAuthInvalid = "DEVICE_AUTH_INVALID" + case deviceAuthDeviceIdMismatch = "DEVICE_AUTH_DEVICE_ID_MISMATCH" + case deviceAuthSignatureExpired = "DEVICE_AUTH_SIGNATURE_EXPIRED" + case deviceAuthNonceRequired = "DEVICE_AUTH_NONCE_REQUIRED" + case deviceAuthNonceMismatch = "DEVICE_AUTH_NONCE_MISMATCH" + case deviceAuthSignatureInvalid = "DEVICE_AUTH_SIGNATURE_INVALID" + case deviceAuthPublicKeyInvalid = "DEVICE_AUTH_PUBLIC_KEY_INVALID" +} + +public enum GatewayConnectRecoveryNextStep: String, Sendable { + case retryWithDeviceToken = "retry_with_device_token" + case updateAuthConfiguration = "update_auth_configuration" + case updateAuthCredentials = "update_auth_credentials" + case waitThenRetry = "wait_then_retry" + case reviewAuthConfiguration = "review_auth_configuration" +} + +/// Structured websocket connect-auth rejection surfaced before the channel is usable. +public struct GatewayConnectAuthError: LocalizedError, Sendable { + public let message: String + public let detailCodeRaw: String? + public let recommendedNextStepRaw: String? + public let canRetryWithDeviceToken: Bool + + public init( + message: String, + detailCodeRaw: String?, + canRetryWithDeviceToken: Bool, + recommendedNextStepRaw: String? 
= nil) + { + let trimmedMessage = message.trimmingCharacters(in: .whitespacesAndNewlines) + let trimmedDetailCode = detailCodeRaw?.trimmingCharacters(in: .whitespacesAndNewlines) + let trimmedRecommendedNextStep = + recommendedNextStepRaw?.trimmingCharacters(in: .whitespacesAndNewlines) + self.message = trimmedMessage.isEmpty ? "gateway connect failed" : trimmedMessage + self.detailCodeRaw = trimmedDetailCode?.isEmpty == false ? trimmedDetailCode : nil + self.canRetryWithDeviceToken = canRetryWithDeviceToken + self.recommendedNextStepRaw = + trimmedRecommendedNextStep?.isEmpty == false ? trimmedRecommendedNextStep : nil + } + + public init( + message: String, + detailCode: String?, + canRetryWithDeviceToken: Bool, + recommendedNextStep: String? = nil) + { + self.init( + message: message, + detailCodeRaw: detailCode, + canRetryWithDeviceToken: canRetryWithDeviceToken, + recommendedNextStepRaw: recommendedNextStep) + } + + public var detailCode: String? { self.detailCodeRaw } + + public var recommendedNextStepCode: String? { self.recommendedNextStepRaw } + + public var detail: GatewayConnectAuthDetailCode? { + guard let detailCodeRaw else { return nil } + return GatewayConnectAuthDetailCode(rawValue: detailCodeRaw) + } + + public var recommendedNextStep: GatewayConnectRecoveryNextStep? { + guard let recommendedNextStepRaw else { return nil } + return GatewayConnectRecoveryNextStep(rawValue: recommendedNextStepRaw) + } + + public var errorDescription: String? { self.message } + + public var isNonRecoverable: Bool { + switch self.detail { + case .authTokenMissing, + .authTokenNotConfigured, + .authPasswordMissing, + .authPasswordMismatch, + .authPasswordNotConfigured, + .authRateLimited, + .pairingRequired, + .controlUiDeviceIdentityRequired, + .deviceIdentityRequired: + return true + default: + return false + } + } +} + /// Structured error surfaced when the gateway responds with `{ ok: false }`. 
public struct GatewayResponseError: LocalizedError, @unchecked Sendable { public let method: String diff --git a/docs/cli/agent.md b/docs/cli/agent.md index 93c8d04b41a..430bdf50743 100644 --- a/docs/cli/agent.md +++ b/docs/cli/agent.md @@ -25,4 +25,5 @@ openclaw agent --agent ops --message "Generate report" --deliver --reply-channel ## Notes -- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names or `secretref-managed`), not resolved secret plaintext. +- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names, `secretref-env:ENV_VAR_NAME`, or `secretref-managed`), not resolved secret plaintext. +- Marker writes are source-authoritative: OpenClaw persists markers from the active source config snapshot, not from resolved runtime secret values. diff --git a/docs/cli/index.md b/docs/cli/index.md index fb68727e44b..cbcd5bff0b5 100644 --- a/docs/cli/index.md +++ b/docs/cli/index.md @@ -337,7 +337,7 @@ Options: - `--non-interactive` - `--mode ` - `--flow ` (manual is an alias for advanced) -- `--auth-choice ` +- `--auth-choice ` - `--token-provider ` (non-interactive; used with `--auth-choice token`) - `--token ` (non-interactive; used with `--auth-choice token`) - `--token-profile-id ` (non-interactive; default: `:manual`) @@ -354,6 +354,7 @@ Options: - `--zai-api-key ` - `--minimax-api-key ` - `--opencode-zen-api-key ` +- `--opencode-go-api-key ` - `--custom-base-url ` (non-interactive; used with `--auth-choice custom-api-key`) - `--custom-model-id ` (non-interactive; used with `--auth-choice custom-api-key`) - `--custom-api-key ` (non-interactive; optional; used with `--auth-choice custom-api-key`; falls back to `CUSTOM_API_KEY` when omitted) diff --git a/docs/concepts/memory.md b/docs/concepts/memory.md index b3940945249..8ed755b394c 100644 --- 
a/docs/concepts/memory.md +++ b/docs/concepts/memory.md @@ -284,9 +284,46 @@ Notes: - Paths can be absolute or workspace-relative. - Directories are scanned recursively for `.md` files. -- Only Markdown files are indexed. +- By default, only Markdown files are indexed. +- If `memorySearch.multimodal.enabled = true`, OpenClaw also indexes supported image/audio files under `extraPaths` only. Default memory roots (`MEMORY.md`, `memory.md`, `memory/**/*.md`) stay Markdown-only. - Symlinks are ignored (files or directories). +### Multimodal memory files (Gemini image + audio) + +OpenClaw can index image and audio files from `memorySearch.extraPaths` when using Gemini embedding 2: + +```json5 +agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: ["assets/reference", "voice-notes"], + multimodal: { + enabled: true, + modalities: ["image", "audio"], // or ["all"] + maxFileBytes: 10000000 + }, + remote: { + apiKey: "YOUR_GEMINI_API_KEY" + } + } + } +} +``` + +Notes: + +- Multimodal memory is currently supported only for `gemini-embedding-2-preview`. +- Multimodal indexing applies only to files discovered through `memorySearch.extraPaths`. +- Supported modalities in this phase: image and audio. +- `memorySearch.fallback` must stay `"none"` while multimodal memory is enabled. +- Matching image/audio file bytes are uploaded to the configured Gemini embedding endpoint during indexing. +- Supported image extensions: `.jpg`, `.jpeg`, `.png`, `.webp`, `.gif`, `.heic`, `.heif`. +- Supported audio extensions: `.mp3`, `.wav`, `.ogg`, `.opus`, `.m4a`, `.aac`, `.flac`. +- Search queries remain text, but Gemini can compare those text queries against indexed image/audio embeddings. +- `memory_get` still reads Markdown only; binary files are searchable but not returned as raw file contents. 
+ ### Gemini embeddings (native) Set the provider to `gemini` to use the Gemini embeddings API directly: @@ -310,6 +347,29 @@ Notes: - `remote.baseUrl` is optional (defaults to the Gemini API base URL). - `remote.headers` lets you add extra headers if needed. - Default model: `gemini-embedding-001`. +- `gemini-embedding-2-preview` is also supported: 8192 token limit and configurable dimensions (768 / 1536 / 3072, default 3072). + +#### Gemini Embedding 2 (preview) + +```json5 +agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + outputDimensionality: 3072, // optional: 768, 1536, or 3072 (default) + remote: { + apiKey: "YOUR_GEMINI_API_KEY" + } + } + } +} +``` + +> **⚠️ Re-index required:** Switching from `gemini-embedding-001` (768 dimensions) +> to `gemini-embedding-2-preview` (3072 dimensions) changes the vector size. The same is true if you +> change `outputDimensionality` between 768, 1536, and 3072. +> OpenClaw will automatically reindex when it detects a model or dimension change. If you want to use a **custom OpenAI-compatible endpoint** (OpenRouter, vLLM, or a proxy), you can use the `remote` configuration with the OpenAI provider: diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md index 6dd4c2f9c03..549875c77b4 100644 --- a/docs/concepts/model-providers.md +++ b/docs/concepts/model-providers.md @@ -86,12 +86,13 @@ OpenClaw ships with the pi‑ai catalog. 
These providers require **no** } ``` -### OpenCode Zen +### OpenCode -- Provider: `opencode` - Auth: `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`) -- Example model: `opencode/claude-opus-4-6` -- CLI: `openclaw onboard --auth-choice opencode-zen` +- Zen runtime provider: `opencode` +- Go runtime provider: `opencode-go` +- Example models: `opencode/claude-opus-4-6`, `opencode-go/kimi-k2.5` +- CLI: `openclaw onboard --auth-choice opencode-zen` or `openclaw onboard --auth-choice opencode-go` ```json5 { @@ -104,8 +105,8 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - Provider: `google` - Auth: `GEMINI_API_KEY` - Optional rotation: `GEMINI_API_KEYS`, `GEMINI_API_KEY_1`, `GEMINI_API_KEY_2`, `GOOGLE_API_KEY` fallback, and `OPENCLAW_LIVE_GEMINI_KEY` (single override) -- Example models: `google/gemini-3.1-pro-preview`, `google/gemini-3-flash-preview`, `google/gemini-3.1-flash-lite-preview` -- Compatibility: legacy OpenClaw config using `google/gemini-3.1-flash-preview` is normalized to `google/gemini-3-flash-preview`, and bare `google/gemini-3.1-flash-lite` is normalized to `google/gemini-3.1-flash-lite-preview` +- Example models: `google/gemini-3.1-pro-preview`, `google/gemini-3-flash-preview` +- Compatibility: legacy OpenClaw config using `google/gemini-3.1-flash-preview` is normalized to `google/gemini-3-flash-preview` - CLI: `openclaw onboard --auth-choice gemini-api-key` ### Google Vertex, Antigravity, and Gemini CLI @@ -356,7 +357,7 @@ Ollama is a local LLM runtime that provides an OpenAI-compatible API: - Provider: `ollama` - Auth: None required (local server) - Example model: `ollama/llama3.3` -- Installation: [https://ollama.ai](https://ollama.ai) +- Installation: [https://ollama.com/download](https://ollama.com/download) ```bash # Install Ollama, then pull a model: @@ -371,7 +372,7 @@ ollama pull llama3.3 } ``` -Ollama is automatically detected when running locally at `http://127.0.0.1:11434/v1`. 
See [/providers/ollama](/providers/ollama) for model recommendations and custom configuration. +Ollama is detected locally at `http://127.0.0.1:11434` when you opt in with `OLLAMA_API_KEY`, and `openclaw onboard` can configure it directly as a first-class provider. See [/providers/ollama](/providers/ollama) for onboarding, cloud/local mode, and custom configuration. ### vLLM diff --git a/docs/concepts/models.md b/docs/concepts/models.md index 2ad809d9599..6323feef04e 100644 --- a/docs/concepts/models.md +++ b/docs/concepts/models.md @@ -55,8 +55,8 @@ subscription** (OAuth) and **Anthropic** (API key or `claude setup-token`). Model refs are normalized to lowercase. Provider aliases like `z.ai/*` normalize to `zai/*`. -Provider configuration examples (including OpenCode Zen) live in -[/gateway/configuration](/gateway/configuration#opencode-zen-multi-model-proxy). +Provider configuration examples (including OpenCode) live in +[/gateway/configuration](/gateway/configuration#opencode). ## “Model is not allowed” (and why replies stop) @@ -207,7 +207,7 @@ mode, pass `--yes` to accept defaults. ## Models registry (`models.json`) Custom providers in `models.providers` are written into `models.json` under the -agent directory (default `~/.openclaw/agents//models.json`). This file +agent directory (default `~/.openclaw/agents//agent/models.json`). This file is merged by default unless `models.mode` is set to `replace`. Merge mode precedence for matching provider IDs: @@ -215,7 +215,9 @@ Merge mode precedence for matching provider IDs: - Non-empty `baseUrl` already present in the agent `models.json` wins. - Non-empty `apiKey` in the agent `models.json` wins only when that provider is not SecretRef-managed in current config/auth-profile context. - SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets. 
+- SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs). - Empty or missing agent `apiKey`/`baseUrl` fall back to config `models.providers`. - Other provider fields are refreshed from config and normalized catalog data. -This marker-based persistence applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`. +Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values. +This applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`. diff --git a/docs/docs.json b/docs/docs.json index 8592618cd7d..e6cf5ba382b 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -103,6 +103,10 @@ "source": "/opencode", "destination": "/providers/opencode" }, + { + "source": "/opencode-go", + "destination": "/providers/opencode-go" + }, { "source": "/qianfan", "destination": "/providers/qianfan" @@ -1013,8 +1017,7 @@ "tools/browser", "tools/browser-login", "tools/chrome-extension", - "tools/browser-linux-troubleshooting", - "tools/browser-wsl2-windows-remote-cdp-troubleshooting" + "tools/browser-linux-troubleshooting" ] }, { @@ -1112,6 +1115,7 @@ "providers/nvidia", "providers/ollama", "providers/openai", + "providers/opencode-go", "providers/opencode", "providers/openrouter", "providers/qianfan", diff --git a/docs/gateway/configuration-reference.md b/docs/gateway/configuration-reference.md index 6922234fd2a..db5077aebcf 100644 --- a/docs/gateway/configuration-reference.md +++ b/docs/gateway/configuration-reference.md @@ -2014,9 +2014,11 @@ OpenClaw uses the pi-coding-agent model catalog. Add custom providers via `model - Non-empty agent `models.json` `baseUrl` values win. 
- Non-empty agent `apiKey` values win only when that provider is not SecretRef-managed in current config/auth-profile context. - SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets. + - SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs). - Empty or missing agent `apiKey`/`baseUrl` fall back to `models.providers` in config. - Matching model `contextWindow`/`maxTokens` use the higher value between explicit config and implicit catalog values. - Use `models.mode: "replace"` when you want config to fully rewrite `models.json`. + - Marker persistence is source-authoritative: markers are written from the active source config snapshot (pre-resolution), not from resolved runtime secret values. ### Provider field details @@ -2079,7 +2081,7 @@ Use `cerebras/zai-glm-4.7` for Cerebras; `zai/glm-4.7` for Z.AI direct. - + ```json5 { @@ -2092,7 +2094,7 @@ Use `cerebras/zai-glm-4.7` for Cerebras; `zai/glm-4.7` for Z.AI direct. } ``` -Set `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`). Shortcut: `openclaw onboard --auth-choice opencode-zen`. +Set `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`). Use `opencode/...` refs for the Zen catalog or `opencode-go/...` refs for the Go catalog. Shortcut: `openclaw onboard --auth-choice opencode-zen` or `openclaw onboard --auth-choice opencode-go`. diff --git a/docs/gateway/doctor.md b/docs/gateway/doctor.md index b46b90520d1..95027906750 100644 --- a/docs/gateway/doctor.md +++ b/docs/gateway/doctor.md @@ -63,7 +63,7 @@ cat ~/.openclaw/openclaw.json - Health check + restart prompt. - Skills status summary (eligible/missing/blocked). - Config normalization for legacy values. -- OpenCode Zen provider override warnings (`models.providers.opencode`). 
+- OpenCode provider override warnings (`models.providers.opencode` / `models.providers.opencode-go`). - Legacy on-disk state migration (sessions/agent dir/WhatsApp auth). - Legacy cron store migration (`jobId`, `schedule.cron`, top-level delivery/payload fields, payload `provider`, simple `notify: true` webhook fallback jobs). - State integrity and permissions checks (sessions, transcripts, state dir). @@ -134,12 +134,12 @@ Doctor warnings also include account-default guidance for multi-account channels - If two or more `channels..accounts` entries are configured without `channels..defaultAccount` or `accounts.default`, doctor warns that fallback routing can pick an unexpected account. - If `channels..defaultAccount` is set to an unknown account ID, doctor warns and lists configured account IDs. -### 2b) OpenCode Zen provider overrides +### 2b) OpenCode provider overrides -If you’ve added `models.providers.opencode` (or `opencode-zen`) manually, it -overrides the built-in OpenCode Zen catalog from `@mariozechner/pi-ai`. That can -force every model onto a single API or zero out costs. Doctor warns so you can -remove the override and restore per-model API routing + costs. +If you’ve added `models.providers.opencode`, `opencode-zen`, or `opencode-go` +manually, it overrides the built-in OpenCode catalog from `@mariozechner/pi-ai`. +That can force models onto the wrong API or zero out costs. Doctor warns so you +can remove the override and restore per-model API routing + costs. ### 3) Legacy state migrations (disk layout) diff --git a/docs/gateway/local-models.md b/docs/gateway/local-models.md index 8a07a827467..4059f988776 100644 --- a/docs/gateway/local-models.md +++ b/docs/gateway/local-models.md @@ -11,6 +11,8 @@ title: "Local Models" Local is doable, but OpenClaw expects large context + strong defenses against prompt injection. Small cards truncate context and leak safety. Aim high: **≥2 maxed-out Mac Studios or equivalent GPU rig (~$30k+)**. 
A single **24 GB** GPU works only for lighter prompts with higher latency. Use the **largest / full-size model variant you can run**; aggressively quantized or “small” checkpoints raise prompt-injection risk (see [Security](/gateway/security)). +If you want the lowest-friction local setup, start with [Ollama](/providers/ollama) and `openclaw onboard`. This page is the opinionated guide for higher-end local stacks and custom OpenAI-compatible local servers. + ## Recommended: LM Studio + MiniMax M2.5 (Responses API, full-size) Best current local stack. Load MiniMax M2.5 in LM Studio, enable the local server (default `http://127.0.0.1:1234`), and use Responses API to keep reasoning separate from final text. diff --git a/docs/help/faq.md b/docs/help/faq.md index 8b738b60fc2..453688c1c5f 100644 --- a/docs/help/faq.md +++ b/docs/help/faq.md @@ -2084,8 +2084,21 @@ More context: [Models](/concepts/models). ### Can I use selfhosted models llamacpp vLLM Ollama -Yes. If your local server exposes an OpenAI-compatible API, you can point a -custom provider at it. Ollama is supported directly and is the easiest path. +Yes. Ollama is the easiest path for local models. + +Quickest setup: + +1. Install Ollama from `https://ollama.com/download` +2. Pull a local model such as `ollama pull glm-4.7-flash` +3. If you want Ollama Cloud too, run `ollama signin` +4. Run `openclaw onboard` and choose `Ollama` +5. Pick `Local` or `Cloud + Local` + +Notes: + +- `Cloud + Local` gives you Ollama Cloud models plus your local Ollama models +- cloud models such as `kimi-k2.5:cloud` do not need a local pull +- for manual switching, use `openclaw models list` and `openclaw models set ollama/` Security note: smaller or heavily quantized models are more vulnerable to prompt injection. We strongly recommend **large models** for any bot that can use tools. 
diff --git a/docs/help/testing.md b/docs/help/testing.md index 6580de4da20..db374bb03da 100644 --- a/docs/help/testing.md +++ b/docs/help/testing.md @@ -311,11 +311,11 @@ Include at least one image-capable model in `OPENCLAW_LIVE_GATEWAY_MODELS` (Clau If you have keys enabled, we also support testing via: - OpenRouter: `openrouter/...` (hundreds of models; use `openclaw models scan` to find tool+image capable candidates) -- OpenCode Zen: `opencode/...` (auth via `OPENCODE_API_KEY` / `OPENCODE_ZEN_API_KEY`) +- OpenCode: `opencode/...` for Zen and `opencode-go/...` for Go (auth via `OPENCODE_API_KEY` / `OPENCODE_ZEN_API_KEY`) More providers you can include in the live matrix (if you have creds/config): -- Built-in: `openai`, `openai-codex`, `anthropic`, `google`, `google-vertex`, `google-antigravity`, `google-gemini-cli`, `zai`, `openrouter`, `opencode`, `xai`, `groq`, `cerebras`, `mistral`, `github-copilot` +- Built-in: `openai`, `openai-codex`, `anthropic`, `google`, `google-vertex`, `google-antigravity`, `google-gemini-cli`, `zai`, `openrouter`, `opencode`, `opencode-go`, `xai`, `groq`, `cerebras`, `mistral`, `github-copilot` - Via `models.providers` (custom endpoints): `minimax` (cloud/API), plus any OpenAI/Anthropic-compatible proxy (LM Studio, vLLM, LiteLLM, etc.) Tip: don’t try to hardcode “all models” in docs. The authoritative list is whatever `discoverModels(...)` returns on your machine + whatever keys are available. diff --git a/docs/platforms/mac/release.md b/docs/platforms/mac/release.md index 180a52075ed..cd4052ac9dc 100644 --- a/docs/platforms/mac/release.md +++ b/docs/platforms/mac/release.md @@ -39,7 +39,7 @@ Notes: # Default is auto-derived from APP_VERSION when omitted. 
SKIP_NOTARIZE=1 \ BUNDLE_ID=ai.openclaw.mac \ -APP_VERSION=2026.3.9 \ +APP_VERSION=2026.3.11 \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-dist.sh @@ -47,10 +47,10 @@ scripts/package-mac-dist.sh # `package-mac-dist.sh` already creates the zip + DMG. # If you used `package-mac-app.sh` directly instead, create them manually: # If you want notarization/stapling in this step, use the NOTARIZE command below. -ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.3.9.zip +ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.3.11.zip # Optional: build a styled DMG for humans (drag to /Applications) -scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.9.dmg +scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.11.dmg # Recommended: build + notarize/staple zip + DMG # First, create a keychain profile once: @@ -58,13 +58,13 @@ scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.9.dmg # --apple-id "" --team-id "" --password "" NOTARIZE=1 NOTARYTOOL_PROFILE=openclaw-notary \ BUNDLE_ID=ai.openclaw.mac \ -APP_VERSION=2026.3.9 \ +APP_VERSION=2026.3.11 \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-dist.sh # Optional: ship dSYM alongside the release -ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.3.9.dSYM.zip +ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.3.11.dSYM.zip ``` ## Appcast entry @@ -72,7 +72,7 @@ ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenCl Use the release note generator so Sparkle renders formatted HTML notes: ```bash -SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.3.9.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml +SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh 
dist/OpenClaw-2026.3.11.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml ``` Generates HTML release notes from `CHANGELOG.md` (via [`scripts/changelog-to-html.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/changelog-to-html.sh)) and embeds them in the appcast entry. @@ -80,7 +80,7 @@ Commit the updated `appcast.xml` alongside the release assets (zip + dSYM) when ## Publish & verify -- Upload `OpenClaw-2026.3.9.zip` (and `OpenClaw-2026.3.9.dSYM.zip`) to the GitHub release for tag `v2026.3.9`. +- Upload `OpenClaw-2026.3.11.zip` (and `OpenClaw-2026.3.11.dSYM.zip`) to the GitHub release for tag `v2026.3.11`. - Ensure the raw appcast URL matches the baked feed: `https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml`. - Sanity checks: - `curl -I https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml` returns 200. diff --git a/docs/platforms/raspberry-pi.md b/docs/platforms/raspberry-pi.md index e46076e869d..247bf757b91 100644 --- a/docs/platforms/raspberry-pi.md +++ b/docs/platforms/raspberry-pi.md @@ -153,30 +153,33 @@ sudo systemctl status openclaw journalctl -u openclaw -f ``` -## 9) Access the Dashboard +## 9) Access the OpenClaw Dashboard -Since the Pi is headless, use an SSH tunnel: +Replace `user@gateway-host` with your Pi username and hostname or IP address. + +On your computer, ask the Pi to print a fresh dashboard URL: ```bash -# From your laptop/desktop -ssh -L 18789:localhost:18789 user@gateway-host - -# Then open in browser -open http://localhost:18789 +ssh user@gateway-host 'openclaw dashboard --no-open' ``` -Or use Tailscale for always-on access: +The command prints `Dashboard URL:`. Depending on how `gateway.auth.token` +is configured, the URL may be a plain `http://127.0.0.1:18789/` link or one +that includes `#token=...`. 
+ +In another terminal on your computer, create the SSH tunnel: ```bash -# On the Pi -curl -fsSL https://tailscale.com/install.sh | sh -sudo tailscale up - -# Update config -openclaw config set gateway.bind tailnet -sudo systemctl restart openclaw +ssh -N -L 18789:127.0.0.1:18789 user@gateway-host ``` +Then open the printed Dashboard URL in your local browser. + +If the UI asks for auth, paste the token from `gateway.auth.token` +(or `OPENCLAW_GATEWAY_TOKEN`) into Control UI settings. + +For always-on remote access, see [Tailscale](/gateway/tailscale). + --- ## Performance Optimizations diff --git a/docs/providers/index.md b/docs/providers/index.md index a4587213832..50e45c6559b 100644 --- a/docs/providers/index.md +++ b/docs/providers/index.md @@ -39,7 +39,7 @@ Looking for chat channel docs (WhatsApp/Telegram/Discord/Slack/Mattermost (plugi - [NVIDIA](/providers/nvidia) - [Ollama (local models)](/providers/ollama) - [OpenAI (API + Codex)](/providers/openai) -- [OpenCode Zen](/providers/opencode) +- [OpenCode (Zen + Go)](/providers/opencode) - [OpenRouter](/providers/openrouter) - [Qianfan](/providers/qianfan) - [Qwen (OAuth)](/providers/qwen) diff --git a/docs/providers/models.md b/docs/providers/models.md index 7da741f4077..a117d286051 100644 --- a/docs/providers/models.md +++ b/docs/providers/models.md @@ -32,7 +32,7 @@ model as `provider/model`. - [Moonshot AI (Kimi + Kimi Coding)](/providers/moonshot) - [Mistral](/providers/mistral) - [Synthetic](/providers/synthetic) -- [OpenCode Zen](/providers/opencode) +- [OpenCode (Zen + Go)](/providers/opencode) - [Z.AI](/providers/zai) - [GLM models](/providers/glm) - [MiniMax](/providers/minimax) diff --git a/docs/providers/ollama.md b/docs/providers/ollama.md index b82f6411b68..abc41361ed0 100644 --- a/docs/providers/ollama.md +++ b/docs/providers/ollama.md @@ -8,7 +8,7 @@ title: "Ollama" # Ollama -Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. 
OpenClaw integrates with Ollama's native API (`/api/chat`), supporting streaming and tool calling, and can **auto-discover tool-capable models** when you opt in with `OLLAMA_API_KEY` (or an auth profile) and do not define an explicit `models.providers.ollama` entry. +Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. OpenClaw integrates with Ollama's native API (`/api/chat`), supports streaming and tool calling, and can auto-discover local Ollama models when you opt in with `OLLAMA_API_KEY` (or an auth profile) and do not define an explicit `models.providers.ollama` entry. **Remote Ollama users**: Do not use the `/v1` OpenAI-compatible URL (`http://host:11434/v1`) with OpenClaw. This breaks tool calling and models may output raw tool JSON as plain text. Use the native Ollama API URL instead: `baseUrl: "http://host:11434"` (no `/v1`). @@ -16,21 +16,40 @@ Ollama is a local LLM runtime that makes it easy to run open-source models on yo ## Quick start -1. Install Ollama: [https://ollama.ai](https://ollama.ai) +1. Install Ollama: [https://ollama.com/download](https://ollama.com/download) -2. Pull a model: +2. Pull a local model if you want local inference: ```bash +ollama pull glm-4.7-flash +# or ollama pull gpt-oss:20b # or ollama pull llama3.3 -# or -ollama pull qwen2.5-coder:32b -# or -ollama pull deepseek-r1:32b ``` -3. Enable Ollama for OpenClaw (any value works; Ollama doesn't require a real key): +3. If you want Ollama Cloud models too, sign in: + +```bash +ollama signin +``` + +4. Run onboarding and choose `Ollama`: + +```bash +openclaw onboard +``` + +- `Local`: local models only +- `Cloud + Local`: local models plus Ollama Cloud models +- Cloud models such as `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, and `glm-5:cloud` do **not** require a local `ollama pull` + +OpenClaw currently suggests: + +- local default: `glm-4.7-flash` +- cloud defaults: `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, `glm-5:cloud` + +5. 
If you prefer manual setup, enable Ollama for OpenClaw directly (any value works; Ollama doesn't require a real key): ```bash # Set environment variable @@ -40,13 +59,20 @@ export OLLAMA_API_KEY="ollama-local" openclaw config set models.providers.ollama.apiKey "ollama-local" ``` -4. Use Ollama models: +6. Inspect or switch models: + +```bash +openclaw models list +openclaw models set ollama/glm-4.7-flash +``` + +7. Or set the default in config: ```json5 { agents: { defaults: { - model: { primary: "ollama/gpt-oss:20b" }, + model: { primary: "ollama/glm-4.7-flash" }, }, }, } @@ -56,14 +82,13 @@ openclaw config set models.providers.ollama.apiKey "ollama-local" When you set `OLLAMA_API_KEY` (or an auth profile) and **do not** define `models.providers.ollama`, OpenClaw discovers models from the local Ollama instance at `http://127.0.0.1:11434`: -- Queries `/api/tags` and `/api/show` -- Keeps only models that report `tools` capability -- Marks `reasoning` when the model reports `thinking` -- Reads `contextWindow` from `model_info[".context_length"]` when available -- Sets `maxTokens` to 10× the context window +- Queries `/api/tags` +- Uses best-effort `/api/show` lookups to read `contextWindow` when available +- Marks `reasoning` with a model-name heuristic (`r1`, `reasoning`, `think`) +- Sets `maxTokens` to the default Ollama max-token cap used by OpenClaw - Sets all costs to `0` -This avoids manual model entries while keeping the catalog aligned with Ollama's capabilities. +This avoids manual model entries while keeping the catalog aligned with the local Ollama instance. To see what models are available: @@ -98,7 +123,7 @@ Use explicit config when: - Ollama runs on another host/port. - You want to force specific context windows or model lists. -- You want to include models that do not report tool support. +- You want fully manual model definitions. 
```json5 { @@ -170,7 +195,7 @@ Once configured, all your Ollama models are available: ### Reasoning models -OpenClaw marks models as reasoning-capable when Ollama reports `thinking` in `/api/show`: +OpenClaw treats models with names such as `deepseek-r1`, `reasoning`, or `think` as reasoning-capable by default: ```bash ollama pull deepseek-r1:32b @@ -230,7 +255,7 @@ When `api: "openai-completions"` is used with Ollama, OpenClaw injects `options. ### Context windows -For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, otherwise it defaults to `8192`. You can override `contextWindow` and `maxTokens` in explicit provider config. +For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, otherwise it falls back to the default Ollama context window used by OpenClaw. You can override `contextWindow` and `maxTokens` in explicit provider config. ## Troubleshooting @@ -250,16 +275,17 @@ curl http://localhost:11434/api/tags ### No models available -OpenClaw only auto-discovers models that report tool support. If your model isn't listed, either: +If your model is not listed, either: -- Pull a tool-capable model, or +- Pull the model locally, or - Define the model explicitly in `models.providers.ollama`. To add models: ```bash ollama list # See what's installed -ollama pull gpt-oss:20b # Pull a tool-capable model +ollama pull glm-4.7-flash +ollama pull gpt-oss:20b ollama pull llama3.3 # Or another model ``` diff --git a/docs/providers/opencode-go.md b/docs/providers/opencode-go.md new file mode 100644 index 00000000000..4552e916beb --- /dev/null +++ b/docs/providers/opencode-go.md @@ -0,0 +1,45 @@ +--- +summary: "Use the OpenCode Go catalog with the shared OpenCode setup" +read_when: + - You want the OpenCode Go catalog + - You need the runtime model refs for Go-hosted models +title: "OpenCode Go" +--- + +# OpenCode Go + +OpenCode Go is the Go catalog within [OpenCode](/providers/opencode). 
+It uses the same `OPENCODE_API_KEY` as the Zen catalog, but keeps the runtime +provider id `opencode-go` so upstream per-model routing stays correct. + +## Supported models + +- `opencode-go/kimi-k2.5` +- `opencode-go/glm-5` +- `opencode-go/minimax-m2.5` + +## CLI setup + +```bash +openclaw onboard --auth-choice opencode-go +# or non-interactive +openclaw onboard --opencode-go-api-key "$OPENCODE_API_KEY" +``` + +## Config snippet + +```json5 +{ + env: { OPENCODE_API_KEY: "YOUR_API_KEY_HERE" }, // pragma: allowlist secret + agents: { defaults: { model: { primary: "opencode-go/kimi-k2.5" } } }, +} +``` + +## Routing behavior + +OpenClaw handles per-model routing automatically when the model ref uses `opencode-go/...`. + +## Notes + +- Use [OpenCode](/providers/opencode) for the shared onboarding and catalog overview. +- Runtime refs stay explicit: `opencode/...` for Zen, `opencode-go/...` for Go. diff --git a/docs/providers/opencode.md b/docs/providers/opencode.md index aa0614bff80..bf8d54afc9e 100644 --- a/docs/providers/opencode.md +++ b/docs/providers/opencode.md @@ -1,25 +1,38 @@ --- -summary: "Use OpenCode Zen (curated models) with OpenClaw" +summary: "Use OpenCode Zen and Go catalogs with OpenClaw" read_when: - - You want OpenCode Zen for model access - - You want a curated list of coding-friendly models -title: "OpenCode Zen" + - You want OpenCode-hosted model access + - You want to pick between the Zen and Go catalogs +title: "OpenCode" --- -# OpenCode Zen +# OpenCode -OpenCode Zen is a **curated list of models** recommended by the OpenCode team for coding agents. -It is an optional, hosted model access path that uses an API key and the `opencode` provider. -Zen is currently in beta. +OpenCode exposes two hosted catalogs in OpenClaw: + +- `opencode/...` for the **Zen** catalog +- `opencode-go/...` for the **Go** catalog + +Both catalogs use the same OpenCode API key. 
OpenClaw keeps the runtime provider ids +split so upstream per-model routing stays correct, but onboarding and docs treat them +as one OpenCode setup. ## CLI setup +### Zen catalog + ```bash openclaw onboard --auth-choice opencode-zen -# or non-interactive openclaw onboard --opencode-zen-api-key "$OPENCODE_API_KEY" ``` +### Go catalog + +```bash +openclaw onboard --auth-choice opencode-go +openclaw onboard --opencode-go-api-key "$OPENCODE_API_KEY" +``` + ## Config snippet ```json5 @@ -29,8 +42,23 @@ openclaw onboard --opencode-zen-api-key "$OPENCODE_API_KEY" } ``` +## Catalogs + +### Zen + +- Runtime provider: `opencode` +- Example models: `opencode/claude-opus-4-6`, `opencode/gpt-5.2`, `opencode/gemini-3-pro` +- Best when you want the curated OpenCode multi-model proxy + +### Go + +- Runtime provider: `opencode-go` +- Example models: `opencode-go/kimi-k2.5`, `opencode-go/glm-5`, `opencode-go/minimax-m2.5` +- Best when you want the OpenCode-hosted Kimi/GLM/MiniMax lineup + ## Notes - `OPENCODE_ZEN_API_KEY` is also supported. -- You sign in to Zen, add billing details, and copy your API key. -- OpenCode Zen bills per request; check the OpenCode dashboard for details. +- Entering one OpenCode key during onboarding stores credentials for both runtime providers. +- You sign in to OpenCode, add billing details, and copy your API key. +- Billing and catalog availability are managed from the OpenCode dashboard. diff --git a/docs/reference/secretref-credential-surface.md b/docs/reference/secretref-credential-surface.md index 2a5fc5a66ac..76eb4ec2ae1 100644 --- a/docs/reference/secretref-credential-surface.md +++ b/docs/reference/secretref-credential-surface.md @@ -101,6 +101,7 @@ Notes: - Plan entries target `profiles.*.key` / `profiles.*.token` and write sibling refs (`keyRef` / `tokenRef`). - Auth-profile refs are included in runtime resolution and audit coverage. 
- For SecretRef-managed model providers, generated `agents/*/agent/models.json` entries persist non-secret markers (not resolved secret values) for `apiKey`/header surfaces. +- Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values. - For web search: - In explicit provider mode (`tools.web.search.provider` set), only the selected provider key is active. - In auto mode (`tools.web.search.provider` unset), only the first provider key that resolves by precedence is active. diff --git a/docs/reference/wizard.md b/docs/reference/wizard.md index 2e7a43bdecc..d58ab96c83a 100644 --- a/docs/reference/wizard.md +++ b/docs/reference/wizard.md @@ -38,7 +38,7 @@ For a high-level overview, see [Onboarding Wizard](/start/wizard). - Sets `agents.defaults.model` to `openai-codex/gpt-5.2` when model is unset or `openai/*`. - **OpenAI API key**: uses `OPENAI_API_KEY` if present or prompts for a key, then stores it in auth profiles. - **xAI (Grok) API key**: prompts for `XAI_API_KEY` and configures xAI as a model provider. - - **OpenCode Zen (multi-model proxy)**: prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`, get it at https://opencode.ai/auth). + - **OpenCode**: prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`, get it at https://opencode.ai/auth) and lets you pick the Zen or Go catalog. - **API key**: stores the key for you. - **Vercel AI Gateway (multi-model proxy)**: prompts for `AI_GATEWAY_API_KEY`. - More detail: [Vercel AI Gateway](/providers/vercel-ai-gateway) @@ -228,7 +228,7 @@ openclaw onboard --non-interactive \ --gateway-bind loopback ``` - + ```bash openclaw onboard --non-interactive \ --mode local \ @@ -237,6 +237,7 @@ openclaw onboard --non-interactive \ --gateway-port 18789 \ --gateway-bind loopback ``` + Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog. 
diff --git a/docs/start/wizard-cli-automation.md b/docs/start/wizard-cli-automation.md index 14f4a9d5d32..8547f60ac19 100644 --- a/docs/start/wizard-cli-automation.md +++ b/docs/start/wizard-cli-automation.md @@ -123,7 +123,7 @@ openclaw onboard --non-interactive \ --gateway-bind loopback ``` - + ```bash openclaw onboard --non-interactive \ --mode local \ @@ -132,6 +132,7 @@ openclaw onboard --non-interactive \ --gateway-port 18789 \ --gateway-bind loopback ``` + Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog. ```bash diff --git a/docs/start/wizard-cli-reference.md b/docs/start/wizard-cli-reference.md index 44f470ea73b..20f99accd8d 100644 --- a/docs/start/wizard-cli-reference.md +++ b/docs/start/wizard-cli-reference.md @@ -155,8 +155,8 @@ What you set: Prompts for `XAI_API_KEY` and configures xAI as a model provider. - - Prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`). + + Prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`) and lets you choose the Zen or Go catalog. Setup URL: [opencode.ai/auth](https://opencode.ai/auth). 
diff --git a/extensions/acpx/package.json b/extensions/acpx/package.json index 599d71579b0..ae4f7e695ef 100644 --- a/extensions/acpx/package.json +++ b/extensions/acpx/package.json @@ -1,10 +1,10 @@ { "name": "@openclaw/acpx", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw ACP runtime backend via acpx", "type": "module", "dependencies": { - "acpx": "0.1.16" + "acpx": "0.2.0" }, "openclaw": { "extensions": [ diff --git a/extensions/bluebubbles/package.json b/extensions/bluebubbles/package.json index 3c8605ef312..4918e9d3c02 100644 --- a/extensions/bluebubbles/package.json +++ b/extensions/bluebubbles/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/bluebubbles", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw BlueBubbles channel plugin", "type": "module", "dependencies": { diff --git a/extensions/bluebubbles/src/monitor-normalize.test.ts b/extensions/bluebubbles/src/monitor-normalize.test.ts index 3986909c259..3e06302593c 100644 --- a/extensions/bluebubbles/src/monitor-normalize.test.ts +++ b/extensions/bluebubbles/src/monitor-normalize.test.ts @@ -17,9 +17,28 @@ describe("normalizeWebhookMessage", () => { expect(result).not.toBeNull(); expect(result?.senderId).toBe("+15551234567"); + expect(result?.senderIdExplicit).toBe(false); expect(result?.chatGuid).toBe("iMessage;-;+15551234567"); }); + it("marks explicit sender handles as explicit identity", () => { + const result = normalizeWebhookMessage({ + type: "new-message", + data: { + guid: "msg-explicit-1", + text: "hello", + isGroup: false, + isFromMe: true, + handle: { address: "+15551234567" }, + chatGuid: "iMessage;-;+15551234567", + }, + }); + + expect(result).not.toBeNull(); + expect(result?.senderId).toBe("+15551234567"); + expect(result?.senderIdExplicit).toBe(true); + }); + it("does not infer sender from group chatGuid when sender handle is missing", () => { const result = normalizeWebhookMessage({ type: "new-message", @@ -72,6 +91,7 @@ 
describe("normalizeWebhookReaction", () => { expect(result).not.toBeNull(); expect(result?.senderId).toBe("+15551234567"); + expect(result?.senderIdExplicit).toBe(false); expect(result?.messageId).toBe("p:0/msg-1"); expect(result?.action).toBe("added"); }); diff --git a/extensions/bluebubbles/src/monitor-normalize.ts b/extensions/bluebubbles/src/monitor-normalize.ts index 173ea9c24a6..83454602d4c 100644 --- a/extensions/bluebubbles/src/monitor-normalize.ts +++ b/extensions/bluebubbles/src/monitor-normalize.ts @@ -191,12 +191,13 @@ function readFirstChatRecord(message: Record): Record): { senderId: string; + senderIdExplicit: boolean; senderName?: string; } { const handleValue = message.handle ?? message.sender; const handle = asRecord(handleValue) ?? (typeof handleValue === "string" ? { address: handleValue } : null); - const senderId = + const senderIdRaw = readString(handle, "address") ?? readString(handle, "handle") ?? readString(handle, "id") ?? @@ -204,13 +205,18 @@ function extractSenderInfo(message: Record): { readString(message, "sender") ?? readString(message, "from") ?? ""; + const senderId = senderIdRaw.trim(); const senderName = readString(handle, "displayName") ?? readString(handle, "name") ?? readString(message, "senderName") ?? undefined; - return { senderId, senderName }; + return { + senderId, + senderIdExplicit: Boolean(senderId), + senderName, + }; } function extractChatContext(message: Record): { @@ -441,6 +447,7 @@ export type BlueBubblesParticipant = { export type NormalizedWebhookMessage = { text: string; senderId: string; + senderIdExplicit: boolean; senderName?: string; messageId?: string; timestamp?: number; @@ -466,6 +473,7 @@ export type NormalizedWebhookReaction = { action: "added" | "removed"; emoji: string; senderId: string; + senderIdExplicit: boolean; senderName?: string; messageId: string; timestamp?: number; @@ -672,7 +680,7 @@ export function normalizeWebhookMessage( readString(message, "subject") ?? 
""; - const { senderId, senderName } = extractSenderInfo(message); + const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message); const { chatGuid, chatIdentifier, chatId, chatName, isGroup, participants } = extractChatContext(message); const normalizedParticipants = normalizeParticipantList(participants); @@ -717,7 +725,7 @@ export function normalizeWebhookMessage( // BlueBubbles may omit `handle` in webhook payloads; for DM chat GUIDs we can still infer sender. const senderFallbackFromChatGuid = - !senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; + !senderIdExplicit && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || ""); if (!normalizedSender) { return null; @@ -727,6 +735,7 @@ export function normalizeWebhookMessage( return { text, senderId: normalizedSender, + senderIdExplicit, senderName, messageId, timestamp, @@ -777,7 +786,7 @@ export function normalizeWebhookReaction( const emoji = (associatedEmoji?.trim() || mapping?.emoji) ?? `reaction:${associatedType}`; const action = mapping?.action ?? resolveTapbackActionHint(associatedType) ?? "added"; - const { senderId, senderName } = extractSenderInfo(message); + const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message); const { chatGuid, chatIdentifier, chatId, chatName, isGroup } = extractChatContext(message); const fromMe = readBoolean(message, "isFromMe") ?? readBoolean(message, "is_from_me"); @@ -793,7 +802,7 @@ export function normalizeWebhookReaction( : undefined; const senderFallbackFromChatGuid = - !senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; + !senderIdExplicit && !isGroup && chatGuid ? 
extractHandleFromChatGuid(chatGuid) : null; const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || ""); if (!normalizedSender) { return null; @@ -803,6 +812,7 @@ export function normalizeWebhookReaction( action, emoji, senderId: normalizedSender, + senderIdExplicit, senderName, messageId: associatedGuid, timestamp, diff --git a/extensions/bluebubbles/src/monitor-processing.ts b/extensions/bluebubbles/src/monitor-processing.ts index 6eb2ab08bc0..9cf72ea1efd 100644 --- a/extensions/bluebubbles/src/monitor-processing.ts +++ b/extensions/bluebubbles/src/monitor-processing.ts @@ -38,6 +38,10 @@ import { resolveBlueBubblesMessageId, resolveReplyContextFromCache, } from "./monitor-reply-cache.js"; +import { + hasBlueBubblesSelfChatCopy, + rememberBlueBubblesSelfChatCopy, +} from "./monitor-self-chat-cache.js"; import type { BlueBubblesCoreRuntime, BlueBubblesRuntimeEnv, @@ -47,7 +51,12 @@ import { isBlueBubblesPrivateApiEnabled } from "./probe.js"; import { normalizeBlueBubblesReactionInput, sendBlueBubblesReaction } from "./reactions.js"; import { normalizeSecretInputString } from "./secret-input.js"; import { resolveChatGuidForTarget, sendMessageBlueBubbles } from "./send.js"; -import { formatBlueBubblesChatTarget, isAllowedBlueBubblesSender } from "./targets.js"; +import { + extractHandleFromChatGuid, + formatBlueBubblesChatTarget, + isAllowedBlueBubblesSender, + normalizeBlueBubblesHandle, +} from "./targets.js"; const DEFAULT_TEXT_LIMIT = 4000; const invalidAckReactions = new Set(); @@ -80,6 +89,19 @@ function normalizeSnippet(value: string): string { return stripMarkdown(value).replace(/\s+/g, " ").trim().toLowerCase(); } +function isBlueBubblesSelfChatMessage( + message: NormalizedWebhookMessage, + isGroup: boolean, +): boolean { + if (isGroup || !message.senderIdExplicit) { + return false; + } + const chatHandle = + (message.chatGuid ? extractHandleFromChatGuid(message.chatGuid) : null) ?? 
+ normalizeBlueBubblesHandle(message.chatIdentifier ?? ""); + return Boolean(chatHandle) && chatHandle === message.senderId; +} + function prunePendingOutboundMessageIds(now = Date.now()): void { const cutoff = now - PENDING_OUTBOUND_MESSAGE_ID_TTL_MS; for (let i = pendingOutboundMessageIds.length - 1; i >= 0; i--) { @@ -453,8 +475,27 @@ export async function processMessage( ? `removed ${tapbackParsed.emoji} reaction` : `reacted with ${tapbackParsed.emoji}` : text || placeholder; + const isSelfChatMessage = isBlueBubblesSelfChatMessage(message, isGroup); + const selfChatLookup = { + accountId: account.accountId, + chatGuid: message.chatGuid, + chatIdentifier: message.chatIdentifier, + chatId: message.chatId, + senderId: message.senderId, + body: rawBody, + timestamp: message.timestamp, + }; const cacheMessageId = message.messageId?.trim(); + const confirmedOutboundCacheEntry = cacheMessageId + ? resolveReplyContextFromCache({ + accountId: account.accountId, + replyToId: cacheMessageId, + chatGuid: message.chatGuid, + chatIdentifier: message.chatIdentifier, + chatId: message.chatId, + }) + : null; let messageShortId: string | undefined; const cacheInboundMessage = () => { if (!cacheMessageId) { @@ -476,6 +517,12 @@ export async function processMessage( if (message.fromMe) { // Cache from-me messages so reply context can resolve sender/body. cacheInboundMessage(); + const confirmedAssistantOutbound = + confirmedOutboundCacheEntry?.senderLabel === "me" && + normalizeSnippet(confirmedOutboundCacheEntry.body ?? 
"") === normalizeSnippet(rawBody); + if (isSelfChatMessage && confirmedAssistantOutbound) { + rememberBlueBubblesSelfChatCopy(selfChatLookup); + } if (cacheMessageId) { const pending = consumePendingOutboundMessageId({ accountId: account.accountId, @@ -499,6 +546,11 @@ export async function processMessage( return; } + if (isSelfChatMessage && hasBlueBubblesSelfChatCopy(selfChatLookup)) { + logVerbose(core, runtime, `drop: reflected self-chat duplicate sender=${message.senderId}`); + return; + } + if (!rawBody) { logVerbose(core, runtime, `drop: empty text sender=${message.senderId}`); return; diff --git a/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts b/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts new file mode 100644 index 00000000000..3e843f6943d --- /dev/null +++ b/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts @@ -0,0 +1,190 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + hasBlueBubblesSelfChatCopy, + rememberBlueBubblesSelfChatCopy, + resetBlueBubblesSelfChatCache, +} from "./monitor-self-chat-cache.js"; + +describe("BlueBubbles self-chat cache", () => { + const directLookup = { + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + } as const; + + afterEach(() => { + resetBlueBubblesSelfChatCache(); + vi.useRealTimers(); + }); + + it("matches repeated lookups for the same scope, timestamp, and text", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: " hello\r\nworld ", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello\nworld", + timestamp: 123, + }), + ).toBe(true); + }); + + it("canonicalizes DM scope across chatIdentifier and chatGuid", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + accountId: "default", + chatIdentifier: 
"+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }), + ).toBe(true); + + resetBlueBubblesSelfChatCache(); + + rememberBlueBubblesSelfChatCopy({ + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + accountId: "default", + chatIdentifier: "+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }), + ).toBe(true); + }); + + it("expires entries after the ttl window", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello", + timestamp: 123, + }); + + vi.advanceTimersByTime(11_001); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello", + timestamp: 123, + }), + ).toBe(false); + }); + + it("evicts older entries when the cache exceeds its cap", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + for (let i = 0; i < 513; i += 1) { + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: `message-${i}`, + timestamp: i, + }); + vi.advanceTimersByTime(1_001); + } + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "message-0", + timestamp: 0, + }), + ).toBe(false); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "message-512", + timestamp: 512, + }), + ).toBe(true); + }); + + it("enforces the cache cap even when cleanup is throttled", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + for (let i = 0; i < 513; i += 1) { + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: `burst-${i}`, + timestamp: i, + }); + } + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: 
"burst-0", + timestamp: 0, + }), + ).toBe(false); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "burst-512", + timestamp: 512, + }), + ).toBe(true); + }); + + it("does not collide long texts that differ only in the middle", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const prefix = "a".repeat(256); + const suffix = "b".repeat(256); + const longBodyA = `${prefix}${"x".repeat(300)}${suffix}`; + const longBodyB = `${prefix}${"y".repeat(300)}${suffix}`; + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyA, + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyA, + timestamp: 123, + }), + ).toBe(true); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyB, + timestamp: 123, + }), + ).toBe(false); + }); +}); diff --git a/extensions/bluebubbles/src/monitor-self-chat-cache.ts b/extensions/bluebubbles/src/monitor-self-chat-cache.ts new file mode 100644 index 00000000000..09d7167d769 --- /dev/null +++ b/extensions/bluebubbles/src/monitor-self-chat-cache.ts @@ -0,0 +1,127 @@ +import { createHash } from "node:crypto"; +import { extractHandleFromChatGuid, normalizeBlueBubblesHandle } from "./targets.js"; + +type SelfChatCacheKeyParts = { + accountId: string; + chatGuid?: string; + chatIdentifier?: string; + chatId?: number; + senderId: string; +}; + +type SelfChatLookup = SelfChatCacheKeyParts & { + body?: string; + timestamp?: number; +}; + +const SELF_CHAT_TTL_MS = 10_000; +const MAX_SELF_CHAT_CACHE_ENTRIES = 512; +const CLEANUP_MIN_INTERVAL_MS = 1_000; +const MAX_SELF_CHAT_BODY_CHARS = 32_768; +const cache = new Map(); +let lastCleanupAt = 0; + +function normalizeBody(body: string | undefined): string | null { + if (!body) { + return null; + } + const bounded = + body.length > MAX_SELF_CHAT_BODY_CHARS ? 
body.slice(0, MAX_SELF_CHAT_BODY_CHARS) : body; + const normalized = bounded.replace(/\r\n?/g, "\n").trim(); + return normalized ? normalized : null; +} + +function isUsableTimestamp(timestamp: number | undefined): timestamp is number { + return typeof timestamp === "number" && Number.isFinite(timestamp); +} + +function digestText(text: string): string { + return createHash("sha256").update(text).digest("base64url"); +} + +function trimOrUndefined(value?: string | null): string | undefined { + const trimmed = value?.trim(); + return trimmed ? trimmed : undefined; +} + +function resolveCanonicalChatTarget(parts: SelfChatCacheKeyParts): string | null { + const handleFromGuid = parts.chatGuid ? extractHandleFromChatGuid(parts.chatGuid) : null; + if (handleFromGuid) { + return handleFromGuid; + } + + const normalizedIdentifier = normalizeBlueBubblesHandle(parts.chatIdentifier ?? ""); + if (normalizedIdentifier) { + return normalizedIdentifier; + } + + return ( + trimOrUndefined(parts.chatGuid) ?? + trimOrUndefined(parts.chatIdentifier) ?? + (typeof parts.chatId === "number" ? String(parts.chatId) : null) + ); +} + +function buildScope(parts: SelfChatCacheKeyParts): string { + const target = resolveCanonicalChatTarget(parts) ?? 
parts.senderId; + return `${parts.accountId}:${target}`; +} + +function cleanupExpired(now = Date.now()): void { + if ( + lastCleanupAt !== 0 && + now >= lastCleanupAt && + now - lastCleanupAt < CLEANUP_MIN_INTERVAL_MS + ) { + return; + } + lastCleanupAt = now; + for (const [key, seenAt] of cache.entries()) { + if (now - seenAt > SELF_CHAT_TTL_MS) { + cache.delete(key); + } + } +} + +function enforceSizeCap(): void { + while (cache.size > MAX_SELF_CHAT_CACHE_ENTRIES) { + const oldestKey = cache.keys().next().value; + if (typeof oldestKey !== "string") { + break; + } + cache.delete(oldestKey); + } +} + +function buildKey(lookup: SelfChatLookup): string | null { + const body = normalizeBody(lookup.body); + if (!body || !isUsableTimestamp(lookup.timestamp)) { + return null; + } + return `${buildScope(lookup)}:${lookup.timestamp}:${digestText(body)}`; +} + +export function rememberBlueBubblesSelfChatCopy(lookup: SelfChatLookup): void { + cleanupExpired(); + const key = buildKey(lookup); + if (!key) { + return; + } + cache.set(key, Date.now()); + enforceSizeCap(); +} + +export function hasBlueBubblesSelfChatCopy(lookup: SelfChatLookup): boolean { + cleanupExpired(); + const key = buildKey(lookup); + if (!key) { + return false; + } + const seenAt = cache.get(key); + return typeof seenAt === "number" && Date.now() - seenAt <= SELF_CHAT_TTL_MS; +} + +export function resetBlueBubblesSelfChatCache(): void { + cache.clear(); + lastCleanupAt = 0; +} diff --git a/extensions/bluebubbles/src/monitor.test.ts b/extensions/bluebubbles/src/monitor.test.ts index b02019058b8..1ba2e27f0b6 100644 --- a/extensions/bluebubbles/src/monitor.test.ts +++ b/extensions/bluebubbles/src/monitor.test.ts @@ -5,6 +5,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { createPluginRuntimeMock } from "../../test-utils/plugin-runtime-mock.js"; import type { ResolvedBlueBubblesAccount } from "./accounts.js"; import { fetchBlueBubblesHistory } from "./history.js"; 
+import { resetBlueBubblesSelfChatCache } from "./monitor-self-chat-cache.js"; import { handleBlueBubblesWebhookRequest, registerBlueBubblesWebhookTarget, @@ -246,6 +247,7 @@ describe("BlueBubbles webhook monitor", () => { vi.clearAllMocks(); // Reset short ID state between tests for predictable behavior _resetBlueBubblesShortIdState(); + resetBlueBubblesSelfChatCache(); mockFetchBlueBubblesHistory.mockResolvedValue({ entries: [], resolved: true }); mockReadAllowFromStore.mockResolvedValue([]); mockUpsertPairingRequest.mockResolvedValue({ code: "TESTCODE", created: true }); @@ -259,6 +261,7 @@ describe("BlueBubbles webhook monitor", () => { afterEach(() => { unregister?.(); + vi.useRealTimers(); }); describe("DM pairing behavior vs allowFrom", () => { @@ -2676,5 +2679,449 @@ describe("BlueBubbles webhook monitor", () => { expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); }); + + it("drops reflected self-chat duplicates after a confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + const { sendMessageBlueBubbles } = await import("./send.js"); + vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "msg-self-1" }); + + mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => { + await params.dispatcherOptions.deliver({ text: "replying now" }, { kind: "final" }); + return EMPTY_DISPATCH_RESULT; + }); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const inboundPayload = { + type: "new-message", + data: { + text: "hello", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-0", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await 
handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const fromMePayload = { + type: "new-message", + data: { + text: "replying now", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "replying now", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); + }); + + it("does not drop inbound messages when no fromMe self-chat copy was seen", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const inboundPayload = { + type: "new-message", + data: { + text: "genuinely new message", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-inbound-1", + chatGuid: "iMessage;-;+15551234567", + date: Date.now(), + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", 
inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not drop reflected copies after the self-chat cache TTL expires", async () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "ttl me", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-ttl-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await vi.runAllTimersAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + vi.advanceTimersByTime(10_001); + + const reflectedPayload = { + type: "new-message", + data: { + text: "ttl me", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-ttl-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await vi.runAllTimersAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not cache regular fromMe DMs as self-chat reflections", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = 
registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "shared text", + handle: { address: "+15557654321" }, + isGroup: false, + isFromMe: true, + guid: "msg-normal-fromme", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const inboundPayload = { + type: "new-message", + data: { + text: "shared text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-normal-inbound", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not drop user-authored self-chat prompts without a confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "user-authored self prompt", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-user-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", 
fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "user-authored self prompt", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-user-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not treat a pending text-only match as confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + const { sendMessageBlueBubbles } = await import("./send.js"); + vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "ok" }); + + mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => { + await params.dispatcherOptions.deliver({ text: "same text" }, { kind: "final" }); + return EMPTY_DISPATCH_RESULT; + }); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const inboundPayload = { + type: "new-message", + data: { + text: "hello", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-race-0", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + 
mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const fromMePayload = { + type: "new-message", + data: { + text: "same text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-race-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "same text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-race-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not treat chatGuid-inferred sender ids as self-chat evidence", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "shared inferred text", + handle: null, + isGroup: false, + isFromMe: true, + guid: "msg-inferred-fromme", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const inboundPayload = { + type: "new-message", + data: { + text: "shared inferred text", + handle: { 
address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-inferred-inbound", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); }); }); diff --git a/extensions/copilot-proxy/package.json b/extensions/copilot-proxy/package.json index e060ddd67f1..56f6c1085ee 100644 --- a/extensions/copilot-proxy/package.json +++ b/extensions/copilot-proxy/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/copilot-proxy", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Copilot Proxy provider plugin", "type": "module", diff --git a/extensions/diagnostics-otel/package.json b/extensions/diagnostics-otel/package.json index 29c9b0ac79b..91aea1e9256 100644 --- a/extensions/diagnostics-otel/package.json +++ b/extensions/diagnostics-otel/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/diagnostics-otel", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw diagnostics OpenTelemetry exporter", "type": "module", "dependencies": { diff --git a/extensions/diffs/package.json b/extensions/diffs/package.json index b685f985108..c9e30cee333 100644 --- a/extensions/diffs/package.json +++ b/extensions/diffs/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/diffs", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw diff viewer plugin", "type": "module", diff --git a/extensions/discord/package.json b/extensions/discord/package.json index f30f10ade51..7f291bd1c7a 100644 --- a/extensions/discord/package.json +++ b/extensions/discord/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/discord", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Discord channel plugin", "type": "module", "openclaw": { 
diff --git a/extensions/feishu/package.json b/extensions/feishu/package.json index fc38816e1bd..116f15f08d2 100644 --- a/extensions/feishu/package.json +++ b/extensions/feishu/package.json @@ -1,12 +1,12 @@ { "name": "@openclaw/feishu", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Feishu/Lark channel plugin (community maintained by @m1heng)", "type": "module", "dependencies": { "@larksuiteoapi/node-sdk": "^1.59.0", "@sinclair/typebox": "0.34.48", - "https-proxy-agent": "^7.0.6", + "https-proxy-agent": "^8.0.0", "zod": "^4.3.6" }, "openclaw": { diff --git a/extensions/google-gemini-cli-auth/package.json b/extensions/google-gemini-cli-auth/package.json index 2ab1c6a6ca8..7a84f58020a 100644 --- a/extensions/google-gemini-cli-auth/package.json +++ b/extensions/google-gemini-cli-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/google-gemini-cli-auth", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Gemini CLI OAuth provider plugin", "type": "module", diff --git a/extensions/googlechat/package.json b/extensions/googlechat/package.json index 61128b78032..2b9eee3932e 100644 --- a/extensions/googlechat/package.json +++ b/extensions/googlechat/package.json @@ -1,15 +1,12 @@ { "name": "@openclaw/googlechat", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Google Chat channel plugin", "type": "module", "dependencies": { "google-auth-library": "^10.6.1" }, - "devDependencies": { - "openclaw": "workspace:*" - }, "peerDependencies": { "openclaw": ">=2026.3.7" }, diff --git a/extensions/imessage/package.json b/extensions/imessage/package.json index 3f38e01efe1..8add26a2fe7 100644 --- a/extensions/imessage/package.json +++ b/extensions/imessage/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/imessage", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw iMessage channel plugin", "type": "module", diff --git 
a/extensions/irc/package.json b/extensions/irc/package.json index 34c7de1dcfb..e6e9bdfe6b4 100644 --- a/extensions/irc/package.json +++ b/extensions/irc/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/irc", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw IRC channel plugin", "type": "module", "dependencies": { diff --git a/extensions/line/package.json b/extensions/line/package.json index 9ec37f833e7..4f98b21c7a2 100644 --- a/extensions/line/package.json +++ b/extensions/line/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/line", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw LINE channel plugin", "type": "module", diff --git a/extensions/llm-task/package.json b/extensions/llm-task/package.json index 8a74b2ead7e..bf63c9b28fc 100644 --- a/extensions/llm-task/package.json +++ b/extensions/llm-task/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/llm-task", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw JSON-only LLM task plugin", "type": "module", diff --git a/extensions/lobster/package.json b/extensions/lobster/package.json index 4c137401fbb..c0c243b28c0 100644 --- a/extensions/lobster/package.json +++ b/extensions/lobster/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/lobster", - "version": "2026.3.9", + "version": "2026.3.11", "description": "Lobster workflow tool plugin (typed pipelines + resumable approvals)", "type": "module", "dependencies": { diff --git a/extensions/matrix/CHANGELOG.md b/extensions/matrix/CHANGELOG.md index a3b32a18c85..65f31b8445e 100644 --- a/extensions/matrix/CHANGELOG.md +++ b/extensions/matrix/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/matrix/package.json b/extensions/matrix/package.json index c1b5859b43e..8a132a9edf5 100644 --- a/extensions/matrix/package.json +++ b/extensions/matrix/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/matrix", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Matrix channel plugin", "type": "module", "dependencies": { diff --git a/extensions/mattermost/package.json b/extensions/mattermost/package.json index d532764db87..e16e158545e 100644 --- a/extensions/mattermost/package.json +++ b/extensions/mattermost/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/mattermost", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Mattermost channel plugin", "type": "module", "dependencies": { diff --git a/extensions/mattermost/src/channel.ts b/extensions/mattermost/src/channel.ts index 2dffaa6f3cf..42d167948a0 100644 --- a/extensions/mattermost/src/channel.ts +++ b/extensions/mattermost/src/channel.ts @@ -270,6 +270,16 @@ export const mattermostPlugin: ChannelPlugin = { streaming: { blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 }, }, + threading: { + resolveReplyToMode: ({ cfg, accountId }) => { + const account = resolveMattermostAccount({ cfg, accountId: accountId ?? 
"default" }); + const mode = account.config.replyToMode; + if (mode === "off" || mode === "first") { + return mode; + } + return "all"; + }, + }, reload: { configPrefixes: ["channels.mattermost"] }, configSchema: buildChannelConfigSchema(MattermostConfigSchema), config: { diff --git a/extensions/mattermost/src/config-schema.ts b/extensions/mattermost/src/config-schema.ts index 51d9bdbe33a..43dd7ede8d2 100644 --- a/extensions/mattermost/src/config-schema.ts +++ b/extensions/mattermost/src/config-schema.ts @@ -43,6 +43,7 @@ const MattermostAccountSchemaBase = z chunkMode: z.enum(["length", "newline"]).optional(), blockStreaming: z.boolean().optional(), blockStreamingCoalesce: BlockStreamingCoalesceSchema.optional(), + replyToMode: z.enum(["off", "first", "all"]).optional(), responsePrefix: z.string().optional(), actions: z .object({ diff --git a/extensions/mattermost/src/mattermost/monitor.test.ts b/extensions/mattermost/src/mattermost/monitor.test.ts index 1bd871714c4..d479909ac05 100644 --- a/extensions/mattermost/src/mattermost/monitor.test.ts +++ b/extensions/mattermost/src/mattermost/monitor.test.ts @@ -109,6 +109,29 @@ describe("mattermost mention gating", () => { }); }); +describe("resolveMattermostReplyRootId with block streaming payloads", () => { + it("uses threadRootId for block-streamed payloads with replyToId", () => { + // When block streaming sends a payload with replyToId from the threading + // mode, the deliver callback should still use the existing threadRootId. + expect( + resolveMattermostReplyRootId({ + threadRootId: "thread-root-1", + replyToId: "streamed-reply-id", + }), + ).toBe("thread-root-1"); + }); + + it("falls back to payload replyToId when no threadRootId in block streaming", () => { + // Top-level channel message: no threadRootId, payload carries the + // inbound post id as replyToId from the "all" threading mode. 
+ expect( + resolveMattermostReplyRootId({ + replyToId: "inbound-post-for-threading", + }), + ).toBe("inbound-post-for-threading"); + }); +}); + describe("resolveMattermostReplyRootId", () => { it("uses replyToId for top-level replies", () => { expect( diff --git a/extensions/mattermost/src/types.ts b/extensions/mattermost/src/types.ts index ba664baa894..86de9c1a714 100644 --- a/extensions/mattermost/src/types.ts +++ b/extensions/mattermost/src/types.ts @@ -52,6 +52,8 @@ export type MattermostAccountConfig = { blockStreaming?: boolean; /** Merge streamed block replies before sending. */ blockStreamingCoalesce?: BlockStreamingCoalesceConfig; + /** Control reply threading (off|first|all). Default: "all". */ + replyToMode?: "off" | "first" | "all"; /** Outbound response prefix override for this channel/account. */ responsePrefix?: string; /** Action toggles for this account. */ diff --git a/extensions/memory-core/package.json b/extensions/memory-core/package.json index 0af3fc45281..640c81e1539 100644 --- a/extensions/memory-core/package.json +++ b/extensions/memory-core/package.json @@ -1,12 +1,9 @@ { "name": "@openclaw/memory-core", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw core memory search plugin", "type": "module", - "devDependencies": { - "openclaw": "workspace:*" - }, "peerDependencies": { "openclaw": ">=2026.3.7" }, diff --git a/extensions/memory-lancedb/package.json b/extensions/memory-lancedb/package.json index abd920833ca..2a1b2a9994b 100644 --- a/extensions/memory-lancedb/package.json +++ b/extensions/memory-lancedb/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/memory-lancedb", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw LanceDB-backed long-term memory plugin with auto-recall/capture", "type": "module", diff --git a/extensions/minimax-portal-auth/package.json b/extensions/minimax-portal-auth/package.json index 9443f37d524..6e11b99212f 100644 --- 
a/extensions/minimax-portal-auth/package.json +++ b/extensions/minimax-portal-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/minimax-portal-auth", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw MiniMax Portal OAuth provider plugin", "type": "module", diff --git a/extensions/msteams/CHANGELOG.md b/extensions/msteams/CHANGELOG.md index 38d5614305c..bf82200cf59 100644 --- a/extensions/msteams/CHANGELOG.md +++ b/extensions/msteams/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/msteams/package.json b/extensions/msteams/package.json index c4453f82f6e..c159d091977 100644 --- a/extensions/msteams/package.json +++ b/extensions/msteams/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/msteams", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Microsoft Teams channel plugin", "type": "module", "dependencies": { diff --git a/extensions/nextcloud-talk/package.json b/extensions/nextcloud-talk/package.json index 96797d4b76e..9ef0a1daf09 100644 --- a/extensions/nextcloud-talk/package.json +++ b/extensions/nextcloud-talk/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nextcloud-talk", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Nextcloud Talk channel plugin", "type": "module", "dependencies": { diff --git a/extensions/nostr/CHANGELOG.md b/extensions/nostr/CHANGELOG.md index 3088efcc2bb..dcb4c18fdfa 100644 --- a/extensions/nostr/CHANGELOG.md +++ b/extensions/nostr/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/nostr/package.json b/extensions/nostr/package.json index dbee4bc09d7..f02b67b6837 100644 --- a/extensions/nostr/package.json +++ b/extensions/nostr/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nostr", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Nostr channel plugin for NIP-04 encrypted DMs", "type": "module", "dependencies": { diff --git a/extensions/open-prose/package.json b/extensions/open-prose/package.json index 240a2bbcb41..de86909f961 100644 --- a/extensions/open-prose/package.json +++ b/extensions/open-prose/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/open-prose", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenProse VM skill pack plugin (slash command + telemetry).", "type": "module", diff --git a/extensions/signal/package.json b/extensions/signal/package.json index 743c8212d31..6fd516cfd42 100644 --- a/extensions/signal/package.json +++ b/extensions/signal/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/signal", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Signal channel plugin", "type": "module", diff --git a/extensions/slack/package.json b/extensions/slack/package.json index 539541bdc6d..dbc4a4483c4 100644 --- a/extensions/slack/package.json +++ b/extensions/slack/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/slack", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Slack channel plugin", "type": "module", diff --git a/extensions/synology-chat/package.json b/extensions/synology-chat/package.json index 00503898817..0e7b4847494 100644 --- a/extensions/synology-chat/package.json +++ b/extensions/synology-chat/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/synology-chat", - "version": "2026.3.9", + "version": "2026.3.11", "description": "Synology Chat channel plugin for OpenClaw", "type": "module", "dependencies": { diff --git 
a/extensions/telegram/package.json b/extensions/telegram/package.json index 6602b46f2c8..8ffa3acf603 100644 --- a/extensions/telegram/package.json +++ b/extensions/telegram/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/telegram", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw Telegram channel plugin", "type": "module", diff --git a/extensions/tlon/package.json b/extensions/tlon/package.json index 0cb79328d89..154e1dd6dbd 100644 --- a/extensions/tlon/package.json +++ b/extensions/tlon/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/tlon", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Tlon/Urbit channel plugin", "type": "module", "dependencies": { diff --git a/extensions/twitch/CHANGELOG.md b/extensions/twitch/CHANGELOG.md index 48160f427e8..844ef13dc6c 100644 --- a/extensions/twitch/CHANGELOG.md +++ b/extensions/twitch/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/twitch/package.json b/extensions/twitch/package.json index 5fbf49cc971..3bcdf9fe847 100644 --- a/extensions/twitch/package.json +++ b/extensions/twitch/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/twitch", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Twitch channel plugin", "type": "module", "dependencies": { diff --git a/extensions/voice-call/CHANGELOG.md b/extensions/voice-call/CHANGELOG.md index a8a4586116c..93aba26c868 100644 --- a/extensions/voice-call/CHANGELOG.md +++ b/extensions/voice-call/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/voice-call/openclaw.plugin.json b/extensions/voice-call/openclaw.plugin.json index d9a904c73eb..fef3ccc6ad9 100644 --- a/extensions/voice-call/openclaw.plugin.json +++ b/extensions/voice-call/openclaw.plugin.json @@ -522,11 +522,22 @@ "apiKey": { "type": "string" }, + "baseUrl": { + "type": "string" + }, "model": { "type": "string" }, "voice": { "type": "string" + }, + "speed": { + "type": "number", + "minimum": 0.25, + "maximum": 4.0 + }, + "instructions": { + "type": "string" } } }, diff --git a/extensions/voice-call/package.json b/extensions/voice-call/package.json index 420f8b41560..9bdadd3b226 100644 --- a/extensions/voice-call/package.json +++ b/extensions/voice-call/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/voice-call", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw voice-call plugin", "type": "module", "dependencies": { diff --git a/extensions/voice-call/src/providers/tts-openai.ts b/extensions/voice-call/src/providers/tts-openai.ts index a27030b4578..0a7c74d90ac 100644 --- a/extensions/voice-call/src/providers/tts-openai.ts +++ b/extensions/voice-call/src/providers/tts-openai.ts @@ -1,3 +1,4 @@ +import { resolveOpenAITtsInstructions } from "openclaw/plugin-sdk/voice-call"; import { pcmToMulaw } from "../telephony-audio.js"; /** @@ -110,9 +111,11 @@ export class OpenAITTSProvider { speed: this.speed, }; - // Add instructions if using gpt-4o-mini-tts model - const effectiveInstructions = trimToUndefined(instructions) ?? this.instructions; - if (effectiveInstructions && this.model.includes("gpt-4o-mini-tts")) { + const effectiveInstructions = resolveOpenAITtsInstructions( + this.model, + trimToUndefined(instructions) ?? 
this.instructions, + ); + if (effectiveInstructions) { body.instructions = effectiveInstructions; } diff --git a/extensions/whatsapp/package.json b/extensions/whatsapp/package.json index c87a5f26c2b..1a21be8eba9 100644 --- a/extensions/whatsapp/package.json +++ b/extensions/whatsapp/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/whatsapp", - "version": "2026.3.9", + "version": "2026.3.11", "private": true, "description": "OpenClaw WhatsApp channel plugin", "type": "module", diff --git a/extensions/zalo/CHANGELOG.md b/extensions/zalo/CHANGELOG.md index 5ae5323034f..178f993e825 100644 --- a/extensions/zalo/CHANGELOG.md +++ b/extensions/zalo/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/zalo/package.json b/extensions/zalo/package.json index 6de5909736f..463887c68fe 100644 --- a/extensions/zalo/package.json +++ b/extensions/zalo/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/zalo", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Zalo channel plugin", "type": "module", "dependencies": { diff --git a/extensions/zalouser/CHANGELOG.md b/extensions/zalouser/CHANGELOG.md index 10c22ce4029..b5a0fbb6f57 100644 --- a/extensions/zalouser/CHANGELOG.md +++ b/extensions/zalouser/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/zalouser/package.json b/extensions/zalouser/package.json index 79bf5723d48..2b803b0b150 100644 --- a/extensions/zalouser/package.json +++ b/extensions/zalouser/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/zalouser", - "version": "2026.3.9", + "version": "2026.3.11", "description": "OpenClaw Zalo Personal Account plugin via native zca-js integration", "type": "module", "dependencies": { diff --git a/package.json b/package.json index 2e4dbc0d97e..9c1100bc49f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openclaw", - "version": "2026.3.9", + "version": "2026.3.11", "description": "Multi-channel AI gateway with extensible messaging integrations", "keywords": [], "homepage": "https://github.com/openclaw/openclaw#readme", @@ -262,10 +262,13 @@ "gateway:watch": "node scripts/watch-node.mjs gateway --force", "gen:host-env-policy:swift": "node scripts/generate-host-env-security-policy-swift.mjs --write", "ghsa:patch": "node scripts/ghsa-patch.mjs", - "ios:build": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build'", - "ios:gen": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate'", - "ios:open": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && open OpenClaw.xcodeproj'", - "ios:run": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build && xcrun simctl boot \"${IOS_SIM:-iPhone 17}\" || true && xcrun simctl launch booted ai.openclaw.ios'", + "ios:beta": "bash scripts/ios-beta-release.sh", + "ios:beta:archive": "bash scripts/ios-beta-archive.sh", + "ios:beta:prepare": 
"bash scripts/ios-beta-prepare.sh", + "ios:build": "bash -lc './scripts/ios-configure-signing.sh && ./scripts/ios-write-version-xcconfig.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build'", + "ios:gen": "bash -lc './scripts/ios-configure-signing.sh && ./scripts/ios-write-version-xcconfig.sh && cd apps/ios && xcodegen generate'", + "ios:open": "bash -lc './scripts/ios-configure-signing.sh && ./scripts/ios-write-version-xcconfig.sh && cd apps/ios && xcodegen generate && open OpenClaw.xcodeproj'", + "ios:run": "bash -lc './scripts/ios-configure-signing.sh && ./scripts/ios-write-version-xcconfig.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build && xcrun simctl boot \"${IOS_SIM:-iPhone 17}\" || true && xcrun simctl launch booted ai.openclaw.ios'", "lint": "oxlint --type-aware", "lint:agent:ingress-owner": "node scripts/check-ingress-agent-owner-context.mjs", "lint:all": "pnpm lint && pnpm lint:swift", @@ -335,11 +338,11 @@ "ui:install": "node scripts/ui.js install" }, "dependencies": { - "@agentclientprotocol/sdk": "0.15.0", - "@aws-sdk/client-bedrock": "^3.1004.0", + "@agentclientprotocol/sdk": "0.16.1", + "@aws-sdk/client-bedrock": "^3.1007.0", "@buape/carbon": "0.0.0-beta-20260216184201", "@clack/prompts": "^1.1.0", - "@discordjs/voice": "^0.19.0", + "@discordjs/voice": "^0.19.1", "@grammyjs/runner": "^2.0.3", "@grammyjs/transformer-throttler": "^1.2.1", "@homebridge/ciao": "^1.3.5", @@ -361,13 +364,13 @@ "cli-highlight": "^2.1.11", "commander": "^14.0.3", "croner": "^10.0.1", - "discord-api-types": "^0.38.41", + "discord-api-types": "^0.38.42", "dotenv": "^17.3.1", "express": "^5.2.1", "file-type": "^21.3.1", "grammy": "^1.41.1", "hono": "4.12.7", - "https-proxy-agent": "^7.0.6", + 
"https-proxy-agent": "^8.0.0", "ipaddr.js": "^2.3.0", "jiti": "^2.6.1", "json5": "^2.2.3", @@ -396,18 +399,18 @@ "@lit/context": "^1.1.6", "@types/express": "^5.0.6", "@types/markdown-it": "^14.1.2", - "@types/node": "^25.3.5", + "@types/node": "^25.4.0", "@types/qrcode-terminal": "^0.12.2", "@types/ws": "^8.18.1", - "@typescript/native-preview": "7.0.0-dev.20260308.1", + "@typescript/native-preview": "7.0.0-dev.20260311.1", "@vitest/coverage-v8": "^4.0.18", "jscpd": "4.0.8", "lit": "^3.3.2", - "oxfmt": "0.36.0", - "oxlint": "^1.51.0", + "oxfmt": "0.38.0", + "oxlint": "^1.53.0", "oxlint-tsgolint": "^0.16.0", "signal-utils": "0.21.1", - "tsdown": "0.21.0", + "tsdown": "0.21.2", "tsx": "^4.21.0", "typescript": "^5.9.3", "vitest": "^4.0.18" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 72fa7353329..1e26495971c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -26,11 +26,11 @@ importers: .: dependencies: '@agentclientprotocol/sdk': - specifier: 0.15.0 - version: 0.15.0(zod@4.3.6) + specifier: 0.16.1 + version: 0.16.1(zod@4.3.6) '@aws-sdk/client-bedrock': - specifier: ^3.1004.0 - version: 3.1004.0 + specifier: ^3.1007.0 + version: 3.1007.0 '@buape/carbon': specifier: 0.0.0-beta-20260216184201 version: 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) @@ -38,8 +38,8 @@ importers: specifier: ^1.1.0 version: 1.1.0 '@discordjs/voice': - specifier: ^0.19.0 - version: 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1) + specifier: ^0.19.1 + version: 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1) '@grammyjs/runner': specifier: ^2.0.3 version: 2.0.3(grammy@1.41.1) @@ -107,8 +107,8 @@ importers: specifier: ^10.0.1 version: 10.0.1 discord-api-types: - specifier: ^0.38.41 - version: 0.38.41 + specifier: ^0.38.42 + version: 0.38.42 dotenv: specifier: ^17.3.1 version: 17.3.1 @@ -125,8 +125,8 @@ importers: specifier: 4.12.7 version: 4.12.7 https-proxy-agent: - specifier: ^7.0.6 - version: 7.0.6 + specifier: ^8.0.0 + version: 8.0.0 ipaddr.js: 
specifier: ^2.3.0 version: 2.3.0 @@ -210,8 +210,8 @@ importers: specifier: ^14.1.2 version: 14.1.2 '@types/node': - specifier: ^25.3.5 - version: 25.3.5 + specifier: ^25.4.0 + version: 25.4.0 '@types/qrcode-terminal': specifier: ^0.12.2 version: 0.12.2 @@ -219,11 +219,11 @@ importers: specifier: ^8.18.1 version: 8.18.1 '@typescript/native-preview': - specifier: 7.0.0-dev.20260308.1 - version: 7.0.0-dev.20260308.1 + specifier: 7.0.0-dev.20260311.1 + version: 7.0.0-dev.20260311.1 '@vitest/coverage-v8': specifier: ^4.0.18 - version: 4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18) + version: 4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18) jscpd: specifier: 4.0.8 version: 4.0.8 @@ -231,11 +231,11 @@ importers: specifier: ^3.3.2 version: 3.3.2 oxfmt: - specifier: 0.36.0 - version: 0.36.0 + specifier: 0.38.0 + version: 0.38.0 oxlint: - specifier: ^1.51.0 - version: 1.51.0(oxlint-tsgolint@0.16.0) + specifier: ^1.53.0 + version: 1.53.0(oxlint-tsgolint@0.16.0) oxlint-tsgolint: specifier: ^0.16.0 version: 0.16.0 @@ -243,8 +243,8 @@ importers: specifier: 0.21.1 version: 0.21.1(signal-polyfill@0.2.2) tsdown: - specifier: 0.21.0 - version: 0.21.0(@typescript/native-preview@7.0.0-dev.20260308.1)(typescript@5.9.3) + specifier: 0.21.2 + version: 0.21.2(@typescript/native-preview@7.0.0-dev.20260311.1)(typescript@5.9.3) tsx: specifier: ^4.21.0 version: 4.21.0 @@ -253,13 +253,13 @@ importers: version: 5.9.3 vitest: specifier: ^4.0.18 - version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) extensions/acpx: dependencies: acpx: - 
specifier: 0.1.16 - version: 0.1.16(zod@4.3.6) + specifier: 0.2.0 + version: 0.2.0(zod@4.3.6) extensions/bluebubbles: dependencies: @@ -328,8 +328,8 @@ importers: specifier: 0.34.48 version: 0.34.48 https-proxy-agent: - specifier: ^7.0.6 - version: 7.0.6 + specifier: ^8.0.0 + version: 8.0.0 zod: specifier: ^4.3.6 version: 4.3.6 @@ -341,10 +341,9 @@ importers: google-auth-library: specifier: ^10.6.1 version: 10.6.1 - devDependencies: openclaw: - specifier: workspace:* - version: link:../.. + specifier: '>=2026.3.7' + version: 2026.3.8(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.7)(node-llama-cpp@3.16.2(typescript@5.9.3)) extensions/imessage: {} @@ -402,10 +401,10 @@ importers: version: 4.3.6 extensions/memory-core: - devDependencies: + dependencies: openclaw: - specifier: workspace:* - version: link:../.. + specifier: '>=2026.3.7' + version: 2026.3.8(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.7)(node-llama-cpp@3.16.2(typescript@5.9.3)) extensions/memory-lancedb: dependencies: @@ -551,8 +550,8 @@ importers: specifier: 3.0.0 version: 3.0.0 dompurify: - specifier: ^3.3.2 - version: 3.3.2 + specifier: ^3.3.3 + version: 3.3.3 lit: specifier: ^3.3.2 version: 3.3.2 @@ -567,17 +566,17 @@ importers: version: 0.21.1(signal-polyfill@0.2.2) vite: specifier: 7.3.1 - version: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) devDependencies: '@vitest/browser-playwright': specifier: 4.0.18 - version: 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + version: 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) playwright: specifier: ^1.58.2 version: 1.58.2 vitest: specifier: 4.0.18 - 
version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) packages: @@ -586,6 +585,11 @@ packages: peerDependencies: zod: ^3.25.0 || ^4.0.0 + '@agentclientprotocol/sdk@0.16.1': + resolution: {integrity: sha512-1ad+Sc/0sCtZGHthxxvgEUo5Wsbw16I+aF+YwdiLnPwkZG8KAGUEAPK6LM6Pf69lCyJPt1Aomk1d+8oE3C4ZEw==} + peerDependencies: + zod: ^3.25.0 || ^4.0.0 + '@anthropic-ai/sdk@0.73.0': resolution: {integrity: sha512-URURVzhxXGJDGUGFunIOtBlSl7KWvZiAAKY/ttTkZAkXT9bTPqdk2eK0b8qqSxXpikh3QKPnPYpiyX98zf5ebw==} hasBin: true @@ -622,8 +626,8 @@ packages: resolution: {integrity: sha512-t8cl+bPLlHZQD2Sw1a4hSLUybqJZU71+m8znkyeU8CHntFqEp2mMbuLKdHKaAYQ1fAApXMsvzenCAkDzNeeJlw==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock@3.1004.0': - resolution: {integrity: sha512-JbfZSV85IL+43S7rPBmeMbvoOYXs1wmrfbEpHkDBjkvbukRQWtoetiPAXNSKDfFq1qVsoq8sWPdoerDQwlUO8w==} + '@aws-sdk/client-bedrock@3.1007.0': + resolution: {integrity: sha512-49hH8o6ALKkCiBUgg20HkwxNamP1yYA/n8Si73Z438EqhZGpCfScP3FfxVhrfD5o+4bV4Whi9BTzPKCa/PfUww==} engines: {node: '>=20.0.0'} '@aws-sdk/client-s3@3.1000.0': @@ -638,6 +642,10 @@ packages: resolution: {integrity: sha512-GUIlegfcK2LO1J2Y98sCJy63rQSiLiDOgVw7HiHPRqfI2vb3XozTVqemwO0VSGXp54ngCnAQz0Lf0YPCBINNxA==} engines: {node: '>=20.0.0'} + '@aws-sdk/core@3.973.19': + resolution: {integrity: sha512-56KePyOcZnKTWCd89oJS1G6j3HZ9Kc+bh/8+EbvtaCCXdP6T7O7NzCiPuHRhFLWnzXIaXX3CxAz0nI5My9spHQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/crc64-nvme@3.972.3': resolution: {integrity: sha512-UExeK+EFiq5LAcbHm96CQLSia+5pvpUVSAsVApscBzayb7/6dJBJKwV4/onsk4VbWSmqxDMcfuTD+pC4RxgZHg==} engines: {node: '>=20.0.0'} @@ -650,6 +658,10 @@ packages: resolution: {integrity: 
sha512-HrdtnadvTGAQUr18sPzGlE5El3ICphnH6SU7UQOMOWFgRKbTRNN8msTxM4emzguUso9CzaHU2xy5ctSrmK5YNA==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-env@3.972.17': + resolution: {integrity: sha512-MBAMW6YELzE1SdkOniqr51mrjapQUv8JXSGxtwRjQV0mwVDutVsn22OPAUt4RcLRvdiHQmNBDEFP9iTeSVCOlA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-http@3.972.15': resolution: {integrity: sha512-dJuSTreu/T8f24SHDNTjd7eQ4rabr0TzPh2UTCwYexQtzG3nTDKm1e5eIdhiroTMDkPEJeY+WPkA6F9wod/20A==} engines: {node: '>=20.0.0'} @@ -658,6 +670,10 @@ packages: resolution: {integrity: sha512-NyB6smuZAixND5jZumkpkunQ0voc4Mwgkd+SZ6cvAzIB7gK8HV8Zd4rS8Kn5MmoGgusyNfVGG+RLoYc4yFiw+A==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-http@3.972.19': + resolution: {integrity: sha512-9EJROO8LXll5a7eUFqu48k6BChrtokbmgeMWmsH7lBb6lVbtjslUYz/ShLi+SHkYzTomiGBhmzTW7y+H4BxsnA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-ini@3.972.13': resolution: {integrity: sha512-JKSoGb7XeabZLBJptpqoZIFbROUIS65NuQnEHGOpuT9GuuZwag2qciKANiDLFiYk4u8nSrJC9JIOnWKVvPVjeA==} engines: {node: '>=20.0.0'} @@ -666,6 +682,10 @@ packages: resolution: {integrity: sha512-dFqh7nfX43B8dO1aPQHOcjC0SnCJ83H3F+1LoCh3X1P7E7N09I+0/taID0asU6GCddfDExqnEvQtDdkuMe5tKQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-ini@3.972.18': + resolution: {integrity: sha512-vthIAXJISZnj2576HeyLBj4WTeX+I7PwWeRkbOa0mVX39K13SCGxCgOFuKj2ytm9qTlLOmXe4cdEnroteFtJfw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-login@3.972.13': resolution: {integrity: sha512-RtYcrxdnJHKY8MFQGLltCURcjuMjnaQpAxPE6+/QEdDHHItMKZgabRe/KScX737F9vJMQsmJy9EmMOkCnoC1JQ==} engines: {node: '>=20.0.0'} @@ -674,6 +694,10 @@ packages: resolution: {integrity: sha512-gf2E5b7LpKb+JX2oQsRIDxdRZjBFZt2olCGlWCdb3vBERbXIPgm2t1R5mEnwd4j0UEO/Tbg5zN2KJbHXttJqwA==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-login@3.972.18': + resolution: {integrity: 
sha512-kINzc5BBxdYBkPZ0/i1AMPMOk5b5QaFNbYMElVw5QTX13AKj6jcxnv/YNl9oW9mg+Y08ti19hh01HhyEAxsSJQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-node@3.972.14': resolution: {integrity: sha512-WqoC2aliIjQM/L3oFf6j+op/enT2i9Cc4UTxxMEKrJNECkq4/PlKE5BOjSYFcq6G9mz65EFbXJh7zOU4CvjSKQ==} engines: {node: '>=20.0.0'} @@ -682,6 +706,10 @@ packages: resolution: {integrity: sha512-ZDJa2gd1xiPg/nBDGhUlat02O8obaDEnICBAVS8qieZ0+nDfaB0Z3ec6gjZj27OqFTjnB/Q5a0GwQwb7rMVViw==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-node@3.972.19': + resolution: {integrity: sha512-yDWQ9dFTr+IMxwanFe7+tbN5++q8psZBjlUwOiCXn1EzANoBgtqBwcpYcHaMGtn0Wlfj4NuXdf2JaEx1lz5RaQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-process@3.972.13': resolution: {integrity: sha512-rsRG0LQA4VR+jnDyuqtXi2CePYSmfm5GNL9KxiW8DSe25YwJSr06W8TdUfONAC+rjsTI+aIH2rBGG5FjMeANrw==} engines: {node: '>=20.0.0'} @@ -690,6 +718,10 @@ packages: resolution: {integrity: sha512-n89ibATwnLEg0ZdZmUds5bq8AfBAdoYEDpqP3uzPLaRuGelsKlIvCYSNNvfgGLi8NaHPNNhs1HjJZYbqkW9b+g==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-process@3.972.17': + resolution: {integrity: sha512-c8G8wT1axpJDgaP3xzcy+q8Y1fTi9A2eIQJvyhQ9xuXrUZhlCfXbC0vM9bM1CUXiZppFQ1p7g0tuUMvil/gCPg==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-sso@3.972.13': resolution: {integrity: sha512-fr0UU1wx8kNHDhTQBXioc/YviSW8iXuAxHvnH7eQUtn8F8o/FU3uu6EUMvAQgyvn7Ne5QFnC0Cj0BFlwCk+RFw==} engines: {node: '>=20.0.0'} @@ -698,6 +730,10 @@ packages: resolution: {integrity: sha512-wGtte+48xnhnhHMl/MsxzacBPs5A+7JJedjiP452IkHY7vsbYKcvQBqFye8LwdTJVeHtBHv+JFeTscnwepoWGg==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-sso@3.972.18': + resolution: {integrity: sha512-YHYEfj5S2aqInRt5ub8nDOX8vAxgMvd84wm2Y3WVNfFa/53vOv9T7WOAqXI25qjj3uEcV46xxfqdDQk04h5XQA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-web-identity@3.972.13': resolution: {integrity: 
sha512-a6iFMh1pgUH0TdcouBppLJUfPM7Yd3R9S1xFodPtCRoLqCz2RQFA3qjA8x4112PVYXEd4/pHX2eihapq39w0rA==} engines: {node: '>=20.0.0'} @@ -706,6 +742,10 @@ packages: resolution: {integrity: sha512-8aiVJh6fTdl8gcyL+sVNcNwTtWpmoFa1Sh7xlj6Z7L/cZ/tYMEBHq44wTYG8Kt0z/PpGNopD89nbj3FHl9QmTA==} engines: {node: '>=20.0.0'} + '@aws-sdk/credential-provider-web-identity@3.972.18': + resolution: {integrity: sha512-OqlEQpJ+J3T5B96qtC1zLLwkBloechP+fezKbCH0sbd2cCc0Ra55XpxWpk/hRj69xAOYtHvoC4orx6eTa4zU7g==} + engines: {node: '>=20.0.0'} + '@aws-sdk/eventstream-handler-node@3.972.10': resolution: {integrity: sha512-g2Z9s6Y4iNh0wICaEqutgYgt/Pmhv5Ev9G3eKGFe2w9VuZDhc76vYdop6I5OocmpHV79d4TuLG+JWg5rQIVDVA==} engines: {node: '>=20.0.0'} @@ -770,6 +810,10 @@ packages: resolution: {integrity: sha512-Km90fcXt3W/iqujHzuM6IaDkYCj73gsYufcuWXApWdzoTy6KGk8fnchAjePMARU0xegIR3K4N3yIo1vy7OVe8A==} engines: {node: '>=20.0.0'} + '@aws-sdk/middleware-user-agent@3.972.20': + resolution: {integrity: sha512-3kNTLtpUdeahxtnJRnj/oIdLAUdzTfr9N40KtxNhtdrq+Q1RPMdCJINRXq37m4t5+r3H70wgC3opW46OzFcZYA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/middleware-websocket@3.972.12': resolution: {integrity: sha512-iyPP6FVDKe/5wy5ojC0akpDFG1vX3FeCUU47JuwN8xfvT66xlEI8qUJZPtN55TJVFzzWZJpWL78eqUE31md08Q==} engines: {node: '>= 14.0.0'} @@ -782,6 +826,10 @@ packages: resolution: {integrity: sha512-MlGWA8uPaOs5AiTZ5JLM4uuWDm9EEAnm9cqwvqQIc6kEgel/8s1BaOWm9QgUcfc9K8qd7KkC3n43yDbeXOA2tg==} engines: {node: '>=20.0.0'} + '@aws-sdk/nested-clients@3.996.8': + resolution: {integrity: sha512-6HlLm8ciMW8VzfB80kfIx16PBA9lOa9Dl+dmCBi78JDhvGlx3I7Rorwi5PpVRkL31RprXnYna3yBf6UKkD/PqA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/region-config-resolver@3.972.6': resolution: {integrity: sha512-Aa5PusHLXAqLTX1UKDvI3pHQJtIsF7Q+3turCHqfz/1F61/zDMWfbTC8evjhrrYVAtz9Vsv3SJ/waSUeu7B6gw==} engines: {node: '>=20.0.0'} @@ -802,6 +850,14 @@ packages: resolution: {integrity: sha512-j9BwZZId9sFp+4GPhf6KrwO8Tben2sXibZA8D1vv2I1zBdvkUHcBA2g4pkqIpTRalMTLC0NPkBPX0gERxfy/iA==} 
engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.1005.0': + resolution: {integrity: sha512-vMxd+ivKqSxU9bHx5vmAlFKDAkjGotFU56IOkDa5DaTu1WWwbcse0yFHEm9I537oVvodaiwMl3VBwgHfzQ2rvw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/token-providers@3.1007.0': + resolution: {integrity: sha512-kKvVyr53vvVc5k6RbvI6jhafxufxO2SkEw8QeEzJqwOXH/IMY7Cm0IyhnBGdqj80iiIIiIM2jGe7Fn3TIdwdrw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.999.0': resolution: {integrity: sha512-cx0hHUlgXULfykx4rdu/ciNAJaa3AL5xz3rieCz7NKJ68MJwlj3664Y8WR5MGgxfyYJBdamnkjNSx5Kekuc0cg==} engines: {node: '>=20.0.0'} @@ -862,6 +918,15 @@ packages: aws-crt: optional: true + '@aws-sdk/util-user-agent-node@3.973.5': + resolution: {integrity: sha512-Dyy38O4GeMk7UQ48RupfHif//gqnOPbq/zlvRssc11E2mClT+aUfc3VS2yD8oLtzqO3RsqQ9I3gOBB4/+HjPOw==} + engines: {node: '>=20.0.0'} + peerDependencies: + aws-crt: '>=1.0.0' + peerDependenciesMeta: + aws-crt: + optional: true + '@aws-sdk/xml-builder@3.972.10': resolution: {integrity: sha512-OnejAIVD+CxzyAUrVic7lG+3QRltyja9LoNqCE/1YVs8ichoTbJlVSaZ9iSMcnHLyzrSNtvaOGjSDRP+d/ouFA==} engines: {node: '>=20.0.0'} @@ -1024,6 +1089,10 @@ packages: resolution: {integrity: sha512-UyX6rGEXzVyPzb1yvjHtPfTlnLvB5jX/stAMdiytHhfoydX+98hfympdOwsnTktzr+IRvphxTbdErgYDJkEsvw==} engines: {node: '>=22.12.0'} + '@discordjs/voice@0.19.1': + resolution: {integrity: sha512-XYbFVyUBB7zhRvrjREfiWDwio24nEp/vFaVe6u9aBIC5UYuT7HvoMt8LgNfZ5hOyaCW0flFr72pkhUGz+gWw4Q==} + engines: {node: '>=22.12.0'} + '@emnapi/core@1.8.1': resolution: {integrity: sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==} @@ -2102,116 +2171,116 @@ packages: '@oxc-project/types@0.115.0': resolution: {integrity: sha512-4n91DKnebUS4yjUHl2g3/b2T+IUdCfmoZGhmwsovZCDaJSs+QkVAM+0AqqTxHSsHfeiMuueT75cZaZcT/m0pSw==} - '@oxfmt/binding-android-arm-eabi@0.36.0': - resolution: {integrity: sha512-Z4yVHJWx/swHHjtr0dXrBZb6LxS+qNz1qdza222mWwPTUK4L790+5i3LTgjx3KYGBzcYpjaiZBw4vOx94dH7MQ==} + 
'@oxfmt/binding-android-arm-eabi@0.38.0': + resolution: {integrity: sha512-lTN4//sgYywK8ulQo7a/EZVzOTGomGQv2IG/7tMYdqTV3xN3QTqWpXcZBGUzaicC4B882N+5zJLYZ37IWfUMcg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [android] - '@oxfmt/binding-android-arm64@0.36.0': - resolution: {integrity: sha512-3ElCJRFNPQl7jexf2CAa9XmAm8eC5JPrIDSjc9jSchkVSFTEqyL0NtZinBB2h1a4i4JgP1oGl/5G5n8YR4FN8Q==} + '@oxfmt/binding-android-arm64@0.38.0': + resolution: {integrity: sha512-XbVgqR1WsIcCkfxwh2tdg3M1MWgR23YOboW2nbB8ab0gInNNLGy7cIAdr78XaoG/bGdaF4488XRhuGWq67xrzA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@oxfmt/binding-darwin-arm64@0.36.0': - resolution: {integrity: sha512-nak4znWCqIExKhYSY/mz/lWsqWIpdsS7o0+SRzXR1Q0m7GrMcG1UrF1pS7TLGZhhkf7nTfEF7q6oZzJiodRDuw==} + '@oxfmt/binding-darwin-arm64@0.38.0': + resolution: {integrity: sha512-AHb6zUzWaSJra7lnPkI+Sqwu33bVWVTwCozcw9QTX8vwHaI1+5d5STqBcsJf63eSuRVRlflwMS4erlAPh3fXZw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@oxfmt/binding-darwin-x64@0.36.0': - resolution: {integrity: sha512-V4GP96thDnpKx6ADnMDnhIXNdtV+Ql9D4HUU+a37VTeVbs5qQSF/s6hhUP1b3xUqU7iRcwh72jUU2Y12rtGHAw==} + '@oxfmt/binding-darwin-x64@0.38.0': + resolution: {integrity: sha512-VmlmTyn7LL7Xi5htjosxGpJJHf3Drx5mgXxKE8+NT10uBXTaG3FHpRYhW3Zg5Qp7omH92Lj1+IHYqQG/HZpLnw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@oxfmt/binding-freebsd-x64@0.36.0': - resolution: {integrity: sha512-/xapWCADfI5wrhxpEUjhI9fnw7MV5BUZizVa8e24n3VSK6A3Y1TB/ClOP1tfxNspykFKXp4NBWl6NtDJP3osqQ==} + '@oxfmt/binding-freebsd-x64@0.38.0': + resolution: {integrity: sha512-LynMLRqaUEAV6n4svTFanFOAnJ9D6aCCfymJ2yhMSh5fYFgCCO4q5LzPV2nATKKoyPocSErFSmYREsOFbkIlCg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@oxfmt/binding-linux-arm-gnueabihf@0.36.0': - resolution: {integrity: sha512-1lOmv61XMFIH5uNm27620kRRzWt/RK6tdn250BRDoG9W7OXGOQ5UyI1HVT+SFkoOoKztBiinWgi68+NA1MjBVQ==} + 
'@oxfmt/binding-linux-arm-gnueabihf@0.38.0': + resolution: {integrity: sha512-HRRZtOXcss5+bGqQcYahILgt14+Iu/Olf6fnoKq5ctOzU21PGHVB+zuocgt+/+ixoMLV1Drvok3ns7QwnLwNTA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxfmt/binding-linux-arm-musleabihf@0.36.0': - resolution: {integrity: sha512-vMH23AskdR1ujUS9sPck2Df9rBVoZUnCVY86jisILzIQ/QQ/yKUTi7tgnIvydPx7TyB/48wsQ5QMr5Knq5p/aw==} + '@oxfmt/binding-linux-arm-musleabihf@0.38.0': + resolution: {integrity: sha512-kScH8XnH7TRUckMOSZ5115Vvr2CQq+iPsuXPEzwUXSxh+gDLzt+GsXuvCsaPxp1KP+dQj88VrIjeQ4V0f9NRKw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxfmt/binding-linux-arm64-gnu@0.36.0': - resolution: {integrity: sha512-Hy1V+zOBHpBiENRx77qrUTt5aPDHeCASRc8K5KwwAHkX2AKP0nV89eL17hsZrE9GmnXFjsNmd80lyf7aRTXsbw==} + '@oxfmt/binding-linux-arm64-gnu@0.38.0': + resolution: {integrity: sha512-PUVn/vGsMs83eLhNXLNjR+Qw/EPiNxU9Tx+p+aZBK0RT9/k6RNgh/O4F1TxS4tdISmf3SSgjdnMOVW3ZfQZ2mA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxfmt/binding-linux-arm64-musl@0.36.0': - resolution: {integrity: sha512-SPGLJkOIHSIC6ABUQ5V8NqJpvYhMJueJv26NYqfCnwi/Mn6A61amkpJJ9Suy0Nmvs+OWESJpcebrBUbXPGZyQQ==} + '@oxfmt/binding-linux-arm64-musl@0.38.0': + resolution: {integrity: sha512-LhtmaLCMGtAIEtaTBAoKLF3QVt+IDKIjdEZvsf0msLeTUFKxyoTNScYBXbkmvqGrm37vV0JjTPvm+OaSh3np5A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxfmt/binding-linux-ppc64-gnu@0.36.0': - resolution: {integrity: sha512-3EuoyB8x9x8ysYJjbEO/M9fkSk72zQKnXCvpZMDHXlnY36/1qMp55Nm0PrCwjGO/1pen5hdOVkz9WmP3nAp2IQ==} + '@oxfmt/binding-linux-ppc64-gnu@0.38.0': + resolution: {integrity: sha512-tO6tPaS21o0MaRqmOi9e3sDotlW4c+1gCx4SwdrfDXm3Y1vmIZWh0qB6t/Xh77bIGVr/4fC95eKOhKLPGwdL+Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@oxfmt/binding-linux-riscv64-gnu@0.36.0': - resolution: {integrity: sha512-MpY3itLwpGh8dnywtrZtaZ604T1m715SydCKy0+qTxetv+IHzuA+aO/AGzrlzUNYZZmtWtmDBrChZGibvZxbRQ==} + 
'@oxfmt/binding-linux-riscv64-gnu@0.38.0': + resolution: {integrity: sha512-djEqwFUHczstFKp5aT43TuRWxyKZSkIZUfGXIEKa0srmIAt1CXQO5O8xLgNG4SGkXTRB1domFfCE68t9SkSmfA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxfmt/binding-linux-riscv64-musl@0.36.0': - resolution: {integrity: sha512-mmDhe4Vtx+XwQPRPn/V25+APnkApYgZ23q+6GVsNYY98pf3aU0aI3Me96pbRs/AfJ1jIiGC+/6q71FEu8dHcHw==} + '@oxfmt/binding-linux-riscv64-musl@0.38.0': + resolution: {integrity: sha512-76EgMMtS6sIE+9Pl9q2GZgZpbZSzqtjQhUUIWl0RVNfHg66tstdJMhY2LXESjDYhc5vFYt9qdQNM0w0zg3onPw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxfmt/binding-linux-s390x-gnu@0.36.0': - resolution: {integrity: sha512-AYXhU+DmNWLSnvVwkHM92fuYhogtVHab7UQrPNaDf1sxadugg9gWVmcgJDlIwxJdpk5CVW/TFvwUKwI432zhhA==} + '@oxfmt/binding-linux-s390x-gnu@0.38.0': + resolution: {integrity: sha512-JYNr3i9z/YguZg088kopjvz49hDxTEL193mYL2/02uq/6BLlQRMaKrePEITTHm/vUu4ZquAKgu4mDib6pGWdyg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@oxfmt/binding-linux-x64-gnu@0.36.0': - resolution: {integrity: sha512-H16QhhQ3usoakMleiAAQ2mg0NsBDAdyE9agUgfC8IHHh3jZEbr0rIKwjEqwbOHK5M0EmfhJmr+aGO/MgZPsneA==} + '@oxfmt/binding-linux-x64-gnu@0.38.0': + resolution: {integrity: sha512-Lf+/Keaw1kBKx0U3HT5PsA7/3VO4ZOmaqo4sWaeAJ6tYeX8h/2IZcEONhjry6T4BETza78z6xI3Qx+18QZix6A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxfmt/binding-linux-x64-musl@0.36.0': - resolution: {integrity: sha512-EFFGkixA39BcmHiCe2ECdrq02D6FCve5ka6ObbvrheXl4V+R0U/E+/uLyVx1X65LW8TA8QQHdnbdDallRekohw==} + '@oxfmt/binding-linux-x64-musl@0.38.0': + resolution: {integrity: sha512-4O6sf6OQuz1flk0TDrrtmXOVO3letA7fYe2IEAiJOQvKhJcMU08NiIVODQjMGZ6IQh1q91B+TlliDfbsYalw8A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxfmt/binding-openharmony-arm64@0.36.0': - resolution: {integrity: sha512-zr/t369wZWFOj1qf06Z5gGNjFymfUNDrxKMmr7FKiDRVI1sNsdKRCuRL4XVjtcptKQ+ao3FfxLN1vrynivmCYg==} + 
'@oxfmt/binding-openharmony-arm64@0.38.0': + resolution: {integrity: sha512-GNocbjYnielmKVBk+r/2Vc4E3oTsAO4+5gRuroUVx86Jv+mpD+hyFkf260/by0YtpF1ipqyxR8chOSgRQvD2zQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@oxfmt/binding-win32-arm64-msvc@0.36.0': - resolution: {integrity: sha512-FxO7UksTv8h4olzACgrqAXNF6BP329+H322323iDrMB5V/+a1kcAw07fsOsUmqNrb9iJBsCQgH/zqcqp5903ag==} + '@oxfmt/binding-win32-arm64-msvc@0.38.0': + resolution: {integrity: sha512-AwgjBHRxPckbazLpECuPOSzYlppYR1CBeUSuzZuClsmTnlZA9O1MexCEP9CROe03Yo1xBGvYtiCjwKZMBChGkg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@oxfmt/binding-win32-ia32-msvc@0.36.0': - resolution: {integrity: sha512-OjoMQ89H01M0oLMfr/CPNH1zi48ZIwxAKObUl57oh7ssUBNDp/2Vjf7E1TQ8M4oj4VFQ/byxl2SmcPNaI2YNDg==} + '@oxfmt/binding-win32-ia32-msvc@0.38.0': + resolution: {integrity: sha512-c3u+ak6Zrh1g6pM2TgNVvOgkm7q1XaIX+5Mgxvu38ozJ5OfM8c7HZk3glMdBzlTD2uK0sSfgBq1kuXwCe1NOGg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ia32] os: [win32] - '@oxfmt/binding-win32-x64-msvc@0.36.0': - resolution: {integrity: sha512-MoyeQ9S36ZTz/4bDhOKJgOBIDROd4dQ5AkT9iezhEaUBxAPdNX9Oq0jD8OSnCj3G4wam/XNxVWKMA52kmzmPtQ==} + '@oxfmt/binding-win32-x64-msvc@0.38.0': + resolution: {integrity: sha512-wud1Hz0D2hYrhk6exxQQndn1htcA28wAcFb1vtP3ZXSzPFtMvc7ag/VNPv6nz6mDzM8X660jUwGEac99QcrVsA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] @@ -2246,116 +2315,116 @@ packages: cpu: [x64] os: [win32] - '@oxlint/binding-android-arm-eabi@1.51.0': - resolution: {integrity: sha512-jJYIqbx4sX+suIxWstc4P7SzhEwb4ArWA2KVrmEuu9vH2i0qM6QIHz/ehmbGE4/2fZbpuMuBzTl7UkfNoqiSgw==} + '@oxlint/binding-android-arm-eabi@1.53.0': + resolution: {integrity: sha512-JC89/jAx4d2zhDIbK8MC4L659FN1WiMXMBkNg7b33KXSkYpUgcbf+0nz7+EPRg+VwWiZVfaoFkNHJ7RXYb5Neg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [android] - '@oxlint/binding-android-arm64@1.51.0': - resolution: {integrity: 
sha512-GtXyBCcH4ti98YdiMNCrpBNGitx87EjEWxevnyhcBK12k/Vu4EzSB45rzSC4fGFUD6sQgeaxItRCEEWeVwPafw==} + '@oxlint/binding-android-arm64@1.53.0': + resolution: {integrity: sha512-CY+pZfi+uyeU7AwFrEnjsNT+VfxYmKLMuk7bVxArd8f+09hQbJb8f7C7EpvTfNqrCK1J8zZlaYI4LltmEctgbQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@oxlint/binding-darwin-arm64@1.51.0': - resolution: {integrity: sha512-3QJbeYaMHn6Bh2XeBXuITSsbnIctyTjvHf5nRjKYrT9pPeErNIpp5VDEeAXC0CZSwSVTsc8WOSDwgrAI24JolQ==} + '@oxlint/binding-darwin-arm64@1.53.0': + resolution: {integrity: sha512-0aqsC4HDQ94oI6kMz64iaOJ1f3bCVArxvaHJGOScBvFz6CcQedXi5b70Xg09CYjKNaHA56dW0QJfoZ/111kz1A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@oxlint/binding-darwin-x64@1.51.0': - resolution: {integrity: sha512-NzErhMaTEN1cY0E8C5APy74lw5VwsNfJfVPBMWPVQLqAbO0k4FFLjvHURvkUL+Y18Wu+8Vs1kbqPh2hjXYA4pg==} + '@oxlint/binding-darwin-x64@1.53.0': + resolution: {integrity: sha512-e+KvuaWtnisyWojO/t5qKDbp2dvVpg+1dl4MGnTb21QpY4+4+9Y1XmZPaztcA2XNvy4BIaXFW+9JH9tMpSBqUg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@oxlint/binding-freebsd-x64@1.51.0': - resolution: {integrity: sha512-msAIh3vPAoKoHlOE/oe6Q5C/n9umypv/k81lED82ibrJotn+3YG2Qp1kiR8o/Dg5iOEU97c6tl0utxcyFenpFw==} + '@oxlint/binding-freebsd-x64@1.53.0': + resolution: {integrity: sha512-hpU0ZHVeblFjmZDfgi9BxhhCpURh0KjoFy5V+Tvp9sg/fRcnMUEfaJrgz+jQfOX4jctlVWrAs1ANs91+5iV+zA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@oxlint/binding-linux-arm-gnueabihf@1.51.0': - resolution: {integrity: sha512-CqQPcvqYyMe9ZBot2stjGogEzk1z8gGAngIX7srSzrzexmXixwVxBdFZyxTVM0CjGfDeV+Ru0w25/WNjlMM2Hw==} + '@oxlint/binding-linux-arm-gnueabihf@1.53.0': + resolution: {integrity: sha512-ccKxOpw+X4xa2pO+qbTOpxQ2x1+Ag3ViRQMnWt3gHp1LcpNgS1xd6GYc3OvehmHtrXqEV3YGczZ0I1qpBB4/2A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxlint/binding-linux-arm-musleabihf@1.51.0': - resolution: {integrity: 
sha512-dstrlYQgZMnyOssxSbolGCge/sDbko12N/35RBNuqLpoPbft2aeBidBAb0dvQlyBd9RJ6u8D4o4Eh8Un6iTgyQ==} + '@oxlint/binding-linux-arm-musleabihf@1.53.0': + resolution: {integrity: sha512-UBkBvmzSmlyH2ZObQMDKW/TuyTmUtP/XClPUyU2YLwj0qLopZTZxnDz4VG5d3wz1HQuZXO0o1QqsnQUW1v4a6Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxlint/binding-linux-arm64-gnu@1.51.0': - resolution: {integrity: sha512-QEjUpXO7d35rP1/raLGGbAsBLLGZIzV3ZbeSjqWlD3oRnxpRIZ6iL4o51XQHkconn3uKssc+1VKdtHJ81BBhDA==} + '@oxlint/binding-linux-arm64-gnu@1.53.0': + resolution: {integrity: sha512-PQJJ1izoH9p61las6rZ0BWOznAhTDMmdUPL2IEBLuXFwhy2mSloYHvRkk39PSYJ1DyG+trqU5Z9ZbtHSGH6plg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxlint/binding-linux-arm64-musl@1.51.0': - resolution: {integrity: sha512-YSJua5irtG4DoMAjUapDTPhkQLHhBIY0G9JqlZS6/SZPzqDkPku/1GdWs0D6h/wyx0Iz31lNCfIaWKBQhzP0wQ==} + '@oxlint/binding-linux-arm64-musl@1.53.0': + resolution: {integrity: sha512-GXI1o4Thn/rtnRIL38BwrDMwVcUbIHKCsOixIWf/CkU3fCG3MXFzFTtDMt+34ik0Qk452d8kcpksL0w/hUkMZA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxlint/binding-linux-ppc64-gnu@1.51.0': - resolution: {integrity: sha512-7L4Wj2IEUNDETKssB9IDYt16T6WlF+X2jgC/hBq3diGHda9vJLpAgb09+D3quFq7TdkFtI7hwz/jmuQmQFPc1Q==} + '@oxlint/binding-linux-ppc64-gnu@1.53.0': + resolution: {integrity: sha512-Uahk7IVs2yBamCgeJ3XKpKT9Vh+de0pDKISFKnjEcI3c/w2CFHk1+W6Q6G3KI56HGwE9PWCp6ayhA9whXWkNIQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@oxlint/binding-linux-riscv64-gnu@1.51.0': - resolution: {integrity: sha512-cBUHqtOXy76G41lOB401qpFoKx1xq17qYkhWrLSM7eEjiHM9sOtYqpr6ZdqCnN9s6ZpzudX4EkeHOFH2E9q0vA==} + '@oxlint/binding-linux-riscv64-gnu@1.53.0': + resolution: {integrity: sha512-sWtcU9UkrKMWsGKdFy8R6jkm9Q0VVG1VCpxVuh0HzRQQi3ENI1Nh5CkpsdfUs2MKRcOoHKbXqTscunuXjhxoxQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxlint/binding-linux-riscv64-musl@1.51.0': - resolution: 
{integrity: sha512-WKbg8CysgZcHfZX0ixQFBRSBvFZUHa3SBnEjHY2FVYt2nbNJEjzTxA3ZR5wMU0NOCNKIAFUFvAh5/XJKPRJuJg==} + '@oxlint/binding-linux-riscv64-musl@1.53.0': + resolution: {integrity: sha512-aXew1+HDvCdExijX/8NBVC854zJwxhKP3l9AHFSHQNo4EanlHtzDMIlIvP3raUkL0vXtFCkTFYezzU5HjstB8A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxlint/binding-linux-s390x-gnu@1.51.0': - resolution: {integrity: sha512-N1QRUvJTxqXNSu35YOufdjsAVmKVx5bkrggOWAhTWBc3J4qjcBwr1IfyLh/6YCg8sYRSR1GraldS9jUgJL/U4A==} + '@oxlint/binding-linux-s390x-gnu@1.53.0': + resolution: {integrity: sha512-rVpyBSqPGou9sITcsoXqUoGBUH74bxYLYOAGUqN599Zu6BQBlBU9hh3bJQ/20D1xrhhrsbiCpVPvXpLPM5nL1w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@oxlint/binding-linux-x64-gnu@1.51.0': - resolution: {integrity: sha512-e0Mz0DizsCoqNIjeOg6OUKe8JKJWZ5zZlwsd05Bmr51Jo3AOL4UJnPvwKumr4BBtBrDZkCmOLhCvDGm95nJM2g==} + '@oxlint/binding-linux-x64-gnu@1.53.0': + resolution: {integrity: sha512-eOyeQ8qFQ2geXmlWJuXAOaek0hFhbMLlYsU457NMLKDRoC43Xf+eDPZ9Yk0n9jDaGJ5zBl/3Dy8wo41cnIXuLA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxlint/binding-linux-x64-musl@1.51.0': - resolution: {integrity: sha512-wD8HGTWhYBKXvRDvoBVB1y+fEYV01samhWQSy1Zkxq2vpezvMnjaFKRuiP6tBNITLGuffbNDEXOwcAhJ3gI5Ug==} + '@oxlint/binding-linux-x64-musl@1.53.0': + resolution: {integrity: sha512-S6rBArW/zD1tob8M9PwKYrRmz+j1ss1+wjbRAJCWKd7TC3JB6noDiA95pIj9zOZVVp04MIzy5qymnYusrEyXzg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxlint/binding-openharmony-arm64@1.51.0': - resolution: {integrity: sha512-5NSwQ2hDEJ0GPXqikjWtwzgAQCsS7P9aLMNenjjKa+gknN3lTCwwwERsT6lKXSirfU3jLjexA2XQvQALh5h27w==} + '@oxlint/binding-openharmony-arm64@1.53.0': + resolution: {integrity: sha512-sd/A0Ny5sN0D/MJtlk7w2jGY4bJQou7gToa9WZF7Sj6HTyVzvlzKJWiOHfr4SulVk4ndiFQ8rKmF9rXP0EcF3A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@oxlint/binding-win32-arm64-msvc@1.51.0': - resolution: 
{integrity: sha512-JEZyah1M0RHMw8d+jjSSJmSmO8sABA1J1RtrHYujGPeCkYg1NeH0TGuClpe2h5QtioRTaF57y/TZfn/2IFV6fA==} + '@oxlint/binding-win32-arm64-msvc@1.53.0': + resolution: {integrity: sha512-QC3q7b51Er/ZurEFcFzc7RpQ/YEoEBLJuCp3WoOzhSHHH/nkUKFy+igOxlj1z3LayhEZPDQQ7sXvv2PM2cdG3Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@oxlint/binding-win32-ia32-msvc@1.51.0': - resolution: {integrity: sha512-q3cEoKH6kwjz/WRyHwSf0nlD2F5Qw536kCXvmlSu+kaShzgrA0ojmh45CA81qL+7udfCaZL2SdKCZlLiGBVFlg==} + '@oxlint/binding-win32-ia32-msvc@1.53.0': + resolution: {integrity: sha512-3OvLgOqwd705hWHV2i8ni80pilvg6BUgpC2+xtVu++e/q28LKVohGh5J5QYJOrRMfWmxK0M/AUu43vUw62LAKQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ia32] os: [win32] - '@oxlint/binding-win32-x64-msvc@1.51.0': - resolution: {integrity: sha512-Q14+fOGb9T28nWF/0EUsYqERiRA7cl1oy4TJrGmLaqhm+aO2cV+JttboHI3CbdeMCAyDI1+NoSlrM7Melhp/cw==} + '@oxlint/binding-win32-x64-msvc@1.53.0': + resolution: {integrity: sha512-xTiOkntexCdJytZ7ArIIgl3vGW5ujMM3sJNM7/+iqGAVJagCqjFFWn68HRWRLeyT66c95uR+CeFmQFI6mLQqDw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] @@ -2461,97 +2530,97 @@ packages: resolution: {integrity: sha512-DmCG8GzysnCZ15bres3N5AHCmwBwYgp0As6xjhQ47rAUTUXxJiK+lLUxaGsX3hd/30qUpVElh05PbGuxRPgJwA==} engines: {node: '>= 10'} - '@rolldown/binding-android-arm64@1.0.0-rc.7': - resolution: {integrity: sha512-/uadfNUaMLFFBGvcIOiq8NnlhvTZTjOyybJaJnhGxD0n9k5vZRJfTaitH5GHnbwmc6T2PC+ZpS1FQH+vXyS/UA==} + '@rolldown/binding-android-arm64@1.0.0-rc.9': + resolution: {integrity: sha512-lcJL0bN5hpgJfSIz/8PIf02irmyL43P+j1pTCfbD1DbLkmGRuFIA4DD3B3ZOvGqG0XiVvRznbKtN0COQVaKUTg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@rolldown/binding-darwin-arm64@1.0.0-rc.7': - resolution: {integrity: sha512-zokYr1KgRn0hRA89dmgtPj/BmKp9DxgrfAJvOEFfXa8nfYWW2nmgiYIBGpSIAJrEg7Qc/Qznovy6xYwmKh0M8g==} + '@rolldown/binding-darwin-arm64@1.0.0-rc.9': + resolution: {integrity: 
sha512-J7Zk3kLYFsLtuH6U+F4pS2sYVzac0qkjcO5QxHS7OS7yZu2LRs+IXo+uvJ/mvpyUljDJ3LROZPoQfgBIpCMhdQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@rolldown/binding-darwin-x64@1.0.0-rc.7': - resolution: {integrity: sha512-eZFjbmrapCBVgMmuLALH3pmQQQStHFuRhsFceJHk6KISW8CkI2e9OPLp9V4qXksrySQcD8XM8fpvGLs5l5C7LQ==} + '@rolldown/binding-darwin-x64@1.0.0-rc.9': + resolution: {integrity: sha512-iwtmmghy8nhfRGeNAIltcNXzD0QMNaaA5U/NyZc1Ia4bxrzFByNMDoppoC+hl7cDiUq5/1CnFthpT9n+UtfFyg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@rolldown/binding-freebsd-x64@1.0.0-rc.7': - resolution: {integrity: sha512-xjMrh8Dmu2DNwdY6DZsrF6YPGeesc3PaTlkh8v9cqmkSCNeTxnhX3ErhVnuv1j3n8t2IuuhQIwM9eZDINNEt5Q==} + '@rolldown/binding-freebsd-x64@1.0.0-rc.9': + resolution: {integrity: sha512-DLFYI78SCiZr5VvdEplsVC2Vx53lnA4/Ga5C65iyldMVaErr86aiqCoNBLl92PXPfDtUYjUh+xFFor40ueNs4Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.7': - resolution: {integrity: sha512-mOvftrHiXg4/xFdxJY3T9Wl1/zDAOSlMN8z9an2bXsCwuvv3RdyhYbSMZDuDO52S04w9z7+cBd90lvQSPTAQtw==} + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.9': + resolution: {integrity: sha512-CsjTmTwd0Hri6iTw/DRMK7kOZ7FwAkrO4h8YWKoX/kcj833e4coqo2wzIFywtch/8Eb5enQ/lwLM7w6JX1W5RQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-TuUkeuEEPRyXMBbJ86NRhAiPNezxHW8merl3Om2HASA9Pl1rI+VZcTtsVQ6v/P0MDIFpSl0k0+tUUze9HIXyEw==} + '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-2x9O2JbSPxpxMDhP9Z74mahAStibTlrBMW0520+epJH5sac7/LwZW5Bmg/E6CXuEF53JJFW509uP+lSedaUNxg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@rolldown/binding-linux-arm64-musl@1.0.0-rc.7': - resolution: {integrity: sha512-G43ZElEvaby+YSOgrXfBgpeQv42LdS0ivFFYQufk2tBDWeBfzE/+ob5DmO8Izbyn4Y8k6GgLF11jFDYNnmU/3w==} + 
'@rolldown/binding-linux-arm64-musl@1.0.0-rc.9': + resolution: {integrity: sha512-JA1QRW31ogheAIRhIg9tjMfsYbglXXYGNPLdPEYrwFxdbkQCAzvpSCSHCDWNl4hTtrol8WeboCSEpjdZK8qrCg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-Y48ShVxGE2zUTt0A0PR3grCLNxW4DWtAfe5lxf6L3uYEQujwo/LGuRogMsAtOJeYLCPTJo2i714LOdnK34cHpw==} + '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-aOKU9dJheda8Kj8Y3w9gnt9QFOO+qKPAl8SWd7JPHP+Cu0EuDAE5wokQubLzIDQWg2myXq2XhTpOVS07qqvT+w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-KU5DUYvX3qI8/TX6D3RA4awXi4Ge/1+M6Jqv7kRiUndpqoVGgD765xhV3Q6QvtABnYjLJenrWDl3S1B5U56ixA==} + '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-OalO94fqj7IWRn3VdXWty75jC5dk4C197AWEuMhIpvVv2lw9fiPhud0+bW2ctCxb3YoBZor71QHbY+9/WToadA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@rolldown/binding-linux-x64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-1THb6FdBkAEL12zvUue2bmK4W1+P+tz8Pgu5uEzq+xrtYa3iBzmmKNlyfUzCFNCqsPd8WJEQrYdLcw4iMW4AVw==} + '@rolldown/binding-linux-x64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-cVEl1vZtBsBZna3YMjGXNvnYYrOJ7RzuWvZU0ffvJUexWkukMaDuGhUXn0rjnV0ptzGVkvc+vW9Yqy6h8YX4pg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@rolldown/binding-linux-x64-musl@1.0.0-rc.7': - resolution: {integrity: sha512-12o73atFNWDgYnLyA52QEUn9AH8pHIe12W28cmqjyHt4bIEYRzMICvYVCPa2IQm6DJBvCBrEhD9K+ct4wr2hwg==} + '@rolldown/binding-linux-x64-musl@1.0.0-rc.9': + resolution: {integrity: sha512-UzYnKCIIc4heAKgI4PZ3dfBGUZefGCJ1TPDuLHoCzgrMYPb5Rv6TLFuYtyM4rWyHM7hymNdsg5ik2C+UD9VDbA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@rolldown/binding-openharmony-arm64@1.0.0-rc.7': - resolution: {integrity: 
sha512-+uUgGwvuUCXl894MTsmTS2J0BnCZccFsmzV7y1jFxW5pTSxkuwL5agyPuDvDOztPeS6RrdqWkn7sT0jRd0ECkg==} + '@rolldown/binding-openharmony-arm64@1.0.0-rc.9': + resolution: {integrity: sha512-+6zoiF+RRyf5cdlFQP7nm58mq7+/2PFaY2DNQeD4B87N36JzfF/l9mdBkkmTvSYcYPE8tMh/o3cRlsx1ldLfog==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@rolldown/binding-wasm32-wasi@1.0.0-rc.7': - resolution: {integrity: sha512-53p2L/NSy21UiFOqUGlC11kJDZS2Nx2GJRz1QvbkXovypA3cOHbsyZHLkV72JsLSbiEQe+kg4tndUhSiC31UEA==} + '@rolldown/binding-wasm32-wasi@1.0.0-rc.9': + resolution: {integrity: sha512-rgFN6sA/dyebil3YTlL2evvi/M+ivhfnyxec7AccTpRPccno/rPoNlqybEZQBkcbZu8Hy+eqNJCqfBR8P7Pg8g==} engines: {node: '>=14.0.0'} cpu: [wasm32] - '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.7': - resolution: {integrity: sha512-K6svNRljO6QrL6VTKxwh4yThhlR9DT/tK0XpaFQMnJwwQKng+NYcVEtUkAM0WsoiZHw+Hnh3DGnn3taf/pNYGg==} + '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.9': + resolution: {integrity: sha512-lHVNUG/8nlF1IQk1C0Ci574qKYyty2goMiPlRqkC5R+3LkXDkL5Dhx8ytbxq35m+pkHVIvIxviD+TWLdfeuadA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@rolldown/binding-win32-x64-msvc@1.0.0-rc.7': - resolution: {integrity: sha512-3ZJBT47VWLKVKIyvHhUSUgVwHzzZW761YAIkM3tOT+8ZTjFVp0acCM0Y2Z2j3jCl+XYi2d9y2uEWQ8H0PvvpPw==} + '@rolldown/binding-win32-x64-msvc@1.0.0-rc.9': + resolution: {integrity: sha512-G0oA4+w1iY5AGi5HcDTxWsoxF509hrFIPB2rduV5aDqS9FtDg1CAfa7V34qImbjfhIcA8C+RekocJZA96EarwQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] - '@rolldown/pluginutils@1.0.0-rc.7': - resolution: {integrity: sha512-qujRfC8sFVInYSPPMLQByRh7zhwkGFS4+tyMQ83srV1qrxL4g8E2tyxVVyxd0+8QeBM1mIk9KbWxkegRr76XzA==} + '@rolldown/pluginutils@1.0.0-rc.9': + resolution: {integrity: sha512-w6oiRWgEBl04QkFZgmW+jnU1EC9b57Oihi2ot3HNWIQRqgHp5PnYDia5iZ5FF7rpa4EQdiqMDXjlqKGXBhsoXw==} '@rollup/rollup-android-arm-eabi@4.59.0': resolution: {integrity: 
sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} @@ -3138,6 +3207,93 @@ packages: resolution: {integrity: sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g==} engines: {node: '>=18.0.0'} + '@snazzah/davey-android-arm-eabi@0.1.10': + resolution: {integrity: sha512-7bwHxSNEI2wVXOT6xnmpnO9SHb2xwAnf9oEdL45dlfVHTgU1Okg5rwGwRvZ2aLVFFbTyecfC8EVZyhpyTkjLSw==} + engines: {node: '>= 10'} + cpu: [arm] + os: [android] + + '@snazzah/davey-android-arm64@0.1.10': + resolution: {integrity: sha512-68WUf2LQwQTP9MgPcCqTWwJztJSIk0keGfF2Y/b+MihSDh29fYJl7C0rbz69aUrVCvCC2lYkB/46P8X1kBz7yg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [android] + + '@snazzah/davey-darwin-arm64@0.1.10': + resolution: {integrity: sha512-nYC+DWCGUC1jUGEenCNQE/jJpL/02m0ebY/NvTCQbul5ktI/ShVzgA3kzssEhZvhf6jbH048Rs39wDhp/b24Jg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@snazzah/davey-darwin-x64@0.1.10': + resolution: {integrity: sha512-0q5Rrcs+O9sSSnPX+A3R3djEQs2nTAtMe5N3lApO6lZas/QNMl6wkEWCvTbDc2cfAYBMSk2jgc1awlRXi4LX3Q==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@snazzah/davey-freebsd-x64@0.1.10': + resolution: {integrity: sha512-/Gq5YDD6Oz8iBqVJLswUnetCv9JCRo1quYX5ujzpAG8zPCNItZo4g4h5p9C+h4Yoay2quWBYhoaVqQKT96bm8g==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@snazzah/davey-linux-arm-gnueabihf@0.1.10': + resolution: {integrity: sha512-0Z7Vrt0WIbgxws9CeHB9qlueYJlvltI44rUuZmysdi70UcHGxlr7nE3MnzYCr9nRWRegohn8EQPWHMKMDJH2GA==} + engines: {node: '>= 10'} + cpu: [arm] + os: [linux] + + '@snazzah/davey-linux-arm64-gnu@0.1.10': + resolution: {integrity: sha512-xhZQycn4QB+qXhqm/QmZ+kb9MHMXcbjjoPfvcIL4WMQXFG/zUWHW8EiBk7ZTEGMOpeab3F9D1+MlgumglYByUQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@snazzah/davey-linux-arm64-musl@0.1.10': + resolution: {integrity: 
sha512-pudzQCP9rZItwW4qHHvciMwtNd9kWH4l73g6Id1LRpe6sc8jiFBV7W+YXITj2PZbI0by6XPfkRP6Dk5IkGOuAw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@snazzah/davey-linux-x64-gnu@0.1.10': + resolution: {integrity: sha512-DC8qRmk+xJEFNqjxKB46cETKeDQqgUqE5p39KXS2k6Vl/XTi8pw8pXOxrPfYte5neoqlWAVQzbxuLnwpyRJVEQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@snazzah/davey-linux-x64-musl@0.1.10': + resolution: {integrity: sha512-wPR5/2QmsF7sR0WUaCwbk4XI3TLcxK9PVK8mhgcAYyuRpbhcVgNGWXs8ulcyMSXve5pFRJAFAuMTGCEb014peg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@snazzah/davey-wasm32-wasi@0.1.10': + resolution: {integrity: sha512-SfQavU+eKTDbRmPeLRodrVSfsWq25PYTmH1nIZW3B27L6IkijzjXZZuxiU1ZG1gdI5fB7mwXrOTtx34t+vAG7Q==} + engines: {node: '>=14.0.0'} + cpu: [wasm32] + + '@snazzah/davey-win32-arm64-msvc@0.1.10': + resolution: {integrity: sha512-Raafk53smYs67wZCY9bQXHXzbaiRMS5QCdjTdin3D9fF5A06T/0Zv1z7/YnaN+O3GSL/Ou3RvynF7SziToYiFQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@snazzah/davey-win32-ia32-msvc@0.1.10': + resolution: {integrity: sha512-pAs43l/DiZ+icqBwxIwNePzuYxFM1ZblVuf7t6vwwSLxvova7vnREnU7qDVjbc5/YTUHOsqYy3S6TpZMzDo2lw==} + engines: {node: '>= 10'} + cpu: [ia32] + os: [win32] + + '@snazzah/davey-win32-x64-msvc@0.1.10': + resolution: {integrity: sha512-kr6148VVBoUT4CtD+5hYshTFRny7R/xQZxXFhFc0fYjtmdMVM8Px9M91olg1JFNxuNzdfMfTufR58Q3wfBocug==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@snazzah/davey@0.1.10': + resolution: {integrity: sha512-J5f7vV5/tnj0xGnqufFRd6qiWn3FcR3iXjpjpEmO2Ok+Io0AASkMaZ3I39TsL45as0Qo5bq9wWuamFQ77PjJ+g==} + engines: {node: '>= 10'} + '@standard-schema/spec@1.1.0': resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} @@ -3304,11 +3460,11 @@ packages: '@types/node@20.19.37': resolution: {integrity: sha512-8kzdPJ3FsNsVIurqBs7oodNnCEVbni9yUEkaHbgptDACOPW04jimGagZ51E6+lXUwJjgnBw+hyko/lkFWCldqw==} - 
'@types/node@24.11.0': - resolution: {integrity: sha512-fPxQqz4VTgPI/IQ+lj9r0h+fDR66bzoeMGHp8ASee+32OSGIkeASsoZuJixsQoVef1QJbeubcPBxKk22QVoWdw==} + '@types/node@24.12.0': + resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==} - '@types/node@25.3.5': - resolution: {integrity: sha512-oX8xrhvpiyRCQkG1MFchB09f+cXftgIXb3a7UUa4Y3wpmZPw5tyZGTLWhlESOLq1Rq6oDlc8npVU2/9xiCuXMA==} + '@types/node@25.4.0': + resolution: {integrity: sha512-9wLpoeWuBlcbBpOY3XmzSTG3oscB6xjBEEtn+pYXTfhyXhIxC5FsBer2KTopBlvKEiW9l13po9fq+SJY/5lkhw==} '@types/qrcode-terminal@0.12.2': resolution: {integrity: sha512-v+RcIEJ+Uhd6ygSQ0u5YYY7ZM+la7GgPbs0V/7l/kFs2uO4S8BcIUEMoP7za4DNIqNnUD5npf0A/7kBhrCKG5Q==} @@ -3355,43 +3511,43 @@ packages: '@types/yauzl@2.10.3': resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-mywkctYr45fUBUYD35poInc9HEjup0zyCO5z3ZU2QC9eCQShpwYSDceoSCwxVKB/b/f/CU6H3LqINFeIz5CvrQ==} + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-k3UqlA40U9m8meAyliJdbTayDSGZRBGNsEDP2rtjOomLUo2IA0eIi4vNAjQKzsXFtyfoQ59MGAqOLSO/CzVrQA==} cpu: [arm64] os: [darwin] - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-iF+Y4USbCiD5BxmXI6xYuy+S6d2BhxKDb3YHjchzqg3AgleDNTd2rqSzlWv4ku26V2iOSfpM9t1H/xluL9pgNw==} + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-8PNUCS1HPeXMK1F+1D3A4MyD+9Nil2mM3mWSwayUZpqT/A+dfEtcoo4Oe7Gz6qvMZbhCjbipwhTC84ilisiE1g==} cpu: [x64] os: [darwin] - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-uEIIbW1JYPGEesVh/P5xA+xox7pQ6toeFPeke2X2H2bs5YkWHVaUQtVZuKNmGelw+2PCG6XRrXvMgMp056ebuQ==} + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260311.1': + resolution: 
{integrity: sha512-WwRJO5ryMEs4Flro6JKNq0T+hR78eYFrItautu9o6EsIpeevk7Cq7T0BBgCrAf+A5aKts21HpiWzfHI0YP/CuQ==} cpu: [arm64] os: [linux] - '@typescript/native-preview-linux-arm@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-vg8hwfwIhT8CmYJI5lG3PP8IoNzKKBGbq1cKjxQabSZTPuQKwVFVity2XKTKZKd+qRGL7xW4UWMJZLFgSx3b2Q==} + '@typescript/native-preview-linux-arm@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-9T8kwNALCWzuNe00ri/f6wwoVD64YZW24cqkycFeptIF+DfNxfHMddWd7fvtHf0OKzPtkL83mkjBtviNeVKOfQ==} cpu: [arm] os: [linux] - '@typescript/native-preview-linux-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-Yd/ht0CGE4NYUAjuHa1u4VbiJbyUgvDh+b2o+Zcb2h5t8B761DIzDm24QqVXh+KhvGUoEodXWg3g3APxLHqj8Q==} + '@typescript/native-preview-linux-x64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-oMm3cb4njzMLBb61TI4EGq5Igxc+hoPHHNpMWqORfiYu/uQZWnter/twamTrZo6boCFtIa59mrGkhR3Qz7kauA==} cpu: [x64] os: [linux] - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-Klk6BoiHegfPmkO0YYrXmbYVdPjOfN25lRkzenqDIwbyzPlABHvICCyo5YRvWD3HU4EeDfLisIFU9wEd/0duCw==} + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-EQ5nz4qrwtzMZ5bjdMVQ2ke5BHQWDBz9IQsdh/8UU819cs5ZBnKmFFe5wOrIngqFvq4EoWKDXf983Vw0q4erkg==} cpu: [arm64] os: [win32] - '@typescript/native-preview-win32-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-4LrXmaMfzedwczANIkD/M9guPD4EWuQnCxOJsJkdYi3ExWQDjIFwfmxTtAmfPBWxVExLfn7UUkz/yCtcv2Wd+w==} + '@typescript/native-preview-win32-x64@7.0.0-dev.20260311.1': + resolution: {integrity: sha512-Y/5A7BaRFV1Pro4BqNW3nVDuId7YdPXktl769x1yUjTDQLH6YJEJVeBkFkT0+4e1O5IL92rxxr8rWMLypNKnTw==} cpu: [x64] os: [win32] - '@typescript/native-preview@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-8a3oe5IAfBkEfMouRheNhOXUScBSHIUknPvUdsbxx7s+Ja1lxFNA1X1TTl2T18vu72Q/mM86vxefw5eW8/ps3g==} + '@typescript/native-preview@7.0.0-dev.20260311.1': + resolution: {integrity: 
sha512-BnyOW/mdZVZGevyeJ4RRY60CI4F121QBa++8Rwd+/Ms48OKQ30eMhaIKWGowz/u4WjJZmrzhFxIzN92XeSWMCQ==} hasBin: true '@typespec/ts-http-runtime@0.3.3': @@ -3523,8 +3679,8 @@ packages: engines: {node: '>=0.4.0'} hasBin: true - acpx@0.1.16: - resolution: {integrity: sha512-CxHkUIP9dPSjh+RyoZkQg0AXjSiSus/dF4xKEeG9c+7JboZp5bZuWie/n4V7sBeKTMheMoEYGrMUslrdUadrqg==} + acpx@0.2.0: + resolution: {integrity: sha512-5E38uizINoEpTuHjLvlkWTfFqeLRqnO7vS3z3qmAXZCEZVExE+oYhJ1TClIl8KZZ9gKaoJF+5c0ltDcJDzG67g==} engines: {node: '>=22.12.0'} hasBin: true @@ -3536,6 +3692,10 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} + agent-base@8.0.0: + resolution: {integrity: sha512-QT8i0hCz6C/KQ+KTAbSNwCHDGdmUJl2tp2ZpNlGSWCfhUNVbYG2WLE3MdZGBAgXPV4GAvjGMxo+C1hroyxmZEg==} + engines: {node: '>= 14'} + ajv-formats@3.0.1: resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==} peerDependencies: @@ -3697,6 +3857,36 @@ packages: bare-abort-controller: optional: true + bare-fs@4.5.5: + resolution: {integrity: sha512-XvwYM6VZqKoqDll8BmSww5luA5eflDzY0uEFfBJtFKe4PAAtxBjU3YIxzIBzhyaEQBy1VXEQBto4cpN5RZJw+w==} + engines: {bare: '>=1.16.0'} + peerDependencies: + bare-buffer: '*' + peerDependenciesMeta: + bare-buffer: + optional: true + + bare-os@3.7.1: + resolution: {integrity: sha512-ebvMaS5BgZKmJlvuWh14dg9rbUI84QeV3WlWn6Ph6lFI8jJoh7ADtVTyD2c93euwbe+zgi0DVrl4YmqXeM9aIA==} + engines: {bare: '>=1.14.0'} + + bare-path@3.0.0: + resolution: {integrity: sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==} + + bare-stream@2.8.1: + resolution: {integrity: sha512-bSeR8RfvbRwDpD7HWZvn8M3uYNDrk7m9DQjYOFkENZlXW8Ju/MPaqUPQq5LqJ3kyjEm07siTaAQ7wBKCU59oHg==} + peerDependencies: + bare-buffer: '*' + bare-events: '*' + peerDependenciesMeta: + bare-buffer: + optional: true + bare-events: + optional: true + + 
bare-url@2.3.2: + resolution: {integrity: sha512-ZMq4gd9ngV5aTMa5p9+UfY0b3skwhHELaDkhEHetMdX0LRkW9kzaym4oo/Eh+Ghm0CCDuMTsRIGM/ytUc1ZYmw==} + base64-js@1.5.1: resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} @@ -4059,8 +4249,8 @@ packages: discord-api-types@0.38.37: resolution: {integrity: sha512-Cv47jzY1jkGkh5sv0bfHYqGgKOWO1peOrGMkDFM4UmaGMOTgOW8QSexhvixa9sVOiz8MnVOBryWYyw/CEVhj7w==} - discord-api-types@0.38.41: - resolution: {integrity: sha512-yMECyR8j9c2fVTvCQ+Qc24pweYFIZk/XoxDOmt1UvPeSw5tK6gXBd/2hhP+FEAe9Y6ny8pRMaf618XDK4U53OQ==} + discord-api-types@0.38.42: + resolution: {integrity: sha512-qs1kya7S84r5RR8m9kgttywGrmmoHaRifU1askAoi+wkoSefLpZP6aGXusjNw5b0jD3zOg3LTwUa3Tf2iHIceQ==} doctypes@1.1.0: resolution: {integrity: sha512-LLBi6pEqS6Do3EKQ3J0NqHWV5hhb78Pi8vvESYwyOy2c31ZEZVdtitdzsQsKb7878PEERhzUk0ftqGhG6Mz+pQ==} @@ -4075,9 +4265,8 @@ packages: resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} engines: {node: '>= 4'} - dompurify@3.3.2: - resolution: {integrity: sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==} - engines: {node: '>=20'} + dompurify@3.3.3: + resolution: {integrity: sha512-Oj6pzI2+RqBfFG+qOaOLbFXLQ90ARpcGG6UePL82bJLtdsa6CYJD7nmiU8MW9nQNOtCHV3lZ/Bzq1X0QYbBZCA==} domutils@3.2.2: resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} @@ -4559,6 +4748,10 @@ packages: resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} engines: {node: '>= 14'} + https-proxy-agent@8.0.0: + resolution: {integrity: sha512-YYeW+iCnAS3xhvj2dvVoWgsbca3RfQy/IlaNHHOtDmU0jMqPI9euIq3Y9BJETdxk16h9NHHCKqp/KB9nIMStCQ==} + engines: {node: '>= 14'} + human-signals@1.1.1: resolution: {integrity: 
sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==} engines: {node: '>=8.12.0'} @@ -5332,6 +5525,14 @@ packages: zod: optional: true + openclaw@2026.3.8: + resolution: {integrity: sha512-e5Rk2Aj55sD/5LyX94mdYCQj7zpHXo0xIZsl+k140+nRopePfPAxC7nsu0V/NyypPRtaotP1riFfzK7IhaYkuQ==} + engines: {node: '>=22.12.0'} + hasBin: true + peerDependencies: + '@napi-rs/canvas': ^0.1.89 + node-llama-cpp: 3.16.2 + opus-decoder@0.7.11: resolution: {integrity: sha512-+e+Jz3vGQLxRTBHs8YJQPRPc1Tr+/aC6coV/DlZylriA29BdHQAYXhvNRKtjftof17OFng0+P4wsFIqQu3a48A==} @@ -5346,8 +5547,8 @@ packages: resolution: {integrity: sha512-4/8JfsetakdeEa4vAYV45FW20aY+B/+K8NEXp5Eiar3wR8726whgHrbSg5Ar/ZY1FLJ/AGtUqV7W2IVF+Gvp9A==} engines: {node: '>=20'} - oxfmt@0.36.0: - resolution: {integrity: sha512-/ejJ+KoSW6J9bcNT9a9UtJSJNWhJ3yOLSBLbkoFHJs/8CZjmaZVZAJe4YgO1KMJlKpNQasrn/G9JQUEZI3p0EQ==} + oxfmt@0.38.0: + resolution: {integrity: sha512-RGYfnnxmCz8dMQ1Oo5KrYkNRc9cne2WL2vfE+datWNkgiSAkfUsqpGLR7rnkN6cQFgQkHDZH400eXN6izJ8Lww==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true @@ -5355,8 +5556,8 @@ packages: resolution: {integrity: sha512-4RuJK2jP08XwqtUu+5yhCbxEauCm6tv2MFHKEMsjbosK2+vy5us82oI3VLuHwbNyZG7ekZA26U2LLHnGR4frIA==} hasBin: true - oxlint@1.51.0: - resolution: {integrity: sha512-g6DNPaV9/WI9MoX2XllafxQuxwY1TV++j7hP8fTJByVBuCoVtm3dy9f/2vtH/HU40JztcgWF4G7ua+gkainklQ==} + oxlint@1.53.0: + resolution: {integrity: sha512-TLW0PzGbpO1JxUnuy1pIqVPjQUGh4fNfxu5XJbdFIRFVaJ0UFzTjjk/hSFTMRxN6lZub53xL/IwJNEkrh7VtDg==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true peerDependencies: @@ -5781,8 +5982,8 @@ packages: resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==} hasBin: true - rolldown-plugin-dts@0.22.4: - resolution: {integrity: sha512-pueqTPyN1N6lWYivyDGad+j+GO3DT67pzpct8s8e6KGVIezvnrDjejuw1AXFeyDRas3xTq4Ja6Lj5R5/04C5GQ==} + rolldown-plugin-dts@0.22.5: + resolution: {integrity: 
sha512-M/HXfM4cboo+jONx9Z0X+CUf3B5tCi7ni+kR5fUW50Fp9AlZk0oVLesibGWgCXDKFp5lpgQ9yhKoImUFjl3VZw==} engines: {node: '>=20.19.0'} peerDependencies: '@ts-macro/tsc': ^0.3.6 @@ -5800,8 +6001,8 @@ packages: vue-tsc: optional: true - rolldown@1.0.0-rc.7: - resolution: {integrity: sha512-5X0zEeQFzDpB3MqUWQZyO2TUQqP9VnT7CqXHF2laTFRy487+b6QZyotCazOySAuZLAvplCaOVsg1tVn/Zlmwfg==} + rolldown@1.0.0-rc.9: + resolution: {integrity: sha512-9EbgWge7ZH+yqb4d2EnELAntgPTWbfL8ajiTW+SyhJEC4qhBbkCKbqFV4Ge4zmu5ziQuVbWxb/XwLZ+RIO7E8Q==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true @@ -6115,13 +6316,16 @@ packages: resolution: {integrity: sha512-iK5/YhZxq5GO5z8wb0bY1317uDF3Zjpha0QFFLA8/trAoiLbQD0HUbMesEaxyzUgDxi2QlcbM8IvqOlEjgoXBA==} engines: {node: '>=12.17'} - tar-stream@3.1.7: - resolution: {integrity: sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==} + tar-stream@3.1.8: + resolution: {integrity: sha512-U6QpVRyCGHva435KoNWy9PRoi2IFYCgtEhq9nmrPPpbRacPs9IH4aJ3gbrFC8dPcXvdSZ4XXfXT5Fshbp2MtlQ==} tar@7.5.11: resolution: {integrity: sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ==} engines: {node: '>=18'} + teex@1.0.1: + resolution: {integrity: sha512-eYE6iEI62Ni1H8oIa7KlDU6uQBtqr4Eajni3wX7rpfXD8ysFx8z0+dri+KWEPWpBsxXfxu58x/0jvTVT1ekOSg==} + text-decoder@1.2.7: resolution: {integrity: sha512-vlLytXkeP4xvEq2otHeJfSQIRyWxo/oZGEbXrtEEF9Hnmrdly59sUbzZ/QgyWuLYHctCHxFF4tRQZNQ9k60ExQ==} @@ -6194,14 +6398,14 @@ packages: ts-algebra@2.0.0: resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==} - tsdown@0.21.0: - resolution: {integrity: sha512-Sw/ehzVhjYLD7HVBPybJHDxpcaeyFjPcaDCME23o9O4fyuEl6ibYEdrnB8W8UchYAGoayKqzWQqx/oIp3jn/Vg==} + tsdown@0.21.2: + resolution: {integrity: sha512-pP8eAcd1XAWjl5gjosuJs0BAuVoheUe3V8VDHx31QK7YOgXjcCMsBSyFWO3CMh/CSUkjRUzR96JtGH3WJFTExQ==} engines: {node: '>=20.19.0'} hasBin: true peerDependencies: 
'@arethetypeswrong/core': ^0.18.1 - '@tsdown/css': 0.21.0 - '@tsdown/exe': 0.21.0 + '@tsdown/css': 0.21.2 + '@tsdown/exe': 0.21.2 '@vitejs/devtools': '*' publint: ^0.3.0 typescript: ^5.0.0 @@ -6324,8 +6528,8 @@ packages: resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} engines: {node: '>= 0.8'} - unrun@0.2.30: - resolution: {integrity: sha512-a4W1wDADI0gvDDr14T0ho1FgMhmfjq6M8Iz8q234EnlxgH/9cMHDueUSLwTl1fwSBs5+mHrLFYH+7B8ao36EBA==} + unrun@0.2.32: + resolution: {integrity: sha512-opd3z6791rf281JdByf0RdRQrpcc7WyzqittqIXodM/5meNWdTwrVxeyzbaCp4/Rgls/um14oUaif1gomO8YGg==} engines: {node: '>=20.19.0'} hasBin: true peerDependencies: @@ -6580,6 +6784,10 @@ snapshots: dependencies: zod: 4.3.6 + '@agentclientprotocol/sdk@0.16.1(zod@4.3.6)': + dependencies: + zod: 4.3.6 + '@anthropic-ai/sdk@0.73.0(zod@4.3.6)': dependencies: json-schema-to-ts: 3.1.1 @@ -6685,22 +6893,22 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-bedrock@3.1004.0': + '@aws-sdk/client-bedrock@3.1007.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.18 - '@aws-sdk/credential-provider-node': 3.972.18 + '@aws-sdk/core': 3.973.19 + '@aws-sdk/credential-provider-node': 3.972.19 '@aws-sdk/middleware-host-header': 3.972.7 '@aws-sdk/middleware-logger': 3.972.7 '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.19 + '@aws-sdk/middleware-user-agent': 3.972.20 '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/token-providers': 3.1004.0 + '@aws-sdk/token-providers': 3.1007.0 '@aws-sdk/types': 3.973.5 '@aws-sdk/util-endpoints': 3.996.4 '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.4 + '@aws-sdk/util-user-agent-node': 3.973.5 '@smithy/config-resolver': 4.4.10 '@smithy/core': 3.23.9 '@smithy/fetch-http-handler': 5.3.13 @@ -6822,6 +7030,22 @@ snapshots: '@smithy/util-utf8': 4.2.2 tslib: 
2.8.1 + '@aws-sdk/core@3.973.19': + dependencies: + '@aws-sdk/types': 3.973.5 + '@aws-sdk/xml-builder': 3.972.10 + '@smithy/core': 3.23.9 + '@smithy/node-config-provider': 4.3.11 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@aws-sdk/crc64-nvme@3.972.3': dependencies: '@smithy/types': 4.13.0 @@ -6843,6 +7067,14 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/credential-provider-env@3.972.17': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/credential-provider-http@3.972.15': dependencies: '@aws-sdk/core': 3.973.15 @@ -6869,6 +7101,19 @@ snapshots: '@smithy/util-stream': 4.5.17 tslib: 2.8.1 + '@aws-sdk/credential-provider-http@3.972.19': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/node-http-handler': 4.4.14 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-stream': 4.5.17 + tslib: 2.8.1 + '@aws-sdk/credential-provider-ini@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -6907,6 +7152,25 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-ini@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/credential-provider-env': 3.972.17 + '@aws-sdk/credential-provider-http': 3.972.19 + '@aws-sdk/credential-provider-login': 3.972.18 + '@aws-sdk/credential-provider-process': 3.972.17 + '@aws-sdk/credential-provider-sso': 3.972.18 + '@aws-sdk/credential-provider-web-identity': 3.972.18 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/credential-provider-imds': 4.2.11 + 
'@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/credential-provider-login@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -6933,6 +7197,19 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-login@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/credential-provider-node@3.972.14': dependencies: '@aws-sdk/credential-provider-env': 3.972.13 @@ -6967,6 +7244,23 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-node@3.972.19': + dependencies: + '@aws-sdk/credential-provider-env': 3.972.17 + '@aws-sdk/credential-provider-http': 3.972.19 + '@aws-sdk/credential-provider-ini': 3.972.18 + '@aws-sdk/credential-provider-process': 3.972.17 + '@aws-sdk/credential-provider-sso': 3.972.18 + '@aws-sdk/credential-provider-web-identity': 3.972.18 + '@aws-sdk/types': 3.973.5 + '@smithy/credential-provider-imds': 4.2.11 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/credential-provider-process@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -6985,6 +7279,15 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/credential-provider-process@3.972.17': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/credential-provider-sso@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -7011,6 +7314,19 @@ snapshots: 
transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-sso@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/token-providers': 3.1005.0 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/credential-provider-web-identity@3.972.13': dependencies: '@aws-sdk/core': 3.973.15 @@ -7035,6 +7351,18 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/credential-provider-web-identity@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/eventstream-handler-node@3.972.10': dependencies: '@aws-sdk/types': 3.973.5 @@ -7175,6 +7503,17 @@ snapshots: '@smithy/util-retry': 4.2.11 tslib: 2.8.1 + '@aws-sdk/middleware-user-agent@3.972.20': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-endpoints': 3.996.4 + '@smithy/core': 3.23.9 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-retry': 4.2.11 + tslib: 2.8.1 + '@aws-sdk/middleware-websocket@3.972.12': dependencies: '@aws-sdk/types': 3.973.5 @@ -7276,6 +7615,49 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/nested-clients@3.996.8': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.19 + '@aws-sdk/middleware-host-header': 3.972.7 + '@aws-sdk/middleware-logger': 3.972.7 + '@aws-sdk/middleware-recursion-detection': 3.972.7 + '@aws-sdk/middleware-user-agent': 3.972.20 + '@aws-sdk/region-config-resolver': 3.972.7 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-endpoints': 3.996.4 + '@aws-sdk/util-user-agent-browser': 3.972.7 + 
'@aws-sdk/util-user-agent-node': 3.973.5 + '@smithy/config-resolver': 4.4.10 + '@smithy/core': 3.23.9 + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/hash-node': 4.2.11 + '@smithy/invalid-dependency': 4.2.11 + '@smithy/middleware-content-length': 4.2.11 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/middleware-retry': 4.4.40 + '@smithy/middleware-serde': 4.2.12 + '@smithy/middleware-stack': 4.2.11 + '@smithy/node-config-provider': 4.3.11 + '@smithy/node-http-handler': 4.4.14 + '@smithy/protocol-http': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.39 + '@smithy/util-defaults-mode-node': 4.2.42 + '@smithy/util-endpoints': 3.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-retry': 4.2.11 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/region-config-resolver@3.972.6': dependencies: '@aws-sdk/types': 3.973.4 @@ -7324,6 +7706,30 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/token-providers@3.1005.0': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/token-providers@3.1007.0': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/nested-clients': 3.996.8 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/token-providers@3.999.0': dependencies: '@aws-sdk/core': 3.973.15 @@ -7414,6 +7820,14 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/util-user-agent-node@3.973.5': + dependencies: + 
'@aws-sdk/middleware-user-agent': 3.972.20 + '@aws-sdk/types': 3.973.5 + '@smithy/node-config-provider': 4.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/xml-builder@3.972.10': dependencies: '@smithy/types': 4.13.0 @@ -7499,7 +7913,7 @@ snapshots: '@buape/carbon@0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1)': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 discord-api-types: 0.38.37 optionalDependencies: '@cloudflare/workers-types': 4.20260120.0 @@ -7657,7 +8071,24 @@ snapshots: '@discordjs/voice@0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1)': dependencies: '@types/ws': 8.18.1 - discord-api-types: 0.38.41 + discord-api-types: 0.38.42 + prism-media: 1.3.5(@discordjs/opus@0.10.0)(opusscript@0.1.1) + tslib: 2.8.1 + ws: 8.19.0 + transitivePeerDependencies: + - '@discordjs/opus' + - bufferutil + - ffmpeg-static + - node-opus + - opusscript + - utf-8-validate + optional: true + + '@discordjs/voice@0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1)': + dependencies: + '@snazzah/davey': 0.1.10 + '@types/ws': 8.18.1 + discord-api-types: 0.38.42 prism-media: 1.3.5(@discordjs/opus@0.10.0)(opusscript@0.1.1) tslib: 2.8.1 ws: 8.19.0 @@ -8049,7 +8480,7 @@ snapshots: '@line/bot-sdk@10.6.0': dependencies: - '@types/node': 24.11.0 + '@types/node': 24.12.0 optionalDependencies: axios: 1.13.5 transitivePeerDependencies: @@ -8768,61 +9199,61 @@ snapshots: '@oxc-project/types@0.115.0': {} - '@oxfmt/binding-android-arm-eabi@0.36.0': + '@oxfmt/binding-android-arm-eabi@0.38.0': optional: true - '@oxfmt/binding-android-arm64@0.36.0': + '@oxfmt/binding-android-arm64@0.38.0': optional: true - '@oxfmt/binding-darwin-arm64@0.36.0': + '@oxfmt/binding-darwin-arm64@0.38.0': optional: true - '@oxfmt/binding-darwin-x64@0.36.0': + '@oxfmt/binding-darwin-x64@0.38.0': optional: true - '@oxfmt/binding-freebsd-x64@0.36.0': + '@oxfmt/binding-freebsd-x64@0.38.0': optional: true - '@oxfmt/binding-linux-arm-gnueabihf@0.36.0': + 
'@oxfmt/binding-linux-arm-gnueabihf@0.38.0': optional: true - '@oxfmt/binding-linux-arm-musleabihf@0.36.0': + '@oxfmt/binding-linux-arm-musleabihf@0.38.0': optional: true - '@oxfmt/binding-linux-arm64-gnu@0.36.0': + '@oxfmt/binding-linux-arm64-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-arm64-musl@0.36.0': + '@oxfmt/binding-linux-arm64-musl@0.38.0': optional: true - '@oxfmt/binding-linux-ppc64-gnu@0.36.0': + '@oxfmt/binding-linux-ppc64-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-riscv64-gnu@0.36.0': + '@oxfmt/binding-linux-riscv64-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-riscv64-musl@0.36.0': + '@oxfmt/binding-linux-riscv64-musl@0.38.0': optional: true - '@oxfmt/binding-linux-s390x-gnu@0.36.0': + '@oxfmt/binding-linux-s390x-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-x64-gnu@0.36.0': + '@oxfmt/binding-linux-x64-gnu@0.38.0': optional: true - '@oxfmt/binding-linux-x64-musl@0.36.0': + '@oxfmt/binding-linux-x64-musl@0.38.0': optional: true - '@oxfmt/binding-openharmony-arm64@0.36.0': + '@oxfmt/binding-openharmony-arm64@0.38.0': optional: true - '@oxfmt/binding-win32-arm64-msvc@0.36.0': + '@oxfmt/binding-win32-arm64-msvc@0.38.0': optional: true - '@oxfmt/binding-win32-ia32-msvc@0.36.0': + '@oxfmt/binding-win32-ia32-msvc@0.38.0': optional: true - '@oxfmt/binding-win32-x64-msvc@0.36.0': + '@oxfmt/binding-win32-x64-msvc@0.38.0': optional: true '@oxlint-tsgolint/darwin-arm64@0.16.0': @@ -8843,61 +9274,61 @@ snapshots: '@oxlint-tsgolint/win32-x64@0.16.0': optional: true - '@oxlint/binding-android-arm-eabi@1.51.0': + '@oxlint/binding-android-arm-eabi@1.53.0': optional: true - '@oxlint/binding-android-arm64@1.51.0': + '@oxlint/binding-android-arm64@1.53.0': optional: true - '@oxlint/binding-darwin-arm64@1.51.0': + '@oxlint/binding-darwin-arm64@1.53.0': optional: true - '@oxlint/binding-darwin-x64@1.51.0': + '@oxlint/binding-darwin-x64@1.53.0': optional: true - '@oxlint/binding-freebsd-x64@1.51.0': + '@oxlint/binding-freebsd-x64@1.53.0': 
optional: true - '@oxlint/binding-linux-arm-gnueabihf@1.51.0': + '@oxlint/binding-linux-arm-gnueabihf@1.53.0': optional: true - '@oxlint/binding-linux-arm-musleabihf@1.51.0': + '@oxlint/binding-linux-arm-musleabihf@1.53.0': optional: true - '@oxlint/binding-linux-arm64-gnu@1.51.0': + '@oxlint/binding-linux-arm64-gnu@1.53.0': optional: true - '@oxlint/binding-linux-arm64-musl@1.51.0': + '@oxlint/binding-linux-arm64-musl@1.53.0': optional: true - '@oxlint/binding-linux-ppc64-gnu@1.51.0': + '@oxlint/binding-linux-ppc64-gnu@1.53.0': optional: true - '@oxlint/binding-linux-riscv64-gnu@1.51.0': + '@oxlint/binding-linux-riscv64-gnu@1.53.0': optional: true - '@oxlint/binding-linux-riscv64-musl@1.51.0': + '@oxlint/binding-linux-riscv64-musl@1.53.0': optional: true - '@oxlint/binding-linux-s390x-gnu@1.51.0': + '@oxlint/binding-linux-s390x-gnu@1.53.0': optional: true - '@oxlint/binding-linux-x64-gnu@1.51.0': + '@oxlint/binding-linux-x64-gnu@1.53.0': optional: true - '@oxlint/binding-linux-x64-musl@1.51.0': + '@oxlint/binding-linux-x64-musl@1.53.0': optional: true - '@oxlint/binding-openharmony-arm64@1.51.0': + '@oxlint/binding-openharmony-arm64@1.53.0': optional: true - '@oxlint/binding-win32-arm64-msvc@1.51.0': + '@oxlint/binding-win32-arm64-msvc@1.53.0': optional: true - '@oxlint/binding-win32-ia32-msvc@1.51.0': + '@oxlint/binding-win32-ia32-msvc@1.53.0': optional: true - '@oxlint/binding-win32-x64-msvc@1.51.0': + '@oxlint/binding-win32-x64-msvc@1.53.0': optional: true '@pierre/diffs@1.0.11(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': @@ -8982,54 +9413,54 @@ snapshots: '@reflink/reflink-win32-x64-msvc': 0.1.19 optional: true - '@rolldown/binding-android-arm64@1.0.0-rc.7': + '@rolldown/binding-android-arm64@1.0.0-rc.9': optional: true - '@rolldown/binding-darwin-arm64@1.0.0-rc.7': + '@rolldown/binding-darwin-arm64@1.0.0-rc.9': optional: true - '@rolldown/binding-darwin-x64@1.0.0-rc.7': + '@rolldown/binding-darwin-x64@1.0.0-rc.9': optional: true - 
'@rolldown/binding-freebsd-x64@1.0.0-rc.7': + '@rolldown/binding-freebsd-x64@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.7': + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm64-musl@1.0.0-rc.7': + '@rolldown/binding-linux-arm64-musl@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-x64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-x64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-x64-musl@1.0.0-rc.7': + '@rolldown/binding-linux-x64-musl@1.0.0-rc.9': optional: true - '@rolldown/binding-openharmony-arm64@1.0.0-rc.7': + '@rolldown/binding-openharmony-arm64@1.0.0-rc.9': optional: true - '@rolldown/binding-wasm32-wasi@1.0.0-rc.7': + '@rolldown/binding-wasm32-wasi@1.0.0-rc.9': dependencies: '@napi-rs/wasm-runtime': 1.1.1 optional: true - '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.7': + '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.9': optional: true - '@rolldown/binding-win32-x64-msvc@1.0.0-rc.7': + '@rolldown/binding-win32-x64-msvc@1.0.0-rc.9': optional: true - '@rolldown/pluginutils@1.0.0-rc.7': {} + '@rolldown/pluginutils@1.0.0-rc.9': {} '@rollup/rollup-android-arm-eabi@4.59.0': optional: true @@ -9187,14 +9618,14 @@ snapshots: '@slack/logger@4.0.0': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@slack/oauth@3.0.4': dependencies: '@slack/logger': 4.0.0 '@slack/web-api': 7.14.1 '@types/jsonwebtoken': 9.0.10 - '@types/node': 25.3.5 + '@types/node': 25.4.0 jsonwebtoken: 9.0.3 transitivePeerDependencies: - debug @@ -9203,7 +9634,7 @@ snapshots: dependencies: '@slack/logger': 4.0.0 '@slack/web-api': 7.14.1 - 
'@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/ws': 8.18.1 eventemitter3: 5.0.4 ws: 8.19.0 @@ -9218,7 +9649,7 @@ snapshots: dependencies: '@slack/logger': 4.0.0 '@slack/types': 2.20.0 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/retry': 0.12.0 axios: 1.13.5 eventemitter3: 5.0.4 @@ -9855,6 +10286,67 @@ snapshots: dependencies: tslib: 2.8.1 + '@snazzah/davey-android-arm-eabi@0.1.10': + optional: true + + '@snazzah/davey-android-arm64@0.1.10': + optional: true + + '@snazzah/davey-darwin-arm64@0.1.10': + optional: true + + '@snazzah/davey-darwin-x64@0.1.10': + optional: true + + '@snazzah/davey-freebsd-x64@0.1.10': + optional: true + + '@snazzah/davey-linux-arm-gnueabihf@0.1.10': + optional: true + + '@snazzah/davey-linux-arm64-gnu@0.1.10': + optional: true + + '@snazzah/davey-linux-arm64-musl@0.1.10': + optional: true + + '@snazzah/davey-linux-x64-gnu@0.1.10': + optional: true + + '@snazzah/davey-linux-x64-musl@0.1.10': + optional: true + + '@snazzah/davey-wasm32-wasi@0.1.10': + dependencies: + '@napi-rs/wasm-runtime': 1.1.1 + optional: true + + '@snazzah/davey-win32-arm64-msvc@0.1.10': + optional: true + + '@snazzah/davey-win32-ia32-msvc@0.1.10': + optional: true + + '@snazzah/davey-win32-x64-msvc@0.1.10': + optional: true + + '@snazzah/davey@0.1.10': + optionalDependencies: + '@snazzah/davey-android-arm-eabi': 0.1.10 + '@snazzah/davey-android-arm64': 0.1.10 + '@snazzah/davey-darwin-arm64': 0.1.10 + '@snazzah/davey-darwin-x64': 0.1.10 + '@snazzah/davey-freebsd-x64': 0.1.10 + '@snazzah/davey-linux-arm-gnueabihf': 0.1.10 + '@snazzah/davey-linux-arm64-gnu': 0.1.10 + '@snazzah/davey-linux-arm64-musl': 0.1.10 + '@snazzah/davey-linux-x64-gnu': 0.1.10 + '@snazzah/davey-linux-x64-musl': 0.1.10 + '@snazzah/davey-wasm32-wasi': 0.1.10 + '@snazzah/davey-win32-arm64-msvc': 0.1.10 + '@snazzah/davey-win32-ia32-msvc': 0.1.10 + '@snazzah/davey-win32-x64-msvc': 0.1.10 + '@standard-schema/spec@1.1.0': {} '@swc/helpers@0.5.19': @@ -9982,7 +10474,7 @@ snapshots: 
'@types/body-parser@1.19.6': dependencies: '@types/connect': 3.4.38 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/bun@1.3.9': dependencies: @@ -10002,7 +10494,7 @@ snapshots: '@types/connect@3.4.38': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/deep-eql@4.0.2': {} @@ -10010,14 +10502,14 @@ snapshots: '@types/express-serve-static-core@4.19.8': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 '@types/express-serve-static-core@5.1.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 @@ -10046,7 +10538,7 @@ snapshots: '@types/jsonwebtoken@9.0.10': dependencies: '@types/ms': 2.1.0 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/linkify-it@5.0.0': {} @@ -10075,11 +10567,11 @@ snapshots: dependencies: undici-types: 6.21.0 - '@types/node@24.11.0': + '@types/node@24.12.0': dependencies: undici-types: 7.16.0 - '@types/node@25.3.5': + '@types/node@25.4.0': dependencies: undici-types: 7.18.2 @@ -10092,7 +10584,7 @@ snapshots: '@types/request@2.48.13': dependencies: '@types/caseless': 0.12.5 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/tough-cookie': 4.0.5 form-data: 2.5.4 @@ -10103,22 +10595,22 @@ snapshots: '@types/send@0.17.6': dependencies: '@types/mime': 1.3.5 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/send@1.2.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/serve-static@1.15.10': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/send': 0.17.6 '@types/serve-static@2.2.0': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/tough-cookie@4.0.5': {} @@ -10128,43 +10620,43 @@ snapshots: '@types/ws@8.18.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 '@types/yauzl@2.10.3': dependencies: - '@types/node': 25.3.5 + 
'@types/node': 25.4.0 optional: true - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-linux-arm@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-arm@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-linux-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-x64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview-win32-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-win32-x64@7.0.0-dev.20260311.1': optional: true - '@typescript/native-preview@7.0.0-dev.20260308.1': + '@typescript/native-preview@7.0.0-dev.20260311.1': optionalDependencies: - '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-arm': 7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-x64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-win32-x64': 7.0.0-dev.20260308.1 + '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260311.1 + '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260311.1 + '@typescript/native-preview-linux-arm': 7.0.0-dev.20260311.1 + '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260311.1 + '@typescript/native-preview-linux-x64': 7.0.0-dev.20260311.1 + '@typescript/native-preview-win32-arm64': 
7.0.0-dev.20260311.1 + '@typescript/native-preview-win32-x64': 7.0.0-dev.20260311.1 '@typespec/ts-http-runtime@0.3.3': dependencies: @@ -10205,29 +10697,29 @@ snapshots: - '@cypress/request' - supports-color - '@vitest/browser-playwright@4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser-playwright@4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': dependencies: - '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) playwright: 1.58.2 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - bufferutil - msw - utf-8-validate - vite - '@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': dependencies: - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) '@vitest/utils': 4.0.18 
magic-string: 0.30.21 pixelmatch: 7.1.0 pngjs: 7.0.0 sirv: 3.0.2 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) ws: 8.19.0 transitivePeerDependencies: - bufferutil @@ -10235,7 +10727,7 @@ snapshots: - utf-8-validate - vite - '@vitest/coverage-v8@4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18)': + '@vitest/coverage-v8@4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18)': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.18 @@ -10247,9 +10739,9 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) optionalDependencies: - '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) '@vitest/expect@4.0.18': dependencies: @@ -10260,13 +10752,13 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.0.3 - '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.18 estree-walker: 3.0.3 
magic-string: 0.30.21 optionalDependencies: - vite: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) '@vitest/pretty-format@4.0.18': dependencies: @@ -10363,13 +10855,14 @@ snapshots: acorn@8.16.0: {} - acpx@0.1.16(zod@4.3.6): + acpx@0.2.0(zod@4.3.6): dependencies: '@agentclientprotocol/sdk': 0.15.0(zod@4.3.6) commander: 14.0.3 skillflag: 0.1.4 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a - zod @@ -10382,6 +10875,8 @@ snapshots: agent-base@7.1.4: {} + agent-base@8.0.0: {} + ajv-formats@3.0.1(ajv@8.18.0): optionalDependencies: ajv: 8.18.0 @@ -10526,6 +11021,37 @@ snapshots: bare-events@2.8.2: {} + bare-fs@4.5.5: + dependencies: + bare-events: 2.8.2 + bare-path: 3.0.0 + bare-stream: 2.8.1(bare-events@2.8.2) + bare-url: 2.3.2 + fast-fifo: 1.3.2 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + + bare-os@3.7.1: {} + + bare-path@3.0.0: + dependencies: + bare-os: 3.7.1 + + bare-stream@2.8.1(bare-events@2.8.2): + dependencies: + streamx: 2.23.0 + teex: 1.0.1 + optionalDependencies: + bare-events: 2.8.2 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + + bare-url@2.3.2: + dependencies: + bare-path: 3.0.0 + base64-js@1.5.1: {} basic-auth@2.0.1: @@ -10613,7 +11139,7 @@ snapshots: bun-types@1.3.9: dependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 optional: true bytes@3.1.2: {} @@ -10865,7 +11391,7 @@ snapshots: discord-api-types@0.38.37: {} - discord-api-types@0.38.41: {} + discord-api-types@0.38.42: {} doctypes@1.1.0: {} @@ -10881,7 +11407,7 @@ snapshots: dependencies: domelementtype: 2.3.0 - dompurify@3.3.2: + dompurify@3.3.3: optionalDependencies: '@types/trusted-types': 2.0.7 @@ -11507,6 +12033,13 @@ snapshots: transitivePeerDependencies: - supports-color + https-proxy-agent@8.0.0: + dependencies: + agent-base: 8.0.0 + debug: 4.4.3 + 
transitivePeerDependencies: + - supports-color + human-signals@1.1.1: {} iconv-lite@0.4.24: @@ -12305,6 +12838,81 @@ snapshots: ws: 8.19.0 zod: 4.3.6 + openclaw@2026.3.8(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.7)(node-llama-cpp@3.16.2(typescript@5.9.3)): + dependencies: + '@agentclientprotocol/sdk': 0.15.0(zod@4.3.6) + '@aws-sdk/client-bedrock': 3.1007.0 + '@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) + '@clack/prompts': 1.1.0 + '@discordjs/voice': 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1) + '@grammyjs/runner': 2.0.3(grammy@1.41.1) + '@grammyjs/transformer-throttler': 1.2.1(grammy@1.41.1) + '@homebridge/ciao': 1.3.5 + '@larksuiteoapi/node-sdk': 1.59.0 + '@line/bot-sdk': 10.6.0 + '@lydell/node-pty': 1.2.0-beta.3 + '@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-coding-agent': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.57.1 + '@mozilla/readability': 0.6.0 + '@napi-rs/canvas': 0.1.95 + '@sinclair/typebox': 0.34.48 + '@slack/bolt': 4.6.0(@types/express@5.0.6) + '@slack/web-api': 7.14.1 + '@whiskeysockets/baileys': 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5) + ajv: 8.18.0 + chalk: 5.6.2 + chokidar: 5.0.0 + cli-highlight: 2.1.11 + commander: 14.0.3 + croner: 10.0.1 + discord-api-types: 0.38.42 + dotenv: 17.3.1 + express: 5.2.1 + file-type: 21.3.1 + grammy: 1.41.1 + https-proxy-agent: 7.0.6 + ipaddr.js: 2.3.0 + jiti: 2.6.1 + json5: 2.2.3 + jszip: 3.10.1 + linkedom: 0.18.12 + long: 5.3.2 + markdown-it: 14.1.1 + node-edge-tts: 1.2.10 + node-llama-cpp: 3.16.2(typescript@5.9.3) + opusscript: 0.1.1 + osc-progress: 0.3.0 + pdfjs-dist: 5.5.207 + playwright-core: 1.58.2 + qrcode-terminal: 0.12.0 + sharp: 0.34.5 + sqlite-vec: 0.1.7-alpha.2 + tar: 7.5.11 + tslog: 4.10.2 + undici: 7.22.0 + ws: 8.19.0 + yaml: 2.8.2 + zod: 4.3.6 + transitivePeerDependencies: + - 
'@discordjs/opus' + - '@modelcontextprotocol/sdk' + - '@types/express' + - audio-decode + - aws-crt + - bufferutil + - canvas + - debug + - encoding + - ffmpeg-static + - hono + - jimp + - link-preview-js + - node-opus + - supports-color + - utf-8-validate + opus-decoder@0.7.11: dependencies: '@wasm-audio-decoders/common': 9.0.7 @@ -12325,29 +12933,29 @@ snapshots: osc-progress@0.3.0: {} - oxfmt@0.36.0: + oxfmt@0.38.0: dependencies: tinypool: 2.1.0 optionalDependencies: - '@oxfmt/binding-android-arm-eabi': 0.36.0 - '@oxfmt/binding-android-arm64': 0.36.0 - '@oxfmt/binding-darwin-arm64': 0.36.0 - '@oxfmt/binding-darwin-x64': 0.36.0 - '@oxfmt/binding-freebsd-x64': 0.36.0 - '@oxfmt/binding-linux-arm-gnueabihf': 0.36.0 - '@oxfmt/binding-linux-arm-musleabihf': 0.36.0 - '@oxfmt/binding-linux-arm64-gnu': 0.36.0 - '@oxfmt/binding-linux-arm64-musl': 0.36.0 - '@oxfmt/binding-linux-ppc64-gnu': 0.36.0 - '@oxfmt/binding-linux-riscv64-gnu': 0.36.0 - '@oxfmt/binding-linux-riscv64-musl': 0.36.0 - '@oxfmt/binding-linux-s390x-gnu': 0.36.0 - '@oxfmt/binding-linux-x64-gnu': 0.36.0 - '@oxfmt/binding-linux-x64-musl': 0.36.0 - '@oxfmt/binding-openharmony-arm64': 0.36.0 - '@oxfmt/binding-win32-arm64-msvc': 0.36.0 - '@oxfmt/binding-win32-ia32-msvc': 0.36.0 - '@oxfmt/binding-win32-x64-msvc': 0.36.0 + '@oxfmt/binding-android-arm-eabi': 0.38.0 + '@oxfmt/binding-android-arm64': 0.38.0 + '@oxfmt/binding-darwin-arm64': 0.38.0 + '@oxfmt/binding-darwin-x64': 0.38.0 + '@oxfmt/binding-freebsd-x64': 0.38.0 + '@oxfmt/binding-linux-arm-gnueabihf': 0.38.0 + '@oxfmt/binding-linux-arm-musleabihf': 0.38.0 + '@oxfmt/binding-linux-arm64-gnu': 0.38.0 + '@oxfmt/binding-linux-arm64-musl': 0.38.0 + '@oxfmt/binding-linux-ppc64-gnu': 0.38.0 + '@oxfmt/binding-linux-riscv64-gnu': 0.38.0 + '@oxfmt/binding-linux-riscv64-musl': 0.38.0 + '@oxfmt/binding-linux-s390x-gnu': 0.38.0 + '@oxfmt/binding-linux-x64-gnu': 0.38.0 + '@oxfmt/binding-linux-x64-musl': 0.38.0 + '@oxfmt/binding-openharmony-arm64': 0.38.0 + 
'@oxfmt/binding-win32-arm64-msvc': 0.38.0 + '@oxfmt/binding-win32-ia32-msvc': 0.38.0 + '@oxfmt/binding-win32-x64-msvc': 0.38.0 oxlint-tsgolint@0.16.0: optionalDependencies: @@ -12358,27 +12966,27 @@ snapshots: '@oxlint-tsgolint/win32-arm64': 0.16.0 '@oxlint-tsgolint/win32-x64': 0.16.0 - oxlint@1.51.0(oxlint-tsgolint@0.16.0): + oxlint@1.53.0(oxlint-tsgolint@0.16.0): optionalDependencies: - '@oxlint/binding-android-arm-eabi': 1.51.0 - '@oxlint/binding-android-arm64': 1.51.0 - '@oxlint/binding-darwin-arm64': 1.51.0 - '@oxlint/binding-darwin-x64': 1.51.0 - '@oxlint/binding-freebsd-x64': 1.51.0 - '@oxlint/binding-linux-arm-gnueabihf': 1.51.0 - '@oxlint/binding-linux-arm-musleabihf': 1.51.0 - '@oxlint/binding-linux-arm64-gnu': 1.51.0 - '@oxlint/binding-linux-arm64-musl': 1.51.0 - '@oxlint/binding-linux-ppc64-gnu': 1.51.0 - '@oxlint/binding-linux-riscv64-gnu': 1.51.0 - '@oxlint/binding-linux-riscv64-musl': 1.51.0 - '@oxlint/binding-linux-s390x-gnu': 1.51.0 - '@oxlint/binding-linux-x64-gnu': 1.51.0 - '@oxlint/binding-linux-x64-musl': 1.51.0 - '@oxlint/binding-openharmony-arm64': 1.51.0 - '@oxlint/binding-win32-arm64-msvc': 1.51.0 - '@oxlint/binding-win32-ia32-msvc': 1.51.0 - '@oxlint/binding-win32-x64-msvc': 1.51.0 + '@oxlint/binding-android-arm-eabi': 1.53.0 + '@oxlint/binding-android-arm64': 1.53.0 + '@oxlint/binding-darwin-arm64': 1.53.0 + '@oxlint/binding-darwin-x64': 1.53.0 + '@oxlint/binding-freebsd-x64': 1.53.0 + '@oxlint/binding-linux-arm-gnueabihf': 1.53.0 + '@oxlint/binding-linux-arm-musleabihf': 1.53.0 + '@oxlint/binding-linux-arm64-gnu': 1.53.0 + '@oxlint/binding-linux-arm64-musl': 1.53.0 + '@oxlint/binding-linux-ppc64-gnu': 1.53.0 + '@oxlint/binding-linux-riscv64-gnu': 1.53.0 + '@oxlint/binding-linux-riscv64-musl': 1.53.0 + '@oxlint/binding-linux-s390x-gnu': 1.53.0 + '@oxlint/binding-linux-x64-gnu': 1.53.0 + '@oxlint/binding-linux-x64-musl': 1.53.0 + '@oxlint/binding-openharmony-arm64': 1.53.0 + '@oxlint/binding-win32-arm64-msvc': 1.53.0 + 
'@oxlint/binding-win32-ia32-msvc': 1.53.0 + '@oxlint/binding-win32-x64-msvc': 1.53.0 oxlint-tsgolint: 0.16.0 p-finally@1.0.0: {} @@ -12594,7 +13202,7 @@ snapshots: '@protobufjs/path': 1.1.2 '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 - '@types/node': 25.3.5 + '@types/node': 25.4.0 long: 5.3.2 proxy-addr@2.0.7: @@ -12833,7 +13441,7 @@ snapshots: dependencies: glob: 10.5.0 - rolldown-plugin-dts@0.22.4(@typescript/native-preview@7.0.0-dev.20260308.1)(rolldown@1.0.0-rc.7)(typescript@5.9.3): + rolldown-plugin-dts@0.22.5(@typescript/native-preview@7.0.0-dev.20260311.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3): dependencies: '@babel/generator': 8.0.0-rc.2 '@babel/helper-validator-identifier': 8.0.0-rc.2 @@ -12844,33 +13452,33 @@ snapshots: dts-resolver: 2.1.3 get-tsconfig: 4.13.6 obug: 2.1.1 - rolldown: 1.0.0-rc.7 + rolldown: 1.0.0-rc.9 optionalDependencies: - '@typescript/native-preview': 7.0.0-dev.20260308.1 + '@typescript/native-preview': 7.0.0-dev.20260311.1 typescript: 5.9.3 transitivePeerDependencies: - oxc-resolver - rolldown@1.0.0-rc.7: + rolldown@1.0.0-rc.9: dependencies: '@oxc-project/types': 0.115.0 - '@rolldown/pluginutils': 1.0.0-rc.7 + '@rolldown/pluginutils': 1.0.0-rc.9 optionalDependencies: - '@rolldown/binding-android-arm64': 1.0.0-rc.7 - '@rolldown/binding-darwin-arm64': 1.0.0-rc.7 - '@rolldown/binding-darwin-x64': 1.0.0-rc.7 - '@rolldown/binding-freebsd-x64': 1.0.0-rc.7 - '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-rc.7 - '@rolldown/binding-linux-arm64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-arm64-musl': 1.0.0-rc.7 - '@rolldown/binding-linux-ppc64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-s390x-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-x64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-x64-musl': 1.0.0-rc.7 - '@rolldown/binding-openharmony-arm64': 1.0.0-rc.7 - '@rolldown/binding-wasm32-wasi': 1.0.0-rc.7 - '@rolldown/binding-win32-arm64-msvc': 1.0.0-rc.7 - '@rolldown/binding-win32-x64-msvc': 1.0.0-rc.7 + '@rolldown/binding-android-arm64': 
1.0.0-rc.9 + '@rolldown/binding-darwin-arm64': 1.0.0-rc.9 + '@rolldown/binding-darwin-x64': 1.0.0-rc.9 + '@rolldown/binding-freebsd-x64': 1.0.0-rc.9 + '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-rc.9 + '@rolldown/binding-linux-arm64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-arm64-musl': 1.0.0-rc.9 + '@rolldown/binding-linux-ppc64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-s390x-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-x64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-x64-musl': 1.0.0-rc.9 + '@rolldown/binding-openharmony-arm64': 1.0.0-rc.9 + '@rolldown/binding-wasm32-wasi': 1.0.0-rc.9 + '@rolldown/binding-win32-arm64-msvc': 1.0.0-rc.9 + '@rolldown/binding-win32-x64-msvc': 1.0.0-rc.9 rollup@4.59.0: dependencies: @@ -13114,9 +13722,10 @@ snapshots: skillflag@0.1.4: dependencies: '@clack/prompts': 1.1.0 - tar-stream: 3.1.7 + tar-stream: 3.1.8 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a sleep-promise@9.1.0: {} @@ -13300,13 +13909,15 @@ snapshots: array-back: 6.2.2 wordwrapjs: 5.1.1 - tar-stream@3.1.7: + tar-stream@3.1.8: dependencies: b4a: 1.8.0 + bare-fs: 4.5.5 fast-fifo: 1.3.2 streamx: 2.23.0 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a tar@7.5.11: @@ -13317,6 +13928,13 @@ snapshots: minizlib: 3.1.0 yallist: 5.0.0 + teex@1.0.1: + dependencies: + streamx: 2.23.0 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + text-decoder@1.2.7: dependencies: b4a: 1.8.0 @@ -13381,7 +13999,7 @@ snapshots: ts-algebra@2.0.0: {} - tsdown@0.21.0(@typescript/native-preview@7.0.0-dev.20260308.1)(typescript@5.9.3): + tsdown@0.21.2(@typescript/native-preview@7.0.0-dev.20260311.1)(typescript@5.9.3): dependencies: ansis: 4.2.0 cac: 7.0.0 @@ -13391,14 +14009,14 @@ snapshots: import-without-cache: 0.2.5 obug: 2.1.1 picomatch: 4.0.3 - rolldown: 1.0.0-rc.7 - rolldown-plugin-dts: 0.22.4(@typescript/native-preview@7.0.0-dev.20260308.1)(rolldown@1.0.0-rc.7)(typescript@5.9.3) + 
rolldown: 1.0.0-rc.9 + rolldown-plugin-dts: 0.22.5(@typescript/native-preview@7.0.0-dev.20260311.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3) semver: 7.7.4 tinyexec: 1.0.2 tinyglobby: 0.2.15 tree-kill: 1.2.2 unconfig-core: 7.5.0 - unrun: 0.2.30 + unrun: 0.2.32 optionalDependencies: typescript: 5.9.3 transitivePeerDependencies: @@ -13496,9 +14114,9 @@ snapshots: unpipe@1.0.0: {} - unrun@0.2.30: + unrun@0.2.32: dependencies: - rolldown: 1.0.0-rc.7 + rolldown: 1.0.0-rc.9 url-join@4.0.1: {} @@ -13537,7 +14155,7 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.3 - vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): + vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): dependencies: esbuild: 0.27.3 fdir: 6.5.0(picomatch@4.0.3) @@ -13546,17 +14164,17 @@ snapshots: rollup: 4.59.0 tinyglobby: 0.2.15 optionalDependencies: - '@types/node': 25.3.5 + '@types/node': 25.4.0 fsevents: 2.3.3 jiti: 2.6.1 lightningcss: 1.30.2 tsx: 4.21.0 yaml: 2.8.2 - vitest@4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.4.0)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): dependencies: '@vitest/expect': 4.0.18 - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) '@vitest/pretty-format': 4.0.18 '@vitest/runner': 4.0.18 '@vitest/snapshot': 4.0.18 @@ -13573,12 +14191,12 @@ snapshots: tinyexec: 1.0.2 tinyglobby: 0.2.15 tinyrainbow: 3.0.3 - vite: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 
optionalDependencies: '@opentelemetry/api': 1.9.0 - '@types/node': 25.3.5 - '@vitest/browser-playwright': 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@types/node': 25.4.0 + '@vitest/browser-playwright': 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) transitivePeerDependencies: - jiti - less diff --git a/scripts/ios-beta-archive.sh b/scripts/ios-beta-archive.sh new file mode 100755 index 00000000000..c65e9991389 --- /dev/null +++ b/scripts/ios-beta-archive.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: + scripts/ios-beta-archive.sh [--build-number 7] + +Archives and exports a beta-release IPA locally without uploading. +EOF +} + +BUILD_NUMBER="${IOS_BETA_BUILD_NUMBER:-}" +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +while [[ $# -gt 0 ]]; do + case "$1" in + --) + shift + ;; + --build-number) + BUILD_NUMBER="${2:-}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage + exit 1 + ;; + esac +done + +( + cd "${ROOT_DIR}/apps/ios" + IOS_BETA_BUILD_NUMBER="${BUILD_NUMBER}" fastlane ios beta_archive +) diff --git a/scripts/ios-beta-prepare.sh b/scripts/ios-beta-prepare.sh new file mode 100755 index 00000000000..1d88add46db --- /dev/null +++ b/scripts/ios-beta-prepare.sh @@ -0,0 +1,117 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: + scripts/ios-beta-prepare.sh --build-number 7 [--team-id TEAMID] + +Prepares local beta-release inputs without touching local signing overrides: +- reads package.json.version and writes apps/ios/build/Version.xcconfig +- writes apps/ios/build/BetaRelease.xcconfig with canonical bundle IDs +- regenerates apps/ios/OpenClaw.xcodeproj via xcodegen +EOF +} + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +IOS_DIR="${ROOT_DIR}/apps/ios" +BUILD_DIR="${IOS_DIR}/build" +BETA_XCCONFIG="${IOS_DIR}/build/BetaRelease.xcconfig" +TEAM_HELPER="${ROOT_DIR}/scripts/ios-team-id.sh" +VERSION_HELPER="${ROOT_DIR}/scripts/ios-write-version-xcconfig.sh" + +BUILD_NUMBER="" +TEAM_ID="${IOS_DEVELOPMENT_TEAM:-}" +PACKAGE_VERSION="$(cd "${ROOT_DIR}" && node -p "require('./package.json').version" 2>/dev/null || true)" + +prepare_build_dir() { + if [[ -L "${BUILD_DIR}" ]]; then + echo "Refusing to use symlinked build directory: ${BUILD_DIR}" >&2 + exit 1 + fi + + mkdir -p "${BUILD_DIR}" +} + +write_generated_file() { + local output_path="$1" + local tmp_file="" + + if [[ -e "${output_path}" && -L "${output_path}" ]]; then + echo "Refusing to overwrite symlinked file: ${output_path}" >&2 + exit 1 + fi + + tmp_file="$(mktemp "${output_path}.XXXXXX")" + cat >"${tmp_file}" + mv -f "${tmp_file}" "${output_path}" +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --) + shift + ;; + --build-number) + BUILD_NUMBER="${2:-}" + shift 2 + ;; + --team-id) + TEAM_ID="${2:-}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage + exit 1 + ;; + esac +done + +if [[ -z "${BUILD_NUMBER}" ]]; then + echo "Missing required --build-number." >&2 + usage + exit 1 +fi + +if [[ -z "${TEAM_ID}" ]]; then + TEAM_ID="$(IOS_ALLOW_KEYCHAIN_TEAM_FALLBACK=1 bash "${TEAM_HELPER}")" +fi + +if [[ -z "${TEAM_ID}" ]]; then + echo "Could not resolve Apple Team ID. Set IOS_DEVELOPMENT_TEAM or sign into Xcode." 
>&2 + exit 1 +fi + +prepare_build_dir + +( + bash "${VERSION_HELPER}" --build-number "${BUILD_NUMBER}" +) + +write_generated_file "${BETA_XCCONFIG}" <&2 + usage + exit 1 + ;; + esac +done + +( + cd "${ROOT_DIR}/apps/ios" + IOS_BETA_BUILD_NUMBER="${BUILD_NUMBER}" fastlane ios beta +) diff --git a/scripts/ios-write-version-xcconfig.sh b/scripts/ios-write-version-xcconfig.sh new file mode 100755 index 00000000000..b63d3e81adb --- /dev/null +++ b/scripts/ios-write-version-xcconfig.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: + scripts/ios-write-version-xcconfig.sh [--build-number 7] + +Writes apps/ios/build/Version.xcconfig from root package.json.version: +- OPENCLAW_GATEWAY_VERSION = exact package.json version +- OPENCLAW_MARKETING_VERSION = short iOS/App Store version +- OPENCLAW_BUILD_VERSION = explicit build number or local numeric fallback +EOF +} + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +IOS_DIR="${ROOT_DIR}/apps/ios" +BUILD_DIR="${IOS_DIR}/build" +VERSION_XCCONFIG="${IOS_DIR}/build/Version.xcconfig" +PACKAGE_VERSION="$(cd "${ROOT_DIR}" && node -p "require('./package.json').version" 2>/dev/null || true)" +BUILD_NUMBER="" + +prepare_build_dir() { + if [[ -L "${BUILD_DIR}" ]]; then + echo "Refusing to use symlinked build directory: ${BUILD_DIR}" >&2 + exit 1 + fi + + mkdir -p "${BUILD_DIR}" +} + +write_generated_file() { + local output_path="$1" + local tmp_file="" + + if [[ -e "${output_path}" && -L "${output_path}" ]]; then + echo "Refusing to overwrite symlinked file: ${output_path}" >&2 + exit 1 + fi + + tmp_file="$(mktemp "${output_path}.XXXXXX")" + cat >"${tmp_file}" + mv -f "${tmp_file}" "${output_path}" +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --) + shift + ;; + --build-number) + BUILD_NUMBER="${2:-}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage + exit 1 + ;; + esac +done + +PACKAGE_VERSION="$(printf '%s' "${PACKAGE_VERSION}" | 
tr -d '\n' | xargs)" +if [[ -z "${PACKAGE_VERSION}" ]]; then + echo "Unable to read package.json.version from ${ROOT_DIR}/package.json." >&2 + exit 1 +fi + +if [[ "${PACKAGE_VERSION}" =~ ^([0-9]{4}\.[0-9]{1,2}\.[0-9]{1,2})([.-]?beta[.-][0-9]+)?$ ]]; then + MARKETING_VERSION="${BASH_REMATCH[1]}" +else + echo "Unsupported package.json.version '${PACKAGE_VERSION}'. Expected 2026.3.11 or 2026.3.11-beta.1." >&2 + exit 1 +fi + +if [[ -z "${BUILD_NUMBER}" ]]; then + BUILD_NUMBER="$(cd "${ROOT_DIR}" && git rev-list --count HEAD 2>/dev/null || printf '0')" +fi + +if [[ ! "${BUILD_NUMBER}" =~ ^[0-9]+$ ]]; then + echo "Invalid build number '${BUILD_NUMBER}'. Expected digits only." >&2 + exit 1 +fi + +prepare_build_dir + +write_generated_file "${VERSION_XCCONFIG}" < { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -321,7 +324,7 @@ export class AcpSessionManager { sessionKey: string; signal?: AbortSignal; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -397,7 +400,7 @@ export class AcpSessionManager { sessionKey: string; runtimeMode: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -452,7 +455,7 @@ export class AcpSessionManager { key: string; value: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP 
session key is required."); } @@ -525,7 +528,7 @@ export class AcpSessionManager { sessionKey: string; patch: Partial; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); const validatedPatch = validateRuntimeOptionPatch(params.patch); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); @@ -555,7 +558,7 @@ export class AcpSessionManager { cfg: OpenClawConfig; sessionKey: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -591,7 +594,10 @@ export class AcpSessionManager { } async runTurn(input: AcpRunTurnInput): Promise { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -738,7 +744,7 @@ export class AcpSessionManager { sessionKey: string; reason?: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -806,7 +812,10 @@ export class AcpSessionManager { } async closeSession(input: AcpCloseSessionInput): Promise { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } diff --git a/src/acp/control-plane/manager.test.ts b/src/acp/control-plane/manager.test.ts index ebdf356ca9f..8152944834c 100644 --- 
a/src/acp/control-plane/manager.test.ts +++ b/src/acp/control-plane/manager.test.ts @@ -170,6 +170,57 @@ describe("AcpSessionManager", () => { expect(resolved.error.message).toContain("ACP metadata is missing"); }); + it("canonicalizes the main alias before ACP rehydrate after restart", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey; + if (sessionKey !== "agent:main:main") { + return null; + } + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + agent: "main", + runtimeSessionName: sessionKey, + }, + }; + }); + + const manager = new AcpSessionManager(); + const cfg = { + ...baseCfg, + session: { mainKey: "main" }, + agents: { list: [{ id: "main", default: true }] }, + } as OpenClawConfig; + + await manager.runTurn({ + cfg, + sessionKey: "main", + text: "after restart", + mode: "prompt", + requestId: "r-main", + }); + + expect(hoisted.readAcpSessionEntryMock).toHaveBeenCalledWith( + expect.objectContaining({ + cfg, + sessionKey: "agent:main:main", + }), + ); + expect(runtimeState.ensureSession).toHaveBeenCalledWith( + expect.objectContaining({ + agent: "main", + sessionKey: "agent:main:main", + }), + ); + }); + it("serializes concurrent turns for the same ACP session", async () => { const runtimeState = createRuntime(); hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ diff --git a/src/acp/control-plane/manager.utils.ts b/src/acp/control-plane/manager.utils.ts index 17729c6c2fc..90f7c516538 100644 --- a/src/acp/control-plane/manager.utils.ts +++ b/src/acp/control-plane/manager.utils.ts @@ -1,6 +1,14 @@ import type { OpenClawConfig } from "../../config/config.js"; +import { + canonicalizeMainSessionAlias, + resolveMainSessionKey, +} from 
"../../config/sessions/main-session.js"; import type { SessionAcpMeta } from "../../config/sessions/types.js"; -import { normalizeAgentId, parseAgentSessionKey } from "../../routing/session-key.js"; +import { + normalizeAgentId, + normalizeMainKey, + parseAgentSessionKey, +} from "../../routing/session-key.js"; import { ACP_ERROR_CODES, AcpRuntimeError } from "../runtime/errors.js"; import type { AcpSessionResolution } from "./manager.types.js"; @@ -42,6 +50,33 @@ export function normalizeSessionKey(sessionKey: string): string { return sessionKey.trim(); } +export function canonicalizeAcpSessionKey(params: { + cfg: OpenClawConfig; + sessionKey: string; +}): string { + const normalized = normalizeSessionKey(params.sessionKey); + if (!normalized) { + return ""; + } + const lowered = normalized.toLowerCase(); + if (lowered === "global" || lowered === "unknown") { + return lowered; + } + const parsed = parseAgentSessionKey(lowered); + if (parsed) { + return canonicalizeMainSessionAlias({ + cfg: params.cfg, + agentId: parsed.agentId, + sessionKey: lowered, + }); + } + const mainKey = normalizeMainKey(params.cfg.session?.mainKey); + if (lowered === "main" || lowered === mainKey) { + return resolveMainSessionKey(params.cfg); + } + return lowered; +} + export function normalizeActorKey(sessionKey: string): string { return sessionKey.trim().toLowerCase(); } diff --git a/src/acp/translator.session-rate-limit.test.ts b/src/acp/translator.session-rate-limit.test.ts index d08ae1a1567..d0f774678a9 100644 --- a/src/acp/translator.session-rate-limit.test.ts +++ b/src/acp/translator.session-rate-limit.test.ts @@ -52,7 +52,7 @@ function createSetSessionModeRequest(sessionId: string, modeId: string): SetSess function createSetSessionConfigOptionRequest( sessionId: string, configId: string, - value: string, + value: string | boolean, ): SetSessionConfigOptionRequest { return { sessionId, @@ -644,6 +644,55 @@ describe("acp setSessionConfigOption bridge behavior", () => { 
sessionStore.clearAllSessionsForTest(); }); + + it("rejects non-string ACP config option values", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "bool-config-session", + kind: "direct", + updatedAt: Date.now(), + thinkingLevel: "minimal", + modelProvider: "openai", + model: "gpt-5.4", + }, + ], + }; + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("bool-config-session")); + + await expect( + agent.setSessionConfigOption( + createSetSessionConfigOptionRequest("bool-config-session", "thought_level", false), + ), + ).rejects.toThrow( + 'ACP bridge does not support non-string session config option values for "thought_level".', + ); + expect(request).not.toHaveBeenCalledWith( + "sessions.patch", + expect.objectContaining({ key: "bool-config-session" }), + ); + + sessionStore.clearAllSessionsForTest(); + }); }); describe("acp tool streaming bridge behavior", () => { diff --git a/src/acp/translator.ts b/src/acp/translator.ts index 585f97c8f43..bb52db7b26b 100644 --- a/src/acp/translator.ts +++ b/src/acp/translator.ts @@ -937,11 +937,16 @@ export class AcpGatewayAgent implements Agent { private resolveSessionConfigPatch( configId: string, - value: string, + value: string | boolean, ): { overrides: Partial; patch: Record; } { + if (typeof value !== "string") { + throw new Error( + `ACP bridge does not support non-string session config option values for "${configId}".`, + ); + } switch (configId) { case ACP_THOUGHT_LEVEL_CONFIG_ID: return { diff --git a/src/agents/auth-profiles/usage.test.ts 
b/src/agents/auth-profiles/usage.test.ts index 261eae6efd5..6dd5697cc99 100644 --- a/src/agents/auth-profiles/usage.test.ts +++ b/src/agents/auth-profiles/usage.test.ts @@ -207,7 +207,7 @@ describe("resolveProfilesUnavailableReason", () => { ).toBe("overloaded"); }); - it("falls back to rate_limit when active cooldown has no reason history", () => { + it("falls back to unknown when active cooldown has no reason history", () => { const now = Date.now(); const store = makeStore({ "anthropic:default": { @@ -221,7 +221,7 @@ describe("resolveProfilesUnavailableReason", () => { profileIds: ["anthropic:default"], now, }), - ).toBe("rate_limit"); + ).toBe("unknown"); }); it("ignores expired windows and returns null when no profile is actively unavailable", () => { diff --git a/src/agents/auth-profiles/usage.ts b/src/agents/auth-profiles/usage.ts index 273fd754595..20e1cbaa497 100644 --- a/src/agents/auth-profiles/usage.ts +++ b/src/agents/auth-profiles/usage.ts @@ -110,7 +110,11 @@ export function resolveProfilesUnavailableReason(params: { recordedReason = true; } if (!recordedReason) { - addScore("rate_limit", 1); + // No failure counts recorded for this cooldown window. Previously this + // defaulted to "rate_limit", which caused false "rate limit reached" + // warnings when the actual reason was unknown (e.g. transient network + // blip or server error without a classified failure count). 
+ addScore("unknown", 1); } } diff --git a/src/agents/huggingface-models.ts b/src/agents/huggingface-models.ts index 7d3755adefb..0e7ae4270f7 100644 --- a/src/agents/huggingface-models.ts +++ b/src/agents/huggingface-models.ts @@ -1,5 +1,6 @@ import type { ModelDefinitionConfig } from "../config/types.models.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; +import { isReasoningModelHeuristic } from "./ollama-models.js"; const log = createSubsystemLogger("huggingface-models"); @@ -125,7 +126,7 @@ export function buildHuggingfaceModelDefinition( */ function inferredMetaFromModelId(id: string): { name: string; reasoning: boolean } { const base = id.split("/").pop() ?? id; - const reasoning = /r1|reasoning|thinking|reason/i.test(id) || /-\d+[tb]?-thinking/i.test(base); + const reasoning = isReasoningModelHeuristic(id); const name = base.replace(/-/g, " ").replace(/\b(\w)/g, (c) => c.toUpperCase()); return { name, reasoning }; } diff --git a/src/agents/live-model-filter.ts b/src/agents/live-model-filter.ts index 03de7d772cc..059e12d9711 100644 --- a/src/agents/live-model-filter.ts +++ b/src/agents/live-model-filter.ts @@ -81,7 +81,7 @@ export function isModernModelRef(ref: ModelRef): boolean { return false; } - if (provider === "openrouter" || provider === "opencode") { + if (provider === "openrouter" || provider === "opencode" || provider === "opencode-go") { // OpenRouter/opencode are pass-through proxies; accept any model ID // rather than restricting to a static prefix list. 
return true; diff --git a/src/agents/memory-search.test.ts b/src/agents/memory-search.test.ts index 9372b4c7696..1d04b730351 100644 --- a/src/agents/memory-search.test.ts +++ b/src/agents/memory-search.test.ts @@ -131,6 +131,113 @@ describe("memory search config", () => { expect(resolved?.extraPaths).toEqual(["/shared/notes", "docs", "../team-notes"]); }); + it("normalizes multimodal settings", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + multimodal: { + enabled: true, + modalities: ["all"], + maxFileBytes: 8192, + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: ["image", "audio"], + maxFileBytes: 8192, + }); + }); + + it("keeps an explicit empty multimodal modalities list empty", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + multimodal: { + enabled: true, + modalities: [], + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: [], + maxFileBytes: 10 * 1024 * 1024, + }); + expect(resolved?.provider).toBe("gemini"); + }); + + it("does not enforce multimodal provider validation when no modalities are active", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "openai", + model: "text-embedding-3-small", + fallback: "openai", + multimodal: { + enabled: true, + modalities: [], + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: [], + maxFileBytes: 10 * 1024 * 1024, + }); + }); + + it("rejects multimodal memory on unsupported providers", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "openai", + 
model: "text-embedding-3-small", + multimodal: { enabled: true, modalities: ["image"] }, + }, + }, + }, + }); + expect(() => resolveMemorySearchConfig(cfg, "main")).toThrow( + /memorySearch\.multimodal requires memorySearch\.provider = "gemini"/, + ); + }); + + it("rejects multimodal memory when fallback is configured", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "openai", + multimodal: { enabled: true, modalities: ["image"] }, + }, + }, + }, + }); + expect(() => resolveMemorySearchConfig(cfg, "main")).toThrow( + /memorySearch\.multimodal does not support memorySearch\.fallback/, + ); + }); + it("includes batch defaults for openai without remote overrides", () => { const cfg = configWithDefaultProvider("openai"); const resolved = resolveMemorySearchConfig(cfg, "main"); diff --git a/src/agents/memory-search.ts b/src/agents/memory-search.ts index e14fd5a0b3b..d00dae70639 100644 --- a/src/agents/memory-search.ts +++ b/src/agents/memory-search.ts @@ -3,6 +3,12 @@ import path from "node:path"; import type { OpenClawConfig, MemorySearchConfig } from "../config/config.js"; import { resolveStateDir } from "../config/paths.js"; import type { SecretInput } from "../config/types.secrets.js"; +import { + isMemoryMultimodalEnabled, + normalizeMemoryMultimodalSettings, + supportsMemoryMultimodalEmbeddings, + type MemoryMultimodalSettings, +} from "../memory/multimodal.js"; import { clampInt, clampNumber, resolveUserPath } from "../utils.js"; import { resolveAgentConfig } from "./agent-scope.js"; @@ -10,6 +16,7 @@ export type ResolvedMemorySearchConfig = { enabled: boolean; sources: Array<"memory" | "sessions">; extraPaths: string[]; + multimodal: MemoryMultimodalSettings; provider: "openai" | "local" | "gemini" | "voyage" | "mistral" | "ollama" | "auto"; remote?: { baseUrl?: string; @@ -28,6 +35,7 @@ export type ResolvedMemorySearchConfig = { }; fallback: "openai" | "gemini" 
| "local" | "voyage" | "mistral" | "ollama" | "none"; model: string; + outputDimensionality?: number; local: { modelPath?: string; modelCacheDir?: string; @@ -193,6 +201,7 @@ function mergeConfig( ? DEFAULT_OLLAMA_MODEL : undefined; const model = overrides?.model ?? defaults?.model ?? modelDefault ?? ""; + const outputDimensionality = overrides?.outputDimensionality ?? defaults?.outputDimensionality; const local = { modelPath: overrides?.local?.modelPath ?? defaults?.local?.modelPath, modelCacheDir: overrides?.local?.modelCacheDir ?? defaults?.local?.modelCacheDir, @@ -202,6 +211,11 @@ function mergeConfig( .map((value) => value.trim()) .filter(Boolean); const extraPaths = Array.from(new Set(rawPaths)); + const multimodal = normalizeMemoryMultimodalSettings({ + enabled: overrides?.multimodal?.enabled ?? defaults?.multimodal?.enabled, + modalities: overrides?.multimodal?.modalities ?? defaults?.multimodal?.modalities, + maxFileBytes: overrides?.multimodal?.maxFileBytes ?? defaults?.multimodal?.maxFileBytes, + }); const vector = { enabled: overrides?.store?.vector?.enabled ?? defaults?.store?.vector?.enabled ?? 
true, extensionPath: @@ -305,6 +319,7 @@ function mergeConfig( enabled, sources, extraPaths, + multimodal, provider, remote, experimental: { @@ -312,6 +327,7 @@ function mergeConfig( }, fallback, model, + outputDimensionality, local, store, chunking: { tokens: Math.max(1, chunking.tokens), overlap }, @@ -362,5 +378,22 @@ export function resolveMemorySearchConfig( if (!resolved.enabled) { return null; } + const multimodalActive = isMemoryMultimodalEnabled(resolved.multimodal); + if ( + multimodalActive && + !supportsMemoryMultimodalEmbeddings({ + provider: resolved.provider, + model: resolved.model, + }) + ) { + throw new Error( + 'agents.*.memorySearch.multimodal requires memorySearch.provider = "gemini" and model = "gemini-embedding-2-preview".', + ); + } + if (multimodalActive && resolved.fallback !== "none") { + throw new Error( + 'agents.*.memorySearch.multimodal does not support memorySearch.fallback. Set fallback to "none".', + ); + } return resolved; } diff --git a/src/agents/model-auth-env-vars.ts b/src/agents/model-auth-env-vars.ts index 0f387bf3ce3..fbe5a78917d 100644 --- a/src/agents/model-auth-env-vars.ts +++ b/src/agents/model-auth-env-vars.ts @@ -4,6 +4,7 @@ export const PROVIDER_ENV_API_KEY_CANDIDATES: Record = { chutes: ["CHUTES_OAUTH_TOKEN", "CHUTES_API_KEY"], zai: ["ZAI_API_KEY", "Z_AI_API_KEY"], opencode: ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"], + "opencode-go": ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"], "qwen-portal": ["QWEN_OAUTH_TOKEN", "QWEN_PORTAL_API_KEY"], volcengine: ["VOLCANO_ENGINE_API_KEY"], "volcengine-plan": ["VOLCANO_ENGINE_API_KEY"], diff --git a/src/agents/model-auth.profiles.test.ts b/src/agents/model-auth.profiles.test.ts index 24a881a63cd..a1fc511aaf8 100644 --- a/src/agents/model-auth.profiles.test.ts +++ b/src/agents/model-auth.profiles.test.ts @@ -412,4 +412,18 @@ describe("getApiKeyForModel", () => { }, ); }); + + it("resolveEnvApiKey('opencode-go') falls back to OPENCODE_ZEN_API_KEY", async () => { + await 
withEnvAsync( + { + OPENCODE_API_KEY: undefined, + OPENCODE_ZEN_API_KEY: "sk-opencode-zen-fallback", // pragma: allowlist secret + }, + async () => { + const resolved = resolveEnvApiKey("opencode-go"); + expect(resolved?.apiKey).toBe("sk-opencode-zen-fallback"); + expect(resolved?.source).toContain("OPENCODE_ZEN_API_KEY"); + }, + ); + }); }); diff --git a/src/agents/model-compat.test.ts b/src/agents/model-compat.test.ts index 3c1894bb390..fc52ee2205e 100644 --- a/src/agents/model-compat.test.ts +++ b/src/agents/model-compat.test.ts @@ -313,6 +313,12 @@ describe("isModernModelRef", () => { expect(isModernModelRef({ provider: "opencode", id: "claude-opus-4-6" })).toBe(true); expect(isModernModelRef({ provider: "opencode", id: "gemini-3-pro" })).toBe(true); }); + + it("accepts all opencode-go models without zen exclusions", () => { + expect(isModernModelRef({ provider: "opencode-go", id: "kimi-k2.5" })).toBe(true); + expect(isModernModelRef({ provider: "opencode-go", id: "glm-5" })).toBe(true); + expect(isModernModelRef({ provider: "opencode-go", id: "minimax-m2.5" })).toBe(true); + }); }); describe("resolveForwardCompatModel", () => { diff --git a/src/agents/model-fallback.test.ts b/src/agents/model-fallback.test.ts index 8bc1a6ecb47..f8422b4aa14 100644 --- a/src/agents/model-fallback.test.ts +++ b/src/agents/model-fallback.test.ts @@ -555,7 +555,7 @@ describe("runWithModelFallback", () => { usageStat: { cooldownUntil: Date.now() + 5 * 60_000, }, - expectedReason: "rate_limit", + expectedReason: "unknown", }); }); diff --git a/src/agents/model-fallback.ts b/src/agents/model-fallback.ts index cda7771d329..d14ede7658b 100644 --- a/src/agents/model-fallback.ts +++ b/src/agents/model-fallback.ts @@ -449,7 +449,7 @@ function resolveCooldownDecision(params: { store: params.authStore, profileIds: params.profileIds, now: params.now, - }) ?? "rate_limit"; + }) ?? 
"unknown"; const isPersistentAuthIssue = inferredReason === "auth" || inferredReason === "auth_permanent"; if (isPersistentAuthIssue) { return { @@ -483,7 +483,10 @@ function resolveCooldownDecision(params: { // limits, which are often model-scoped and can recover on a sibling model. const shouldAttemptDespiteCooldown = (params.isPrimary && (!params.requestedModel || shouldProbe)) || - (!params.isPrimary && (inferredReason === "rate_limit" || inferredReason === "overloaded")); + (!params.isPrimary && + (inferredReason === "rate_limit" || + inferredReason === "overloaded" || + inferredReason === "unknown")); if (!shouldAttemptDespiteCooldown) { return { type: "skip", @@ -588,13 +591,16 @@ export async function runWithModelFallback(params: { if ( decision.reason === "rate_limit" || decision.reason === "overloaded" || - decision.reason === "billing" + decision.reason === "billing" || + decision.reason === "unknown" ) { // Probe at most once per provider per fallback run when all profiles // are cooldowned. Re-probing every same-provider candidate can stall // cross-provider fallback on providers with long internal retries. 
const isTransientCooldownReason = - decision.reason === "rate_limit" || decision.reason === "overloaded"; + decision.reason === "rate_limit" || + decision.reason === "overloaded" || + decision.reason === "unknown"; if (isTransientCooldownReason && cooldownProbeUsedProviders.has(candidate.provider)) { const error = `Provider ${candidate.provider} is in cooldown (probe already attempted this run)`; attempts.push({ diff --git a/src/agents/model-scan.ts b/src/agents/model-scan.ts index a0f05e05475..dec46b4db21 100644 --- a/src/agents/model-scan.ts +++ b/src/agents/model-scan.ts @@ -326,12 +326,12 @@ async function probeImage( } function ensureImageInput(model: OpenAIModel): OpenAIModel { - if (model.input.includes("image")) { + if (model.input?.includes("image")) { return model; } return { ...model, - input: Array.from(new Set([...model.input, "image"])), + input: Array.from(new Set([...(model.input ?? []), "image"])), }; } @@ -472,7 +472,7 @@ export async function scanOpenRouterModels( }; const toolResult = await probeTool(model, apiKey, timeoutMs); - const imageResult = model.input.includes("image") + const imageResult = model.input?.includes("image") ? 
await probeImage(ensureImageInput(model), apiKey, timeoutMs) : { ok: false, latencyMs: null, skipped: true }; diff --git a/src/agents/model-selection.ts b/src/agents/model-selection.ts index 75df5ed22fa..205c2f1cce0 100644 --- a/src/agents/model-selection.ts +++ b/src/agents/model-selection.ts @@ -46,6 +46,9 @@ export function normalizeProviderId(provider: string): string { if (normalized === "opencode-zen") { return "opencode"; } + if (normalized === "opencode-go-auth") { + return "opencode-go"; + } if (normalized === "qwen") { return "qwen-portal"; } diff --git a/src/agents/models-config.plan.ts b/src/agents/models-config.plan.ts index 40777c2cd0d..601a0edfda1 100644 --- a/src/agents/models-config.plan.ts +++ b/src/agents/models-config.plan.ts @@ -6,6 +6,7 @@ import { type ExistingProviderConfig, } from "./models-config.merge.js"; import { + enforceSourceManagedProviderSecrets, normalizeProviders, resolveImplicitProviders, type ProviderConfig, @@ -86,6 +87,7 @@ async function resolveProvidersForMode(params: { export async function planOpenClawModelsJson(params: { cfg: OpenClawConfig; + sourceConfigForSecrets?: OpenClawConfig; agentDir: string; env: NodeJS.ProcessEnv; existingRaw: string; @@ -106,6 +108,8 @@ export async function planOpenClawModelsJson(params: { agentDir, env, secretDefaults: cfg.secrets?.defaults, + sourceProviders: params.sourceConfigForSecrets?.models?.providers, + sourceSecretDefaults: params.sourceConfigForSecrets?.secrets?.defaults, secretRefManagedProviders, }) ?? 
providers; const mergedProviders = await resolveProvidersForMode({ @@ -115,7 +119,14 @@ export async function planOpenClawModelsJson(params: { secretRefManagedProviders, explicitBaseUrlProviders: resolveExplicitBaseUrlProviders(cfg.models), }); - const nextContents = `${JSON.stringify({ providers: mergedProviders }, null, 2)}\n`; + const secretEnforcedProviders = + enforceSourceManagedProviderSecrets({ + providers: mergedProviders, + sourceProviders: params.sourceConfigForSecrets?.models?.providers, + sourceSecretDefaults: params.sourceConfigForSecrets?.secrets?.defaults, + secretRefManagedProviders, + }) ?? mergedProviders; + const nextContents = `${JSON.stringify({ providers: secretEnforcedProviders }, null, 2)}\n`; if (params.existingRaw === nextContents) { return { action: "noop" }; diff --git a/src/agents/models-config.providers.discovery.ts b/src/agents/models-config.providers.discovery.ts index caab5cafb4e..64e1a9abe61 100644 --- a/src/agents/models-config.providers.discovery.ts +++ b/src/agents/models-config.providers.discovery.ts @@ -9,27 +9,27 @@ import { buildHuggingfaceModelDefinition, } from "./huggingface-models.js"; import { discoverKilocodeModels } from "./kilocode-models.js"; -import { OLLAMA_NATIVE_BASE_URL } from "./ollama-stream.js"; +import { + enrichOllamaModelsWithContext, + OLLAMA_DEFAULT_CONTEXT_WINDOW, + OLLAMA_DEFAULT_COST, + OLLAMA_DEFAULT_MAX_TOKENS, + isReasoningModelHeuristic, + resolveOllamaApiBase, + type OllamaTagsResponse, +} from "./ollama-models.js"; import { discoverVeniceModels, VENICE_BASE_URL } from "./venice-models.js"; import { discoverVercelAiGatewayModels, VERCEL_AI_GATEWAY_BASE_URL } from "./vercel-ai-gateway.js"; +export { resolveOllamaApiBase } from "./ollama-models.js"; + type ModelsConfig = NonNullable; type ProviderConfig = NonNullable[string]; const log = createSubsystemLogger("agents/model-providers"); -const OLLAMA_BASE_URL = OLLAMA_NATIVE_BASE_URL; -const OLLAMA_API_BASE_URL = OLLAMA_BASE_URL; const 
OLLAMA_SHOW_CONCURRENCY = 8; const OLLAMA_SHOW_MAX_MODELS = 200; -const OLLAMA_DEFAULT_CONTEXT_WINDOW = 128000; -const OLLAMA_DEFAULT_MAX_TOKENS = 8192; -const OLLAMA_DEFAULT_COST = { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, -}; const VLLM_BASE_URL = "http://127.0.0.1:8000/v1"; const VLLM_DEFAULT_CONTEXT_WINDOW = 128000; @@ -41,76 +41,12 @@ const VLLM_DEFAULT_COST = { cacheWrite: 0, }; -interface OllamaModel { - name: string; - modified_at: string; - size: number; - digest: string; - details?: { - family?: string; - parameter_size?: string; - }; -} - -interface OllamaTagsResponse { - models: OllamaModel[]; -} - type VllmModelsResponse = { data?: Array<{ id?: string; }>; }; -/** - * Derive the Ollama native API base URL from a configured base URL. - * - * Users typically configure `baseUrl` with a `/v1` suffix (e.g. - * `http://192.168.20.14:11434/v1`) for the OpenAI-compatible endpoint. - * The native Ollama API lives at the root (e.g. `/api/tags`), so we - * strip the `/v1` suffix when present. 
- */ -export function resolveOllamaApiBase(configuredBaseUrl?: string): string { - if (!configuredBaseUrl) { - return OLLAMA_API_BASE_URL; - } - // Strip trailing slash, then strip /v1 suffix if present - const trimmed = configuredBaseUrl.replace(/\/+$/, ""); - return trimmed.replace(/\/v1$/i, ""); -} - -async function queryOllamaContextWindow( - apiBase: string, - modelName: string, -): Promise { - try { - const response = await fetch(`${apiBase}/api/show`, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ name: modelName }), - signal: AbortSignal.timeout(3000), - }); - if (!response.ok) { - return undefined; - } - const data = (await response.json()) as { model_info?: Record }; - if (!data.model_info) { - return undefined; - } - for (const [key, value] of Object.entries(data.model_info)) { - if (key.endsWith(".context_length") && typeof value === "number" && Number.isFinite(value)) { - const contextWindow = Math.floor(value); - if (contextWindow > 0) { - return contextWindow; - } - } - } - return undefined; - } catch { - return undefined; - } -} - async function discoverOllamaModels( baseUrl?: string, opts?: { quiet?: boolean }, @@ -140,29 +76,18 @@ async function discoverOllamaModels( `Capping Ollama /api/show inspection to ${OLLAMA_SHOW_MAX_MODELS} models (received ${data.models.length})`, ); } - const discovered: ModelDefinitionConfig[] = []; - for (let index = 0; index < modelsToInspect.length; index += OLLAMA_SHOW_CONCURRENCY) { - const batch = modelsToInspect.slice(index, index + OLLAMA_SHOW_CONCURRENCY); - const batchDiscovered = await Promise.all( - batch.map(async (model) => { - const modelId = model.name; - const contextWindow = await queryOllamaContextWindow(apiBase, modelId); - const isReasoning = - modelId.toLowerCase().includes("r1") || modelId.toLowerCase().includes("reasoning"); - return { - id: modelId, - name: modelId, - reasoning: isReasoning, - input: ["text"], - cost: OLLAMA_DEFAULT_COST, - 
contextWindow: contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, - maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, - } satisfies ModelDefinitionConfig; - }), - ); - discovered.push(...batchDiscovered); - } - return discovered; + const discovered = await enrichOllamaModelsWithContext(apiBase, modelsToInspect, { + concurrency: OLLAMA_SHOW_CONCURRENCY, + }); + return discovered.map((model) => ({ + id: model.name, + name: model.name, + reasoning: isReasoningModelHeuristic(model.name), + input: ["text"], + cost: OLLAMA_DEFAULT_COST, + contextWindow: model.contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, + maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, + })); } catch (error) { if (!opts?.quiet) { log.warn(`Failed to discover Ollama models: ${String(error)}`); @@ -204,13 +129,10 @@ async function discoverVllmModels( .filter((model) => Boolean(model.id)) .map((model) => { const modelId = model.id; - const lower = modelId.toLowerCase(); - const isReasoning = - lower.includes("r1") || lower.includes("reasoning") || lower.includes("think"); return { id: modelId, name: modelId, - reasoning: isReasoning, + reasoning: isReasoningModelHeuristic(modelId), input: ["text"], cost: VLLM_DEFAULT_COST, contextWindow: VLLM_DEFAULT_CONTEXT_WINDOW, diff --git a/src/agents/models-config.providers.normalize-keys.test.ts b/src/agents/models-config.providers.normalize-keys.test.ts index f8422d797dd..b39705d8ec2 100644 --- a/src/agents/models-config.providers.normalize-keys.test.ts +++ b/src/agents/models-config.providers.normalize-keys.test.ts @@ -4,7 +4,10 @@ import path from "node:path"; import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; import { NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; -import { normalizeProviders } from "./models-config.providers.js"; +import { + enforceSourceManagedProviderSecrets, + normalizeProviders, +} from "./models-config.providers.js"; describe("normalizeProviders", () => { it("trims provider keys so image models 
remain discoverable for custom providers", async () => { @@ -136,4 +139,38 @@ describe("normalizeProviders", () => { await fs.rm(agentDir, { recursive: true, force: true }); } }); + + it("ignores non-object provider entries during source-managed enforcement", () => { + const providers = { + openai: null, + moonshot: { + baseUrl: "https://api.moonshot.ai/v1", + api: "openai-completions", + apiKey: "sk-runtime-moonshot", // pragma: allowlist secret + models: [], + }, + } as unknown as NonNullable["providers"]>; + + const sourceProviders: NonNullable["providers"]> = { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + models: [], + }, + moonshot: { + baseUrl: "https://api.moonshot.ai/v1", + api: "openai-completions", + apiKey: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" }, // pragma: allowlist secret + models: [], + }, + }; + + const enforced = enforceSourceManagedProviderSecrets({ + providers, + sourceProviders, + }); + expect((enforced as Record).openai).toBeNull(); + expect(enforced?.moonshot?.apiKey).toBe("MOONSHOT_API_KEY"); // pragma: allowlist secret + }); }); diff --git a/src/agents/models-config.providers.static.ts b/src/agents/models-config.providers.static.ts index 08b3d1c2a66..c525cb32f53 100644 --- a/src/agents/models-config.providers.static.ts +++ b/src/agents/models-config.providers.static.ts @@ -429,6 +429,24 @@ export function buildOpenrouterProvider(): ProviderConfig { contextWindow: OPENROUTER_DEFAULT_CONTEXT_WINDOW, maxTokens: OPENROUTER_DEFAULT_MAX_TOKENS, }, + { + id: "openrouter/hunter-alpha", + name: "Hunter Alpha", + reasoning: true, + input: ["text"], + cost: OPENROUTER_DEFAULT_COST, + contextWindow: 1048576, + maxTokens: 65536, + }, + { + id: "openrouter/healer-alpha", + name: "Healer Alpha", + reasoning: true, + input: ["text", "image"], + cost: OPENROUTER_DEFAULT_COST, + contextWindow: 262144, 
+ maxTokens: 65536, + }, ], }; } diff --git a/src/agents/models-config.providers.ts b/src/agents/models-config.providers.ts index c63ed6865a8..411072f2d7a 100644 --- a/src/agents/models-config.providers.ts +++ b/src/agents/models-config.providers.ts @@ -4,6 +4,7 @@ import { DEFAULT_COPILOT_API_BASE_URL, resolveCopilotApiToken, } from "../providers/github-copilot-token.js"; +import { isRecord } from "../utils.js"; import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js"; import { ensureAuthProfileStore, listProfilesForProvider } from "./auth-profiles.js"; import { discoverBedrockModels } from "./bedrock-discovery.js"; @@ -70,6 +71,11 @@ export { resolveOllamaApiBase } from "./models-config.providers.discovery.js"; type ModelsConfig = NonNullable; export type ProviderConfig = NonNullable[string]; +type SecretDefaults = { + env?: string; + file?: string; + exec?: string; +}; const ENV_VAR_NAME_RE = /^[A-Z_][A-Z0-9_]*$/; @@ -97,13 +103,7 @@ function resolveAwsSdkApiKeyVarName(env: NodeJS.ProcessEnv = process.env): strin function normalizeHeaderValues(params: { headers: ProviderConfig["headers"] | undefined; - secretDefaults: - | { - env?: string; - file?: string; - exec?: string; - } - | undefined; + secretDefaults: SecretDefaults | undefined; }): { headers: ProviderConfig["headers"] | undefined; mutated: boolean } { const { headers } = params; if (!headers) { @@ -276,15 +276,155 @@ function normalizeAntigravityProvider(provider: ProviderConfig): ProviderConfig return normalizeProviderModels(provider, normalizeAntigravityModelId); } +function normalizeSourceProviderLookup( + providers: ModelsConfig["providers"] | undefined, +): Record { + if (!providers) { + return {}; + } + const out: Record = {}; + for (const [key, provider] of Object.entries(providers)) { + const normalizedKey = key.trim(); + if (!normalizedKey || !isRecord(provider)) { + continue; + } + out[normalizedKey] = provider; + } + return out; +} + +function 
resolveSourceManagedApiKeyMarker(params: { + sourceProvider: ProviderConfig | undefined; + sourceSecretDefaults: SecretDefaults | undefined; +}): string | undefined { + const sourceApiKeyRef = resolveSecretInputRef({ + value: params.sourceProvider?.apiKey, + defaults: params.sourceSecretDefaults, + }).ref; + if (!sourceApiKeyRef || !sourceApiKeyRef.id.trim()) { + return undefined; + } + return sourceApiKeyRef.source === "env" + ? sourceApiKeyRef.id.trim() + : resolveNonEnvSecretRefApiKeyMarker(sourceApiKeyRef.source); +} + +function resolveSourceManagedHeaderMarkers(params: { + sourceProvider: ProviderConfig | undefined; + sourceSecretDefaults: SecretDefaults | undefined; +}): Record { + const sourceHeaders = isRecord(params.sourceProvider?.headers) + ? (params.sourceProvider.headers as Record) + : undefined; + if (!sourceHeaders) { + return {}; + } + const markers: Record = {}; + for (const [headerName, headerValue] of Object.entries(sourceHeaders)) { + const sourceHeaderRef = resolveSecretInputRef({ + value: headerValue, + defaults: params.sourceSecretDefaults, + }).ref; + if (!sourceHeaderRef || !sourceHeaderRef.id.trim()) { + continue; + } + markers[headerName] = + sourceHeaderRef.source === "env" + ? 
resolveEnvSecretRefHeaderValueMarker(sourceHeaderRef.id) + : resolveNonEnvSecretRefHeaderValueMarker(sourceHeaderRef.source); + } + return markers; +} + +export function enforceSourceManagedProviderSecrets(params: { + providers: ModelsConfig["providers"]; + sourceProviders: ModelsConfig["providers"] | undefined; + sourceSecretDefaults?: SecretDefaults; + secretRefManagedProviders?: Set; +}): ModelsConfig["providers"] { + const { providers } = params; + if (!providers) { + return providers; + } + const sourceProvidersByKey = normalizeSourceProviderLookup(params.sourceProviders); + if (Object.keys(sourceProvidersByKey).length === 0) { + return providers; + } + + let nextProviders: Record | null = null; + for (const [providerKey, provider] of Object.entries(providers)) { + if (!isRecord(provider)) { + continue; + } + const sourceProvider = sourceProvidersByKey[providerKey.trim()]; + if (!sourceProvider) { + continue; + } + let nextProvider = provider; + let providerMutated = false; + + const sourceApiKeyMarker = resolveSourceManagedApiKeyMarker({ + sourceProvider, + sourceSecretDefaults: params.sourceSecretDefaults, + }); + if (sourceApiKeyMarker) { + params.secretRefManagedProviders?.add(providerKey.trim()); + if (nextProvider.apiKey !== sourceApiKeyMarker) { + providerMutated = true; + nextProvider = { + ...nextProvider, + apiKey: sourceApiKeyMarker, + }; + } + } + + const sourceHeaderMarkers = resolveSourceManagedHeaderMarkers({ + sourceProvider, + sourceSecretDefaults: params.sourceSecretDefaults, + }); + if (Object.keys(sourceHeaderMarkers).length > 0) { + const currentHeaders = isRecord(nextProvider.headers) + ? 
(nextProvider.headers as Record) + : undefined; + const nextHeaders = { + ...(currentHeaders as Record[string]>), + }; + let headersMutated = !currentHeaders; + for (const [headerName, marker] of Object.entries(sourceHeaderMarkers)) { + if (nextHeaders[headerName] === marker) { + continue; + } + headersMutated = true; + nextHeaders[headerName] = marker; + } + if (headersMutated) { + providerMutated = true; + nextProvider = { + ...nextProvider, + headers: nextHeaders, + }; + } + } + + if (!providerMutated) { + continue; + } + if (!nextProviders) { + nextProviders = { ...providers }; + } + nextProviders[providerKey] = nextProvider; + } + + return nextProviders ?? providers; +} + export function normalizeProviders(params: { providers: ModelsConfig["providers"]; agentDir: string; env?: NodeJS.ProcessEnv; - secretDefaults?: { - env?: string; - file?: string; - exec?: string; - }; + secretDefaults?: SecretDefaults; + sourceProviders?: ModelsConfig["providers"]; + sourceSecretDefaults?: SecretDefaults; secretRefManagedProviders?: Set; }): ModelsConfig["providers"] { const { providers } = params; @@ -434,7 +574,13 @@ export function normalizeProviders(params: { next[normalizedKey] = normalizedProvider; } - return mutated ? next : providers; + const normalizedProviders = mutated ? 
next : providers; + return enforceSourceManagedProviderSecrets({ + providers: normalizedProviders, + sourceProviders: params.sourceProviders, + sourceSecretDefaults: params.sourceSecretDefaults, + secretRefManagedProviders: params.secretRefManagedProviders, + }); } type ImplicitProviderParams = { diff --git a/src/agents/models-config.runtime-source-snapshot.test.ts b/src/agents/models-config.runtime-source-snapshot.test.ts index 4c5889769cc..cc033fb56a6 100644 --- a/src/agents/models-config.runtime-source-snapshot.test.ts +++ b/src/agents/models-config.runtime-source-snapshot.test.ts @@ -209,4 +209,152 @@ describe("models-config runtime source snapshot", () => { } }); }); + + it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => { + await withTempHome(async () => { + const sourceConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + gateway: { + auth: { + mode: "token", + }, + }, + }; + const runtimeConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-runtime-resolved", // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + gateway: { + auth: { + mode: "token", + }, + }, + }; + const incompatibleCandidate: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-runtime-resolved", // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + }; + + try { + setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); + await ensureOpenClawModelsJson(incompatibleCandidate); + + const parsed = await readGeneratedModelsJson<{ + providers: Record; + }>(); + 
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); + }); + + it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => { + await withTempHome(async () => { + const sourceConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: { + source: "env", + provider: "default", + id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret + }, + "X-Tenant-Token": { + source: "file", + provider: "vault", + id: "/providers/openai/tenantToken", + }, + }, + models: [], + }, + }, + }, + gateway: { + auth: { + mode: "token", + }, + }, + }; + const runtimeConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: "Bearer runtime-openai-token", + "X-Tenant-Token": "runtime-tenant-token", + }, + models: [], + }, + }, + }, + gateway: { + auth: { + mode: "token", + }, + }, + }; + const incompatibleCandidate: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: "Bearer runtime-openai-token", + "X-Tenant-Token": "runtime-tenant-token", + }, + models: [], + }, + }, + }, + }; + + try { + setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); + await ensureOpenClawModelsJson(incompatibleCandidate); + + const parsed = await readGeneratedModelsJson<{ + providers: Record }>; + }>(); + expect(parsed.providers.openai?.headers?.Authorization).toBe( + "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret + ); + expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); + 
}); }); diff --git a/src/agents/models-config.ts b/src/agents/models-config.ts index 99714a1a792..3e013799b0b 100644 --- a/src/agents/models-config.ts +++ b/src/agents/models-config.ts @@ -42,15 +42,31 @@ async function writeModelsFileAtomic(targetPath: string, contents: string): Prom await fs.rename(tempPath, targetPath); } -function resolveModelsConfigInput(config?: OpenClawConfig): OpenClawConfig { +function resolveModelsConfigInput(config?: OpenClawConfig): { + config: OpenClawConfig; + sourceConfigForSecrets: OpenClawConfig; +} { const runtimeSource = getRuntimeConfigSourceSnapshot(); if (!config) { - return runtimeSource ?? loadConfig(); + const loaded = loadConfig(); + return { + config: runtimeSource ?? loaded, + sourceConfigForSecrets: runtimeSource ?? loaded, + }; } if (!runtimeSource) { - return config; + return { + config, + sourceConfigForSecrets: config, + }; } - return projectConfigOntoRuntimeSourceSnapshot(config); + const projected = projectConfigOntoRuntimeSourceSnapshot(config); + return { + config: projected, + // If projection is skipped (for example incompatible top-level shape), + // keep managed secret persistence anchored to the active source snapshot. + sourceConfigForSecrets: projected === config ? runtimeSource : projected, + }; } async function withModelsJsonWriteLock(targetPath: string, run: () => Promise): Promise { @@ -76,7 +92,8 @@ export async function ensureOpenClawModelsJson( config?: OpenClawConfig, agentDirOverride?: string, ): Promise<{ agentDir: string; wrote: boolean }> { - const cfg = resolveModelsConfigInput(config); + const resolved = resolveModelsConfigInput(config); + const cfg = resolved.config; const agentDir = agentDirOverride?.trim() ? 
agentDirOverride.trim() : resolveOpenClawAgentDir(); const targetPath = path.join(agentDir, "models.json"); @@ -87,6 +104,7 @@ export async function ensureOpenClawModelsJson( const existingModelsFile = await readExistingModelsFile(targetPath); const plan = await planOpenClawModelsJson({ cfg, + sourceConfigForSecrets: resolved.sourceConfigForSecrets, agentDir, env, existingRaw: existingModelsFile.raw, diff --git a/src/agents/models.profiles.live.test.ts b/src/agents/models.profiles.live.test.ts index 6386eaef158..81c7a64cb8c 100644 --- a/src/agents/models.profiles.live.test.ts +++ b/src/agents/models.profiles.live.test.ts @@ -9,10 +9,6 @@ import { isAnthropicBillingError, isAnthropicRateLimitError, } from "./live-auth-keys.js"; -import { - isMiniMaxModelNotFoundErrorMessage, - isModelNotFoundErrorMessage, -} from "./live-model-errors.js"; import { isModernModelRef } from "./live-model-filter.js"; import { getApiKeyForModel, requireApiKey } from "./model-auth.js"; import { ensureOpenClawModelsJson } from "./models-config.js"; @@ -86,6 +82,35 @@ function isGoogleModelNotFoundError(err: unknown): boolean { return false; } +function isModelNotFoundErrorMessage(raw: string): boolean { + const msg = raw.trim(); + if (!msg) { + return false; + } + if (/\b404\b/.test(msg) && /not(?:[\s_-]+)?found/i.test(msg)) { + return true; + } + if (/not_found_error/i.test(msg)) { + return true; + } + if (/model:\s*[a-z0-9._-]+/i.test(msg) && /not(?:[\s_-]+)?found/i.test(msg)) { + return true; + } + return false; +} + +describe("isModelNotFoundErrorMessage", () => { + it("matches whitespace-separated not found errors", () => { + expect(isModelNotFoundErrorMessage("404 model not found")).toBe(true); + expect(isModelNotFoundErrorMessage("model: minimax-text-01 not found")).toBe(true); + }); + + it("still matches underscore and hyphen variants", () => { + expect(isModelNotFoundErrorMessage("404 model not_found")).toBe(true); + expect(isModelNotFoundErrorMessage("404 model 
not-found")).toBe(true); + }); +}); + function isChatGPTUsageLimitErrorMessage(raw: string): boolean { const msg = raw.toLowerCase(); return msg.includes("hit your chatgpt usage limit") && msg.includes("try again in"); @@ -475,11 +500,7 @@ describeLive("live models (profile keys)", () => { if (ok.res.stopReason === "error") { const msg = ok.res.errorMessage ?? ""; - if ( - allowNotFoundSkip && - (isModelNotFoundErrorMessage(msg) || - (model.provider === "minimax" && isMiniMaxModelNotFoundErrorMessage(msg))) - ) { + if (allowNotFoundSkip && isModelNotFoundErrorMessage(msg)) { skipped.push({ model: id, reason: msg }); logProgress(`${progressLabel}: skip (model not found)`); break; @@ -500,7 +521,9 @@ describeLive("live models (profile keys)", () => { } if ( ok.text.length === 0 && - (model.provider === "openrouter" || model.provider === "opencode") + (model.provider === "openrouter" || + model.provider === "opencode" || + model.provider === "opencode-go") ) { skipped.push({ model: id, @@ -563,15 +586,6 @@ describeLive("live models (profile keys)", () => { logProgress(`${progressLabel}: skip (google model not found)`); break; } - if ( - allowNotFoundSkip && - model.provider === "minimax" && - isMiniMaxModelNotFoundErrorMessage(message) - ) { - skipped.push({ model: id, reason: message }); - logProgress(`${progressLabel}: skip (model not found)`); - break; - } if ( allowNotFoundSkip && model.provider === "minimax" && @@ -592,7 +606,7 @@ describeLive("live models (profile keys)", () => { } if ( allowNotFoundSkip && - model.provider === "opencode" && + (model.provider === "opencode" || model.provider === "opencode-go") && isRateLimitErrorMessage(message) ) { skipped.push({ model: id, reason: message }); diff --git a/src/agents/ollama-models.test.ts b/src/agents/ollama-models.test.ts new file mode 100644 index 00000000000..7877d40bdf9 --- /dev/null +++ b/src/agents/ollama-models.test.ts @@ -0,0 +1,61 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; 
+import { + enrichOllamaModelsWithContext, + resolveOllamaApiBase, + type OllamaTagModel, +} from "./ollama-models.js"; + +function jsonResponse(body: unknown, status = 200): Response { + return new Response(JSON.stringify(body), { + status, + headers: { "Content-Type": "application/json" }, + }); +} + +function requestUrl(input: string | URL | Request): string { + if (typeof input === "string") { + return input; + } + if (input instanceof URL) { + return input.toString(); + } + return input.url; +} + +function requestBody(body: BodyInit | null | undefined): string { + return typeof body === "string" ? body : "{}"; +} + +describe("ollama-models", () => { + afterEach(() => { + vi.unstubAllGlobals(); + }); + + it("strips /v1 when resolving the Ollama API base", () => { + expect(resolveOllamaApiBase("http://127.0.0.1:11434/v1")).toBe("http://127.0.0.1:11434"); + expect(resolveOllamaApiBase("http://127.0.0.1:11434///")).toBe("http://127.0.0.1:11434"); + }); + + it("enriches discovered models with context windows from /api/show", async () => { + const models: OllamaTagModel[] = [{ name: "llama3:8b" }, { name: "deepseek-r1:14b" }]; + const fetchMock = vi.fn(async (input: string | URL | Request, init?: RequestInit) => { + const url = requestUrl(input); + if (!url.endsWith("/api/show")) { + throw new Error(`Unexpected fetch: ${url}`); + } + const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + if (body.name === "llama3:8b") { + return jsonResponse({ model_info: { "llama.context_length": 65536 } }); + } + return jsonResponse({}); + }); + vi.stubGlobal("fetch", fetchMock); + + const enriched = await enrichOllamaModelsWithContext("http://127.0.0.1:11434", models); + + expect(enriched).toEqual([ + { name: "llama3:8b", contextWindow: 65536 }, + { name: "deepseek-r1:14b", contextWindow: undefined }, + ]); + }); +}); diff --git a/src/agents/ollama-models.ts b/src/agents/ollama-models.ts new file mode 100644 index 00000000000..20406b3a80e --- /dev/null +++ 
b/src/agents/ollama-models.ts @@ -0,0 +1,143 @@ +import type { ModelDefinitionConfig } from "../config/types.models.js"; +import { OLLAMA_NATIVE_BASE_URL } from "./ollama-stream.js"; + +export const OLLAMA_DEFAULT_BASE_URL = OLLAMA_NATIVE_BASE_URL; +export const OLLAMA_DEFAULT_CONTEXT_WINDOW = 128000; +export const OLLAMA_DEFAULT_MAX_TOKENS = 8192; +export const OLLAMA_DEFAULT_COST = { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, +}; + +export type OllamaTagModel = { + name: string; + modified_at?: string; + size?: number; + digest?: string; + remote_host?: string; + details?: { + family?: string; + parameter_size?: string; + }; +}; + +export type OllamaTagsResponse = { + models?: OllamaTagModel[]; +}; + +export type OllamaModelWithContext = OllamaTagModel & { + contextWindow?: number; +}; + +const OLLAMA_SHOW_CONCURRENCY = 8; + +/** + * Derive the Ollama native API base URL from a configured base URL. + * + * Users typically configure `baseUrl` with a `/v1` suffix (e.g. + * `http://192.168.20.14:11434/v1`) for the OpenAI-compatible endpoint. + * The native Ollama API lives at the root (e.g. `/api/tags`), so we + * strip the `/v1` suffix when present. 
+ */ +export function resolveOllamaApiBase(configuredBaseUrl?: string): string { + if (!configuredBaseUrl) { + return OLLAMA_DEFAULT_BASE_URL; + } + const trimmed = configuredBaseUrl.replace(/\/+$/, ""); + return trimmed.replace(/\/v1$/i, ""); +} + +export async function queryOllamaContextWindow( + apiBase: string, + modelName: string, +): Promise { + try { + const response = await fetch(`${apiBase}/api/show`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ name: modelName }), + signal: AbortSignal.timeout(3000), + }); + if (!response.ok) { + return undefined; + } + const data = (await response.json()) as { model_info?: Record }; + if (!data.model_info) { + return undefined; + } + for (const [key, value] of Object.entries(data.model_info)) { + if (key.endsWith(".context_length") && typeof value === "number" && Number.isFinite(value)) { + const contextWindow = Math.floor(value); + if (contextWindow > 0) { + return contextWindow; + } + } + } + return undefined; + } catch { + return undefined; + } +} + +export async function enrichOllamaModelsWithContext( + apiBase: string, + models: OllamaTagModel[], + opts?: { concurrency?: number }, +): Promise { + const concurrency = Math.max(1, Math.floor(opts?.concurrency ?? OLLAMA_SHOW_CONCURRENCY)); + const enriched: OllamaModelWithContext[] = []; + for (let index = 0; index < models.length; index += concurrency) { + const batch = models.slice(index, index + concurrency); + const batchResults = await Promise.all( + batch.map(async (model) => ({ + ...model, + contextWindow: await queryOllamaContextWindow(apiBase, model.name), + })), + ); + enriched.push(...batchResults); + } + return enriched; +} + +/** Heuristic: treat models with "r1", "reasoning", or "think" in the name as reasoning models. 
*/ +export function isReasoningModelHeuristic(modelId: string): boolean { + return /r1|reasoning|think|reason/i.test(modelId); +} + +/** Build a ModelDefinitionConfig for an Ollama model with default values. */ +export function buildOllamaModelDefinition( + modelId: string, + contextWindow?: number, +): ModelDefinitionConfig { + return { + id: modelId, + name: modelId, + reasoning: isReasoningModelHeuristic(modelId), + input: ["text"], + cost: OLLAMA_DEFAULT_COST, + contextWindow: contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, + maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, + }; +} + +/** Fetch the model list from a running Ollama instance. */ +export async function fetchOllamaModels( + baseUrl: string, +): Promise<{ reachable: boolean; models: OllamaTagModel[] }> { + try { + const apiBase = resolveOllamaApiBase(baseUrl); + const response = await fetch(`${apiBase}/api/tags`, { + signal: AbortSignal.timeout(5000), + }); + if (!response.ok) { + return { reachable: true, models: [] }; + } + const data = (await response.json()) as OllamaTagsResponse; + const models = (data.models ?? 
 []).filter((m) => m.name); + return { reachable: true, models }; + } catch { + return { reachable: false, models: [] }; + } +} diff --git a/src/agents/openai-responses.reasoning-replay.test.ts b/src/agents/openai-responses.reasoning-replay.test.ts index b5ccc50e4b4..0fcb02ece6d 100644 --- a/src/agents/openai-responses.reasoning-replay.test.ts +++ b/src/agents/openai-responses.reasoning-replay.test.ts @@ -30,6 +30,13 @@ function extractInputTypes(input: unknown[]) { .filter((t): t is string => typeof t === "string"); } +function extractInputMessages(input: unknown[]) { + return input.filter( + (item): item is Record<string, unknown> => + !!item && typeof item === "object" && (item as Record<string, unknown>).type === "message", + ); +} + const ZERO_USAGE = { input: 0, output: 0, @@ -184,4 +191,36 @@ describe("openai-responses reasoning replay", () => { expect(types).toContain("reasoning"); expect(types).toContain("message"); }); + + it.each(["commentary", "final_answer"] as const)( + "replays assistant message phase metadata for %s", + async (phase) => { + const assistantWithText = buildAssistantMessage({ + stopReason: "stop", + content: [ + buildReasoningPart(), + { + type: "text", + text: "hello", + textSignature: JSON.stringify({ v: 1, id: `msg_${phase}`, phase }), + }, + ], + }); + + const { input, types } = await runAbortedOpenAIResponsesStream({ + messages: [ + { role: "user", content: "Hi", timestamp: Date.now() }, + assistantWithText, + { role: "user", content: "Ok", timestamp: Date.now() }, + ], + }); + + expect(types).toContain("message"); + + const replayedMessage = extractInputMessages(input).find( + (item) => item.id === `msg_${phase}`, + ); + expect(replayedMessage?.phase).toBe(phase); + }, + ); }); diff --git a/src/agents/openai-ws-connection.test.ts b/src/agents/openai-ws-connection.test.ts index fb80f510ac1..2a7b95f7eb9 100644 --- a/src/agents/openai-ws-connection.test.ts +++ b/src/agents/openai-ws-connection.test.ts @@ -595,14 +595,12 @@ describe("OpenAIWebSocketManager", () => { 
manager.warmUp({ model: "gpt-5.2", - tools: [{ type: "function", function: { name: "exec", description: "Run a command" } }], + tools: [{ type: "function", name: "exec", description: "Run a command" }], }); const sent = JSON.parse(sock.sentMessages[0] ?? "{}") as Record; expect(sent["tools"]).toHaveLength(1); - expect((sent["tools"] as Array<{ function?: { name?: string } }>)[0]?.function?.name).toBe( - "exec", - ); + expect((sent["tools"] as Array<{ name?: string }>)[0]?.name).toBe("exec"); }); }); diff --git a/src/agents/openai-ws-connection.ts b/src/agents/openai-ws-connection.ts index a765c0f3780..2d9c6ffe7e6 100644 --- a/src/agents/openai-ws-connection.ts +++ b/src/agents/openai-ws-connection.ts @@ -37,12 +37,15 @@ export interface UsageInfo { total_tokens: number; } +export type OpenAIResponsesAssistantPhase = "commentary" | "final_answer"; + export type OutputItem = | { type: "message"; id: string; role: "assistant"; content: Array<{ type: "output_text"; text: string }>; + phase?: OpenAIResponsesAssistantPhase; status?: "in_progress" | "completed"; } | { @@ -190,6 +193,7 @@ export type InputItem = type: "message"; role: "system" | "developer" | "user" | "assistant"; content: string | ContentPart[]; + phase?: OpenAIResponsesAssistantPhase; } | { type: "function_call"; id?: string; call_id?: string; name: string; arguments: string } | { type: "function_call_output"; call_id: string; output: string } @@ -204,11 +208,10 @@ export type ToolChoice = export interface FunctionToolDefinition { type: "function"; - function: { - name: string; - description?: string; - parameters?: Record; - }; + name: string; + description?: string; + parameters?: Record; + strict?: boolean; } /** Standard response.create event payload (full turn) */ diff --git a/src/agents/openai-ws-stream.e2e.test.ts b/src/agents/openai-ws-stream.e2e.test.ts index 2b90d0dbc78..1146d71ffe3 100644 --- a/src/agents/openai-ws-stream.e2e.test.ts +++ b/src/agents/openai-ws-stream.e2e.test.ts @@ -14,6 +14,7 
@@ * Skipped in CI — no API key available and we avoid billable external calls. */ +import type { AssistantMessage, Context } from "@mariozechner/pi-ai"; import { describe, it, expect, afterEach } from "vitest"; import { createOpenAIWebSocketStreamFn, @@ -28,14 +29,13 @@ const testFn = LIVE ? it : it.skip; const model = { api: "openai-responses" as const, provider: "openai", - id: "gpt-4o-mini", - name: "gpt-4o-mini", - baseUrl: "", - reasoning: false, - input: { maxTokens: 128_000 }, - output: { maxTokens: 16_384 }, - cache: false, - compat: {}, + id: "gpt-5.2", + name: "gpt-5.2", + contextWindow: 128_000, + maxTokens: 4_096, + reasoning: true, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, } as unknown as Parameters>[0]; type StreamFnParams = Parameters>; @@ -47,6 +47,61 @@ function makeContext(userMessage: string): StreamFnParams[1] { } as unknown as StreamFnParams[1]; } +function makeToolContext(userMessage: string): StreamFnParams[1] { + return { + systemPrompt: "You are a precise assistant. 
Follow tool instructions exactly.", + messages: [{ role: "user" as const, content: userMessage }], + tools: [ + { + name: "noop", + description: "Return the supplied tool result to the user.", + parameters: { + type: "object", + additionalProperties: false, + properties: {}, + }, + }, + ], + } as unknown as Context; +} + +function makeToolResultMessage( + callId: string, + output: string, +): StreamFnParams[1]["messages"][number] { + return { + role: "toolResult" as const, + toolCallId: callId, + toolName: "noop", + content: [{ type: "text" as const, text: output }], + isError: false, + timestamp: Date.now(), + } as unknown as StreamFnParams[1]["messages"][number]; +} + +async function collectEvents( + stream: ReturnType>, +): Promise> { + const events: Array<{ type: string; message?: AssistantMessage }> = []; + for await (const event of stream as AsyncIterable<{ type: string; message?: AssistantMessage }>) { + events.push(event); + } + return events; +} + +function expectDone(events: Array<{ type: string; message?: AssistantMessage }>): AssistantMessage { + const done = events.find((event) => event.type === "done")?.message; + expect(done).toBeDefined(); + return done!; +} + +function assistantText(message: AssistantMessage): string { + return message.content + .filter((block) => block.type === "text") + .map((block) => block.text) + .join(""); +} + /** Each test gets a unique session ID to avoid cross-test interference. 
*/ const sessions: string[] = []; function freshSession(name: string): string { @@ -68,26 +123,14 @@ describe("OpenAI WebSocket e2e", () => { async () => { const sid = freshSession("single"); const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); - const stream = streamFn(model, makeContext("What is 2+2?"), {}); + const stream = streamFn(model, makeContext("What is 2+2?"), { transport: "websocket" }); + const done = expectDone(await collectEvents(stream)); - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } - - const done = events.find((e) => e.type === "done") as - | { type: "done"; message: { content: Array<{ type: string; text?: string }> } } - | undefined; - expect(done).toBeDefined(); - expect(done!.message.content.length).toBeGreaterThan(0); - - const text = done!.message.content - .filter((c) => c.type === "text") - .map((c) => c.text) - .join(""); + expect(done.content.length).toBeGreaterThan(0); + const text = assistantText(done); expect(text).toMatch(/4/); }, - 30_000, + 45_000, ); testFn( @@ -96,19 +139,80 @@ describe("OpenAI WebSocket e2e", () => { const sid = freshSession("temp"); const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); const stream = streamFn(model, makeContext("Pick a random number between 1 and 1000."), { + transport: "websocket", temperature: 0.8, }); - - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } + const events = await collectEvents(stream); // Stream must complete (done or error with fallback) — must NOT hang. 
const hasTerminal = events.some((e) => e.type === "done" || e.type === "error"); expect(hasTerminal).toBe(true); }, - 30_000, + 45_000, + ); + + testFn( + "reuses the websocket session for tool-call follow-up turns", + async () => { + const sid = freshSession("tool-roundtrip"); + const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); + const firstContext = makeToolContext( + "Call the tool `noop` with {}. After the tool result arrives, reply with exactly the tool output and nothing else.", + ); + const firstEvents = await collectEvents( + streamFn(model, firstContext, { + transport: "websocket", + toolChoice: "required", + maxTokens: 128, + } as unknown as StreamFnParams[2]), + ); + const firstDone = expectDone(firstEvents); + const toolCall = firstDone.content.find((block) => block.type === "toolCall") as + | { type: "toolCall"; id: string; name: string } + | undefined; + expect(toolCall?.name).toBe("noop"); + expect(toolCall?.id).toBeTruthy(); + + const secondContext = { + ...firstContext, + messages: [ + ...firstContext.messages, + firstDone, + makeToolResultMessage(toolCall!.id, "TOOL_OK"), + ], + } as unknown as StreamFnParams[1]; + const secondDone = expectDone( + await collectEvents( + streamFn(model, secondContext, { + transport: "websocket", + maxTokens: 128, + }), + ), + ); + + expect(assistantText(secondDone)).toMatch(/TOOL_OK/); + }, + 60_000, + ); + + testFn( + "supports websocket warm-up before the first request", + async () => { + const sid = freshSession("warmup"); + const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); + const done = expectDone( + await collectEvents( + streamFn(model, makeContext("Reply with the word warmed."), { + transport: "websocket", + openaiWsWarmup: true, + maxTokens: 32, + } as unknown as StreamFnParams[2]), + ), + ); + + expect(assistantText(done).toLowerCase()).toContain("warmed"); + }, + 45_000, ); testFn( @@ -119,16 +223,13 @@ describe("OpenAI WebSocket e2e", () => { 
expect(hasWsSession(sid)).toBe(false); - const stream = streamFn(model, makeContext("Say hello."), {}); - for await (const _ of stream as AsyncIterable) { - /* consume */ - } + await collectEvents(streamFn(model, makeContext("Say hello."), { transport: "websocket" })); expect(hasWsSession(sid)).toBe(true); releaseWsSession(sid); expect(hasWsSession(sid)).toBe(false); }, - 30_000, + 45_000, ); testFn( @@ -137,15 +238,11 @@ describe("OpenAI WebSocket e2e", () => { const sid = freshSession("fallback"); const streamFn = createOpenAIWebSocketStreamFn("sk-invalid-key", sid); const stream = streamFn(model, makeContext("Hello"), {}); - - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } + const events = await collectEvents(stream); const hasTerminal = events.some((e) => e.type === "done" || e.type === "error"); expect(hasTerminal).toBe(true); }, - 30_000, + 45_000, ); }); diff --git a/src/agents/openai-ws-stream.test.ts b/src/agents/openai-ws-stream.test.ts index a9c3679f561..cd3425bec83 100644 --- a/src/agents/openai-ws-stream.test.ts +++ b/src/agents/openai-ws-stream.test.ts @@ -224,6 +224,7 @@ type FakeMessage = | { role: "assistant"; content: unknown[]; + phase?: "commentary" | "final_answer"; stopReason: string; api: string; provider: string; @@ -247,6 +248,7 @@ function userMsg(text: string): FakeMessage { function assistantMsg( textBlocks: string[], toolCalls: Array<{ id: string; name: string; args: Record }> = [], + phase?: "commentary" | "final_answer", ): FakeMessage { const content: unknown[] = []; for (const t of textBlocks) { @@ -258,6 +260,7 @@ function assistantMsg( return { role: "assistant", content, + phase, stopReason: toolCalls.length > 0 ? 
"toolUse" : "stop", api: "openai-responses", provider: "openai", @@ -302,6 +305,7 @@ function makeResponseObject( id: string, outputText?: string, toolCallName?: string, + phase?: "commentary" | "final_answer", ): ResponseObject { const output: ResponseObject["output"] = []; if (outputText) { @@ -310,6 +314,7 @@ function makeResponseObject( id: "item_1", role: "assistant", content: [{ type: "output_text", text: outputText }], + phase, }); } if (toolCallName) { @@ -357,18 +362,16 @@ describe("convertTools", () => { expect(result).toHaveLength(1); expect(result[0]).toMatchObject({ type: "function", - function: { - name: "exec", - description: "Run a command", - parameters: { type: "object", properties: { cmd: { type: "string" } } }, - }, + name: "exec", + description: "Run a command", + parameters: { type: "object", properties: { cmd: { type: "string" } } }, }); }); it("handles tools without description", () => { const tools = [{ name: "ping", description: "", parameters: {} }]; const result = convertTools(tools as Parameters[0]); - expect(result[0]?.function?.name).toBe("ping"); + expect(result[0]?.name).toBe("ping"); }); }); @@ -391,6 +394,19 @@ describe("convertMessagesToInputItems", () => { expect(items[0]).toMatchObject({ type: "message", role: "assistant", content: "Hi there." 
}); }); + it("preserves assistant phase on replayed assistant messages", () => { + const items = convertMessagesToInputItems([ + assistantMsg(["Working on it."], [], "commentary"), + ] as Parameters[0]); + expect(items).toHaveLength(1); + expect(items[0]).toMatchObject({ + type: "message", + role: "assistant", + content: "Working on it.", + phase: "commentary", + }); + }); + it("converts an assistant message with a tool call", () => { const msg = assistantMsg( ["Let me run that."], @@ -408,10 +424,58 @@ describe("convertMessagesToInputItems", () => { call_id: "call_1", name: "exec", }); + expect(textItem).not.toHaveProperty("phase"); const fc = fcItem as { arguments: string }; expect(JSON.parse(fc.arguments)).toEqual({ cmd: "ls" }); }); + it("preserves assistant phase on commentary text before tool calls", () => { + const msg = assistantMsg( + ["Let me run that."], + [{ id: "call_1", name: "exec", args: { cmd: "ls" } }], + "commentary", + ); + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + const textItem = items.find((i) => i.type === "message"); + expect(textItem).toMatchObject({ + type: "message", + role: "assistant", + content: "Let me run that.", + phase: "commentary", + }); + }); + + it("preserves assistant phase from textSignature metadata without local phase field", () => { + const msg = { + role: "assistant" as const, + content: [ + { + type: "text" as const, + text: "Working on it.", + textSignature: JSON.stringify({ v: 1, id: "msg_sig", phase: "commentary" }), + }, + ], + stopReason: "stop", + api: "openai-responses", + provider: "openai", + model: "gpt-5.2", + usage: {}, + timestamp: 0, + }; + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + expect(items).toHaveLength(1); + expect(items[0]).toMatchObject({ + type: "message", + role: "assistant", + content: "Working on it.", + phase: "commentary", + }); + }); + it("converts a tool 
result message", () => { const items = convertMessagesToInputItems([toolResultMsg("call_1", "file.txt")] as Parameters< typeof convertMessagesToInputItems @@ -518,6 +582,34 @@ describe("convertMessagesToInputItems", () => { expect((items[0] as { content?: unknown }).content).toBe("Here is my answer."); }); + it("replays reasoning blocks from thinking signatures", () => { + const msg = { + role: "assistant" as const, + content: [ + { + type: "thinking" as const, + thinking: "internal reasoning...", + thinkingSignature: JSON.stringify({ + type: "reasoning", + id: "rs_test", + summary: [], + }), + }, + { type: "text" as const, text: "Here is my answer." }, + ], + stopReason: "stop", + api: "openai-responses", + provider: "openai", + model: "gpt-5.2", + usage: {}, + timestamp: 0, + }; + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + expect(items.map((item) => item.type)).toEqual(["reasoning", "message"]); + }); + it("returns empty array for empty messages", () => { expect(convertMessagesToInputItems([])).toEqual([]); }); @@ -594,6 +686,16 @@ describe("buildAssistantMessageFromResponse", () => { expect(msg.content).toEqual([]); expect(msg.stopReason).toBe("stop"); }); + + it("preserves phase from assistant message output items", () => { + const response = makeResponseObject("resp_8", "Final answer", undefined, "final_answer"); + const msg = buildAssistantMessageFromResponse(response, modelInfo) as { + phase?: string; + content: Array<{ type: string; text?: string }>; + }; + expect(msg.phase).toBe("final_answer"); + expect(msg.content[0]?.text).toBe("Final answer"); + }); }); // ───────────────────────────────────────────────────────────────────────────── @@ -633,6 +735,7 @@ describe("createOpenAIWebSocketStreamFn", () => { releaseWsSession("sess-fallback"); releaseWsSession("sess-incremental"); releaseWsSession("sess-full"); + releaseWsSession("sess-phase"); releaseWsSession("sess-tools"); 
releaseWsSession("sess-store-default"); releaseWsSession("sess-store-compat"); @@ -795,6 +898,40 @@ describe("createOpenAIWebSocketStreamFn", () => { expect(doneEvent?.message.content[0]?.text).toBe("Hello back!"); }); + it("keeps assistant phase on completed WebSocket responses", async () => { + const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-phase"); + const stream = streamFn( + modelStub as Parameters[0], + contextStub as Parameters[1], + ); + + const events: unknown[] = []; + const done = (async () => { + for await (const ev of await resolveStream(stream)) { + events.push(ev); + } + })(); + + await new Promise((r) => setImmediate(r)); + const manager = MockManager.lastInstance!; + manager.simulateEvent({ + type: "response.completed", + response: makeResponseObject("resp_phase", "Working...", "exec", "commentary"), + }); + + await done; + + const doneEvent = events.find((e) => (e as { type?: string }).type === "done") as + | { + type: string; + reason: string; + message: { phase?: string; stopReason: string }; + } + | undefined; + expect(doneEvent?.message.phase).toBe("commentary"); + expect(doneEvent?.message.stopReason).toBe("toolUse"); + }); + it("falls back to HTTP when WebSocket connect fails (session pre-broken via flag)", async () => { // Set the class-level flag BEFORE calling streamFn so the new instance // fails on connect(). We patch the static default via MockManager directly. 
diff --git a/src/agents/openai-ws-stream.ts b/src/agents/openai-ws-stream.ts index dd82ced9e95..5b7a80f52ec 100644 --- a/src/agents/openai-ws-stream.ts +++ b/src/agents/openai-ws-stream.ts @@ -37,6 +37,7 @@ import { type ContentPart, type FunctionToolDefinition, type InputItem, + type OpenAIResponsesAssistantPhase, type OpenAIWebSocketManagerOptions, type ResponseObject, } from "./openai-ws-connection.js"; @@ -100,6 +101,8 @@ export function hasWsSession(sessionId: string): boolean { // ───────────────────────────────────────────────────────────────────────────── type AnyMessage = Message & { role: string; content: unknown }; +type AssistantMessageWithPhase = AssistantMessage & { phase?: OpenAIResponsesAssistantPhase }; +type ReplayModelInfo = { input?: ReadonlyArray }; function toNonEmptyString(value: unknown): string | null { if (typeof value !== "string") { @@ -109,6 +112,50 @@ function toNonEmptyString(value: unknown): string | null { return trimmed.length > 0 ? trimmed : null; } +function normalizeAssistantPhase(value: unknown): OpenAIResponsesAssistantPhase | undefined { + return value === "commentary" || value === "final_answer" ? value : undefined; +} + +function encodeAssistantTextSignature(params: { + id: string; + phase?: OpenAIResponsesAssistantPhase; +}): string { + return JSON.stringify({ + v: 1, + id: params.id, + ...(params.phase ? { phase: params.phase } : {}), + }); +} + +function parseAssistantTextSignature( + value: unknown, +): { id: string; phase?: OpenAIResponsesAssistantPhase } | null { + if (typeof value !== "string" || value.trim().length === 0) { + return null; + } + if (!value.startsWith("{")) { + return { id: value }; + } + try { + const parsed = JSON.parse(value) as { v?: unknown; id?: unknown; phase?: unknown }; + if (parsed.v !== 1 || typeof parsed.id !== "string") { + return null; + } + return { + id: parsed.id, + ...(normalizeAssistantPhase(parsed.phase) + ? 
{ phase: normalizeAssistantPhase(parsed.phase) } + : {}), + }; + } catch { + return null; + } +} + +function supportsImageInput(modelOverride?: ReplayModelInfo): boolean { + return !Array.isArray(modelOverride?.input) || modelOverride.input.includes("image"); +} + /** Convert pi-ai content (string | ContentPart[]) to plain text. */ function contentToText(content: unknown): string { if (typeof content === "string") { @@ -117,30 +164,50 @@ function contentToText(content: unknown): string { if (!Array.isArray(content)) { return ""; } - return (content as Array<{ type?: string; text?: string }>) - .filter((p) => p.type === "text" && typeof p.text === "string") - .map((p) => p.text as string) + return content + .filter( + (part): part is { type?: string; text?: string } => Boolean(part) && typeof part === "object", + ) + .filter( + (part) => + (part.type === "text" || part.type === "input_text" || part.type === "output_text") && + typeof part.text === "string", + ) + .map((part) => part.text as string) .join(""); } /** Convert pi-ai content to OpenAI ContentPart[]. */ -function contentToOpenAIParts(content: unknown): ContentPart[] { +function contentToOpenAIParts(content: unknown, modelOverride?: ReplayModelInfo): ContentPart[] { if (typeof content === "string") { return content ? 
[{ type: "input_text", text: content }] : []; } if (!Array.isArray(content)) { return []; } + + const includeImages = supportsImageInput(modelOverride); const parts: ContentPart[] = []; for (const part of content as Array<{ type?: string; text?: string; data?: string; mimeType?: string; + source?: unknown; }>) { - if (part.type === "text" && typeof part.text === "string") { + if ( + (part.type === "text" || part.type === "input_text" || part.type === "output_text") && + typeof part.text === "string" + ) { parts.push({ type: "input_text", text: part.text }); - } else if (part.type === "image" && typeof part.data === "string") { + continue; + } + + if (!includeImages) { + continue; + } + + if (part.type === "image" && typeof part.data === "string") { parts.push({ type: "input_image", source: { @@ -149,11 +216,60 @@ function contentToOpenAIParts(content: unknown): ContentPart[] { data: part.data, }, }); + continue; + } + + if ( + part.type === "input_image" && + part.source && + typeof part.source === "object" && + typeof (part.source as { type?: unknown }).type === "string" + ) { + parts.push({ + type: "input_image", + source: part.source as + | { type: "url"; url: string } + | { type: "base64"; media_type: string; data: string }, + }); } } return parts; } +function parseReasoningItem(value: unknown): Extract | null { + if (!value || typeof value !== "object") { + return null; + } + const record = value as { + type?: unknown; + content?: unknown; + encrypted_content?: unknown; + summary?: unknown; + }; + if (record.type !== "reasoning") { + return null; + } + return { + type: "reasoning", + ...(typeof record.content === "string" ? { content: record.content } : {}), + ...(typeof record.encrypted_content === "string" + ? { encrypted_content: record.encrypted_content } + : {}), + ...(typeof record.summary === "string" ? 
{ summary: record.summary } : {}), + }; +} + +function parseThinkingSignature(value: unknown): Extract | null { + if (typeof value !== "string" || value.trim().length === 0) { + return null; + } + try { + return parseReasoningItem(JSON.parse(value)); + } catch { + return null; + } +} + /** Convert pi-ai tool array to OpenAI FunctionToolDefinition[]. */ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] { if (!tools || tools.length === 0) { @@ -161,11 +277,9 @@ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] } return tools.map((tool) => ({ type: "function" as const, - function: { - name: tool.name, - description: typeof tool.description === "string" ? tool.description : undefined, - parameters: (tool.parameters ?? {}) as Record, - }, + name: tool.name, + description: typeof tool.description === "string" ? tool.description : undefined, + parameters: (tool.parameters ?? {}) as Record, })); } @@ -173,14 +287,24 @@ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] * Convert the full pi-ai message history to an OpenAI `input` array. * Handles user messages, assistant text+tool-call messages, and tool results. 
*/ -export function convertMessagesToInputItems(messages: Message[]): InputItem[] { +export function convertMessagesToInputItems( + messages: Message[], + modelOverride?: ReplayModelInfo, +): InputItem[] { const items: InputItem[] = []; for (const msg of messages) { - const m = msg as AnyMessage; + const m = msg as AnyMessage & { + phase?: unknown; + toolCallId?: unknown; + toolUseId?: unknown; + }; if (m.role === "user") { - const parts = contentToOpenAIParts(m.content); + const parts = contentToOpenAIParts(m.content, modelOverride); + if (parts.length === 0) { + continue; + } items.push({ type: "message", role: "user", @@ -194,87 +318,116 @@ export function convertMessagesToInputItems(messages: Message[]): InputItem[] { if (m.role === "assistant") { const content = m.content; + let assistantPhase = normalizeAssistantPhase(m.phase); if (Array.isArray(content)) { - // Collect text blocks and tool calls separately const textParts: string[] = []; - for (const block of content as Array<{ - type?: string; - text?: string; - id?: string; - name?: string; - arguments?: Record; - thinking?: string; - }>) { - if (block.type === "text" && typeof block.text === "string") { - textParts.push(block.text); - } else if (block.type === "thinking" && typeof block.thinking === "string") { - // Skip thinking blocks — not sent back to the model - } else if (block.type === "toolCall") { - // Push accumulated text first - if (textParts.length > 0) { - items.push({ - type: "message", - role: "assistant", - content: textParts.join(""), - }); - textParts.length = 0; - } - const callId = toNonEmptyString(block.id); - const toolName = toNonEmptyString(block.name); - if (!callId || !toolName) { - continue; - } - // Push function_call item - items.push({ - type: "function_call", - call_id: callId, - name: toolName, - arguments: - typeof block.arguments === "string" - ? block.arguments - : JSON.stringify(block.arguments ?? 
{}), - }); + const pushAssistantText = () => { + if (textParts.length === 0) { + return; } - } - if (textParts.length > 0) { items.push({ type: "message", role: "assistant", content: textParts.join(""), + ...(assistantPhase ? { phase: assistantPhase } : {}), }); - } - } else { - const text = contentToText(m.content); - if (text) { + textParts.length = 0; + }; + + for (const block of content as Array<{ + type?: string; + text?: string; + textSignature?: unknown; + id?: unknown; + name?: unknown; + arguments?: unknown; + thinkingSignature?: unknown; + }>) { + if (block.type === "text" && typeof block.text === "string") { + const parsedSignature = parseAssistantTextSignature(block.textSignature); + if (!assistantPhase) { + assistantPhase = parsedSignature?.phase; + } + textParts.push(block.text); + continue; + } + + if (block.type === "thinking") { + pushAssistantText(); + const reasoningItem = parseThinkingSignature(block.thinkingSignature); + if (reasoningItem) { + items.push(reasoningItem); + } + continue; + } + + if (block.type !== "toolCall") { + continue; + } + + pushAssistantText(); + const callIdRaw = toNonEmptyString(block.id); + const toolName = toNonEmptyString(block.name); + if (!callIdRaw || !toolName) { + continue; + } + const [callId, itemId] = callIdRaw.split("|", 2); items.push({ - type: "message", - role: "assistant", - content: text, + type: "function_call", + ...(itemId ? { id: itemId } : {}), + call_id: callId, + name: toolName, + arguments: + typeof block.arguments === "string" + ? block.arguments + : JSON.stringify(block.arguments ?? {}), }); } + + pushAssistantText(); + continue; } + + const text = contentToText(content); + if (!text) { + continue; + } + items.push({ + type: "message", + role: "assistant", + content: text, + ...(assistantPhase ? 
{ phase: assistantPhase } : {}), + }); continue; } - if (m.role === "toolResult") { - const tr = m as unknown as { - toolCallId?: string; - toolUseId?: string; - content: unknown; - isError: boolean; - }; - const callId = toNonEmptyString(tr.toolCallId) ?? toNonEmptyString(tr.toolUseId); - if (!callId) { - continue; - } - const outputText = contentToText(tr.content); - items.push({ - type: "function_call_output", - call_id: callId, - output: outputText, - }); + if (m.role !== "toolResult") { continue; } + + const toolCallId = toNonEmptyString(m.toolCallId) ?? toNonEmptyString(m.toolUseId); + if (!toolCallId) { + continue; + } + const [callId] = toolCallId.split("|", 2); + const parts = Array.isArray(m.content) ? contentToOpenAIParts(m.content, modelOverride) : []; + const textOutput = contentToText(m.content); + const imageParts = parts.filter((part) => part.type === "input_image"); + items.push({ + type: "function_call_output", + call_id: callId, + output: textOutput || (imageParts.length > 0 ? "(see attached image)" : ""), + }); + if (imageParts.length > 0) { + items.push({ + type: "message", + role: "user", + content: [ + { type: "input_text", text: "Attached image(s) from tool result:" }, + ...imageParts, + ], + }); + } } return items; @@ -289,12 +442,24 @@ export function buildAssistantMessageFromResponse( modelInfo: { api: string; provider: string; id: string }, ): AssistantMessage { const content: (TextContent | ToolCall)[] = []; + let assistantPhase: OpenAIResponsesAssistantPhase | undefined; for (const item of response.output ?? []) { if (item.type === "message") { + const itemPhase = normalizeAssistantPhase(item.phase); + if (itemPhase) { + assistantPhase = itemPhase; + } for (const part of item.content ?? []) { if (part.type === "output_text" && part.text) { - content.push({ type: "text", text: part.text }); + content.push({ + type: "text", + text: part.text, + textSignature: encodeAssistantTextSignature({ + id: item.id, + ...(itemPhase ? 
{ phase: itemPhase } : {}), + }), + }); } } } else if (item.type === "function_call") { @@ -321,7 +486,7 @@ export function buildAssistantMessageFromResponse( const hasToolCalls = content.some((c) => c.type === "toolCall"); const stopReason: StopReason = hasToolCalls ? "toolUse" : "stop"; - return buildAssistantMessage({ + const message = buildAssistantMessage({ model: modelInfo, content, stopReason, @@ -331,6 +496,10 @@ export function buildAssistantMessageFromResponse( totalTokens: response.usage?.total_tokens ?? 0, }), }); + + return assistantPhase + ? ({ ...message, phase: assistantPhase } as AssistantMessageWithPhase) + : message; } // ───────────────────────────────────────────────────────────────────────────── @@ -504,6 +673,7 @@ export function createOpenAIWebSocketStreamFn( if (resolveWsWarmup(options) && !session.warmUpAttempted) { session.warmUpAttempted = true; + let warmupFailed = false; try { await runWarmUp({ manager: session.manager, @@ -517,10 +687,33 @@ export function createOpenAIWebSocketStreamFn( if (signal?.aborted) { throw warmErr instanceof Error ? warmErr : new Error(String(warmErr)); } + warmupFailed = true; log.warn( `[ws-stream] warm-up failed for session=${sessionId}; continuing without warm-up. error=${String(warmErr)}`, ); } + if (warmupFailed && !session.manager.isConnected()) { + try { + session.manager.close(); + } catch { + /* ignore */ + } + try { + await session.manager.connect(apiKey); + session.everConnected = true; + log.debug(`[ws-stream] reconnected after warm-up failure for session=${sessionId}`); + } catch (reconnectErr) { + session.broken = true; + wsRegistry.delete(sessionId); + if (transport === "websocket") { + throw reconnectErr instanceof Error ? reconnectErr : new Error(String(reconnectErr)); + } + log.warn( + `[ws-stream] reconnect after warm-up failed for session=${sessionId}; falling back to HTTP. 
error=${String(reconnectErr)}`, + ); + return fallbackToHttp(model, context, options, eventStream, opts.signal); + } + } } // ── 3. Compute incremental vs full input ───────────────────────────── @@ -537,16 +730,16 @@ export function createOpenAIWebSocketStreamFn( log.debug( `[ws-stream] session=${sessionId}: no new tool results found; sending full context`, ); - inputItems = buildFullInput(context); + inputItems = buildFullInput(context, model); } else { - inputItems = convertMessagesToInputItems(toolResults); + inputItems = convertMessagesToInputItems(toolResults, model); } log.debug( `[ws-stream] session=${sessionId}: incremental send (${inputItems.length} tool results) previous_response_id=${prevResponseId}`, ); } else { // First turn: send full context - inputItems = buildFullInput(context); + inputItems = buildFullInput(context, model); log.debug( `[ws-stream] session=${sessionId}: full context send (${inputItems.length} items)`, ); @@ -605,10 +798,9 @@ export function createOpenAIWebSocketStreamFn( ...extraParams, }; const nextPayload = await options?.onPayload?.(payload, model); - const requestPayload = - nextPayload && typeof nextPayload === "object" - ? (nextPayload as Parameters[0]) - : (payload as Parameters[0]); + const requestPayload = (nextPayload ?? payload) as Parameters< + OpenAIWebSocketManager["send"] + >[0]; try { session.manager.send(requestPayload); @@ -734,8 +926,8 @@ export function createOpenAIWebSocketStreamFn( // ───────────────────────────────────────────────────────────────────────────── /** Build full input items from context (system prompt is passed via `instructions` field). 
*/ -function buildFullInput(context: Context): InputItem[] { - return convertMessagesToInputItems(context.messages); +function buildFullInput(context: Context, model: ReplayModelInfo): InputItem[] { + return convertMessagesToInputItems(context.messages, model); } /** diff --git a/src/agents/openclaw-tools.session-status.test.ts b/src/agents/openclaw-tools.session-status.test.ts index db45e8d48b8..193deb6304f 100644 --- a/src/agents/openclaw-tools.session-status.test.ts +++ b/src/agents/openclaw-tools.session-status.test.ts @@ -2,6 +2,22 @@ import { describe, expect, it, vi } from "vitest"; const loadSessionStoreMock = vi.fn(); const updateSessionStoreMock = vi.fn(); +const callGatewayMock = vi.fn(); + +const createMockConfig = () => ({ + session: { mainKey: "main", scope: "per-sender" }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + }, + }, + tools: { + agentToAgent: { enabled: false }, + }, +}); + +let mockConfig: Record = createMockConfig(); vi.mock("../config/sessions.js", async (importOriginal) => { const actual = await importOriginal(); @@ -22,19 +38,15 @@ vi.mock("../config/sessions.js", async (importOriginal) => { }; }); +vi.mock("../gateway/call.js", () => ({ + callGateway: (opts: unknown) => callGatewayMock(opts), +})); + vi.mock("../config/config.js", async (importOriginal) => { const actual = await importOriginal(); return { ...actual, - loadConfig: () => ({ - session: { mainKey: "main", scope: "per-sender" }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - }, - }, - }), + loadConfig: () => mockConfig, }; }); @@ -82,13 +94,17 @@ import { createOpenClawTools } from "./openclaw-tools.js"; function resetSessionStore(store: Record) { loadSessionStoreMock.mockClear(); updateSessionStoreMock.mockClear(); + callGatewayMock.mockClear(); loadSessionStoreMock.mockReturnValue(store); + callGatewayMock.mockResolvedValue({}); + mockConfig = createMockConfig(); } -function 
getSessionStatusTool(agentSessionKey = "main") { - const tool = createOpenClawTools({ agentSessionKey }).find( - (candidate) => candidate.name === "session_status", - ); +function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) { + const tool = createOpenClawTools({ + agentSessionKey, + sandboxed: options?.sandboxed, + }).find((candidate) => candidate.name === "session_status"); expect(tool).toBeDefined(); if (!tool) { throw new Error("missing session_status tool"); @@ -176,6 +192,153 @@ describe("session_status tool", () => { ); }); + it("blocks sandboxed child session_status access outside its tree before store lookup", async () => { + resetSessionStore({ + "agent:main:subagent:child": { + sessionId: "s-child", + updatedAt: 20, + }, + "agent:main:main": { + sessionId: "s-parent", + updatedAt: 10, + }, + }); + mockConfig = { + session: { mainKey: "main", scope: "per-sender" }, + tools: { + sessions: { visibility: "all" }, + agentToAgent: { enabled: true, allow: ["*"] }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + sandbox: { sessionToolsVisibility: "spawned" }, + }, + }, + }; + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.list") { + return { sessions: [] }; + } + return {}; + }); + + const tool = getSessionStatusTool("agent:main:subagent:child", { + sandboxed: true, + }); + const expectedError = "Session status visibility is restricted to the current session tree"; + + await expect( + tool.execute("call6", { + sessionKey: "agent:main:main", + model: "anthropic/claude-sonnet-4-5", + }), + ).rejects.toThrow(expectedError); + + await expect( + tool.execute("call7", { + sessionKey: "agent:main:subagent:missing", + }), + ).rejects.toThrow(expectedError); + + expect(loadSessionStoreMock).not.toHaveBeenCalled(); + expect(updateSessionStoreMock).not.toHaveBeenCalled(); + 
expect(callGatewayMock).toHaveBeenCalledTimes(2); + expect(callGatewayMock).toHaveBeenNthCalledWith(1, { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy: "agent:main:subagent:child", + }, + }); + expect(callGatewayMock).toHaveBeenNthCalledWith(2, { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy: "agent:main:subagent:child", + }, + }); + }); + + it("keeps legacy main requester keys for sandboxed session tree checks", async () => { + resetSessionStore({ + "agent:main:main": { + sessionId: "s-main", + updatedAt: 10, + }, + "agent:main:subagent:child": { + sessionId: "s-child", + updatedAt: 20, + }, + }); + mockConfig = { + session: { mainKey: "main", scope: "per-sender" }, + tools: { + sessions: { visibility: "all" }, + agentToAgent: { enabled: true, allow: ["*"] }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + sandbox: { sessionToolsVisibility: "spawned" }, + }, + }, + }; + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.list") { + return { + sessions: + request.params?.spawnedBy === "main" ? 
[{ key: "agent:main:subagent:child" }] : [], + }; + } + return {}; + }); + + const tool = getSessionStatusTool("main", { + sandboxed: true, + }); + + const mainResult = await tool.execute("call8", {}); + const mainDetails = mainResult.details as { ok?: boolean; sessionKey?: string }; + expect(mainDetails.ok).toBe(true); + expect(mainDetails.sessionKey).toBe("agent:main:main"); + + const childResult = await tool.execute("call9", { + sessionKey: "agent:main:subagent:child", + }); + const childDetails = childResult.details as { ok?: boolean; sessionKey?: string }; + expect(childDetails.ok).toBe(true); + expect(childDetails.sessionKey).toBe("agent:main:subagent:child"); + + expect(callGatewayMock).toHaveBeenCalledTimes(2); + expect(callGatewayMock).toHaveBeenNthCalledWith(1, { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy: "main", + }, + }); + expect(callGatewayMock).toHaveBeenNthCalledWith(2, { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy: "main", + }, + }); + }); + it("scopes bare session keys to the requester agent", async () => { loadSessionStoreMock.mockClear(); updateSessionStoreMock.mockClear(); diff --git a/src/agents/openclaw-tools.ts b/src/agents/openclaw-tools.ts index 8473e4a06e8..a400ac133cd 100644 --- a/src/agents/openclaw-tools.ts +++ b/src/agents/openclaw-tools.ts @@ -200,6 +200,7 @@ export function createOpenClawTools( createSessionStatusTool({ agentSessionKey: options?.agentSessionKey, config: options?.config, + sandboxed: options?.sandboxed, }), ...(webSearchTool ? [webSearchTool] : []), ...(webFetchTool ? 
[webFetchTool] : []), diff --git a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts index 608483b99bf..27c89afe425 100644 --- a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts +++ b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts @@ -106,6 +106,9 @@ describe("isBillingErrorMessage", () => { "Payment Required", "HTTP 402 Payment Required", "plans & billing", + // Venice returns "Insufficient USD or Diem balance" which has extra words + // between "insufficient" and "balance" + "Insufficient USD or Diem balance to complete request. Visit https://venice.ai/settings/api to add credits.", ]; for (const sample of samples) { expect(isBillingErrorMessage(sample)).toBe(true); @@ -149,6 +152,11 @@ describe("isBillingErrorMessage", () => { expect(longResponse.length).toBeGreaterThan(512); expect(isBillingErrorMessage(longResponse)).toBe(false); }); + it("does not false-positive on short non-billing text that mentions insufficient and balance", () => { + const sample = "The evidence is insufficient to reconcile the final balance after compaction."; + expect(isBillingErrorMessage(sample)).toBe(false); + expect(classifyFailoverReason(sample)).toBeNull(); + }); it("still matches explicit 402 markers in long payloads", () => { const longStructuredError = '{"error":{"code":402,"message":"payment required","details":"' + "x".repeat(700) + '"}}'; @@ -439,6 +447,18 @@ describe("isLikelyContextOverflowError", () => { expect(isLikelyContextOverflowError(sample)).toBe(false); } }); + + it("excludes billing errors even when text matches context overflow patterns", () => { + const samples = [ + "402 Payment Required: request token limit exceeded for this billing plan", + "insufficient credits: request size exceeds your current plan limits", + "Your credit balance is too low. 
Maximum request token limit exceeded.", + ]; + for (const sample of samples) { + expect(isBillingErrorMessage(sample)).toBe(true); + expect(isLikelyContextOverflowError(sample)).toBe(false); + } + }); }); describe("isTransientHttpError", () => { @@ -638,6 +658,12 @@ describe("classifyFailoverReason", () => { expect(classifyFailoverReason(TOGETHER_ENGINE_OVERLOADED_MESSAGE)).toBe("overloaded"); expect(classifyFailoverReason(GROQ_TOO_MANY_REQUESTS_MESSAGE)).toBe("rate_limit"); expect(classifyFailoverReason(GROQ_SERVICE_UNAVAILABLE_MESSAGE)).toBe("overloaded"); + // Venice 402 billing error with extra words between "insufficient" and "balance" + expect( + classifyFailoverReason( + "Insufficient USD or Diem balance to complete request. Visit https://venice.ai/settings/api to add credits.", + ), + ).toBe("billing"); }); it("classifies internal and compatibility error messages", () => { diff --git a/src/agents/pi-embedded-helpers/errors.ts b/src/agents/pi-embedded-helpers/errors.ts index 181ba89d8ce..e9bfd92951e 100644 --- a/src/agents/pi-embedded-helpers/errors.ts +++ b/src/agents/pi-embedded-helpers/errors.ts @@ -138,6 +138,13 @@ export function isLikelyContextOverflowError(errorMessage?: string): boolean { return false; } + // Billing/quota errors can contain patterns like "request size exceeds" or + // "maximum token limit exceeded" that match the context overflow heuristic. + // Billing is a more specific error class — exclude it early. 
+ if (isBillingErrorMessage(errorMessage)) { + return false; + } + if (CONTEXT_WINDOW_TOO_SMALL_RE.test(errorMessage)) { return false; } diff --git a/src/agents/pi-embedded-helpers/failover-matches.ts b/src/agents/pi-embedded-helpers/failover-matches.ts index a7948703f39..a9f16fa6202 100644 --- a/src/agents/pi-embedded-helpers/failover-matches.ts +++ b/src/agents/pi-embedded-helpers/failover-matches.ts @@ -52,6 +52,7 @@ const ERROR_PATTERNS = { "credit balance", "plans & billing", "insufficient balance", + "insufficient usd or diem balance", ], authPermanent: [ /api[_ ]?key[_ ]?(?:revoked|invalid|deactivated|deleted)/i, diff --git a/src/agents/pi-embedded-runner-extraparams.test.ts b/src/agents/pi-embedded-runner-extraparams.test.ts index 232cdfcaa0b..3f6fb7a2f5a 100644 --- a/src/agents/pi-embedded-runner-extraparams.test.ts +++ b/src/agents/pi-embedded-runner-extraparams.test.ts @@ -276,7 +276,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { model: "deepseek/deepseek-r1" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -308,7 +308,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -332,7 +332,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning_effort: "high" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -357,7 +357,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] 
= []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning: { max_tokens: 256 } }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -381,7 +381,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning_effort: "medium" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -588,7 +588,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { thinking: "off" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -619,7 +619,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { thinking: "off" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -650,7 +650,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -674,7 +674,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { tool_choice: "required" }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -699,7 +699,7 @@ 
describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -749,7 +749,7 @@ describe("applyExtraParamsToAgent", () => { ], tool_choice: { type: "tool", name: "read" }, }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -793,7 +793,7 @@ describe("applyExtraParamsToAgent", () => { }, ], }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -832,7 +832,7 @@ describe("applyExtraParamsToAgent", () => { }, ], }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -896,7 +896,7 @@ describe("applyExtraParamsToAgent", () => { }, }, }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -943,7 +943,7 @@ describe("applyExtraParamsToAgent", () => { }, }, }; - options?.onPayload?.(payload, _model); + options?.onPayload?.(payload, model); payloads.push(payload); return {} as ReturnType; }; @@ -1081,7 +1081,7 @@ describe("applyExtraParamsToAgent", () => { expect(calls).toHaveLength(1); expect(calls[0]?.transport).toBe("auto"); - expect(calls[0]?.openaiWsWarmup).toBe(true); + expect(calls[0]?.openaiWsWarmup).toBe(false); }); it("lets runtime options override OpenAI default transport", () => { @@ -1449,6 +1449,20 @@ describe("applyExtraParamsToAgent", () => { expect(payload.store).toBe(true); }); + it("forces store=true for azure-openai provider with openai-responses API (#42800)", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "azure-openai", + applyModelId: "gpt-5-mini", + 
model: { + api: "openai-responses", + provider: "azure-openai", + id: "gpt-5-mini", + baseUrl: "https://myresource.openai.azure.com/openai/v1", + } as unknown as Model<"openai-responses">, + }); + expect(payload.store).toBe(true); + }); + it("injects configured OpenAI service_tier into Responses payloads", () => { const payload = runResponsesPayloadMutationCase({ applyProvider: "openai", diff --git a/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts b/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts index 2d658aada32..0aa665e0635 100644 --- a/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts +++ b/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts @@ -981,7 +981,7 @@ describe("runEmbeddedPiAgent auth profile rotation", () => { }), ).rejects.toMatchObject({ name: "FailoverError", - reason: "rate_limit", + reason: "unknown", provider: "openai", model: "mock-1", }); @@ -1153,7 +1153,7 @@ describe("runEmbeddedPiAgent auth profile rotation", () => { }), ).rejects.toMatchObject({ name: "FailoverError", - reason: "rate_limit", + reason: "unknown", provider: "openai", model: "mock-1", }); diff --git a/src/agents/pi-embedded-runner/compact.hooks.test.ts b/src/agents/pi-embedded-runner/compact.hooks.test.ts index 9ef2a3efe76..dc1511a5e05 100644 --- a/src/agents/pi-embedded-runner/compact.hooks.test.ts +++ b/src/agents/pi-embedded-runner/compact.hooks.test.ts @@ -7,6 +7,7 @@ const { sessionCompactImpl, triggerInternalHook, sanitizeSessionHistoryMock, + contextEngineCompactMock, } = vi.hoisted(() => ({ hookRunner: { hasHooks: vi.fn(), @@ -28,6 +29,14 @@ const { })), triggerInternalHook: vi.fn(), sanitizeSessionHistoryMock: vi.fn(async (params: { messages: unknown[] }) => params.messages), + contextEngineCompactMock: vi.fn(async () => ({ + ok: true as boolean, + compacted: true as boolean, + reason: undefined as string | undefined, + 
result: { summary: "engine-summary", tokensAfter: 50 } as + | { summary: string; tokensAfter: number } + | undefined, + })), })); vi.mock("../../plugins/hook-runner-global.js", () => ({ @@ -123,6 +132,27 @@ vi.mock("../session-write-lock.js", () => ({ resolveSessionLockMaxHoldFromTimeout: vi.fn(() => 0), })); +vi.mock("../../context-engine/index.js", () => ({ + ensureContextEnginesInitialized: vi.fn(), + resolveContextEngine: vi.fn(async () => ({ + info: { ownsCompaction: true }, + compact: contextEngineCompactMock, + })), +})); + +vi.mock("../../process/command-queue.js", () => ({ + enqueueCommandInLane: vi.fn((_lane: unknown, task: () => unknown) => task()), +})); + +vi.mock("./lanes.js", () => ({ + resolveSessionLane: vi.fn(() => "test-session-lane"), + resolveGlobalLane: vi.fn(() => "test-global-lane"), +})); + +vi.mock("../context-window-guard.js", () => ({ + resolveContextWindowInfo: vi.fn(() => ({ tokens: 128_000 })), +})); + vi.mock("../bootstrap-files.js", () => ({ makeBootstrapWarn: vi.fn(() => () => {}), resolveBootstrapContextForRun: vi.fn(async () => ({ contextFiles: [] })), @@ -160,7 +190,7 @@ vi.mock("../transcript-policy.js", () => ({ })); vi.mock("./extensions.js", () => ({ - buildEmbeddedExtensionFactories: vi.fn(() => []), + buildEmbeddedExtensionFactories: vi.fn(() => ({ factories: [] })), })); vi.mock("./history.js", () => ({ @@ -251,7 +281,7 @@ vi.mock("./utils.js", () => ({ import { getApiProvider, unregisterApiProviders } from "@mariozechner/pi-ai"; import { getCustomApiRegistrySourceId } from "../custom-api-registry.js"; -import { compactEmbeddedPiSessionDirect } from "./compact.js"; +import { compactEmbeddedPiSessionDirect, compactEmbeddedPiSession } from "./compact.js"; const sessionHook = (action: string) => triggerInternalHook.mock.calls.find( @@ -436,3 +466,103 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { expect(result.ok).toBe(true); }); }); + +describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { + 
beforeEach(() => { + hookRunner.hasHooks.mockReset(); + hookRunner.runBeforeCompaction.mockReset(); + hookRunner.runAfterCompaction.mockReset(); + contextEngineCompactMock.mockReset(); + contextEngineCompactMock.mockResolvedValue({ + ok: true, + compacted: true, + reason: undefined, + result: { summary: "engine-summary", tokensAfter: 50 }, + }); + resolveModelMock.mockReset(); + resolveModelMock.mockReturnValue({ + model: { provider: "openai", api: "responses", id: "fake", input: [] }, + error: null, + authStorage: { setRuntimeApiKey: vi.fn() }, + modelRegistry: {}, + }); + }); + + it("fires before_compaction with sentinel -1 and after_compaction on success", async () => { + hookRunner.hasHooks.mockReturnValue(true); + + const result = await compactEmbeddedPiSession({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + messageChannel: "telegram", + customInstructions: "focus on decisions", + enqueue: (task) => task(), + }); + + expect(result.ok).toBe(true); + expect(result.compacted).toBe(true); + + expect(hookRunner.runBeforeCompaction).toHaveBeenCalledWith( + { messageCount: -1, sessionFile: "/tmp/session.jsonl" }, + expect.objectContaining({ + sessionKey: "agent:main:session-1", + messageProvider: "telegram", + }), + ); + expect(hookRunner.runAfterCompaction).toHaveBeenCalledWith( + { + messageCount: -1, + compactedCount: -1, + tokenCount: 50, + sessionFile: "/tmp/session.jsonl", + }, + expect.objectContaining({ + sessionKey: "agent:main:session-1", + messageProvider: "telegram", + }), + ); + }); + + it("does not fire after_compaction when compaction fails", async () => { + hookRunner.hasHooks.mockReturnValue(true); + contextEngineCompactMock.mockResolvedValue({ + ok: false, + compacted: false, + reason: "nothing to compact", + result: undefined, + }); + + const result = await compactEmbeddedPiSession({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: 
"/tmp/session.jsonl", + workspaceDir: "/tmp", + customInstructions: "focus on decisions", + enqueue: (task) => task(), + }); + + expect(result.ok).toBe(false); + expect(hookRunner.runBeforeCompaction).toHaveBeenCalled(); + expect(hookRunner.runAfterCompaction).not.toHaveBeenCalled(); + }); + + it("catches and logs hook exceptions without aborting compaction", async () => { + hookRunner.hasHooks.mockReturnValue(true); + hookRunner.runBeforeCompaction.mockRejectedValue(new Error("hook boom")); + + const result = await compactEmbeddedPiSession({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + customInstructions: "focus on decisions", + enqueue: (task) => task(), + }); + + expect(result.ok).toBe(true); + expect(result.compacted).toBe(true); + expect(contextEngineCompactMock).toHaveBeenCalled(); + }); +}); diff --git a/src/agents/pi-embedded-runner/compact.ts b/src/agents/pi-embedded-runner/compact.ts index 91f99571db4..feba0f81493 100644 --- a/src/agents/pi-embedded-runner/compact.ts +++ b/src/agents/pi-embedded-runner/compact.ts @@ -936,6 +936,43 @@ export async function compactEmbeddedPiSession( modelContextWindow: ceModel?.contextWindow, defaultTokens: DEFAULT_CONTEXT_TOKENS, }); + // When the context engine owns compaction, its compact() implementation + // bypasses compactEmbeddedPiSessionDirect (which fires the hooks internally). + // Fire before_compaction / after_compaction hooks here so plugin subscribers + // are notified regardless of which engine is active. + const engineOwnsCompaction = contextEngine.info.ownsCompaction === true; + const hookRunner = engineOwnsCompaction ? getGlobalHookRunner() : null; + const hookSessionKey = params.sessionKey?.trim() || params.sessionId; + const { sessionAgentId } = resolveSessionAgentIds({ + sessionKey: params.sessionKey, + config: params.config, + }); + const resolvedMessageProvider = params.messageChannel ?? 
params.messageProvider; + const hookCtx = { + sessionId: params.sessionId, + agentId: sessionAgentId, + sessionKey: hookSessionKey, + workspaceDir: resolveUserPath(params.workspaceDir), + messageProvider: resolvedMessageProvider, + }; + // Engine-owned compaction doesn't load the transcript at this level, so + // message counts are unavailable. We pass sessionFile so hook subscribers + // can read the transcript themselves if they need exact counts. + if (hookRunner?.hasHooks("before_compaction")) { + try { + await hookRunner.runBeforeCompaction( + { + messageCount: -1, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (err) { + log.warn("before_compaction hook failed", { + errorMessage: err instanceof Error ? err.message : String(err), + }); + } + } const result = await contextEngine.compact({ sessionId: params.sessionId, sessionFile: params.sessionFile, @@ -944,6 +981,23 @@ export async function compactEmbeddedPiSession( force: params.trigger === "manual", runtimeContext: params as Record, }); + if (result.ok && result.compacted && hookRunner?.hasHooks("after_compaction")) { + try { + await hookRunner.runAfterCompaction( + { + messageCount: -1, + compactedCount: -1, + tokenCount: result.result?.tokensAfter, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (err) { + log.warn("after_compaction hook failed", { + errorMessage: err instanceof Error ? 
err.message : String(err), + }); + } + } return { ok: result.ok, compacted: result.compacted, diff --git a/src/agents/pi-embedded-runner/model.test.ts b/src/agents/pi-embedded-runner/model.test.ts index 105f929b9b6..062369d9a96 100644 --- a/src/agents/pi-embedded-runner/model.test.ts +++ b/src/agents/pi-embedded-runner/model.test.ts @@ -202,6 +202,42 @@ describe("buildInlineProviderModels", () => { }); describe("resolveModel", () => { + it("defaults model input to text when discovery omits input", () => { + mockDiscoveredModel({ + provider: "custom", + modelId: "missing-input", + templateModel: { + id: "missing-input", + name: "missing-input", + api: "openai-completions", + provider: "custom", + baseUrl: "http://localhost:9999", + reasoning: false, + // NOTE: deliberately omit input to simulate buggy/custom catalogs. + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 1024, + }, + }); + + const result = resolveModel("custom", "missing-input", "/tmp/agent", { + models: { + providers: { + custom: { + baseUrl: "http://localhost:9999", + api: "openai-completions", + // Intentionally keep this minimal — the discovered model provides the rest. 
+ models: [{ id: "missing-input", name: "missing-input" }], + }, + }, + }, + } as unknown as OpenClawConfig); + + expect(result.error).toBeUndefined(); + expect(Array.isArray(result.model?.input)).toBe(true); + expect(result.model?.input).toEqual(["text"]); + }); + it("includes provider baseUrl in fallback model", () => { const cfg = { models: { @@ -346,6 +382,40 @@ describe("resolveModel", () => { expect(result.model?.reasoning).toBe(true); }); + it("matches prefixed OpenRouter native ids in configured fallback models", () => { + const cfg = { + models: { + providers: { + openrouter: { + baseUrl: "https://openrouter.ai/api/v1", + api: "openai-completions", + models: [ + { + ...makeModel("openrouter/healer-alpha"), + reasoning: true, + input: ["text", "image"], + contextWindow: 262144, + maxTokens: 65536, + }, + ], + }, + }, + }, + } as OpenClawConfig; + + const result = resolveModel("openrouter", "openrouter/healer-alpha", "/tmp/agent", cfg); + + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + provider: "openrouter", + id: "openrouter/healer-alpha", + reasoning: true, + input: ["text", "image"], + contextWindow: 262144, + maxTokens: 65536, + }); + }); + it("prefers configured provider api metadata over discovered registry model", () => { mockDiscoveredModel({ provider: "onehub", diff --git a/src/agents/pi-embedded-runner/model.ts b/src/agents/pi-embedded-runner/model.ts index 6f2852203bd..eb9fa675b8a 100644 --- a/src/agents/pi-embedded-runner/model.ts +++ b/src/agents/pi-embedded-runner/model.ts @@ -93,12 +93,18 @@ function applyConfiguredProviderOverrides(params: { headers: discoveredHeaders, }; } + const resolvedInput = configuredModel?.input ?? discoveredModel.input; + const normalizedInput = + Array.isArray(resolvedInput) && resolvedInput.length > 0 + ? resolvedInput.filter((item) => item === "text" || item === "image") + : (["text"] as Array<"text" | "image">); + return { ...discoveredModel, api: configuredModel?.api ?? 
providerConfig.api ?? discoveredModel.api, baseUrl: providerConfig.baseUrl ?? discoveredModel.baseUrl, reasoning: configuredModel?.reasoning ?? discoveredModel.reasoning, - input: configuredModel?.input ?? discoveredModel.input, + input: normalizedInput, cost: configuredModel?.cost ?? discoveredModel.cost, contextWindow: configuredModel?.contextWindow ?? discoveredModel.contextWindow, maxTokens: configuredModel?.maxTokens ?? discoveredModel.maxTokens, diff --git a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts index 3fc46dac0ae..c9bc2304f97 100644 --- a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts @@ -6,7 +6,7 @@ import { log } from "./logger.js"; type OpenAIServiceTier = "auto" | "default" | "flex" | "priority"; const OPENAI_RESPONSES_APIS = new Set(["openai-responses"]); -const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai-responses"]); +const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai", "azure-openai-responses"]); function isDirectOpenAIBaseUrl(baseUrl: unknown): boolean { if (typeof baseUrl !== "string" || !baseUrl.trim()) { @@ -250,7 +250,7 @@ export function createOpenAIDefaultTransportWrapper(baseStreamFn: StreamFn | und const mergedOptions = { ...options, transport: options?.transport ?? "auto", - openaiWsWarmup: typedOptions?.openaiWsWarmup ?? true, + openaiWsWarmup: typedOptions?.openaiWsWarmup ?? 
false, } as SimpleStreamOptions; return underlying(model, context, mergedOptions); }; diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts index 8c7afc834d2..8c320f765be 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts @@ -9,16 +9,18 @@ export function makeOverflowError(message: string = DEFAULT_OVERFLOW_ERROR_MESSA export function makeCompactionSuccess(params: { summary: string; - firstKeptEntryId: string; - tokensBefore: number; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; }) { return { ok: true as const, compacted: true as const, result: { summary: params.summary, - firstKeptEntryId: params.firstKeptEntryId, - tokensBefore: params.tokensBefore, + ...(params.firstKeptEntryId ? { firstKeptEntryId: params.firstKeptEntryId } : {}), + ...(params.tokensBefore !== undefined ? { tokensBefore: params.tokensBefore } : {}), + ...(params.tokensAfter !== undefined ? 
{ tokensAfter: params.tokensAfter } : {}), }, }; } @@ -55,8 +57,9 @@ type MockCompactDirect = { compacted: true; result: { summary: string; - firstKeptEntryId: string; - tokensBefore: number; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; }; }) => unknown; }; diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts index 5980170be62..7a2550ba1e9 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts @@ -2,9 +2,13 @@ import "./run.overflow-compaction.mocks.shared.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { isCompactionFailureError, isLikelyContextOverflowError } from "../pi-embedded-helpers.js"; -vi.mock("../../utils.js", () => ({ - resolveUserPath: vi.fn((p: string) => p), -})); +vi.mock(import("../../utils.js"), async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveUserPath: vi.fn((p: string) => p), + }; +}); import { log } from "./logger.js"; import { runEmbeddedPiAgent } from "./run.js"; @@ -16,6 +20,7 @@ import { queueOverflowAttemptWithOversizedToolOutput, } from "./run.overflow-compaction.fixture.js"; import { + mockedContextEngine, mockedCompactDirect, mockedRunEmbeddedAttempt, mockedSessionLikelyHasOversizedToolResults, @@ -30,6 +35,11 @@ const mockedIsLikelyContextOverflowError = vi.mocked(isLikelyContextOverflowErro describe("overflow compaction in run loop", () => { beforeEach(() => { vi.clearAllMocks(); + mockedRunEmbeddedAttempt.mockReset(); + mockedCompactDirect.mockReset(); + mockedSessionLikelyHasOversizedToolResults.mockReset(); + mockedTruncateOversizedToolResultsInSession.mockReset(); + mockedContextEngine.info.ownsCompaction = false; mockedIsCompactionFailureError.mockImplementation((msg?: string) => { if (!msg) { return false; @@ -72,7 +82,9 @@ 
describe("overflow compaction in run loop", () => { expect(mockedCompactDirect).toHaveBeenCalledTimes(1); expect(mockedCompactDirect).toHaveBeenCalledWith( - expect.objectContaining({ authProfileId: "test-profile" }), + expect.objectContaining({ + runtimeContext: expect.objectContaining({ authProfileId: "test-profile" }), + }), ); expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(2); expect(log.warn).toHaveBeenCalledWith( diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts index 22dee7b49cd..51f711508b1 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts @@ -6,6 +6,25 @@ import type { PluginHookBeforePromptBuildResult, } from "../../plugins/types.js"; +type MockCompactionResult = + | { + ok: true; + compacted: true; + result: { + summary: string; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; + }; + reason?: string; + } + | { + ok: false; + compacted: false; + reason: string; + result?: undefined; + }; + export const mockedGlobalHookRunner = { hasHooks: vi.fn((_hookName: string) => false), runBeforeAgentStart: vi.fn( @@ -26,12 +45,35 @@ export const mockedGlobalHookRunner = { _ctx: PluginHookAgentContext, ): Promise => undefined, ), + runBeforeCompaction: vi.fn(async () => undefined), + runAfterCompaction: vi.fn(async () => undefined), }; +export const mockedContextEngine = { + info: { ownsCompaction: false as boolean }, + compact: vi.fn<(params: unknown) => Promise>(async () => ({ + ok: false as const, + compacted: false as const, + reason: "nothing to compact", + })), +}; + +export const mockedContextEngineCompact = vi.mocked(mockedContextEngine.compact); +export const mockedEnsureRuntimePluginsLoaded: (...args: unknown[]) => void = vi.fn(); + vi.mock("../../plugins/hook-runner-global.js", () => ({ getGlobalHookRunner: 
vi.fn(() => mockedGlobalHookRunner), })); +vi.mock("../../context-engine/index.js", () => ({ + ensureContextEnginesInitialized: vi.fn(), + resolveContextEngine: vi.fn(async () => mockedContextEngine), +})); + +vi.mock("../runtime-plugins.js", () => ({ + ensureRuntimePluginsLoaded: mockedEnsureRuntimePluginsLoaded, +})); + vi.mock("../auth-profiles.js", () => ({ isProfileInCooldown: vi.fn(() => false), markAuthProfileFailure: vi.fn(async () => {}), @@ -141,9 +183,13 @@ vi.mock("../../process/command-queue.js", () => ({ enqueueCommandInLane: vi.fn((_lane: string, task: () => unknown) => task()), })); -vi.mock("../../utils/message-channel.js", () => ({ - isMarkdownCapableMessageChannel: vi.fn(() => true), -})); +vi.mock(import("../../utils/message-channel.js"), async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isMarkdownCapableMessageChannel: vi.fn(() => true), + }; +}); vi.mock("../agent-paths.js", () => ({ resolveOpenClawAgentDir: vi.fn(() => "/tmp/agent-dir"), diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts index 45bab82e1b8..c697ac9526a 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts @@ -1,5 +1,8 @@ import { vi } from "vitest"; -import { compactEmbeddedPiSessionDirect } from "./compact.js"; +import { + mockedContextEngine, + mockedContextEngineCompact, +} from "./run.overflow-compaction.mocks.shared.js"; import { runEmbeddedAttempt } from "./run/attempt.js"; import { sessionLikelyHasOversizedToolResults, @@ -7,13 +10,14 @@ import { } from "./tool-result-truncation.js"; export const mockedRunEmbeddedAttempt = vi.mocked(runEmbeddedAttempt); -export const mockedCompactDirect = vi.mocked(compactEmbeddedPiSessionDirect); +export const mockedCompactDirect = mockedContextEngineCompact; export const 
mockedSessionLikelyHasOversizedToolResults = vi.mocked( sessionLikelyHasOversizedToolResults, ); export const mockedTruncateOversizedToolResultsInSession = vi.mocked( truncateOversizedToolResultsInSession, ); +export { mockedContextEngine }; export const overflowBaseRunParams = { sessionId: "test-session", diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts index 19b4a81d279..b29394eedfd 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts @@ -11,6 +11,7 @@ import { } from "./run.overflow-compaction.fixture.js"; import { mockedGlobalHookRunner } from "./run.overflow-compaction.mocks.shared.js"; import { + mockedContextEngine, mockedCompactDirect, mockedRunEmbeddedAttempt, mockedSessionLikelyHasOversizedToolResults, @@ -22,6 +23,25 @@ const mockedPickFallbackThinkingLevel = vi.mocked(pickFallbackThinkingLevel); describe("runEmbeddedPiAgent overflow compaction trigger routing", () => { beforeEach(() => { vi.clearAllMocks(); + mockedRunEmbeddedAttempt.mockReset(); + mockedCompactDirect.mockReset(); + mockedSessionLikelyHasOversizedToolResults.mockReset(); + mockedTruncateOversizedToolResultsInSession.mockReset(); + mockedGlobalHookRunner.runBeforeAgentStart.mockReset(); + mockedGlobalHookRunner.runBeforeCompaction.mockReset(); + mockedGlobalHookRunner.runAfterCompaction.mockReset(); + mockedContextEngine.info.ownsCompaction = false; + mockedCompactDirect.mockResolvedValue({ + ok: false, + compacted: false, + reason: "nothing to compact", + }); + mockedSessionLikelyHasOversizedToolResults.mockReturnValue(false); + mockedTruncateOversizedToolResultsInSession.mockResolvedValue({ + truncated: false, + truncatedCount: 0, + reason: "no oversized tool results", + }); mockedGlobalHookRunner.hasHooks.mockImplementation(() => false); }); @@ -81,8 +101,12 @@ describe("runEmbeddedPiAgent overflow 
compaction trigger routing", () => { expect(mockedCompactDirect).toHaveBeenCalledTimes(1); expect(mockedCompactDirect).toHaveBeenCalledWith( expect.objectContaining({ - trigger: "overflow", - authProfileId: "test-profile", + sessionId: "test-session", + sessionFile: "/tmp/session.json", + runtimeContext: expect.objectContaining({ + trigger: "overflow", + authProfileId: "test-profile", + }), }), ); }); @@ -132,6 +156,63 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => { expect(result.meta.error?.kind).toBe("context_overflow"); }); + it("fires compaction hooks during overflow recovery for ownsCompaction engines", async () => { + mockedContextEngine.info.ownsCompaction = true; + mockedGlobalHookRunner.hasHooks.mockImplementation( + (hookName) => hookName === "before_compaction" || hookName === "after_compaction", + ); + mockedRunEmbeddedAttempt + .mockResolvedValueOnce(makeAttemptResult({ promptError: makeOverflowError() })) + .mockResolvedValueOnce(makeAttemptResult({ promptError: null })); + mockedCompactDirect.mockResolvedValueOnce({ + ok: true, + compacted: true, + result: { + summary: "engine-owned compaction", + tokensAfter: 50, + }, + }); + + await runEmbeddedPiAgent(overflowBaseRunParams); + + expect(mockedGlobalHookRunner.runBeforeCompaction).toHaveBeenCalledWith( + { messageCount: -1, sessionFile: "/tmp/session.json" }, + expect.objectContaining({ + sessionKey: "test-key", + }), + ); + expect(mockedGlobalHookRunner.runAfterCompaction).toHaveBeenCalledWith( + { + messageCount: -1, + compactedCount: -1, + tokenCount: 50, + sessionFile: "/tmp/session.json", + }, + expect.objectContaining({ + sessionKey: "test-key", + }), + ); + }); + + it("guards thrown engine-owned overflow compaction attempts", async () => { + mockedContextEngine.info.ownsCompaction = true; + mockedGlobalHookRunner.hasHooks.mockImplementation( + (hookName) => hookName === "before_compaction" || hookName === "after_compaction", + ); + 
mockedRunEmbeddedAttempt.mockResolvedValueOnce( + makeAttemptResult({ promptError: makeOverflowError() }), + ); + mockedCompactDirect.mockRejectedValueOnce(new Error("engine boom")); + + const result = await runEmbeddedPiAgent(overflowBaseRunParams); + + expect(mockedCompactDirect).toHaveBeenCalledTimes(1); + expect(mockedGlobalHookRunner.runBeforeCompaction).toHaveBeenCalledTimes(1); + expect(mockedGlobalHookRunner.runAfterCompaction).not.toHaveBeenCalled(); + expect(result.meta.error?.kind).toBe("context_overflow"); + expect(result.payloads?.[0]?.isError).toBe(true); + }); + it("returns retry_limit when repeated retries never converge", async () => { mockedRunEmbeddedAttempt.mockClear(); mockedCompactDirect.mockClear(); diff --git a/src/agents/pi-embedded-runner/run.ts b/src/agents/pi-embedded-runner/run.ts index 7f5f4f525b7..09d5adda724 100644 --- a/src/agents/pi-embedded-runner/run.ts +++ b/src/agents/pi-embedded-runner/run.ts @@ -553,7 +553,7 @@ export async function runEmbeddedPiAgent( resolveProfilesUnavailableReason({ store: authStore, profileIds, - }) ?? "rate_limit" + }) ?? "unknown" ); } const classified = classifyFailoverReason(params.message); @@ -669,14 +669,15 @@ export async function runEmbeddedPiAgent( ? (resolveProfilesUnavailableReason({ store: authStore, profileIds: autoProfileCandidates, - }) ?? "rate_limit") + }) ?? 
"unknown") : null; const allowTransientCooldownProbe = params.allowTransientCooldownProbe === true && allAutoProfilesInCooldown && (unavailableReason === "rate_limit" || unavailableReason === "overloaded" || - unavailableReason === "billing"); + unavailableReason === "billing" || + unavailableReason === "unknown"); let didTransientCooldownProbe = false; while (profileIndex < profileCandidates.length) { @@ -1027,37 +1028,84 @@ export async function runEmbeddedPiAgent( log.warn( `context overflow detected (attempt ${overflowCompactionAttempts}/${MAX_OVERFLOW_COMPACTION_ATTEMPTS}); attempting auto-compaction for ${provider}/${modelId}`, ); - const compactResult = await contextEngine.compact({ - sessionId: params.sessionId, - sessionFile: params.sessionFile, - tokenBudget: ctxInfo.tokens, - force: true, - compactionTarget: "budget", - runtimeContext: { - sessionKey: params.sessionKey, - messageChannel: params.messageChannel, - messageProvider: params.messageProvider, - agentAccountId: params.agentAccountId, - authProfileId: lastProfileId, - workspaceDir: resolvedWorkspace, - agentDir, - config: params.config, - skillsSnapshot: params.skillsSnapshot, - senderIsOwner: params.senderIsOwner, - provider, - model: modelId, - runId: params.runId, - thinkLevel, - reasoningLevel: params.reasoningLevel, - bashElevated: params.bashElevated, - extraSystemPrompt: params.extraSystemPrompt, - ownerNumbers: params.ownerNumbers, - trigger: "overflow", - diagId: overflowDiagId, - attempt: overflowCompactionAttempts, - maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS, - }, - }); + let compactResult: Awaited>; + // When the engine owns compaction, hooks are not fired inside + // compactEmbeddedPiSessionDirect (which is bypassed). Fire them + // here so subscribers (memory extensions, usage trackers) are + // notified even on overflow-recovery compactions. 
+ const overflowEngineOwnsCompaction = contextEngine.info.ownsCompaction === true; + const overflowHookRunner = overflowEngineOwnsCompaction ? hookRunner : null; + if (overflowHookRunner?.hasHooks("before_compaction")) { + try { + await overflowHookRunner.runBeforeCompaction( + { messageCount: -1, sessionFile: params.sessionFile }, + hookCtx, + ); + } catch (hookErr) { + log.warn( + `before_compaction hook failed during overflow recovery: ${String(hookErr)}`, + ); + } + } + try { + compactResult = await contextEngine.compact({ + sessionId: params.sessionId, + sessionFile: params.sessionFile, + tokenBudget: ctxInfo.tokens, + force: true, + compactionTarget: "budget", + runtimeContext: { + sessionKey: params.sessionKey, + messageChannel: params.messageChannel, + messageProvider: params.messageProvider, + agentAccountId: params.agentAccountId, + authProfileId: lastProfileId, + workspaceDir: resolvedWorkspace, + agentDir, + config: params.config, + skillsSnapshot: params.skillsSnapshot, + senderIsOwner: params.senderIsOwner, + provider, + model: modelId, + runId: params.runId, + thinkLevel, + reasoningLevel: params.reasoningLevel, + bashElevated: params.bashElevated, + extraSystemPrompt: params.extraSystemPrompt, + ownerNumbers: params.ownerNumbers, + trigger: "overflow", + diagId: overflowDiagId, + attempt: overflowCompactionAttempts, + maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS, + }, + }); + } catch (compactErr) { + log.warn( + `contextEngine.compact() threw during overflow recovery for ${provider}/${modelId}: ${String(compactErr)}`, + ); + compactResult = { ok: false, compacted: false, reason: String(compactErr) }; + } + if ( + compactResult.ok && + compactResult.compacted && + overflowHookRunner?.hasHooks("after_compaction") + ) { + try { + await overflowHookRunner.runAfterCompaction( + { + messageCount: -1, + compactedCount: -1, + tokenCount: compactResult.result?.tokensAfter, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (hookErr) { + 
log.warn( + `after_compaction hook failed during overflow recovery: ${String(hookErr)}`, + ); + } + } if (compactResult.compacted) { autoCompactionCount += 1; log.info(`auto-compaction succeeded for ${provider}/${modelId}; retrying prompt`); diff --git a/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts b/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts index 0341ee97587..3801231f1f2 100644 --- a/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts +++ b/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts @@ -79,6 +79,7 @@ vi.mock("../../../infra/machine-name.js", () => ({ })); vi.mock("../../../infra/net/undici-global-dispatcher.js", () => ({ + ensureGlobalUndiciEnvProxyDispatcher: () => {}, ensureGlobalUndiciStreamTimeouts: () => {}, })); diff --git a/src/agents/pi-embedded-runner/run/attempt.test.ts b/src/agents/pi-embedded-runner/run/attempt.test.ts index 9821adc0e0b..33a4f9654df 100644 --- a/src/agents/pi-embedded-runner/run/attempt.test.ts +++ b/src/agents/pi-embedded-runner/run/attempt.test.ts @@ -13,6 +13,7 @@ import { shouldInjectOllamaCompatNumCtx, decodeHtmlEntitiesInObject, wrapOllamaCompatNumCtx, + wrapStreamFnRepairMalformedToolCallArguments, wrapStreamFnTrimToolCallNames, } from "./attempt.js"; @@ -430,6 +431,137 @@ describe("wrapStreamFnTrimToolCallNames", () => { }); }); +describe("wrapStreamFnRepairMalformedToolCallArguments", () => { + function createFakeStream(params: { events: unknown[]; resultMessage: unknown }): { + result: () => Promise; + [Symbol.asyncIterator]: () => AsyncIterator; + } { + return { + async result() { + return params.resultMessage; + }, + [Symbol.asyncIterator]() { + return (async function* () { + for (const event of params.events) { + yield event; + } + })(); + }, + }; + } + + async function invokeWrappedStream(baseFn: (...args: never[]) => unknown) { + const wrappedFn = wrapStreamFnRepairMalformedToolCallArguments(baseFn as never); + return await wrappedFn({} as 
never, {} as never, {} as never); + } + + it("repairs anthropic-compatible tool arguments when trailing junk follows valid JSON", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const streamedToolCall = { type: "toolCall", name: "read", arguments: {} }; + const endMessageToolCall = { type: "toolCall", name: "read", arguments: {} }; + const finalToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const endMessage = { role: "assistant", content: [endMessageToolCall] }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp/report.txt"}', + partial: partialMessage, + }, + { + type: "toolcall_delta", + contentIndex: 0, + delta: "xx", + partial: partialMessage, + }, + { + type: "toolcall_end", + contentIndex: 0, + toolCall: streamedToolCall, + partial: partialMessage, + message: endMessage, + }, + ], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + const result = await stream.result(); + + expect(partialToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(streamedToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(endMessageToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(finalToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(result).toBe(finalMessage); + }); + + it("keeps incomplete partial JSON unchanged until a complete object exists", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: 
'{"path":"/tmp', + partial: partialMessage, + }, + ], + resultMessage: { role: "assistant", content: [partialToolCall] }, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + + expect(partialToolCall.arguments).toEqual({}); + }); + + it("does not repair tool arguments when trailing junk exceeds the Kimi-specific allowance", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const streamedToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp/report.txt"}oops', + partial: partialMessage, + }, + { + type: "toolcall_end", + contentIndex: 0, + toolCall: streamedToolCall, + partial: partialMessage, + }, + ], + resultMessage: { role: "assistant", content: [partialToolCall] }, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + + expect(partialToolCall.arguments).toEqual({}); + expect(streamedToolCall.arguments).toEqual({}); + }); +}); + describe("isOllamaCompatProvider", () => { it("detects native ollama provider id", () => { expect( diff --git a/src/agents/pi-embedded-runner/run/attempt.ts b/src/agents/pi-embedded-runner/run/attempt.ts index 084a6d39746..790323b8232 100644 --- a/src/agents/pi-embedded-runner/run/attempt.ts +++ b/src/agents/pi-embedded-runner/run/attempt.ts @@ -11,7 +11,10 @@ import { resolveHeartbeatPrompt } from "../../../auto-reply/heartbeat.js"; import { resolveChannelCapabilities } from "../../../config/channel-capabilities.js"; import type { OpenClawConfig } from "../../../config/config.js"; import { getMachineDisplayName } from "../../../infra/machine-name.js"; -import { ensureGlobalUndiciStreamTimeouts } from "../../../infra/net/undici-global-dispatcher.js"; 
+import { + ensureGlobalUndiciEnvProxyDispatcher, + ensureGlobalUndiciStreamTimeouts, +} from "../../../infra/net/undici-global-dispatcher.js"; import { MAX_IMAGE_BYTES } from "../../../media/constants.js"; import { getGlobalHookRunner } from "../../../plugins/hook-runner-global.js"; import type { @@ -433,6 +436,258 @@ export function wrapStreamFnTrimToolCallNames( }; } +function extractBalancedJsonPrefix(raw: string): string | null { + let start = 0; + while (start < raw.length && /\s/.test(raw[start] ?? "")) { + start += 1; + } + const startChar = raw[start]; + if (startChar !== "{" && startChar !== "[") { + return null; + } + + let depth = 0; + let inString = false; + let escaped = false; + for (let i = start; i < raw.length; i += 1) { + const char = raw[i]; + if (char === undefined) { + break; + } + if (inString) { + if (escaped) { + escaped = false; + } else if (char === "\\") { + escaped = true; + } else if (char === '"') { + inString = false; + } + continue; + } + if (char === '"') { + inString = true; + continue; + } + if (char === "{" || char === "[") { + depth += 1; + continue; + } + if (char === "}" || char === "]") { + depth -= 1; + if (depth === 0) { + return raw.slice(start, i + 1); + } + } + } + return null; +} + +const MAX_TOOLCALL_REPAIR_BUFFER_CHARS = 64_000; +const MAX_TOOLCALL_REPAIR_TRAILING_CHARS = 3; +const TOOLCALL_REPAIR_ALLOWED_TRAILING_RE = /^[^\s{}[\]":,\\]{1,3}$/; + +function shouldAttemptMalformedToolCallRepair(partialJson: string, delta: string): boolean { + if (/[}\]]/.test(delta)) { + return true; + } + const trimmedDelta = delta.trim(); + return ( + trimmedDelta.length > 0 && + trimmedDelta.length <= MAX_TOOLCALL_REPAIR_TRAILING_CHARS && + /[}\]]/.test(partialJson) + ); +} + +type ToolCallArgumentRepair = { + args: Record; + trailingSuffix: string; +}; + +function tryParseMalformedToolCallArguments(raw: string): ToolCallArgumentRepair | undefined { + if (!raw.trim()) { + return undefined; + } + try { + JSON.parse(raw); + return 
undefined; + } catch { + const jsonPrefix = extractBalancedJsonPrefix(raw); + if (!jsonPrefix) { + return undefined; + } + const suffix = raw.slice(raw.indexOf(jsonPrefix) + jsonPrefix.length).trim(); + if ( + suffix.length === 0 || + suffix.length > MAX_TOOLCALL_REPAIR_TRAILING_CHARS || + !TOOLCALL_REPAIR_ALLOWED_TRAILING_RE.test(suffix) + ) { + return undefined; + } + try { + const parsed = JSON.parse(jsonPrefix) as unknown; + return parsed && typeof parsed === "object" && !Array.isArray(parsed) + ? { args: parsed as Record, trailingSuffix: suffix } + : undefined; + } catch { + return undefined; + } + } +} + +function repairToolCallArgumentsInMessage( + message: unknown, + contentIndex: number, + repairedArgs: Record, +): void { + if (!message || typeof message !== "object") { + return; + } + const content = (message as { content?: unknown }).content; + if (!Array.isArray(content)) { + return; + } + const block = content[contentIndex]; + if (!block || typeof block !== "object") { + return; + } + const typedBlock = block as { type?: unknown; arguments?: unknown }; + if (!isToolCallBlockType(typedBlock.type)) { + return; + } + typedBlock.arguments = repairedArgs; +} + +function repairMalformedToolCallArgumentsInMessage( + message: unknown, + repairedArgsByIndex: Map>, +): void { + if (!message || typeof message !== "object") { + return; + } + const content = (message as { content?: unknown }).content; + if (!Array.isArray(content)) { + return; + } + for (const [index, repairedArgs] of repairedArgsByIndex.entries()) { + repairToolCallArgumentsInMessage(message, index, repairedArgs); + } +} + +function wrapStreamRepairMalformedToolCallArguments( + stream: ReturnType, +): ReturnType { + const partialJsonByIndex = new Map(); + const repairedArgsByIndex = new Map>(); + const disabledIndices = new Set(); + const loggedRepairIndices = new Set(); + const originalResult = stream.result.bind(stream); + stream.result = async () => { + const message = await originalResult(); + 
repairMalformedToolCallArgumentsInMessage(message, repairedArgsByIndex); + partialJsonByIndex.clear(); + repairedArgsByIndex.clear(); + disabledIndices.clear(); + loggedRepairIndices.clear(); + return message; + }; + + const originalAsyncIterator = stream[Symbol.asyncIterator].bind(stream); + (stream as { [Symbol.asyncIterator]: typeof originalAsyncIterator })[Symbol.asyncIterator] = + function () { + const iterator = originalAsyncIterator(); + return { + async next() { + const result = await iterator.next(); + if (!result.done && result.value && typeof result.value === "object") { + const event = result.value as { + type?: unknown; + contentIndex?: unknown; + delta?: unknown; + partial?: unknown; + message?: unknown; + toolCall?: unknown; + }; + if ( + typeof event.contentIndex === "number" && + Number.isInteger(event.contentIndex) && + event.type === "toolcall_delta" && + typeof event.delta === "string" + ) { + if (disabledIndices.has(event.contentIndex)) { + return result; + } + const nextPartialJson = + (partialJsonByIndex.get(event.contentIndex) ?? 
"") + event.delta; + if (nextPartialJson.length > MAX_TOOLCALL_REPAIR_BUFFER_CHARS) { + partialJsonByIndex.delete(event.contentIndex); + repairedArgsByIndex.delete(event.contentIndex); + disabledIndices.add(event.contentIndex); + return result; + } + partialJsonByIndex.set(event.contentIndex, nextPartialJson); + if (shouldAttemptMalformedToolCallRepair(nextPartialJson, event.delta)) { + const repair = tryParseMalformedToolCallArguments(nextPartialJson); + if (repair) { + repairedArgsByIndex.set(event.contentIndex, repair.args); + repairToolCallArgumentsInMessage(event.partial, event.contentIndex, repair.args); + repairToolCallArgumentsInMessage(event.message, event.contentIndex, repair.args); + if (!loggedRepairIndices.has(event.contentIndex)) { + loggedRepairIndices.add(event.contentIndex); + log.warn( + `repairing kimi-coding tool call arguments after ${repair.trailingSuffix.length} trailing chars`, + ); + } + } + } + } + if ( + typeof event.contentIndex === "number" && + Number.isInteger(event.contentIndex) && + event.type === "toolcall_end" + ) { + const repairedArgs = repairedArgsByIndex.get(event.contentIndex); + if (repairedArgs) { + if (event.toolCall && typeof event.toolCall === "object") { + (event.toolCall as { arguments?: unknown }).arguments = repairedArgs; + } + repairToolCallArgumentsInMessage(event.partial, event.contentIndex, repairedArgs); + repairToolCallArgumentsInMessage(event.message, event.contentIndex, repairedArgs); + } + partialJsonByIndex.delete(event.contentIndex); + disabledIndices.delete(event.contentIndex); + loggedRepairIndices.delete(event.contentIndex); + } + } + return result; + }, + async return(value?: unknown) { + return iterator.return?.(value) ?? { done: true as const, value: undefined }; + }, + async throw(error?: unknown) { + return iterator.throw?.(error) ?? 
{ done: true as const, value: undefined }; + }, + }; + }; + + return stream; +} + +export function wrapStreamFnRepairMalformedToolCallArguments(baseFn: StreamFn): StreamFn { + return (model, context, options) => { + const maybeStream = baseFn(model, context, options); + if (maybeStream && typeof maybeStream === "object" && "then" in maybeStream) { + return Promise.resolve(maybeStream).then((stream) => + wrapStreamRepairMalformedToolCallArguments(stream), + ); + } + return wrapStreamRepairMalformedToolCallArguments(maybeStream); + }; +} + +function shouldRepairMalformedAnthropicToolCallArguments(provider?: string): boolean { + return normalizeProviderId(provider ?? "") === "kimi-coding"; +} + // --------------------------------------------------------------------------- // xAI / Grok: decode HTML entities in tool call arguments // --------------------------------------------------------------------------- @@ -749,6 +1004,9 @@ export async function runEmbeddedAttempt( const resolvedWorkspace = resolveUserPath(params.workspaceDir); const prevCwd = process.cwd(); const runAbortController = new AbortController(); + // Proxy bootstrap must happen before timeout tuning so the timeouts wrap the + // active EnvHttpProxyAgent instead of being replaced by a bare proxy dispatcher. 
+ ensureGlobalUndiciEnvProxyDispatcher(); ensureGlobalUndiciStreamTimeouts(); log.debug( @@ -1373,6 +1631,15 @@ export async function runEmbeddedAttempt( allowedToolNames, ); + if ( + params.model.api === "anthropic-messages" && + shouldRepairMalformedAnthropicToolCallArguments(params.provider) + ) { + activeSession.agent.streamFn = wrapStreamFnRepairMalformedToolCallArguments( + activeSession.agent.streamFn, + ); + } + if (isXaiProvider(params.provider, params.modelId)) { activeSession.agent.streamFn = wrapStreamFnDecodeXaiToolCallArguments( activeSession.agent.streamFn, @@ -1768,6 +2035,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? undefined, }, ) .catch((err) => { @@ -1976,6 +2245,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? undefined, }, ) .catch((err) => { @@ -2036,6 +2307,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? 
undefined, }, ) .catch((err) => { diff --git a/src/agents/pi-embedded-runner/run/history-image-prune.test.ts b/src/agents/pi-embedded-runner/run/history-image-prune.test.ts index bf4b27f5beb..dbed0335435 100644 --- a/src/agents/pi-embedded-runner/run/history-image-prune.test.ts +++ b/src/agents/pi-embedded-runner/run/history-image-prune.test.ts @@ -49,6 +49,30 @@ describe("pruneProcessedHistoryImages", () => { expect(first.content[1]).toMatchObject({ type: "image", data: "abc" }); }); + it("prunes image blocks from toolResult messages that already have assistant replies", () => { + const messages: AgentMessage[] = [ + castAgentMessage({ + role: "toolResult", + toolName: "read", + content: [{ type: "text", text: "screenshot bytes" }, { ...image }], + }), + castAgentMessage({ + role: "assistant", + content: "ack", + }), + ]; + + const didMutate = pruneProcessedHistoryImages(messages); + + expect(didMutate).toBe(true); + const firstTool = messages[0] as Extract | undefined; + if (!firstTool || !Array.isArray(firstTool.content)) { + throw new Error("expected toolResult array content"); + } + expect(firstTool.content).toHaveLength(2); + expect(firstTool.content[1]).toMatchObject({ type: "text", text: PRUNED_HISTORY_IMAGE_MARKER }); + }); + it("does not change messages when no assistant turn exists", () => { const messages: AgentMessage[] = [ castAgentMessage({ diff --git a/src/agents/pi-embedded-runner/run/history-image-prune.ts b/src/agents/pi-embedded-runner/run/history-image-prune.ts index d7dbea5de38..4e92bb08f01 100644 --- a/src/agents/pi-embedded-runner/run/history-image-prune.ts +++ b/src/agents/pi-embedded-runner/run/history-image-prune.ts @@ -21,7 +21,11 @@ export function pruneProcessedHistoryImages(messages: AgentMessage[]): boolean { let didMutate = false; for (let i = 0; i < lastAssistantIndex; i++) { const message = messages[i]; - if (!message || message.role !== "user" || !Array.isArray(message.content)) { + if ( + !message || + (message.role !== "user" 
&& message.role !== "toolResult") || + !Array.isArray(message.content) + ) { continue; } for (let j = 0; j < message.content.length; j++) { diff --git a/src/agents/pi-embedded-runner/run/payloads.errors.test.ts b/src/agents/pi-embedded-runner/run/payloads.errors.test.ts index 4268e177dfc..a2e7873aedf 100644 --- a/src/agents/pi-embedded-runner/run/payloads.errors.test.ts +++ b/src/agents/pi-embedded-runner/run/payloads.errors.test.ts @@ -101,6 +101,18 @@ describe("buildEmbeddedRunPayloads", () => { expect(payloads[0]?.isError).toBe(true); }); + it("does not emit a synthetic billing error for successful turns with stale errorMessage", () => { + const payloads = buildPayloads({ + lastAssistant: makeAssistant({ + stopReason: "stop", + errorMessage: "insufficient credits for embedding model", + content: [{ type: "text", text: "Handle payment required errors in your API." }], + }), + }); + + expectSinglePayloadText(payloads, "Handle payment required errors in your API."); + }); + it("suppresses raw error JSON even when errorMessage is missing", () => { const payloads = buildPayloads({ assistantTexts: [errorJsonPretty], diff --git a/src/agents/pi-embedded-runner/run/payloads.ts b/src/agents/pi-embedded-runner/run/payloads.ts index 16a78ec2e97..c0e0ded136e 100644 --- a/src/agents/pi-embedded-runner/run/payloads.ts +++ b/src/agents/pi-embedded-runner/run/payloads.ts @@ -128,16 +128,17 @@ export function buildEmbeddedRunPayloads(params: { const useMarkdown = params.toolResultFormat === "markdown"; const suppressAssistantArtifacts = params.didSendDeterministicApprovalPrompt === true; const lastAssistantErrored = params.lastAssistant?.stopReason === "error"; - const errorText = params.lastAssistant - ? suppressAssistantArtifacts - ? 
undefined - : formatAssistantErrorText(params.lastAssistant, { - cfg: params.config, - sessionKey: params.sessionKey, - provider: params.provider, - model: params.model, - }) - : undefined; + const errorText = + params.lastAssistant && lastAssistantErrored + ? suppressAssistantArtifacts + ? undefined + : formatAssistantErrorText(params.lastAssistant, { + cfg: params.config, + sessionKey: params.sessionKey, + provider: params.provider, + model: params.model, + }) + : undefined; const rawErrorMessage = lastAssistantErrored ? params.lastAssistant?.errorMessage?.trim() || undefined : undefined; diff --git a/src/agents/pi-embedded-utils.test.ts b/src/agents/pi-embedded-utils.test.ts index 6a5ce710c85..ab84a375d94 100644 --- a/src/agents/pi-embedded-utils.test.ts +++ b/src/agents/pi-embedded-utils.test.ts @@ -134,6 +134,20 @@ describe("extractAssistantText", () => { ); }); + it("preserves response when errorMessage set from background failure (#13935)", () => { + const responseText = "Handle payment required errors in your API."; + const msg = makeAssistantMessage({ + role: "assistant", + errorMessage: "insufficient credits for embedding model", + stopReason: "stop", + content: [{ type: "text", text: responseText }], + timestamp: Date.now(), + }); + + const result = extractAssistantText(msg); + expect(result).toBe(responseText); + }); + it("strips Minimax tool invocations with extra attributes", () => { const msg = makeAssistantMessage({ role: "assistant", diff --git a/src/agents/pi-embedded-utils.ts b/src/agents/pi-embedded-utils.ts index da1dd7911b8..375df11654d 100644 --- a/src/agents/pi-embedded-utils.ts +++ b/src/agents/pi-embedded-utils.ts @@ -245,7 +245,9 @@ export function extractAssistantText(msg: AssistantMessage): string { }) ?? ""; // Only apply keyword-based error rewrites when the assistant message is actually an error. // Otherwise normal prose that *mentions* errors (e.g. "context overflow") can get clobbered. 
- const errorContext = msg.stopReason === "error" || Boolean(msg.errorMessage?.trim()); + // Gate on stopReason only — a non-error response with an errorMessage set (e.g. from a + // background tool failure) should not have its content rewritten (#13935). + const errorContext = msg.stopReason === "error"; return sanitizeUserFacingText(extracted, { errorContext }); } diff --git a/src/agents/pi-extensions/context-pruning.test.ts b/src/agents/pi-extensions/context-pruning.test.ts index 7812f5db00a..9dedff97def 100644 --- a/src/agents/pi-extensions/context-pruning.test.ts +++ b/src/agents/pi-extensions/context-pruning.test.ts @@ -358,21 +358,26 @@ describe("context-pruning", () => { expect(toolText(findToolResult(next, "t2"))).toContain("y".repeat(20_000)); }); - it("skips tool results that contain images (no soft trim, no hard clear)", () => { + it("replaces image blocks in tool results during soft trim", () => { const messages: AgentMessage[] = [ makeUser("u1"), makeImageToolResult({ toolCallId: "t1", toolName: "exec", - text: "x".repeat(20_000), + text: "visible tool text", }), ]; - const next = pruneWithAggressiveDefaults(messages); + const next = pruneWithAggressiveDefaults(messages, { + hardClearRatio: 10.0, + hardClear: { enabled: false, placeholder: "[cleared]" }, + softTrim: { maxChars: 200, headChars: 100, tailChars: 100 }, + }); const tool = findToolResult(next, "t1"); - expect(tool.content.some((b) => b.type === "image")).toBe(true); - expect(toolText(tool)).toContain("x".repeat(20_000)); + expect(tool.content.some((b) => b.type === "image")).toBe(false); + expect(toolText(tool)).toContain("[image removed during context pruning]"); + expect(toolText(tool)).toContain("visible tool text"); }); it("soft-trims across block boundaries", () => { diff --git a/src/agents/pi-extensions/context-pruning/pruner.test.ts b/src/agents/pi-extensions/context-pruning/pruner.test.ts index 3985bb2feb1..a847bff0e8c 100644 --- 
a/src/agents/pi-extensions/context-pruning/pruner.test.ts +++ b/src/agents/pi-extensions/context-pruning/pruner.test.ts @@ -45,6 +45,19 @@ function makeAssistant(content: AssistantMessage["content"]): AgentMessage { }; } +function makeToolResult( + content: Array< + { type: "text"; text: string } | { type: "image"; data: string; mimeType: string } + >, +): AgentMessage { + return { + role: "toolResult", + toolName: "read", + content, + timestamp: Date.now(), + } as AgentMessage; +} + describe("pruneContextMessages", () => { it("does not crash on assistant message with malformed thinking block (missing thinking string)", () => { const messages: AgentMessage[] = [ @@ -109,4 +122,119 @@ }); expect(result).toHaveLength(2); }); + + it("soft-trims image-containing tool results by replacing image blocks with placeholders", () => { + const messages: AgentMessage[] = [ + makeUser("summarize this"), + makeToolResult([ + { type: "text", text: "A".repeat(120) }, + { type: "image", data: "img", mimeType: "image/png" }, + { type: "text", text: "B".repeat(120) }, + ]), + makeAssistant([{ type: "text", text: "done" }]), + ]; + + const result = pruneContextMessages({ + messages, + settings: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS, + keepLastAssistants: 1, + softTrimRatio: 0, + hardClear: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS.hardClear, + enabled: false, + }, + softTrim: { + maxChars: 200, + headChars: 170, + tailChars: 30, + }, + }, + ctx: CONTEXT_WINDOW_1M, + isToolPrunable: () => true, + contextWindowTokensOverride: 16, + }); + + const toolResult = result[1] as Extract<AgentMessage, { role: "toolResult" }>; + expect(toolResult.content).toHaveLength(1); + expect(toolResult.content[0]).toMatchObject({ type: "text" }); + const textBlock = toolResult.content[0] as { type: "text"; text: string }; + expect(textBlock.text).toContain("[image removed during context pruning]"); + expect(textBlock.text).toContain( + "[Tool result trimmed: kept first 170 chars and last 30 chars", + ); 
}); + + it("replaces image-only tool results with placeholders even when text trimming is not needed", () => { + const messages: AgentMessage[] = [ + makeUser("summarize this"), + makeToolResult([{ type: "image", data: "img", mimeType: "image/png" }]), + makeAssistant([{ type: "text", text: "done" }]), + ]; + + const result = pruneContextMessages({ + messages, + settings: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS, + keepLastAssistants: 1, + softTrimRatio: 0, + hardClearRatio: 10, + hardClear: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS.hardClear, + enabled: false, + }, + softTrim: { + maxChars: 5_000, + headChars: 2_000, + tailChars: 2_000, + }, + }, + ctx: CONTEXT_WINDOW_1M, + isToolPrunable: () => true, + contextWindowTokensOverride: 1, + }); + + const toolResult = result[1] as Extract<AgentMessage, { role: "toolResult" }>; + expect(toolResult.content).toEqual([ + { type: "text", text: "[image removed during context pruning]" }, + ]); + }); + + it("hard-clears image-containing tool results once ratios require clearing", () => { + const messages: AgentMessage[] = [ + makeUser("summarize this"), + makeToolResult([ + { type: "text", text: "small text" }, + { type: "image", data: "img", mimeType: "image/png" }, + ]), + makeAssistant([{ type: "text", text: "done" }]), + ]; + + const placeholder = "[hard cleared test placeholder]"; + const result = pruneContextMessages({ + messages, + settings: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS, + keepLastAssistants: 1, + softTrimRatio: 0, + hardClearRatio: 0, + minPrunableToolChars: 1, + softTrim: { + maxChars: 5_000, + headChars: 2_000, + tailChars: 2_000, + }, + hardClear: { + enabled: true, + placeholder, + }, + }, + ctx: CONTEXT_WINDOW_1M, + isToolPrunable: () => true, + contextWindowTokensOverride: 8, + }); + + const toolResult = result[1] as Extract<AgentMessage, { role: "toolResult" }>; + expect(toolResult.content).toEqual([{ type: "text", text: placeholder }]); + }); }); diff --git a/src/agents/pi-extensions/context-pruning/pruner.ts b/src/agents/pi-extensions/context-pruning/pruner.ts index 
c195fa79e09..a0f4458f6d4 100644 --- a/src/agents/pi-extensions/context-pruning/pruner.ts +++ b/src/agents/pi-extensions/context-pruning/pruner.ts @@ -5,9 +5,8 @@ import type { EffectiveContextPruningSettings } from "./settings.js"; import { makeToolPrunablePredicate } from "./tools.js"; const CHARS_PER_TOKEN_ESTIMATE = 4; -// We currently skip pruning tool results that contain images. Still, we count them (approx.) so -// we start trimming prunable tool results earlier when image-heavy context is consuming the window. const IMAGE_CHAR_ESTIMATE = 8_000; +const PRUNED_CONTEXT_IMAGE_MARKER = "[image removed during context pruning]"; function asText(text: string): TextContent { return { type: "text", text }; @@ -23,6 +22,22 @@ function collectTextSegments(content: ReadonlyArray) return parts; } +function collectPrunableToolResultSegments( + content: ReadonlyArray, +): string[] { + const parts: string[] = []; + for (const block of content) { + if (block.type === "text") { + parts.push(block.text); + continue; + } + if (block.type === "image") { + parts.push(PRUNED_CONTEXT_IMAGE_MARKER); + } + } + return parts; +} + function estimateJoinedTextLength(parts: string[]): number { if (parts.length === 0) { return 0; @@ -190,21 +205,25 @@ function softTrimToolResultMessage(params: { settings: EffectiveContextPruningSettings; }): ToolResultMessage | null { const { msg, settings } = params; - // Ignore image tool results for now: these are often directly relevant and hard to partially prune safely. - if (hasImageBlocks(msg.content)) { - return null; - } - - const parts = collectTextSegments(msg.content); + const hasImages = hasImageBlocks(msg.content); + const parts = hasImages + ? 
collectPrunableToolResultSegments(msg.content) + : collectTextSegments(msg.content); const rawLen = estimateJoinedTextLength(parts); if (rawLen <= settings.softTrim.maxChars) { - return null; + if (!hasImages) { + return null; + } + return { ...msg, content: [asText(parts.join("\n"))] }; } const headChars = Math.max(0, settings.softTrim.headChars); const tailChars = Math.max(0, settings.softTrim.tailChars); if (headChars + tailChars >= rawLen) { - return null; + if (!hasImages) { + return null; + } + return { ...msg, content: [asText(parts.join("\n"))] }; } const head = takeHeadFromJoinedText(parts, headChars); @@ -274,9 +293,6 @@ export function pruneContextMessages(params: { if (!isToolPrunable(msg.toolName)) { continue; } - if (hasImageBlocks(msg.content)) { - continue; - } prunableToolIndexes.push(i); const updated = softTrimToolResultMessage({ diff --git a/src/agents/pi-tools.workspace-only-false.test.ts b/src/agents/pi-tools.workspace-only-false.test.ts index fb18260db09..146eb943c49 100644 --- a/src/agents/pi-tools.workspace-only-false.test.ts +++ b/src/agents/pi-tools.workspace-only-false.test.ts @@ -3,10 +3,14 @@ import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -vi.mock("@mariozechner/pi-ai/oauth", () => ({ - getOAuthApiKey: () => undefined, - getOAuthProviders: () => [], -})); +vi.mock("@mariozechner/pi-ai", async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + getOAuthApiKey: () => undefined, + getOAuthProviders: () => [], + }; +}); import { createOpenClawCodingTools } from "./pi-tools.js"; diff --git a/src/agents/provider-capabilities.test.ts b/src/agents/provider-capabilities.test.ts index 5e162c87794..90d2b52ff5a 100644 --- a/src/agents/provider-capabilities.test.ts +++ b/src/agents/provider-capabilities.test.ts @@ -47,6 +47,7 @@ describe("resolveProviderCapabilities", () => { it("flags providers that opt out of 
OpenAI-compatible turn validation", () => { expect(supportsOpenAiCompatTurnValidation("openrouter")).toBe(false); expect(supportsOpenAiCompatTurnValidation("opencode")).toBe(false); + expect(supportsOpenAiCompatTurnValidation("opencode-go")).toBe(false); expect(supportsOpenAiCompatTurnValidation("moonshot")).toBe(true); }); @@ -63,6 +64,12 @@ modelId: "gemini-2.0-flash", }), ).toBe(true); + expect( + shouldSanitizeGeminiThoughtSignaturesForModel({ + provider: "opencode-go", + modelId: "google/gemini-2.5-pro-preview", + }), + ).toBe(true); expect(resolveTranscriptToolCallIdMode("mistral", "mistral-large-latest")).toBe("strict9"); }); diff --git a/src/agents/provider-capabilities.ts b/src/agents/provider-capabilities.ts index 62007b810f8..27aadbcd7d3 100644 --- a/src/agents/provider-capabilities.ts +++ b/src/agents/provider-capabilities.ts @@ -66,6 +66,11 @@ const PROVIDER_CAPABILITIES: Record> = { geminiThoughtSignatureSanitization: true, geminiThoughtSignatureModelHints: ["gemini"], }, + "opencode-go": { + openAiCompatTurnValidation: false, + geminiThoughtSignatureSanitization: true, + geminiThoughtSignatureModelHints: ["gemini"], + }, kilocode: { geminiThoughtSignatureSanitization: true, geminiThoughtSignatureModelHints: ["gemini"], diff --git a/src/agents/sandbox/fs-bridge-path-safety.ts b/src/agents/sandbox/fs-bridge-path-safety.ts index dfc6c6692a1..9ca4c52e537 100644 --- a/src/agents/sandbox/fs-bridge-path-safety.ts +++ b/src/agents/sandbox/fs-bridge-path-safety.ts @@ -24,6 +24,11 @@ export type PinnedSandboxEntry = { basename: string; }; +export type AnchoredSandboxEntry = { + canonicalParentPath: string; + basename: string; +}; + export type PinnedSandboxDirectoryEntry = { mountRootPath: string; relativePath: string; @@ -154,6 +159,48 @@ export class SandboxFsPathGuard { }; } + async resolveAnchoredSandboxEntry( + target: SandboxResolvedFsPath, + action: string, + ): Promise<AnchoredSandboxEntry> { + const basename = 
path.posix.basename(target.containerPath); + if (!basename || basename === "." || basename === "/") { + throw new Error(`Invalid sandbox entry target: ${target.containerPath}`); + } + const parentPath = normalizeContainerPath(path.posix.dirname(target.containerPath)); + const canonicalParentPath = await this.resolveCanonicalContainerPath({ + containerPath: parentPath, + allowFinalSymlinkForUnlink: false, + }); + this.resolveRequiredMount(canonicalParentPath, action); + return { + canonicalParentPath, + basename, + }; + } + + async resolveAnchoredPinnedEntry( + target: SandboxResolvedFsPath, + action: string, + ): Promise<PinnedSandboxEntry> { + const anchoredTarget = await this.resolveAnchoredSandboxEntry(target, action); + const mount = this.resolveRequiredMount(anchoredTarget.canonicalParentPath, action); + const relativeParentPath = path.posix.relative( + mount.containerRoot, + anchoredTarget.canonicalParentPath, + ); + if (relativeParentPath.startsWith("..") || path.posix.isAbsolute(relativeParentPath)) { + throw new Error( + `Sandbox path escapes allowed mounts; cannot ${action}: ${target.containerPath}`, + ); + } + return { + mountRootPath: mount.containerRoot, + relativeParentPath: relativeParentPath === "." ? 
"" : relativeParentPath, + basename: anchoredTarget.basename, + }; + } + resolvePinnedDirectoryEntry( target: SandboxResolvedFsPath, action: string, diff --git a/src/agents/sandbox/fs-bridge-shell-command-plans.ts b/src/agents/sandbox/fs-bridge-shell-command-plans.ts index 2987472762b..4bcd1ae04de 100644 --- a/src/agents/sandbox/fs-bridge-shell-command-plans.ts +++ b/src/agents/sandbox/fs-bridge-shell-command-plans.ts @@ -1,4 +1,4 @@ -import type { PathSafetyCheck } from "./fs-bridge-path-safety.js"; +import type { AnchoredSandboxEntry, PathSafetyCheck } from "./fs-bridge-path-safety.js"; import type { SandboxResolvedFsPath } from "./fs-paths.js"; export type SandboxFsCommandPlan = { @@ -10,11 +10,14 @@ export type SandboxFsCommandPlan = { allowFailure?: boolean; }; -export function buildStatPlan(target: SandboxResolvedFsPath): SandboxFsCommandPlan { +export function buildStatPlan( + target: SandboxResolvedFsPath, + anchoredTarget: AnchoredSandboxEntry, +): SandboxFsCommandPlan { return { checks: [{ target, options: { action: "stat files" } }], - script: 'set -eu; stat -c "%F|%s|%Y" -- "$1"', - args: [target.containerPath], + script: 'set -eu\ncd -- "$1"\nstat -c "%F|%s|%Y" -- "$2"', + args: [anchoredTarget.canonicalParentPath, anchoredTarget.basename], allowFailure: true, }; } diff --git a/src/agents/sandbox/fs-bridge.anchored-ops.test.ts b/src/agents/sandbox/fs-bridge.anchored-ops.test.ts index 9b15f02adf5..a2a17194347 100644 --- a/src/agents/sandbox/fs-bridge.anchored-ops.test.ts +++ b/src/agents/sandbox/fs-bridge.anchored-ops.test.ts @@ -4,7 +4,12 @@ import { describe, expect, it } from "vitest"; import { createSandbox, createSandboxFsBridge, + dockerExecResult, + findCallsByScriptFragment, + findCallByDockerArg, + findCallByScriptFragment, getDockerArg, + getDockerScript, installFsBridgeTestHarness, mockedExecDockerRaw, withTempDir, @@ -66,6 +71,13 @@ describe("sandbox fs bridge anchored ops", () => { }); const pinnedCases = [ + { + name: "write pins canonical 
parent + basename", + invoke: (bridge: ReturnType<typeof createSandboxFsBridge>) => + bridge.writeFile({ filePath: "nested/file.txt", data: "updated" }), + expectedArgs: ["write", "/workspace", "nested", "file.txt", "1"], + forbiddenArgs: ["/workspace/nested/file.txt"], + }, { name: "mkdirp pins mount root + relative path", invoke: (bridge: ReturnType<typeof createSandboxFsBridge>) => @@ -121,4 +133,74 @@ }); }); }); + + it.runIf(process.platform !== "win32")( + "write resolves symlink parents to canonical pinned paths", + async () => { + await withTempDir("openclaw-fs-bridge-contract-write-", async (stateDir) => { + const workspaceDir = path.join(stateDir, "workspace"); + const realDir = path.join(workspaceDir, "real"); + await fs.mkdir(realDir, { recursive: true }); + await fs.symlink(realDir, path.join(workspaceDir, "alias")); + + mockedExecDockerRaw.mockImplementation(async (args) => { + const script = getDockerScript(args); + if (script.includes('readlink -f -- "$cursor"')) { + const target = getDockerArg(args, 1); + return dockerExecResult(`${target.replace("/workspace/alias", "/workspace/real")}\n`); + } + if (script.includes('stat -c "%F|%s|%Y"')) { + return dockerExecResult("regular file|1|2"); + } + return dockerExecResult(""); + }); + + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + + await bridge.writeFile({ filePath: "alias/note.txt", data: "updated" }); + + const writeCall = findCallByDockerArg(1, "write"); + expect(writeCall).toBeDefined(); + const args = writeCall?.[0] ?? 
[]; + expect(getDockerArg(args, 2)).toBe("/workspace"); + expect(getDockerArg(args, 3)).toBe("real"); + expect(getDockerArg(args, 4)).toBe("note.txt"); + expect(args).not.toContain("alias"); + + const canonicalCalls = findCallsByScriptFragment('readlink -f -- "$cursor"'); + expect( + canonicalCalls.some(([callArgs]) => getDockerArg(callArgs, 1) === "/workspace/alias"), + ).toBe(true); + }); + }, + ); + + it("stat anchors parent + basename", async () => { + await withTempDir("openclaw-fs-bridge-contract-stat-", async (stateDir) => { + const workspaceDir = path.join(stateDir, "workspace"); + await fs.mkdir(path.join(workspaceDir, "nested"), { recursive: true }); + await fs.writeFile(path.join(workspaceDir, "nested", "file.txt"), "bye", "utf8"); + + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + + await bridge.stat({ filePath: "nested/file.txt" }); + + const statCall = findCallByScriptFragment('stat -c "%F|%s|%Y" -- "$2"'); + expect(statCall).toBeDefined(); + const args = statCall?.[0] ?? []; + expect(getDockerArg(args, 1)).toBe("/workspace/nested"); + expect(getDockerArg(args, 2)).toBe("file.txt"); + expect(args).not.toContain("/workspace/nested/file.txt"); + }); + }); }); diff --git a/src/agents/sandbox/fs-bridge.ts b/src/agents/sandbox/fs-bridge.ts index 83504d9b908..7a9a22d4459 100644 --- a/src/agents/sandbox/fs-bridge.ts +++ b/src/agents/sandbox/fs-bridge.ts @@ -118,7 +118,10 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { const buffer = Buffer.isBuffer(params.data) ? params.data : Buffer.from(params.data, params.encoding ?? 
"utf8"); - const pinnedWriteTarget = this.pathGuard.resolvePinnedEntry(target, "write files"); + const pinnedWriteTarget = await this.pathGuard.resolveAnchoredPinnedEntry( + target, + "write files", + ); await this.runCheckedCommand({ ...buildPinnedWritePlan({ check: writeCheck, @@ -218,7 +221,11 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { signal?: AbortSignal; }): Promise { const target = this.resolveResolvedPath(params); - const result = await this.runPlannedCommand(buildStatPlan(target), params.signal); + const anchoredTarget = await this.pathGuard.resolveAnchoredSandboxEntry(target, "stat files"); + const result = await this.runPlannedCommand( + buildStatPlan(target, anchoredTarget), + params.signal, + ); if (result.code !== 0) { const stderr = result.stderr.toString("utf8"); if (stderr.includes("No such file or directory")) { diff --git a/src/agents/tool-catalog.test.ts b/src/agents/tool-catalog.test.ts new file mode 100644 index 00000000000..120a744432c --- /dev/null +++ b/src/agents/tool-catalog.test.ts @@ -0,0 +1,11 @@ +import { describe, expect, it } from "vitest"; +import { resolveCoreToolProfilePolicy } from "./tool-catalog.js"; + +describe("tool-catalog", () => { + it("includes web_search and web_fetch in the coding profile policy", () => { + const policy = resolveCoreToolProfilePolicy("coding"); + expect(policy).toBeDefined(); + expect(policy!.allow).toContain("web_search"); + expect(policy!.allow).toContain("web_fetch"); + }); +}); diff --git a/src/agents/tool-catalog.ts b/src/agents/tool-catalog.ts index bbada8e7bc9..5ba7ff3b3dc 100644 --- a/src/agents/tool-catalog.ts +++ b/src/agents/tool-catalog.ts @@ -86,7 +86,7 @@ const CORE_TOOL_DEFINITIONS: CoreToolDefinition[] = [ label: "web_search", description: "Search the web", sectionId: "web", - profiles: [], + profiles: ["coding"], includeInOpenClawGroup: true, }, { @@ -94,7 +94,7 @@ const CORE_TOOL_DEFINITIONS: CoreToolDefinition[] = [ label: "web_fetch", description: "Fetch web content", 
sectionId: "web", - profiles: [], + profiles: ["coding"], includeInOpenClawGroup: true, }, { diff --git a/src/agents/tool-policy.test.ts b/src/agents/tool-policy.test.ts index 9a9f512189b..963c703a409 100644 --- a/src/agents/tool-policy.test.ts +++ b/src/agents/tool-policy.test.ts @@ -80,6 +80,7 @@ describe("tool-policy", () => { expect(isOwnerOnlyToolName("whatsapp_login")).toBe(true); expect(isOwnerOnlyToolName("cron")).toBe(true); expect(isOwnerOnlyToolName("gateway")).toBe(true); + expect(isOwnerOnlyToolName("nodes")).toBe(true); expect(isOwnerOnlyToolName("read")).toBe(false); }); @@ -107,6 +108,27 @@ describe("tool-policy", () => { expect(applyOwnerOnlyToolPolicy(tools, false)).toEqual([]); expect(applyOwnerOnlyToolPolicy(tools, true)).toHaveLength(1); }); + + it("strips nodes for non-owner senders via fallback policy", () => { + const tools = [ + { + name: "read", + // oxlint-disable-next-line typescript/no-explicit-any + execute: async () => ({ content: [], details: {} }) as any, + }, + { + name: "nodes", + // oxlint-disable-next-line typescript/no-explicit-any + execute: async () => ({ content: [], details: {} }) as any, + }, + ] as unknown as AnyAgentTool[]; + + expect(applyOwnerOnlyToolPolicy(tools, false).map((tool) => tool.name)).toEqual(["read"]); + expect(applyOwnerOnlyToolPolicy(tools, true).map((tool) => tool.name)).toEqual([ + "read", + "nodes", + ]); + }); }); describe("TOOL_POLICY_CONFORMANCE", () => { diff --git a/src/agents/tool-policy.ts b/src/agents/tool-policy.ts index 188a9c3361c..5538fb765ce 100644 --- a/src/agents/tool-policy.ts +++ b/src/agents/tool-policy.ts @@ -28,7 +28,12 @@ function wrapOwnerOnlyToolExecution(tool: AnyAgentTool, senderIsOwner: boolean): }; } -const OWNER_ONLY_TOOL_NAME_FALLBACKS = new Set(["whatsapp_login", "cron", "gateway"]); +const OWNER_ONLY_TOOL_NAME_FALLBACKS = new Set([ + "whatsapp_login", + "cron", + "gateway", + "nodes", +]); export function isOwnerOnlyToolName(name: string) { return 
OWNER_ONLY_TOOL_NAME_FALLBACKS.has(normalizeToolName(name)); diff --git a/src/agents/tools/session-status-tool.ts b/src/agents/tools/session-status-tool.ts index 2277b6e8ad2..29d8204b750 100644 --- a/src/agents/tools/session-status-tool.ts +++ b/src/agents/tools/session-status-tool.ts @@ -19,6 +19,7 @@ import { import { buildAgentMainSessionKey, DEFAULT_AGENT_ID, + parseAgentSessionKey, resolveAgentIdFromSessionKey, } from "../../routing/session-key.js"; import { applyModelOverrideToSessionEntry } from "../../sessions/model-overrides.js"; @@ -36,10 +37,12 @@ import { import type { AnyAgentTool } from "./common.js"; import { readStringParam } from "./common.js"; import { + createSessionVisibilityGuard, shouldResolveSessionIdInput, - resolveInternalSessionKey, - resolveMainSessionAlias, createAgentToAgentPolicy, + resolveEffectiveSessionToolsVisibility, + resolveInternalSessionKey, + resolveSandboxedSessionToolContext, } from "./sessions-helpers.js"; const SessionStatusToolSchema = Type.Object({ @@ -175,6 +178,7 @@ async function resolveModelOverride(params: { export function createSessionStatusTool(opts?: { agentSessionKey?: string; config?: OpenClawConfig; + sandboxed?: boolean; }): AnyAgentTool { return { label: "Session Status", @@ -185,18 +189,70 @@ export function createSessionStatusTool(opts?: { execute: async (_toolCallId, args) => { const params = args as Record; const cfg = opts?.config ?? loadConfig(); - const { mainKey, alias } = resolveMainSessionAlias(cfg); + const { mainKey, alias, effectiveRequesterKey } = resolveSandboxedSessionToolContext({ + cfg, + agentSessionKey: opts?.agentSessionKey, + sandboxed: opts?.sandboxed, + }); const a2aPolicy = createAgentToAgentPolicy(cfg); + const requesterAgentId = resolveAgentIdFromSessionKey( + opts?.agentSessionKey ?? 
effectiveRequesterKey, + ); + const visibilityRequesterKey = effectiveRequesterKey.trim(); + const usesLegacyMainAlias = alias === mainKey; + const isLegacyMainVisibilityKey = (sessionKey: string) => { + const trimmed = sessionKey.trim(); + return usesLegacyMainAlias && (trimmed === "main" || trimmed === mainKey); + }; + const resolveVisibilityMainSessionKey = (sessionAgentId: string) => { + const requesterParsed = parseAgentSessionKey(visibilityRequesterKey); + if ( + resolveAgentIdFromSessionKey(visibilityRequesterKey) === sessionAgentId && + (requesterParsed?.rest === mainKey || isLegacyMainVisibilityKey(visibilityRequesterKey)) + ) { + return visibilityRequesterKey; + } + return buildAgentMainSessionKey({ + agentId: sessionAgentId, + mainKey, + }); + }; + const normalizeVisibilityTargetSessionKey = (sessionKey: string, sessionAgentId: string) => { + const trimmed = sessionKey.trim(); + if (!trimmed) { + return trimmed; + } + if (trimmed.startsWith("agent:")) { + const parsed = parseAgentSessionKey(trimmed); + if (parsed?.rest === mainKey) { + return resolveVisibilityMainSessionKey(sessionAgentId); + } + return trimmed; + } + // Preserve legacy bare main keys for requester tree checks. + if (isLegacyMainVisibilityKey(trimmed)) { + return resolveVisibilityMainSessionKey(sessionAgentId); + } + return trimmed; + }; + const visibilityGuard = + opts?.sandboxed === true + ? await createSessionVisibilityGuard({ + action: "status", + requesterSessionKey: visibilityRequesterKey, + visibility: resolveEffectiveSessionToolsVisibility({ + cfg, + sandboxed: true, + }), + a2aPolicy, + }) + : null; const requestedKeyParam = readStringParam(params, "sessionKey"); let requestedKeyRaw = requestedKeyParam ?? opts?.agentSessionKey; if (!requestedKeyRaw?.trim()) { throw new Error("sessionKey required"); } - - const requesterAgentId = resolveAgentIdFromSessionKey( - opts?.agentSessionKey ?? 
requestedKeyRaw, - ); const ensureAgentAccess = (targetAgentId: string) => { if (targetAgentId === requesterAgentId) { return; @@ -213,7 +269,14 @@ export function createSessionStatusTool(opts?: { }; if (requestedKeyRaw.startsWith("agent:")) { - ensureAgentAccess(resolveAgentIdFromSessionKey(requestedKeyRaw)); + const requestedAgentId = resolveAgentIdFromSessionKey(requestedKeyRaw); + ensureAgentAccess(requestedAgentId); + const access = visibilityGuard?.check( + normalizeVisibilityTargetSessionKey(requestedKeyRaw, requestedAgentId), + ); + if (access && !access.allowed) { + throw new Error(access.error); + } } const isExplicitAgentKey = requestedKeyRaw.startsWith("agent:"); @@ -258,6 +321,15 @@ export function createSessionStatusTool(opts?: { throw new Error(`Unknown ${kind}: ${requestedKeyRaw}`); } + if (visibilityGuard && !requestedKeyRaw.startsWith("agent:")) { + const access = visibilityGuard.check( + normalizeVisibilityTargetSessionKey(resolved.key, agentId), + ); + if (!access.allowed) { + throw new Error(access.error); + } + } + const configured = resolveDefaultModelForAgent({ cfg, agentId }); const modelRaw = readStringParam(params, "model"); let changedModel = false; diff --git a/src/agents/tools/sessions-access.ts b/src/agents/tools/sessions-access.ts index 6574c2296cf..47bd0806f7b 100644 --- a/src/agents/tools/sessions-access.ts +++ b/src/agents/tools/sessions-access.ts @@ -14,7 +14,7 @@ export type AgentToAgentPolicy = { isAllowed: (requesterAgentId: string, targetAgentId: string) => boolean; }; -export type SessionAccessAction = "history" | "send" | "list"; +export type SessionAccessAction = "history" | "send" | "list" | "status"; export type SessionAccessResult = | { allowed: true } @@ -130,6 +130,9 @@ function actionPrefix(action: SessionAccessAction): string { if (action === "send") { return "Session send"; } + if (action === "status") { + return "Session status"; + } return "Session list"; } @@ -140,6 +143,9 @@ function a2aDisabledMessage(action: 
SessionAccessAction): string { if (action === "send") { return "Agent-to-agent messaging is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent sends."; } + if (action === "status") { + return "Agent-to-agent status is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent access."; + } return "Agent-to-agent listing is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent visibility."; } @@ -150,6 +156,9 @@ function a2aDeniedMessage(action: SessionAccessAction): string { if (action === "send") { return "Agent-to-agent messaging denied by tools.agentToAgent.allow."; } + if (action === "status") { + return "Agent-to-agent status denied by tools.agentToAgent.allow."; + } return "Agent-to-agent listing denied by tools.agentToAgent.allow."; } @@ -160,6 +169,9 @@ function crossVisibilityMessage(action: SessionAccessAction): string { if (action === "send") { return "Session send visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access."; } + if (action === "status") { + return "Session status visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access."; + } return "Session list visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access."; } diff --git a/src/agents/tools/sessions-helpers.ts b/src/agents/tools/sessions-helpers.ts index 5b5f94699c6..e638438758c 100644 --- a/src/agents/tools/sessions-helpers.ts +++ b/src/agents/tools/sessions-helpers.ts @@ -166,9 +166,9 @@ export function extractAssistantText(message: unknown): string | undefined { normalizeText: (text) => text.trim(), }) ?? 
""; const stopReason = (message as { stopReason?: unknown }).stopReason; - const errorMessage = (message as { errorMessage?: unknown }).errorMessage; - const errorContext = - stopReason === "error" || (typeof errorMessage === "string" && Boolean(errorMessage.trim())); + // Gate on stopReason only — a non-error response with a stale/background errorMessage + // should not have its content rewritten with error templates (#13935). + const errorContext = stopReason === "error"; return joined ? sanitizeUserFacingText(joined, { errorContext }) : undefined; } diff --git a/src/agents/tools/sessions.test.ts b/src/agents/tools/sessions.test.ts index aa831027f68..ce849e45d07 100644 --- a/src/agents/tools/sessions.test.ts +++ b/src/agents/tools/sessions.test.ts @@ -199,6 +199,16 @@ describe("extractAssistantText", () => { "Firebase downgraded us to the free Spark plan. Check whether billing should be re-enabled.", ); }); + + it("preserves successful turns with stale background errorMessage", () => { + const message = { + role: "assistant", + stopReason: "end_turn", + errorMessage: "insufficient credits for embedding model", + content: [{ type: "text", text: "Handle payment required errors in your API." 
}], + }; + expect(extractAssistantText(message)).toBe("Handle payment required errors in your API."); + }); }); describe("resolveAnnounceTarget", () => { diff --git a/src/agents/tools/web-fetch.cf-markdown.test.ts b/src/agents/tools/web-fetch.cf-markdown.test.ts index e235177a309..f22dc10df52 100644 --- a/src/agents/tools/web-fetch.cf-markdown.test.ts +++ b/src/agents/tools/web-fetch.cf-markdown.test.ts @@ -114,7 +114,7 @@ describe("web_fetch Cloudflare Markdown for Agents", () => { sandboxed: false, runtimeFirecrawl: { active: false, - apiKeySource: "secretRef", + apiKeySource: "secretRef", // pragma: allowlist secret diagnostics: [], }, }); diff --git a/src/agents/tools/web-tools.enabled-defaults.test.ts b/src/agents/tools/web-tools.enabled-defaults.test.ts index ad3345a3e06..c416804fa11 100644 --- a/src/agents/tools/web-tools.enabled-defaults.test.ts +++ b/src/agents/tools/web-tools.enabled-defaults.test.ts @@ -652,7 +652,7 @@ describe("web_search Perplexity lazy resolution", () => { web: { search: { provider: "gemini", - gemini: { apiKey: "gemini-config-test" }, + gemini: { apiKey: "gemini-config-test" }, // pragma: allowlist secret perplexity: perplexityConfig as { apiKey?: string; baseUrl?: string; model?: string }, }, }, diff --git a/src/auto-reply/reply/agent-runner-execution.ts b/src/auto-reply/reply/agent-runner-execution.ts index 2f6c27519b0..bdbd68ac2e4 100644 --- a/src/auto-reply/reply/agent-runner-execution.ts +++ b/src/auto-reply/reply/agent-runner-execution.ts @@ -6,8 +6,10 @@ import { getCliSessionId } from "../../agents/cli-session.js"; import { runWithModelFallback } from "../../agents/model-fallback.js"; import { isCliProvider } from "../../agents/model-selection.js"; import { + BILLING_ERROR_USER_MESSAGE, isCompactionFailureError, isContextOverflowError, + isBillingErrorMessage, isLikelyContextOverflowError, isTransientHttpError, sanitizeUserFacingText, @@ -514,8 +516,9 @@ export async function runAgentTurnWithFallback(params: { break; } catch 
(err) { const message = err instanceof Error ? err.message : String(err); - const isContextOverflow = isLikelyContextOverflowError(message); - const isCompactionFailure = isCompactionFailureError(message); + const isBilling = isBillingErrorMessage(message); + const isContextOverflow = !isBilling && isLikelyContextOverflowError(message); + const isCompactionFailure = !isBilling && isCompactionFailureError(message); const isSessionCorruption = /function call turn comes immediately after/i.test(message); const isRoleOrderingError = /incorrect role information|roles must alternate/i.test(message); const isTransientHttp = isTransientHttpError(message); @@ -610,11 +613,13 @@ export async function runAgentTurnWithFallback(params: { ? sanitizeUserFacingText(message, { errorContext: true }) : message; const trimmedMessage = safeMessage.replace(/\.\s*$/, ""); - const fallbackText = isContextOverflow - ? "⚠️ Context overflow — prompt too large for this model. Try a shorter message or a larger-context model." - : isRoleOrderingError - ? "⚠️ Message ordering conflict - please try again. If this persists, use /new to start a fresh session." - : `⚠️ Agent failed before reply: ${trimmedMessage}.\nLogs: openclaw logs --follow`; + const fallbackText = isBilling + ? BILLING_ERROR_USER_MESSAGE + : isContextOverflow + ? "⚠️ Context overflow — prompt too large for this model. Try a shorter message or a larger-context model." + : isRoleOrderingError + ? "⚠️ Message ordering conflict - please try again. If this persists, use /new to start a fresh session." 
+ : `⚠️ Agent failed before reply: ${trimmedMessage}.\nLogs: openclaw logs --follow`; return { kind: "final", diff --git a/src/auto-reply/reply/agent-runner-payloads.test.ts b/src/auto-reply/reply/agent-runner-payloads.test.ts index 94088b2b5b8..26f23d7a42c 100644 --- a/src/auto-reply/reply/agent-runner-payloads.test.ts +++ b/src/auto-reply/reply/agent-runner-payloads.test.ts @@ -169,6 +169,50 @@ describe("buildReplyPayloads media filter integration", () => { expect(replyPayloads).toHaveLength(0); }); + it("drops all final payloads when block pipeline streamed successfully", async () => { + const pipeline: Parameters[0]["blockReplyPipeline"] = { + didStream: () => true, + isAborted: () => false, + hasSentPayload: () => false, + enqueue: () => {}, + flush: async () => {}, + stop: () => {}, + hasBuffered: () => false, + }; + // shouldDropFinalPayloads short-circuits to [] when the pipeline streamed + // without aborting, so hasSentPayload is never reached. + const { replyPayloads } = await buildReplyPayloads({ + ...baseParams, + blockStreamingEnabled: true, + blockReplyPipeline: pipeline, + replyToMode: "all", + payloads: [{ text: "response", replyToId: "post-123" }], + }); + + expect(replyPayloads).toHaveLength(0); + }); + + it("deduplicates final payloads against directly sent block keys regardless of replyToId", async () => { + // When block streaming is not active but directlySentBlockKeys has entries + // (e.g. from pre-tool flush), the key should match even if replyToId differs. 
+ const { createBlockReplyContentKey } = await import("./block-reply-pipeline.js"); + const directlySentBlockKeys = new Set(); + directlySentBlockKeys.add( + createBlockReplyContentKey({ text: "response", replyToId: "post-1" }), + ); + + const { replyPayloads } = await buildReplyPayloads({ + ...baseParams, + blockStreamingEnabled: false, + blockReplyPipeline: null, + directlySentBlockKeys, + replyToMode: "off", + payloads: [{ text: "response" }], + }); + + expect(replyPayloads).toHaveLength(0); + }); + it("does not suppress same-target replies when accountId differs", async () => { const { replyPayloads } = await buildReplyPayloads({ ...baseParams, diff --git a/src/auto-reply/reply/agent-runner-payloads.ts b/src/auto-reply/reply/agent-runner-payloads.ts index 263dea9fd54..9e89c921407 100644 --- a/src/auto-reply/reply/agent-runner-payloads.ts +++ b/src/auto-reply/reply/agent-runner-payloads.ts @@ -5,7 +5,7 @@ import type { OriginatingChannelType } from "../templating.js"; import { SILENT_REPLY_TOKEN } from "../tokens.js"; import type { ReplyPayload } from "../types.js"; import { formatBunFetchSocketError, isBunFetchSocketError } from "./agent-runner-utils.js"; -import { createBlockReplyPayloadKey, type BlockReplyPipeline } from "./block-reply-pipeline.js"; +import { createBlockReplyContentKey, type BlockReplyPipeline } from "./block-reply-pipeline.js"; import { resolveOriginAccountId, resolveOriginMessageProvider, @@ -213,7 +213,7 @@ export async function buildReplyPayloads(params: { ) : params.directlySentBlockKeys?.size ? mediaFilteredPayloads.filter( - (payload) => !params.directlySentBlockKeys!.has(createBlockReplyPayloadKey(payload)), + (payload) => !params.directlySentBlockKeys!.has(createBlockReplyContentKey(payload)), ) : mediaFilteredPayloads; const replyPayloads = suppressMessagingToolReplies ? 
[] : filteredPayloads; diff --git a/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts b/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts index 659ccfe7951..14731dbb0ff 100644 --- a/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts +++ b/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts @@ -1628,3 +1628,72 @@ describe("runReplyAgent transient HTTP retry", () => { expect(payload?.text).toContain("Recovered response"); }); }); + +describe("runReplyAgent billing error classification", () => { + // Regression guard for the runner-level catch block in runAgentTurnWithFallback. + // Billing errors from providers like OpenRouter can contain token/size wording that + // matches context overflow heuristics. This test verifies the final user-visible + // message is the billing-specific one, not the "Context overflow" fallback. + it("returns billing message for mixed-signal error (billing text + overflow patterns)", async () => { + runEmbeddedPiAgentMock.mockRejectedValueOnce( + new Error("402 Payment Required: request token limit exceeded for this billing plan"), + ); + + const typing = createMockTypingController(); + const sessionCtx = { + Provider: "telegram", + MessageSid: "msg", + } as unknown as TemplateContext; + const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings; + const followupRun = { + prompt: "hello", + summaryLine: "hello", + enqueuedAt: Date.now(), + run: { + sessionId: "session", + sessionKey: "main", + messageProvider: "telegram", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + config: {}, + skillsSnapshot: {}, + provider: "anthropic", + model: "claude", + thinkLevel: "low", + verboseLevel: "off", + elevatedLevel: "off", + bashElevated: { + enabled: false, + allowed: false, + defaultLevel: "off", + }, + timeoutMs: 1_000, + blockReplyBreak: "message_end", + }, + } as unknown as FollowupRun; + + const result = await runReplyAgent({ + commandBody: "hello", + followupRun, + queueKey: 
"main", + resolvedQueue, + shouldSteer: false, + shouldFollowup: false, + isActive: false, + isStreaming: false, + typing, + sessionCtx, + defaultModel: "anthropic/claude", + resolvedVerboseLevel: "off", + isNewSession: false, + blockStreamingEnabled: false, + resolvedBlockStreamingBreak: "message_end", + shouldInjectGroupIntro: false, + typingMode: "instant", + }); + + const payload = Array.isArray(result) ? result[0] : result; + expect(payload?.text).toContain("billing error"); + expect(payload?.text).not.toContain("Context overflow"); + }); +}); diff --git a/src/auto-reply/reply/block-reply-pipeline.test.ts b/src/auto-reply/reply/block-reply-pipeline.test.ts new file mode 100644 index 00000000000..92564033df5 --- /dev/null +++ b/src/auto-reply/reply/block-reply-pipeline.test.ts @@ -0,0 +1,79 @@ +import { describe, expect, it } from "vitest"; +import { + createBlockReplyContentKey, + createBlockReplyPayloadKey, + createBlockReplyPipeline, +} from "./block-reply-pipeline.js"; + +describe("createBlockReplyPayloadKey", () => { + it("produces different keys for payloads differing only by replyToId", () => { + const a = createBlockReplyPayloadKey({ text: "hello world", replyToId: "post-1" }); + const b = createBlockReplyPayloadKey({ text: "hello world", replyToId: "post-2" }); + const c = createBlockReplyPayloadKey({ text: "hello world" }); + expect(a).not.toBe(b); + expect(a).not.toBe(c); + }); + + it("produces different keys for payloads with different text", () => { + const a = createBlockReplyPayloadKey({ text: "hello" }); + const b = createBlockReplyPayloadKey({ text: "world" }); + expect(a).not.toBe(b); + }); + + it("produces different keys for payloads with different media", () => { + const a = createBlockReplyPayloadKey({ text: "hello", mediaUrl: "file:///a.png" }); + const b = createBlockReplyPayloadKey({ text: "hello", mediaUrl: "file:///b.png" }); + expect(a).not.toBe(b); + }); + + it("trims whitespace from text for key comparison", () => { + const a = 
createBlockReplyPayloadKey({ text: " hello " }); + const b = createBlockReplyPayloadKey({ text: "hello" }); + expect(a).toBe(b); + }); +}); + +describe("createBlockReplyContentKey", () => { + it("produces the same key for payloads differing only by replyToId", () => { + const a = createBlockReplyContentKey({ text: "hello world", replyToId: "post-1" }); + const b = createBlockReplyContentKey({ text: "hello world", replyToId: "post-2" }); + const c = createBlockReplyContentKey({ text: "hello world" }); + expect(a).toBe(b); + expect(a).toBe(c); + }); +}); + +describe("createBlockReplyPipeline dedup with threading", () => { + it("keeps separate deliveries for same text with different replyToId", async () => { + const sent: Array<{ text?: string; replyToId?: string }> = []; + const pipeline = createBlockReplyPipeline({ + onBlockReply: async (payload) => { + sent.push({ text: payload.text, replyToId: payload.replyToId }); + }, + timeoutMs: 5000, + }); + + pipeline.enqueue({ text: "response text", replyToId: "thread-root-1" }); + pipeline.enqueue({ text: "response text", replyToId: undefined }); + await pipeline.flush(); + + expect(sent).toEqual([ + { text: "response text", replyToId: "thread-root-1" }, + { text: "response text", replyToId: undefined }, + ]); + }); + + it("hasSentPayload matches regardless of replyToId", async () => { + const pipeline = createBlockReplyPipeline({ + onBlockReply: async () => {}, + timeoutMs: 5000, + }); + + pipeline.enqueue({ text: "response text", replyToId: "thread-root-1" }); + await pipeline.flush(); + + // Final payload with no replyToId should be recognized as already sent + expect(pipeline.hasSentPayload({ text: "response text" })).toBe(true); + expect(pipeline.hasSentPayload({ text: "response text", replyToId: "other-id" })).toBe(true); + }); +}); diff --git a/src/auto-reply/reply/block-reply-pipeline.ts b/src/auto-reply/reply/block-reply-pipeline.ts index 752c70a1da2..9ce85334238 100644 --- 
a/src/auto-reply/reply/block-reply-pipeline.ts +++ b/src/auto-reply/reply/block-reply-pipeline.ts @@ -48,6 +48,19 @@ export function createBlockReplyPayloadKey(payload: ReplyPayload): string { }); } +export function createBlockReplyContentKey(payload: ReplyPayload): string { + const text = payload.text?.trim() ?? ""; + const mediaList = payload.mediaUrls?.length + ? payload.mediaUrls + : payload.mediaUrl + ? [payload.mediaUrl] + : []; + // Content-only key used for final-payload suppression after block streaming. + // This intentionally ignores replyToId so a streamed threaded payload and the + // later final payload still collapse when they carry the same content. + return JSON.stringify({ text, mediaList }); +} + const withTimeout = async ( promise: Promise, timeoutMs: number, @@ -80,6 +93,7 @@ export function createBlockReplyPipeline(params: { }): BlockReplyPipeline { const { onBlockReply, timeoutMs, coalescing, buffer } = params; const sentKeys = new Set(); + const sentContentKeys = new Set(); const pendingKeys = new Set(); const seenKeys = new Set(); const bufferedKeys = new Set(); @@ -95,6 +109,7 @@ export function createBlockReplyPipeline(params: { return; } const payloadKey = createBlockReplyPayloadKey(payload); + const contentKey = createBlockReplyContentKey(payload); if (!bypassSeenCheck) { if (seenKeys.has(payloadKey)) { return; @@ -130,6 +145,7 @@ export function createBlockReplyPipeline(params: { return; } sentKeys.add(payloadKey); + sentContentKeys.add(contentKey); didStream = true; }) .catch((err) => { @@ -238,8 +254,8 @@ export function createBlockReplyPipeline(params: { didStream: () => didStream, isAborted: () => aborted, hasSentPayload: (payload) => { - const payloadKey = createBlockReplyPayloadKey(payload); - return sentKeys.has(payloadKey); + const payloadKey = createBlockReplyContentKey(payload); + return sentContentKeys.has(payloadKey); }, }; } diff --git a/src/auto-reply/reply/directive-handling.model-picker.ts 
b/src/auto-reply/reply/directive-handling.model-picker.ts index 0c2bcaf61e6..46c892dab0f 100644 --- a/src/auto-reply/reply/directive-handling.model-picker.ts +++ b/src/auto-reply/reply/directive-handling.model-picker.ts @@ -19,6 +19,7 @@ const MODEL_PICK_PROVIDER_PREFERENCE = [ "zai", "openrouter", "opencode", + "opencode-go", "github-copilot", "groq", "cerebras", diff --git a/src/auto-reply/reply/reply-delivery.ts b/src/auto-reply/reply/reply-delivery.ts index acf04e73a3e..cacd6b083cb 100644 --- a/src/auto-reply/reply/reply-delivery.ts +++ b/src/auto-reply/reply/reply-delivery.ts @@ -2,7 +2,7 @@ import { logVerbose } from "../../globals.js"; import { SILENT_REPLY_TOKEN } from "../tokens.js"; import type { BlockReplyContext, ReplyPayload } from "../types.js"; import type { BlockReplyPipeline } from "./block-reply-pipeline.js"; -import { createBlockReplyPayloadKey } from "./block-reply-pipeline.js"; +import { createBlockReplyContentKey } from "./block-reply-pipeline.js"; import { parseReplyDirectives } from "./reply-directives.js"; import { applyReplyTagsToPayload, isRenderablePayload } from "./reply-payloads.js"; import type { TypingSignaler } from "./typing-mode.js"; @@ -128,7 +128,7 @@ export function createBlockReplyDeliveryHandler(params: { } else if (params.blockStreamingEnabled) { // Send directly when flushing before tool execution (no pipeline but streaming enabled). // Track sent key to avoid duplicate in final payloads. - params.directlySentBlockKeys.add(createBlockReplyPayloadKey(blockPayload)); + params.directlySentBlockKeys.add(createBlockReplyContentKey(blockPayload)); await params.onBlockReply(blockPayload); } // When streaming is disabled entirely, blocks are accumulated in final text instead. 
diff --git a/src/browser/proxy-files.test.ts b/src/browser/proxy-files.test.ts new file mode 100644 index 00000000000..1d7ea9566bb --- /dev/null +++ b/src/browser/proxy-files.test.ts @@ -0,0 +1,54 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { afterEach, beforeEach, describe, expect, it } from "vitest"; +import { MEDIA_MAX_BYTES } from "../media/store.js"; +import { createTempHomeEnv, type TempHomeEnv } from "../test-utils/temp-home.js"; +import { persistBrowserProxyFiles } from "./proxy-files.js"; + +describe("persistBrowserProxyFiles", () => { + let tempHome: TempHomeEnv; + + beforeEach(async () => { + tempHome = await createTempHomeEnv("openclaw-browser-proxy-files-"); + }); + + afterEach(async () => { + await tempHome.restore(); + }); + + it("persists browser proxy files under the shared media store", async () => { + const sourcePath = "/tmp/proxy-file.txt"; + const mapping = await persistBrowserProxyFiles([ + { + path: sourcePath, + base64: Buffer.from("hello from browser proxy").toString("base64"), + mimeType: "text/plain", + }, + ]); + + const savedPath = mapping.get(sourcePath); + expect(typeof savedPath).toBe("string"); + expect(path.normalize(savedPath ?? "")).toContain( + `${path.sep}.openclaw${path.sep}media${path.sep}browser${path.sep}`, + ); + await expect(fs.readFile(savedPath ?? 
"", "utf8")).resolves.toBe("hello from browser proxy"); + }); + + it("rejects browser proxy files that exceed the shared media size limit", async () => { + const oversized = Buffer.alloc(MEDIA_MAX_BYTES + 1, 0x41); + + await expect( + persistBrowserProxyFiles([ + { + path: "/tmp/oversized.bin", + base64: oversized.toString("base64"), + mimeType: "application/octet-stream", + }, + ]), + ).rejects.toThrow("Media exceeds 5MB limit"); + + await expect( + fs.stat(path.join(tempHome.home, ".openclaw", "media", "browser")), + ).rejects.toThrow(); + }); +}); diff --git a/src/browser/proxy-files.ts b/src/browser/proxy-files.ts index b18820a4594..1d39d71a09e 100644 --- a/src/browser/proxy-files.ts +++ b/src/browser/proxy-files.ts @@ -13,7 +13,7 @@ export async function persistBrowserProxyFiles(files: BrowserProxyFile[] | undef const mapping = new Map(); for (const file of files) { const buffer = Buffer.from(file.base64, "base64"); - const saved = await saveMediaBuffer(buffer, file.mimeType, "browser", buffer.byteLength); + const saved = await saveMediaBuffer(buffer, file.mimeType, "browser"); mapping.set(file.path, saved.path); } return mapping; diff --git a/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts b/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts index e98351cfa61..943c8a8ba9b 100644 --- a/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts +++ b/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts @@ -1,4 +1,4 @@ -import { describe, vi } from "vitest"; +import { describe, expect, it, vi } from "vitest"; import type { ReplyPayload } from "../../../auto-reply/types.js"; import { installSendPayloadContractSuite, @@ -34,4 +34,92 @@ describe("whatsappOutbound sendPayload", () => { chunking: { mode: "split", longTextLength: 5000, maxChunkLength: 4000 }, createHarness, }); + + it("trims leading whitespace for direct text sends", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); + + await 
whatsappOutbound.sendText!({ + cfg: {}, + to: "5511999999999@c.us", + text: "\n \thello", + deps: { sendWhatsApp }, + }); + + expect(sendWhatsApp).toHaveBeenCalledWith("5511999999999@c.us", "hello", { + verbose: false, + cfg: {}, + accountId: undefined, + gifPlayback: undefined, + }); + }); + + it("trims leading whitespace for direct media captions", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); + + await whatsappOutbound.sendMedia!({ + cfg: {}, + to: "5511999999999@c.us", + text: "\n \tcaption", + mediaUrl: "/tmp/test.png", + deps: { sendWhatsApp }, + }); + + expect(sendWhatsApp).toHaveBeenCalledWith("5511999999999@c.us", "caption", { + verbose: false, + cfg: {}, + mediaUrl: "/tmp/test.png", + mediaLocalRoots: undefined, + accountId: undefined, + gifPlayback: undefined, + }); + }); + + it("trims leading whitespace for sendPayload text and caption delivery", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); + + await whatsappOutbound.sendPayload!({ + cfg: {}, + to: "5511999999999@c.us", + text: "", + payload: { text: "\n\nhello" }, + deps: { sendWhatsApp }, + }); + await whatsappOutbound.sendPayload!({ + cfg: {}, + to: "5511999999999@c.us", + text: "", + payload: { text: "\n\ncaption", mediaUrl: "/tmp/test.png" }, + deps: { sendWhatsApp }, + }); + + expect(sendWhatsApp).toHaveBeenNthCalledWith(1, "5511999999999@c.us", "hello", { + verbose: false, + cfg: {}, + accountId: undefined, + gifPlayback: undefined, + }); + expect(sendWhatsApp).toHaveBeenNthCalledWith(2, "5511999999999@c.us", "caption", { + verbose: false, + cfg: {}, + mediaUrl: "/tmp/test.png", + mediaLocalRoots: undefined, + accountId: undefined, + gifPlayback: undefined, + }); + }); + + it("skips whitespace-only text payloads", async () => { + const sendWhatsApp = vi.fn(); + + const result = await whatsappOutbound.sendPayload!({ + cfg: {}, + to: "5511999999999@c.us", + text: "", + payload: { text: "\n \t" }, + 
deps: { sendWhatsApp }, + }); + + expect(result).toEqual({ channel: "whatsapp", messageId: "" }); + expect(sendWhatsApp).not.toHaveBeenCalled(); + }); }); diff --git a/src/channels/plugins/outbound/whatsapp.ts b/src/channels/plugins/outbound/whatsapp.ts index e5de15241ae..58004676e6e 100644 --- a/src/channels/plugins/outbound/whatsapp.ts +++ b/src/channels/plugins/outbound/whatsapp.ts @@ -5,6 +5,10 @@ import { resolveWhatsAppOutboundTarget } from "../../../whatsapp/resolve-outboun import type { ChannelOutboundAdapter } from "../types.js"; import { sendTextMediaPayload } from "./direct-text-media.js"; +function trimLeadingWhitespace(text: string | undefined): string { + return text?.trimStart() ?? ""; +} + export const whatsappOutbound: ChannelOutboundAdapter = { deliveryMode: "gateway", chunker: chunkText, @@ -13,12 +17,32 @@ export const whatsappOutbound: ChannelOutboundAdapter = { pollMaxOptions: 12, resolveTarget: ({ to, allowFrom, mode }) => resolveWhatsAppOutboundTarget({ to, allowFrom, mode }), - sendPayload: async (ctx) => - await sendTextMediaPayload({ channel: "whatsapp", ctx, adapter: whatsappOutbound }), + sendPayload: async (ctx) => { + const text = trimLeadingWhitespace(ctx.payload.text); + const hasMedia = Boolean(ctx.payload.mediaUrl) || (ctx.payload.mediaUrls?.length ?? 0) > 0; + if (!text && !hasMedia) { + return { channel: "whatsapp", messageId: "" }; + } + return await sendTextMediaPayload({ + channel: "whatsapp", + ctx: { + ...ctx, + payload: { + ...ctx.payload, + text, + }, + }, + adapter: whatsappOutbound, + }); + }, sendText: async ({ cfg, to, text, accountId, deps, gifPlayback }) => { + const normalizedText = trimLeadingWhitespace(text); + if (!normalizedText) { + return { channel: "whatsapp", messageId: "" }; + } const send = deps?.sendWhatsApp ?? 
(await import("../../../web/outbound.js")).sendMessageWhatsApp; - const result = await send(to, text, { + const result = await send(to, normalizedText, { verbose: false, cfg, accountId: accountId ?? undefined, @@ -27,9 +51,10 @@ export const whatsappOutbound: ChannelOutboundAdapter = { return { channel: "whatsapp", ...result }; }, sendMedia: async ({ cfg, to, text, mediaUrl, mediaLocalRoots, accountId, deps, gifPlayback }) => { + const normalizedText = trimLeadingWhitespace(text); const send = deps?.sendWhatsApp ?? (await import("../../../web/outbound.js")).sendMessageWhatsApp; - const result = await send(to, text, { + const result = await send(to, normalizedText, { verbose: false, cfg, mediaUrl, diff --git a/src/cli/daemon-cli.coverage.test.ts b/src/cli/daemon-cli.coverage.test.ts index d897eee11cc..8faf44cdde3 100644 --- a/src/cli/daemon-cli.coverage.test.ts +++ b/src/cli/daemon-cli.coverage.test.ts @@ -10,7 +10,7 @@ const resolveGatewayProgramArguments = vi.fn(async (_opts?: unknown) => ({ const serviceInstall = vi.fn().mockResolvedValue(undefined); const serviceUninstall = vi.fn().mockResolvedValue(undefined); const serviceStop = vi.fn().mockResolvedValue(undefined); -const serviceRestart = vi.fn().mockResolvedValue(undefined); +const serviceRestart = vi.fn().mockResolvedValue({ outcome: "completed" }); const serviceIsLoaded = vi.fn().mockResolvedValue(false); const serviceReadCommand = vi.fn().mockResolvedValue(null); const serviceReadRuntime = vi.fn().mockResolvedValue({ status: "running" }); @@ -48,20 +48,24 @@ vi.mock("../daemon/program-args.js", () => ({ resolveGatewayProgramArguments: (opts: unknown) => resolveGatewayProgramArguments(opts), })); -vi.mock("../daemon/service.js", () => ({ - resolveGatewayService: () => ({ - label: "LaunchAgent", - loadedText: "loaded", - notLoadedText: "not loaded", - install: serviceInstall, - uninstall: serviceUninstall, - stop: serviceStop, - restart: serviceRestart, - isLoaded: serviceIsLoaded, - readCommand: 
serviceReadCommand, - readRuntime: serviceReadRuntime, - }), -})); +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: () => ({ + label: "LaunchAgent", + loadedText: "loaded", + notLoadedText: "not loaded", + install: serviceInstall, + uninstall: serviceUninstall, + stop: serviceStop, + restart: serviceRestart, + isLoaded: serviceIsLoaded, + readCommand: serviceReadCommand, + readRuntime: serviceReadRuntime, + }), + }; +}); vi.mock("../daemon/legacy.js", () => ({ findLegacyGatewayServices: async () => [], diff --git a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts index a785cde4d9b..188e7090915 100644 --- a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts @@ -65,7 +65,7 @@ describe("runServiceRestart config pre-flight (#35862)", () => { service.restart.mockClear(); service.isLoaded.mockResolvedValue(true); service.readCommand.mockResolvedValue({ environment: {} }); - service.restart.mockResolvedValue(undefined); + service.restart.mockResolvedValue({ outcome: "completed" }); vi.unstubAllEnvs(); vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); @@ -163,7 +163,7 @@ describe("runServiceStart config pre-flight (#35862)", () => { service.isLoaded.mockClear(); service.restart.mockClear(); service.isLoaded.mockResolvedValue(true); - service.restart.mockResolvedValue(undefined); + service.restart.mockResolvedValue({ outcome: "completed" }); }); it("aborts start when config is invalid", async () => { diff --git a/src/cli/daemon-cli/lifecycle-core.test.ts b/src/cli/daemon-cli/lifecycle-core.test.ts index 8fa7ded1bde..ff66bd17653 100644 --- a/src/cli/daemon-cli/lifecycle-core.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.test.ts @@ -40,11 +40,12 @@ vi.mock("../../runtime.js", () => ({ })); let runServiceRestart: 
typeof import("./lifecycle-core.js").runServiceRestart; +let runServiceStart: typeof import("./lifecycle-core.js").runServiceStart; let runServiceStop: typeof import("./lifecycle-core.js").runServiceStop; describe("runServiceRestart token drift", () => { beforeAll(async () => { - ({ runServiceRestart, runServiceStop } = await import("./lifecycle-core.js")); + ({ runServiceRestart, runServiceStart, runServiceStop } = await import("./lifecycle-core.js")); }); beforeEach(() => { @@ -64,7 +65,7 @@ describe("runServiceRestart token drift", () => { service.readCommand.mockResolvedValue({ environment: { OPENCLAW_GATEWAY_TOKEN: "service-token" }, }); - service.restart.mockResolvedValue(undefined); + service.restart.mockResolvedValue({ outcome: "completed" }); vi.unstubAllEnvs(); vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); @@ -176,4 +177,41 @@ describe("runServiceRestart token drift", () => { expect(payload.result).toBe("restarted"); expect(payload.message).toContain("unmanaged process"); }); + + it("skips restart health checks when restart is only scheduled", async () => { + const postRestartCheck = vi.fn(async () => {}); + service.restart.mockResolvedValue({ outcome: "scheduled" }); + + const result = await runServiceRestart({ + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true }, + postRestartCheck, + }); + + expect(result).toBe(true); + expect(postRestartCheck).not.toHaveBeenCalled(); + const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); + const payload = JSON.parse(jsonLine ?? 
"{}") as { result?: string; message?: string }; + expect(payload.result).toBe("scheduled"); + expect(payload.message).toBe("restart scheduled, gateway will restart momentarily"); + }); + + it("emits scheduled when service start routes through a scheduled restart", async () => { + service.restart.mockResolvedValue({ outcome: "scheduled" }); + + await runServiceStart({ + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true }, + }); + + expect(service.isLoaded).toHaveBeenCalledTimes(1); + const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); + const payload = JSON.parse(jsonLine ?? "{}") as { result?: string; message?: string }; + expect(payload.result).toBe("scheduled"); + expect(payload.message).toBe("restart scheduled, gateway will restart momentarily"); + }); }); diff --git a/src/cli/daemon-cli/lifecycle-core.ts b/src/cli/daemon-cli/lifecycle-core.ts index 75bba03b418..a1ad4073584 100644 --- a/src/cli/daemon-cli/lifecycle-core.ts +++ b/src/cli/daemon-cli/lifecycle-core.ts @@ -3,6 +3,8 @@ import { readBestEffortConfig, readConfigFileSnapshot } from "../../config/confi import { formatConfigIssueLines } from "../../config/issue-format.js"; import { resolveIsNixMode } from "../../config/paths.js"; import { checkTokenDrift } from "../../daemon/service-audit.js"; +import type { GatewayServiceRestartResult } from "../../daemon/service-types.js"; +import { describeGatewayServiceRestart } from "../../daemon/service.js"; import type { GatewayService } from "../../daemon/service.js"; import { renderSystemdUnavailableHints } from "../../daemon/systemd-hints.js"; import { isSystemdUserServiceAvailable } from "../../daemon/systemd.js"; @@ -223,7 +225,20 @@ export async function runServiceStart(params: { } try { - await params.service.restart({ env: process.env, stdout }); + const restartResult = await params.service.restart({ env: process.env, stdout }); + const restartStatus = 
describeGatewayServiceRestart(params.serviceNoun, restartResult); + if (restartStatus.scheduled) { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, loaded), + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return; + } } catch (err) { const hints = params.renderStartHints(); fail(`${params.serviceNoun} start failed: ${String(err)}`, hints); @@ -317,7 +332,7 @@ export async function runServiceRestart(params: { renderStartHints: () => string[]; opts?: DaemonLifecycleOptions; checkTokenDrift?: boolean; - postRestartCheck?: (ctx: RestartPostCheckContext) => Promise; + postRestartCheck?: (ctx: RestartPostCheckContext) => Promise; onNotLoaded?: (ctx: NotLoadedActionContext) => Promise; }): Promise { const json = Boolean(params.opts?.json); @@ -402,11 +417,42 @@ export async function runServiceRestart(params: { } try { + let restartResult: GatewayServiceRestartResult = { outcome: "completed" }; if (loaded) { - await params.service.restart({ env: process.env, stdout }); + restartResult = await params.service.restart({ env: process.env, stdout }); + } + let restartStatus = describeGatewayServiceRestart(params.serviceNoun, restartResult); + if (restartStatus.scheduled) { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, loaded), + warnings: warnings.length ? 
warnings : undefined, + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return true; } if (params.postRestartCheck) { - await params.postRestartCheck({ json, stdout, warnings, fail }); + const postRestartResult = await params.postRestartCheck({ json, stdout, warnings, fail }); + if (postRestartResult) { + restartStatus = describeGatewayServiceRestart(params.serviceNoun, postRestartResult); + if (restartStatus.scheduled) { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, loaded), + warnings: warnings.length ? warnings : undefined, + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return true; + } + } } let restarted = loaded; if (loaded) { diff --git a/src/cli/daemon-cli/lifecycle.test.ts b/src/cli/daemon-cli/lifecycle.test.ts index f1e87fc4938..61899e4e78c 100644 --- a/src/cli/daemon-cli/lifecycle.test.ts +++ b/src/cli/daemon-cli/lifecycle.test.ts @@ -132,6 +132,7 @@ describe("runDaemonRestart health checks", () => { programArguments: ["openclaw", "gateway", "--port", "18789"], environment: {}, }); + service.restart.mockResolvedValue({ outcome: "completed" }); runServiceRestart.mockImplementation(async (params: RestartParams) => { const fail = (message: string, hints?: string[]) => { @@ -204,6 +205,25 @@ describe("runDaemonRestart health checks", () => { expect(waitForGatewayHealthyRestart).toHaveBeenCalledTimes(2); }); + it("skips stale-pid retry health checks when the retry restart is only scheduled", async () => { + const unhealthy: RestartHealthSnapshot = { + healthy: false, + staleGatewayPids: [1993], + runtime: { status: "stopped" }, + portUsage: { port: 18789, status: "busy", listeners: [], hints: [] }, + }; + waitForGatewayHealthyRestart.mockResolvedValueOnce(unhealthy); + terminateStaleGatewayPids.mockResolvedValue([1993]); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + const result = 
await runDaemonRestart({ json: true }); + + expect(result).toBe(true); + expect(terminateStaleGatewayPids).toHaveBeenCalledWith([1993]); + expect(service.restart).toHaveBeenCalledTimes(1); + expect(waitForGatewayHealthyRestart).toHaveBeenCalledTimes(1); + }); + it("fails restart when gateway remains unhealthy", async () => { const unhealthy: RestartHealthSnapshot = { healthy: false, diff --git a/src/cli/daemon-cli/lifecycle.ts b/src/cli/daemon-cli/lifecycle.ts index 7fa7396d0b0..2b0775b0c48 100644 --- a/src/cli/daemon-cli/lifecycle.ts +++ b/src/cli/daemon-cli/lifecycle.ts @@ -286,7 +286,10 @@ export async function runDaemonRestart(opts: DaemonLifecycleOptions = {}): Promi } await terminateStaleGatewayPids(health.staleGatewayPids); - await service.restart({ env: process.env, stdout }); + const retryRestart = await service.restart({ env: process.env, stdout }); + if (retryRestart.outcome === "scheduled") { + return retryRestart; + } health = await waitForGatewayHealthyRestart({ service, port: restartPort, diff --git a/src/cli/devices-cli.ts b/src/cli/devices-cli.ts index 0344bf7967a..143d27b20ff 100644 --- a/src/cli/devices-cli.ts +++ b/src/cli/devices-cli.ts @@ -9,7 +9,7 @@ import { } from "../infra/device-pairing.js"; import { formatTimeAgo } from "../infra/format-time/format-relative.ts"; import { defaultRuntime } from "../runtime.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js"; import { withProgress } from "./progress.js"; @@ -224,7 +224,7 @@ export function registerDevicesCli(program: Command) { return; } if (list.pending?.length) { - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log( `${theme.heading("Pending")} ${theme.muted(`(${list.pending.length})`)}`, ); @@ -251,7 +251,7 @@ export function registerDevicesCli(program: Command) { ); } if (list.paired?.length) { - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log( `${theme.heading("Paired")} ${theme.muted(`(${list.paired.length})`)}`, ); diff --git a/src/cli/directory-cli.ts b/src/cli/directory-cli.ts index d11867fbb40..1a9949f224a 100644 --- a/src/cli/directory-cli.ts +++ b/src/cli/directory-cli.ts @@ -6,7 +6,7 @@ import { danger } from "../globals.js"; import { resolveMessageChannelSelection } from "../infra/outbound/channel-selection.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { formatHelpExamples } from "./help-format.js"; @@ -48,7 +48,7 @@ function printDirectoryList(params: { return; } - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log(`${theme.heading(params.title)} ${theme.muted(`(${params.entries.length})`)}`); defaultRuntime.log( renderTable({ @@ -166,7 +166,7 @@ export function registerDirectoryCli(program: Command) { defaultRuntime.log(theme.muted("Not available.")); return; } - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log(theme.heading("Self")); defaultRuntime.log( renderTable({ diff --git a/src/cli/dns-cli.ts b/src/cli/dns-cli.ts index de6e6c0dec0..f9781d2f38e 100644 --- a/src/cli/dns-cli.ts +++ b/src/cli/dns-cli.ts @@ -7,7 +7,7 @@ import { pickPrimaryTailnetIPv4, pickPrimaryTailnetIPv6 } from "../infra/tailnet import { getWideAreaZonePath, resolveWideAreaDiscoveryDomain } from "../infra/widearea-dns.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; type RunOpts = { allowFailure?: boolean; inherit?: boolean }; @@ -133,7 +133,7 @@ export function registerDnsCli(program: Command) { } const zonePath = getWideAreaZonePath(wideAreaDomain); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log(theme.heading("DNS setup")); defaultRuntime.log( renderTable({ diff --git a/src/cli/exec-approvals-cli.ts b/src/cli/exec-approvals-cli.ts index 07fe5a462a6..c243fb7a0aa 100644 --- a/src/cli/exec-approvals-cli.ts +++ b/src/cli/exec-approvals-cli.ts @@ -10,7 +10,7 @@ import { import { formatTimeAgo } from "../infra/format-time/format-relative.ts"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { isRich, theme } from "../terminal/theme.js"; import { describeUnknownError } from "./gateway-cli/shared.js"; import { callGatewayFromCli } from "./gateway-rpc.js"; @@ -151,7 +151,7 @@ function renderApprovalsSnapshot(snapshot: ExecApprovalsSnapshot, targetLabel: s const rich = isRich(); const heading = (text: string) => (rich ? 
theme.heading(text) : text); const muted = (text: string) => (rich ? theme.muted(text) : text); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const file = snapshot.file ?? { version: 1 }; const defaults = file.defaults ?? {}; diff --git a/src/cli/hooks-cli.ts b/src/cli/hooks-cli.ts index 7ea0de030da..85aa0d0e4b9 100644 --- a/src/cli/hooks-cli.ts +++ b/src/cli/hooks-cli.ts @@ -22,7 +22,7 @@ import { resolveArchiveKind } from "../infra/archive.js"; import { buildPluginStatusReport } from "../plugins/status.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { resolveUserPath, shortenHomePath } from "../utils.js"; import { formatCliCommand } from "./command-format.js"; @@ -273,7 +273,7 @@ export function formatHooksList(report: HookStatusReport, opts: HooksListOptions } const eligible = hooks.filter((h) => h.eligible); - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const rows = hooks.map((hook) => { const missing = formatHookMissingSummary(hook); return { diff --git a/src/cli/nodes-cli/register.camera.ts b/src/cli/nodes-cli/register.camera.ts index 3bd7d1203dc..82cde2a35f3 100644 --- a/src/cli/nodes-cli/register.camera.ts +++ b/src/cli/nodes-cli/register.camera.ts @@ -1,6 +1,6 @@ import type { Command } from "commander"; import { defaultRuntime } from "../../runtime.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { shortenHomePath } from "../../utils.js"; import { type CameraFacing, @@ -71,7 +71,7 @@ export function registerNodesCameraCommands(nodes: Command) { } const { heading, muted } = getNodesTheme(); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const rows = devices.map((device) => ({ Name: typeof device.name === "string" ? device.name : "Unknown Camera", Position: typeof device.position === "string" ? device.position : muted("unspecified"), diff --git a/src/cli/nodes-cli/register.pairing.ts b/src/cli/nodes-cli/register.pairing.ts index b20c989c1c7..fd649fae754 100644 --- a/src/cli/nodes-cli/register.pairing.ts +++ b/src/cli/nodes-cli/register.pairing.ts @@ -1,5 +1,6 @@ import type { Command } from "commander"; import { defaultRuntime } from "../../runtime.js"; +import { getTerminalTableWidth } from "../../terminal/table.js"; import { getNodesTheme, runNodesCommand } from "./cli-utils.js"; import { parsePairingList } from "./format.js"; import { renderPendingPairingRequestsTable } from "./pairing-render.js"; @@ -25,7 +26,7 @@ export function registerNodesPairingCommands(nodes: Command) { return; } const { heading, warn, muted } = getNodesTheme(); - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const now = Date.now(); const rendered = renderPendingPairingRequestsTable({ pending, diff --git a/src/cli/nodes-cli/register.status.ts b/src/cli/nodes-cli/register.status.ts index 4dcb3be8e38..03e00cbbec4 100644 --- a/src/cli/nodes-cli/register.status.ts +++ b/src/cli/nodes-cli/register.status.ts @@ -1,7 +1,7 @@ import type { Command } from "commander"; import { formatTimeAgo } from "../../infra/format-time/format-relative.ts"; import { defaultRuntime } from "../../runtime.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { shortenHomeInString } from "../../utils.js"; import { parseDurationMs } from "../parse-duration.js"; import { getNodesTheme, runNodesCommand } from "./cli-utils.js"; @@ -112,7 +112,7 @@ export function registerNodesStatusCommands(nodes: Command) { const obj: Record = typeof result === "object" && result !== null ? result : {}; const { ok, warn, muted } = getNodesTheme(); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const now = Date.now(); const nodes = parseNodeList(result); const lastConnectedById = @@ -256,7 +256,7 @@ export function registerNodesStatusCommands(nodes: Command) { const status = `${paired ? ok("paired") : warn("unpaired")} · ${ connected ? ok("connected") : muted("disconnected") }`; - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const rows = [ { Field: "ID", Value: nodeId }, displayName ? 
{ Field: "Name", Value: displayName } : null, @@ -307,7 +307,7 @@ export function registerNodesStatusCommands(nodes: Command) { const result = await callGatewayCli("node.pair.list", opts, {}); const { pending, paired } = parsePairingList(result); const { heading, muted, warn } = getNodesTheme(); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const now = Date.now(); const hasFilters = connectedOnly || sinceMs !== undefined; const pendingRows = hasFilters ? [] : pending; diff --git a/src/cli/pairing-cli.ts b/src/cli/pairing-cli.ts index 6974663bd49..7c8cbc750ea 100644 --- a/src/cli/pairing-cli.ts +++ b/src/cli/pairing-cli.ts @@ -10,7 +10,7 @@ import { } from "../pairing/pairing-store.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { formatCliCommand } from "./command-format.js"; @@ -88,7 +88,7 @@ export function registerPairingCli(program: Command) { return; } const idLabel = resolvePairingIdLabel(channel); - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log( `${theme.heading("Pairing requests")} ${theme.muted(`(${requests.length})`)}`, ); diff --git a/src/cli/plugins-cli.ts b/src/cli/plugins-cli.ts index 36e198c71a2..e77d7026875 100644 --- a/src/cli/plugins-cli.ts +++ b/src/cli/plugins-cli.ts @@ -19,7 +19,7 @@ import { resolveUninstallDirectoryTarget, uninstallPlugin } from "../plugins/uni import { updateNpmInstalledPlugins } from "../plugins/update.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { resolveUserPath, shortenHomeInString, shortenHomePath } from "../utils.js"; import { looksLikeLocalInstallSpec } from "./install-spec.js"; @@ -404,7 +404,7 @@ export function registerPluginsCli(program: Command) { ); if (!opts.verbose) { - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const sourceRoots = resolvePluginSourceRoots({ workspaceDir: report.workspaceDir, }); diff --git a/src/cli/program/register.onboard.ts b/src/cli/program/register.onboard.ts index 6a5bd98aea0..4dd285e63c1 100644 --- a/src/cli/program/register.onboard.ts +++ b/src/cli/program/register.onboard.ts @@ -168,6 +168,7 @@ export function registerOnboardCommand(program: Command) { togetherApiKey: opts.togetherApiKey as string | undefined, huggingfaceApiKey: opts.huggingfaceApiKey as string | undefined, opencodeZenApiKey: opts.opencodeZenApiKey as string | undefined, + opencodeGoApiKey: opts.opencodeGoApiKey as string | undefined, xaiApiKey: opts.xaiApiKey as string | undefined, litellmApiKey: opts.litellmApiKey as string | undefined, volcengineApiKey: opts.volcengineApiKey as string | undefined, diff --git a/src/cli/skills-cli.format.ts b/src/cli/skills-cli.format.ts index 5f6dcfdcd2a..9e39eeca30e 100644 --- a/src/cli/skills-cli.format.ts +++ b/src/cli/skills-cli.format.ts @@ -1,5 +1,6 @@ import type { SkillStatusEntry, SkillStatusReport } from "../agents/skills-status.js"; -import { renderTable } from "../terminal/table.js"; +import { stripAnsi } from "../terminal/ansi.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { shortenHomePath } from "../utils.js"; import { formatCliCommand } from "./command-format.js"; @@ -38,8 +39,39 @@ function formatSkillStatus(skill: SkillStatusEntry): string { return theme.error("✗ missing"); } +function normalizeSkillEmoji(emoji?: string): string { + return (emoji ?? 
"📦").replaceAll("\uFE0E", "\uFE0F"); +} + +const REMAINING_ESC_SEQUENCE_REGEX = new RegExp( + String.raw`\u001b(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])`, + "g", +); +const JSON_CONTROL_CHAR_REGEX = new RegExp(String.raw`[\u0000-\u001f\u007f-\u009f]`, "g"); + +function sanitizeJsonString(value: string): string { + return stripAnsi(value) + .replace(REMAINING_ESC_SEQUENCE_REGEX, "") + .replace(JSON_CONTROL_CHAR_REGEX, ""); +} + +function sanitizeJsonValue(value: unknown): unknown { + if (typeof value === "string") { + return sanitizeJsonString(value); + } + if (Array.isArray(value)) { + return value.map((item) => sanitizeJsonValue(item)); + } + if (value && typeof value === "object") { + return Object.fromEntries( + Object.entries(value).map(([key, entryValue]) => [key, sanitizeJsonValue(entryValue)]), + ); + } + return value; +} + function formatSkillName(skill: SkillStatusEntry): string { - const emoji = skill.emoji ?? "📦"; + const emoji = normalizeSkillEmoji(skill.emoji); return `${emoji} ${theme.command(skill.name)}`; } @@ -67,7 +99,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti const skills = opts.eligible ? report.skills.filter((s) => s.eligible) : report.skills; if (opts.json) { - const jsonReport = { + const jsonReport = sanitizeJsonValue({ workspaceDir: report.workspaceDir, managedSkillsDir: report.managedSkillsDir, skills: skills.map((s) => ({ @@ -83,7 +115,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti homepage: s.homepage, missing: s.missing, })), - }; + }); return JSON.stringify(jsonReport, null, 2); } @@ -95,7 +127,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti } const eligible = skills.filter((s) => s.eligible); - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const rows = skills.map((skill) => { const missing = formatSkillMissingSummary(skill); return { @@ -109,7 +141,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti const columns = [ { key: "Status", header: "Status", minWidth: 10 }, - { key: "Skill", header: "Skill", minWidth: 18, flex: true }, + { key: "Skill", header: "Skill", minWidth: 22 }, { key: "Description", header: "Description", minWidth: 24, flex: true }, { key: "Source", header: "Source", minWidth: 10 }, ]; @@ -150,11 +182,11 @@ export function formatSkillInfo( } if (opts.json) { - return JSON.stringify(skill, null, 2); + return JSON.stringify(sanitizeJsonValue(skill), null, 2); } const lines: string[] = []; - const emoji = skill.emoji ?? "📦"; + const emoji = normalizeSkillEmoji(skill.emoji); const status = skill.eligible ? theme.success("✓ Ready") : skill.disabled @@ -247,7 +279,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp if (opts.json) { return JSON.stringify( - { + sanitizeJsonValue({ summary: { total: report.skills.length, eligible: eligible.length, @@ -263,7 +295,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp missing: s.missing, install: s.install, })), - }, + }), null, 2, ); @@ -282,7 +314,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp lines.push(""); lines.push(theme.heading("Ready to use:")); for (const skill of eligible) { - const emoji = skill.emoji ?? "📦"; + const emoji = normalizeSkillEmoji(skill.emoji); lines.push(` ${emoji} ${skill.name}`); } } @@ -291,7 +323,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp lines.push(""); lines.push(theme.heading("Missing requirements:")); for (const skill of missingReqs) { - const emoji = skill.emoji ?? 
"📦"; + const emoji = normalizeSkillEmoji(skill.emoji); const missing = formatSkillMissingSummary(skill); lines.push(` ${emoji} ${skill.name} ${theme.muted(`(${missing})`)}`); } diff --git a/src/cli/skills-cli.test.ts b/src/cli/skills-cli.test.ts index 37323e7f21d..27031fc0fdf 100644 --- a/src/cli/skills-cli.test.ts +++ b/src/cli/skills-cli.test.ts @@ -148,6 +148,18 @@ describe("skills-cli", () => { expect(output).toContain("Any binaries"); expect(output).toContain("API_KEY"); }); + + it("normalizes text-presentation emoji selectors in info output", () => { + const report = createMockReport([ + createMockSkill({ + name: "info-emoji", + emoji: "🎛\uFE0E", + }), + ]); + + const output = formatSkillInfo(report, "info-emoji", {}); + expect(output).toContain("🎛️"); + }); }); describe("formatSkillsCheck", () => { @@ -170,6 +182,22 @@ describe("skills-cli", () => { expect(output).toContain("go"); // missing binary expect(output).toContain("npx clawhub"); }); + + it("normalizes text-presentation emoji selectors in check output", () => { + const report = createMockReport([ + createMockSkill({ name: "ready-emoji", emoji: "🎛\uFE0E", eligible: true }), + createMockSkill({ + name: "missing-emoji", + emoji: "🎙\uFE0E", + eligible: false, + missing: { bins: ["ffmpeg"], anyBins: [], env: [], config: [], os: [] }, + }), + ]); + + const output = formatSkillsCheck(report, {}); + expect(output).toContain("🎛️ ready-emoji"); + expect(output).toContain("🎙️ missing-emoji"); + }); }); describe("JSON output", () => { @@ -215,5 +243,46 @@ describe("skills-cli", () => { const parsed = JSON.parse(output) as Record; assert(parsed); }); + + it("sanitizes ANSI and C1 controls in skills list JSON output", () => { + const report = createMockReport([ + createMockSkill({ + name: "json-skill", + emoji: "\u001b[31m📧\u001b[0m\u009f", + description: "desc\u0093\u001b[2J\u001b[33m colored\u001b[0m", + }), + ]); + + const output = formatSkillsList(report, { json: true }); + const parsed = JSON.parse(output) 
as { + skills: Array<{ emoji: string; description: string }>; + }; + + expect(parsed.skills[0]?.emoji).toBe("📧"); + expect(parsed.skills[0]?.description).toBe("desc colored"); + expect(output).not.toContain("\\u001b"); + }); + + it("sanitizes skills info JSON output", () => { + const report = createMockReport([ + createMockSkill({ + name: "info-json", + emoji: "\u001b[31m🎙\u001b[0m\u009f", + description: "hi\u0091", + homepage: "https://example.com/\u0092docs", + }), + ]); + + const output = formatSkillInfo(report, "info-json", { json: true }); + const parsed = JSON.parse(output) as { + emoji: string; + description: string; + homepage: string; + }; + + expect(parsed.emoji).toBe("🎙"); + expect(parsed.description).toBe("hi"); + expect(parsed.homepage).toBe("https://example.com/docs"); + }); }); }); diff --git a/src/cli/update-cli/status.ts b/src/cli/update-cli/status.ts index 5cf2bf8af49..8266a1e5f21 100644 --- a/src/cli/update-cli/status.ts +++ b/src/cli/update-cli/status.ts @@ -10,7 +10,7 @@ import { } from "../../infra/update-channels.js"; import { checkUpdateStatus } from "../../infra/update-check.js"; import { defaultRuntime } from "../../runtime.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { theme } from "../../terminal/theme.js"; import { parseTimeoutMsOrExit, resolveUpdateRoot, type UpdateStatusOptions } from "./shared.js"; @@ -89,7 +89,7 @@ export async function updateStatusCommand(opts: UpdateStatusOptions): Promise { "volcengine-api-key", "byteplus-api-key", "vllm", + "opencode-go", + "ollama", ]) { expect(options.some((opt) => opt.value === value)).toBe(true); } @@ -80,4 +82,27 @@ describe("buildAuthChoiceOptions", () => { expect(chutesGroup).toBeDefined(); expect(chutesGroup?.options.some((opt) => opt.value === "chutes")).toBe(true); }); + + it("groups OpenCode Zen and Go under one OpenCode entry", () => { + const { groups } = buildAuthChoiceGroups({ + 
store: EMPTY_STORE, + includeSkip: false, + }); + const openCodeGroup = groups.find((group) => group.value === "opencode"); + + expect(openCodeGroup).toBeDefined(); + expect(openCodeGroup?.options.some((opt) => opt.value === "opencode-zen")).toBe(true); + expect(openCodeGroup?.options.some((opt) => opt.value === "opencode-go")).toBe(true); + }); + + it("shows Ollama in grouped provider selection", () => { + const { groups } = buildAuthChoiceGroups({ + store: EMPTY_STORE, + includeSkip: false, + }); + const ollamaGroup = groups.find((group) => group.value === "ollama"); + + expect(ollamaGroup).toBeDefined(); + expect(ollamaGroup?.options.some((opt) => opt.value === "ollama")).toBe(true); + }); }); diff --git a/src/commands/auth-choice-options.ts b/src/commands/auth-choice-options.ts index 23e9b80d958..077fee024b9 100644 --- a/src/commands/auth-choice-options.ts +++ b/src/commands/auth-choice-options.ts @@ -47,6 +47,12 @@ const AUTH_CHOICE_GROUP_DEFS: { hint: "Local/self-hosted OpenAI-compatible", choices: ["vllm"], }, + { + value: "ollama", + label: "Ollama", + hint: "Cloud and local open models", + choices: ["ollama"], + }, { value: "minimax", label: "MiniMax", @@ -138,10 +144,10 @@ const AUTH_CHOICE_GROUP_DEFS: { choices: ["ai-gateway-api-key"], }, { - value: "opencode-zen", - label: "OpenCode Zen", - hint: "API key", - choices: ["opencode-zen"], + value: "opencode", + label: "OpenCode", + hint: "Shared API key for Zen + Go catalogs", + choices: ["opencode-zen", "opencode-go"], }, { value: "xiaomi", @@ -199,6 +205,8 @@ const PROVIDER_AUTH_CHOICE_OPTION_HINTS: Partial> = { "venice-api-key": "Privacy-focused inference (uncensored models)", "together-api-key": "Access to Llama, DeepSeek, Qwen, and more open models", "huggingface-api-key": "Inference Providers — OpenAI-compatible chat", + "opencode-zen": "Shared OpenCode key; curated Zen catalog", + "opencode-go": "Shared OpenCode key; Kimi/GLM/MiniMax Go catalog", }; const PROVIDER_AUTH_CHOICE_OPTION_LABELS: Partial> 
= { @@ -206,6 +214,8 @@ const PROVIDER_AUTH_CHOICE_OPTION_LABELS: Partial> = "moonshot-api-key-cn": "Kimi API key (.cn)", "kimi-code-api-key": "Kimi Code API key (subscription)", "cloudflare-ai-gateway-api-key": "Cloudflare AI Gateway", + "opencode-zen": "OpenCode Zen catalog", + "opencode-go": "OpenCode Go catalog", }; function buildProviderAuthChoiceOptions(): AuthChoiceOption[] { @@ -234,6 +244,11 @@ const BASE_AUTH_CHOICE_OPTIONS: ReadonlyArray = [ label: "vLLM (custom URL + model)", hint: "Local/self-hosted OpenAI-compatible server", }, + { + value: "ollama", + label: "Ollama", + hint: "Cloud and local open models", + }, ...buildProviderAuthChoiceOptions(), { value: "moonshot-api-key-cn", @@ -289,7 +304,7 @@ const BASE_AUTH_CHOICE_OPTIONS: ReadonlyArray = [ { value: "apiKey", label: "Anthropic API key" }, { value: "opencode-zen", - label: "OpenCode Zen (multi-model proxy)", + label: "OpenCode Zen catalog", hint: "Claude, GPT, Gemini via opencode.ai/zen", }, { value: "minimax-api", label: "MiniMax M2.5" }, @@ -301,7 +316,7 @@ const BASE_AUTH_CHOICE_OPTIONS: ReadonlyArray = [ { value: "minimax-api-lightning", label: "MiniMax M2.5 Highspeed", - hint: "Official fast tier", + hint: "Official fast tier (legacy: Lightning)", }, { value: "qianfan-api-key", label: "Qianfan API key" }, { diff --git a/src/commands/auth-choice.apply.api-providers.ts b/src/commands/auth-choice.apply.api-providers.ts index 046a2e24893..9e7419f7fda 100644 --- a/src/commands/auth-choice.apply.api-providers.ts +++ b/src/commands/auth-choice.apply.api-providers.ts @@ -34,6 +34,8 @@ import { applyMoonshotConfigCn, applyMoonshotProviderConfig, applyMoonshotProviderConfigCn, + applyOpencodeGoConfig, + applyOpencodeGoProviderConfig, applyOpencodeZenConfig, applyOpencodeZenProviderConfig, applySyntheticConfig, @@ -68,6 +70,7 @@ import { setKimiCodingApiKey, setMistralApiKey, setMoonshotApiKey, + setOpencodeGoApiKey, setOpencodeZenApiKey, setSyntheticApiKey, setTogetherApiKey, @@ -84,6 +87,7 @@ 
import { setModelStudioApiKey, } from "./onboard-auth.js"; import type { AuthChoice, SecretInputMode } from "./onboard-types.js"; +import { OPENCODE_GO_DEFAULT_MODEL_REF } from "./opencode-go-model-default.js"; import { OPENCODE_ZEN_DEFAULT_MODEL } from "./opencode-zen-model-default.js"; import { detectZaiEndpoint } from "./zai-endpoint-detect.js"; @@ -104,6 +108,7 @@ const API_KEY_TOKEN_PROVIDER_AUTH_CHOICE: Record = { huggingface: "huggingface-api-key", mistral: "mistral-api-key", opencode: "opencode-zen", + "opencode-go": "opencode-go", kilocode: "kilocode-api-key", qianfan: "qianfan-api-key", }; @@ -240,20 +245,40 @@ const SIMPLE_API_KEY_PROVIDER_FLOWS: Partial + vi.fn(async ({ cfg }) => ({ + config: cfg, + defaultModelId: "qwen3.5:35b", + })), +); +const ensureOllamaModelPulled = vi.hoisted(() => vi.fn(async () => {})); +vi.mock("./ollama-setup.js", () => ({ + promptAndConfigureOllama, + ensureOllamaModelPulled, +})); + +function buildParams(overrides: Partial = {}): ApplyAuthChoiceParams { + return { + authChoice: "ollama", + config: {}, + prompter: {} as ApplyAuthChoiceParams["prompter"], + runtime: {} as ApplyAuthChoiceParams["runtime"], + setDefaultModel: false, + ...overrides, + }; +} + +describe("applyAuthChoiceOllama", () => { + it("returns agentModelOverride when setDefaultModel is false", async () => { + const config = { agents: { defaults: { model: { primary: "openai/gpt-4o-mini" } } } }; + promptAndConfigureOllama.mockResolvedValueOnce({ + config, + defaultModelId: "qwen2.5-coder:7b", + }); + + const result = await applyAuthChoiceOllama( + buildParams({ + config, + setDefaultModel: false, + }), + ); + + expect(result).toEqual({ + config, + agentModelOverride: "ollama/qwen2.5-coder:7b", + }); + // Pull is deferred — the wizard model picker handles it. 
+ expect(ensureOllamaModelPulled).not.toHaveBeenCalled(); + }); + + it("sets global default model and preserves fallbacks when setDefaultModel is true", async () => { + const config = { + agents: { + defaults: { + model: { + primary: "openai/gpt-4o-mini", + fallbacks: ["anthropic/claude-sonnet-4-5"], + }, + }, + }, + }; + promptAndConfigureOllama.mockResolvedValueOnce({ + config, + defaultModelId: "qwen2.5-coder:7b", + }); + + const result = await applyAuthChoiceOllama( + buildParams({ + config, + setDefaultModel: true, + }), + ); + + expect(result?.agentModelOverride).toBeUndefined(); + expect(result?.config.agents?.defaults?.model).toEqual({ + primary: "ollama/qwen2.5-coder:7b", + fallbacks: ["anthropic/claude-sonnet-4-5"], + }); + expect(ensureOllamaModelPulled).toHaveBeenCalledOnce(); + }); +}); diff --git a/src/commands/auth-choice.apply.ollama.ts b/src/commands/auth-choice.apply.ollama.ts new file mode 100644 index 00000000000..640b57431cf --- /dev/null +++ b/src/commands/auth-choice.apply.ollama.ts @@ -0,0 +1,31 @@ +import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; +import { ensureOllamaModelPulled, promptAndConfigureOllama } from "./ollama-setup.js"; +import { applyAgentDefaultModelPrimary } from "./onboard-auth.config-shared.js"; + +export async function applyAuthChoiceOllama( + params: ApplyAuthChoiceParams, +): Promise { + if (params.authChoice !== "ollama") { + return null; + } + + const { config, defaultModelId } = await promptAndConfigureOllama({ + cfg: params.config, + prompter: params.prompter, + agentDir: params.agentDir, + }); + + // Set an Ollama default so the model picker pre-selects an Ollama model. + const defaultModel = `ollama/${defaultModelId}`; + const configWithDefault = applyAgentDefaultModelPrimary(config, defaultModel); + + if (!params.setDefaultModel) { + // Defer pulling: the interactive wizard will show a model picker next, + // so avoid downloading a model the user may not choose. 
+ return { config, agentModelOverride: defaultModel }; + } + + await ensureOllamaModelPulled({ config: configWithDefault, prompter: params.prompter }); + + return { config: configWithDefault }; +} diff --git a/src/commands/auth-choice.apply.ts b/src/commands/auth-choice.apply.ts index e6dfa9ed52a..36591304da0 100644 --- a/src/commands/auth-choice.apply.ts +++ b/src/commands/auth-choice.apply.ts @@ -9,6 +9,7 @@ import { applyAuthChoiceGitHubCopilot } from "./auth-choice.apply.github-copilot import { applyAuthChoiceGoogleGeminiCli } from "./auth-choice.apply.google-gemini-cli.js"; import { applyAuthChoiceMiniMax } from "./auth-choice.apply.minimax.js"; import { applyAuthChoiceOAuth } from "./auth-choice.apply.oauth.js"; +import { applyAuthChoiceOllama } from "./auth-choice.apply.ollama.js"; import { applyAuthChoiceOpenAI } from "./auth-choice.apply.openai.js"; import { applyAuthChoiceQwenPortal } from "./auth-choice.apply.qwen-portal.js"; import { applyAuthChoiceVllm } from "./auth-choice.apply.vllm.js"; @@ -38,6 +39,7 @@ export async function applyAuthChoice( const handlers: Array<(p: ApplyAuthChoiceParams) => Promise> = [ applyAuthChoiceAnthropic, applyAuthChoiceVllm, + applyAuthChoiceOllama, applyAuthChoiceOpenAI, applyAuthChoiceOAuth, applyAuthChoiceApiProviders, diff --git a/src/commands/auth-choice.preferred-provider.ts b/src/commands/auth-choice.preferred-provider.ts index e56950ea711..7ebc0b24ea1 100644 --- a/src/commands/auth-choice.preferred-provider.ts +++ b/src/commands/auth-choice.preferred-provider.ts @@ -7,6 +7,7 @@ const PREFERRED_PROVIDER_BY_AUTH_CHOICE: Partial> = { token: "anthropic", apiKey: "anthropic", vllm: "vllm", + ollama: "ollama", "openai-codex": "openai-codex", "codex-cli": "openai-codex", chutes: "chutes", @@ -39,6 +40,7 @@ const PREFERRED_PROVIDER_BY_AUTH_CHOICE: Partial> = { "minimax-api-lightning": "minimax", minimax: "lmstudio", "opencode-zen": "opencode", + "opencode-go": "opencode-go", "xai-api-key": "xai", "litellm-api-key": 
"litellm", "qwen-portal": "qwen-portal", diff --git a/src/commands/auth-choice.test.ts b/src/commands/auth-choice.test.ts index 0431e558dac..6cdf32fa1d2 100644 --- a/src/commands/auth-choice.test.ts +++ b/src/commands/auth-choice.test.ts @@ -22,6 +22,7 @@ import { } from "./test-wizard-helpers.js"; type DetectZaiEndpoint = typeof import("./zai-endpoint-detect.js").detectZaiEndpoint; +type PromptAndConfigureOllama = typeof import("./ollama-setup.js").promptAndConfigureOllama; vi.mock("../providers/github-copilot-auth.js", () => ({ githubCopilotLoginCommand: vi.fn(async () => {}), @@ -44,6 +45,16 @@ vi.mock("./zai-endpoint-detect.js", () => ({ detectZaiEndpoint, })); +const promptAndConfigureOllama = vi.hoisted(() => + vi.fn(async ({ cfg }) => ({ + config: cfg, + defaultModelId: "qwen3.5:35b", + })), +); +vi.mock("./ollama-setup.js", () => ({ + promptAndConfigureOllama, +})); + type StoredAuthProfile = { key?: string; keyRef?: { source: string; provider: string; id: string }; @@ -131,6 +142,11 @@ describe("applyAuthChoice", () => { detectZaiEndpoint.mockResolvedValue(null); loginOpenAICodexOAuth.mockReset(); loginOpenAICodexOAuth.mockResolvedValue(null); + promptAndConfigureOllama.mockReset(); + promptAndConfigureOllama.mockImplementation(async ({ cfg }) => ({ + config: cfg, + defaultModelId: "qwen3.5:35b", + })); await lifecycle.cleanup(); activeStateDir = null; }); @@ -498,6 +514,15 @@ describe("applyAuthChoice", () => { profileId: "opencode:default", provider: "opencode", modelPrefix: "opencode/", + extraProfiles: ["opencode-go:default"], + }, + { + authChoice: "opencode-go", + tokenProvider: "opencode-go", + profileId: "opencode-go:default", + provider: "opencode-go", + modelPrefix: "opencode-go/", + extraProfiles: ["opencode:default"], }, { authChoice: "together-api-key", @@ -522,7 +547,7 @@ describe("applyAuthChoice", () => { }, ] as const)( "uses opts token for $authChoice without prompting", - async ({ authChoice, tokenProvider, profileId, provider, 
modelPrefix }) => { + async ({ authChoice, tokenProvider, profileId, provider, modelPrefix, extraProfiles }) => { await setupTempState(); const text = vi.fn(); @@ -554,6 +579,9 @@ describe("applyAuthChoice", () => { ), ).toBe(true); expect((await readAuthProfile(profileId))?.key).toBe(token); + for (const extraProfile of extraProfiles ?? []) { + expect((await readAuthProfile(extraProfile))?.key).toBe(token); + } }, ); @@ -805,14 +833,15 @@ describe("applyAuthChoice", () => { it("keeps existing default model for explicit provider keys when setDefaultModel=false", async () => { const scenarios: Array<{ - authChoice: "xai-api-key" | "opencode-zen"; + authChoice: "xai-api-key" | "opencode-zen" | "opencode-go"; token: string; promptMessage: string; existingPrimary: string; expectedOverride: string; profileId?: string; profileProvider?: string; - expectProviderConfigUndefined?: "opencode-zen"; + extraProfileId?: string; + expectProviderConfigUndefined?: "opencode" | "opencode-go" | "opencode-zen"; agentId?: string; }> = [ { @@ -828,10 +857,24 @@ describe("applyAuthChoice", () => { { authChoice: "opencode-zen", token: "sk-opencode-zen-test", - promptMessage: "Enter OpenCode Zen API key", + promptMessage: "Enter OpenCode API key", existingPrimary: "anthropic/claude-opus-4-5", expectedOverride: "opencode/claude-opus-4-6", - expectProviderConfigUndefined: "opencode-zen", + profileId: "opencode:default", + profileProvider: "opencode", + extraProfileId: "opencode-go:default", + expectProviderConfigUndefined: "opencode", + }, + { + authChoice: "opencode-go", + token: "sk-opencode-go-test", + promptMessage: "Enter OpenCode API key", + existingPrimary: "anthropic/claude-opus-4-5", + expectedOverride: "opencode-go/kimi-k2.5", + profileId: "opencode-go:default", + profileProvider: "opencode-go", + extraProfileId: "opencode:default", + expectProviderConfigUndefined: "opencode-go", }, ]; for (const scenario of scenarios) { @@ -863,6 +906,9 @@ describe("applyAuthChoice", () => { }); 
expect((await readAuthProfile(scenario.profileId))?.key).toBe(scenario.token); } + if (scenario.extraProfileId) { + expect((await readAuthProfile(scenario.extraProfileId))?.key).toBe(scenario.token); + } if (scenario.expectProviderConfigUndefined) { expect( result.config.models?.providers?.[scenario.expectProviderConfigUndefined], @@ -1320,6 +1366,7 @@ describe("resolvePreferredProviderForAuthChoice", () => { { authChoice: "github-copilot" as const, expectedProvider: "github-copilot" }, { authChoice: "qwen-portal" as const, expectedProvider: "qwen-portal" }, { authChoice: "mistral-api-key" as const, expectedProvider: "mistral" }, + { authChoice: "ollama" as const, expectedProvider: "ollama" }, { authChoice: "unknown" as AuthChoice, expectedProvider: undefined }, ] as const; for (const scenario of scenarios) { diff --git a/src/commands/configure.daemon.test.ts b/src/commands/configure.daemon.test.ts index 9a7aa76e0c8..11b54dc6b19 100644 --- a/src/commands/configure.daemon.test.ts +++ b/src/commands/configure.daemon.test.ts @@ -1,13 +1,22 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; -const withProgress = vi.hoisted(() => vi.fn(async (_opts, run) => run({ setLabel: vi.fn() }))); +const progressSetLabel = vi.hoisted(() => vi.fn()); +const withProgress = vi.hoisted(() => + vi.fn(async (_opts, run) => run({ setLabel: progressSetLabel })), +); const loadConfig = vi.hoisted(() => vi.fn()); const resolveGatewayInstallToken = vi.hoisted(() => vi.fn()); const buildGatewayInstallPlan = vi.hoisted(() => vi.fn()); const note = vi.hoisted(() => vi.fn()); const serviceIsLoaded = vi.hoisted(() => vi.fn(async () => false)); const serviceInstall = vi.hoisted(() => vi.fn(async () => {})); +const serviceRestart = vi.hoisted(() => + vi.fn<() => Promise<{ outcome: "completed" } | { outcome: "scheduled" }>>(async () => ({ + outcome: "completed", + })), +); const ensureSystemdUserLingerInteractive = vi.hoisted(() => vi.fn(async () => {})); +const select = vi.hoisted(() 
=> vi.fn(async () => "node")); vi.mock("../cli/progress.js", () => ({ withProgress, @@ -32,7 +41,7 @@ vi.mock("../terminal/note.js", () => ({ vi.mock("./configure.shared.js", () => ({ confirm: vi.fn(async () => true), - select: vi.fn(async () => "node"), + select, })); vi.mock("./daemon-runtime.js", () => ({ @@ -40,12 +49,17 @@ vi.mock("./daemon-runtime.js", () => ({ GATEWAY_DAEMON_RUNTIME_OPTIONS: [{ value: "node", label: "Node" }], })); -vi.mock("../daemon/service.js", () => ({ - resolveGatewayService: vi.fn(() => ({ - isLoaded: serviceIsLoaded, - install: serviceInstall, - })), -})); +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: vi.fn(() => ({ + isLoaded: serviceIsLoaded, + install: serviceInstall, + restart: serviceRestart, + })), + }; +}); vi.mock("./onboard-helpers.js", () => ({ guardCancel: (value: unknown) => value, @@ -60,8 +74,10 @@ const { maybeInstallDaemon } = await import("./configure.daemon.js"); describe("maybeInstallDaemon", () => { beforeEach(() => { vi.clearAllMocks(); + progressSetLabel.mockReset(); serviceIsLoaded.mockResolvedValue(false); serviceInstall.mockResolvedValue(undefined); + serviceRestart.mockResolvedValue({ outcome: "completed" }); loadConfig.mockReturnValue({}); resolveGatewayInstallToken.mockResolvedValue({ token: undefined, @@ -152,4 +168,19 @@ describe("maybeInstallDaemon", () => { expect(serviceInstall).toHaveBeenCalledTimes(1); }); + + it("shows restart scheduled when a loaded service defers restart handoff", async () => { + serviceIsLoaded.mockResolvedValue(true); + select.mockResolvedValueOnce("restart"); + serviceRestart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeInstallDaemon({ + runtime: { log: vi.fn(), error: vi.fn(), exit: vi.fn() }, + port: 18789, + }); + + expect(serviceRestart).toHaveBeenCalledTimes(1); + expect(serviceInstall).not.toHaveBeenCalled(); + 
expect(progressSetLabel).toHaveBeenLastCalledWith("Gateway service restart scheduled."); + }); }); diff --git a/src/commands/configure.daemon.ts b/src/commands/configure.daemon.ts index 4f943982a38..64272c9e2bc 100644 --- a/src/commands/configure.daemon.ts +++ b/src/commands/configure.daemon.ts @@ -1,6 +1,6 @@ import { withProgress } from "../cli/progress.js"; import { loadConfig } from "../config/config.js"; -import { resolveGatewayService } from "../daemon/service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "../daemon/service.js"; import { isNonFatalSystemdInstallProbeError } from "../daemon/systemd.js"; import type { RuntimeEnv } from "../runtime.js"; import { note } from "../terminal/note.js"; @@ -50,11 +50,13 @@ export async function maybeInstallDaemon(params: { { label: "Gateway service", indeterminate: true, delayMs: 0 }, async (progress) => { progress.setLabel("Restarting Gateway service…"); - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); - progress.setLabel("Gateway service restarted."); + progress.setLabel( + describeGatewayServiceRestart("Gateway", restartResult).progressMessage, + ); }, ); shouldCheckLinger = true; diff --git a/src/commands/doctor-config-analysis.ts b/src/commands/doctor-config-analysis.ts index dea3fa1b3f2..994bac5f863 100644 --- a/src/commands/doctor-config-analysis.ts +++ b/src/commands/doctor-config-analysis.ts @@ -105,18 +105,22 @@ export function noteOpencodeProviderOverrides(cfg: OpenClawConfig): void { if (providers["opencode-zen"]) { overrides.push("opencode-zen"); } + if (providers["opencode-go"]) { + overrides.push("opencode-go"); + } if (overrides.length === 0) { return; } const lines = overrides.flatMap((id) => { + const providerLabel = id === "opencode-go" ? "OpenCode Go" : "OpenCode Zen"; const providerEntry = providers[id]; const api = isRecord(providerEntry) && typeof providerEntry.api === "string" ? 
providerEntry.api : undefined; return [ - `- models.providers.${id} is set; this overrides the built-in OpenCode Zen catalog.`, + `- models.providers.${id} is set; this overrides the built-in ${providerLabel} catalog.`, api ? `- models.providers.${id}.api=${api}` : null, ].filter((line): line is string => Boolean(line)); }); @@ -124,7 +128,7 @@ export function noteOpencodeProviderOverrides(cfg: OpenClawConfig): void { lines.push( "- Remove these entries to restore per-model API routing + costs (then re-run onboarding if needed).", ); - note(lines.join("\n"), "OpenCode Zen"); + note(lines.join("\n"), "OpenCode"); } export function noteIncludeConfinementWarning(snapshot: { diff --git a/src/commands/doctor-gateway-daemon-flow.test.ts b/src/commands/doctor-gateway-daemon-flow.test.ts new file mode 100644 index 00000000000..02c0b885bb0 --- /dev/null +++ b/src/commands/doctor-gateway-daemon-flow.test.ts @@ -0,0 +1,194 @@ +import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; + +const service = vi.hoisted(() => ({ + isLoaded: vi.fn(), + readRuntime: vi.fn(), + restart: vi.fn(), + install: vi.fn(), + readCommand: vi.fn(), +})); +const note = vi.hoisted(() => vi.fn()); +const sleep = vi.hoisted(() => vi.fn(async () => {})); +const healthCommand = vi.hoisted(() => vi.fn(async () => {})); +const inspectPortUsage = vi.hoisted(() => vi.fn()); +const readLastGatewayErrorLine = vi.hoisted(() => vi.fn(async () => null)); + +vi.mock("../config/config.js", () => ({ + resolveGatewayPort: vi.fn(() => 18789), +})); + +vi.mock("../daemon/constants.js", () => ({ + resolveGatewayLaunchAgentLabel: vi.fn(() => "ai.openclaw.gateway"), + resolveNodeLaunchAgentLabel: vi.fn(() => "ai.openclaw.node"), +})); + +vi.mock("../daemon/diagnostics.js", () => ({ + readLastGatewayErrorLine, +})); + +vi.mock("../daemon/launchd.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isLaunchAgentListed: vi.fn(async () => false), + 
isLaunchAgentLoaded: vi.fn(async () => false), + launchAgentPlistExists: vi.fn(async () => false), + repairLaunchAgentBootstrap: vi.fn(async () => ({ ok: true })), + }; +}); + +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: () => service, + }; +}); + +vi.mock("../daemon/systemd-hints.js", () => ({ + renderSystemdUnavailableHints: vi.fn(() => []), +})); + +vi.mock("../daemon/systemd.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isSystemdUserServiceAvailable: vi.fn(async () => true), + }; +}); + +vi.mock("../infra/ports.js", () => ({ + inspectPortUsage, + formatPortDiagnostics: vi.fn(() => []), +})); + +vi.mock("../infra/wsl.js", () => ({ + isWSL: vi.fn(async () => false), +})); + +vi.mock("../terminal/note.js", () => ({ + note, +})); + +vi.mock("../utils.js", () => ({ + sleep, +})); + +vi.mock("./daemon-install-helpers.js", () => ({ + buildGatewayInstallPlan: vi.fn(), + gatewayInstallErrorHint: vi.fn(() => "hint"), +})); + +vi.mock("./doctor-format.js", () => ({ + buildGatewayRuntimeHints: vi.fn(() => []), + formatGatewayRuntimeSummary: vi.fn(() => null), +})); + +vi.mock("./gateway-install-token.js", () => ({ + resolveGatewayInstallToken: vi.fn(), +})); + +vi.mock("./health-format.js", () => ({ + formatHealthCheckFailure: vi.fn(() => "health failed"), +})); + +vi.mock("./health.js", () => ({ + healthCommand, +})); + +describe("maybeRepairGatewayDaemon", () => { + let maybeRepairGatewayDaemon: typeof import("./doctor-gateway-daemon-flow.js").maybeRepairGatewayDaemon; + const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + + beforeAll(async () => { + ({ maybeRepairGatewayDaemon } = await import("./doctor-gateway-daemon-flow.js")); + }); + + beforeEach(() => { + vi.clearAllMocks(); + service.isLoaded.mockResolvedValue(true); + service.readRuntime.mockResolvedValue({ status: 
"running" }); + service.restart.mockResolvedValue({ outcome: "completed" }); + inspectPortUsage.mockResolvedValue({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + }); + + afterEach(() => { + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", originalPlatformDescriptor); + } + }); + + function setPlatform(platform: NodeJS.Platform) { + if (!originalPlatformDescriptor) { + return; + } + Object.defineProperty(process, "platform", { + ...originalPlatformDescriptor, + value: platform, + }); + } + + function createPrompter(confirmImpl: (message: string) => boolean) { + return { + confirm: vi.fn(), + confirmRepair: vi.fn(), + confirmAggressive: vi.fn(), + confirmSkipInNonInteractive: vi.fn(async ({ message }: { message: string }) => + confirmImpl(message), + ), + select: vi.fn(), + shouldRepair: false, + shouldForce: false, + }; + } + + it("skips restart verification when a running service restart is only scheduled", async () => { + setPlatform("linux"); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeRepairGatewayDaemon({ + cfg: { gateway: {} }, + runtime: { log: vi.fn(), error: vi.fn(), exit: vi.fn() }, + prompter: createPrompter((message) => message === "Restart gateway service now?"), + options: { deep: false }, + gatewayDetailsMessage: "details", + healthOk: false, + }); + + expect(service.restart).toHaveBeenCalledTimes(1); + expect(note).toHaveBeenCalledWith( + "restart scheduled, gateway will restart momentarily", + "Gateway", + ); + expect(sleep).not.toHaveBeenCalled(); + expect(healthCommand).not.toHaveBeenCalled(); + }); + + it("skips start verification when a stopped service start is only scheduled", async () => { + setPlatform("linux"); + service.readRuntime.mockResolvedValue({ status: "stopped" }); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeRepairGatewayDaemon({ + cfg: { gateway: {} }, + runtime: { log: vi.fn(), error: vi.fn(), exit: 
vi.fn() }, + prompter: createPrompter((message) => message === "Start gateway service now?"), + options: { deep: false }, + gatewayDetailsMessage: "details", + healthOk: false, + }); + + expect(service.restart).toHaveBeenCalledTimes(1); + expect(note).toHaveBeenCalledWith( + "restart scheduled, gateway will restart momentarily", + "Gateway", + ); + expect(sleep).not.toHaveBeenCalled(); + expect(healthCommand).not.toHaveBeenCalled(); + }); +}); diff --git a/src/commands/doctor-gateway-daemon-flow.ts b/src/commands/doctor-gateway-daemon-flow.ts index 4fd8df3490b..c476efa615f 100644 --- a/src/commands/doctor-gateway-daemon-flow.ts +++ b/src/commands/doctor-gateway-daemon-flow.ts @@ -12,7 +12,7 @@ import { launchAgentPlistExists, repairLaunchAgentBootstrap, } from "../daemon/launchd.js"; -import { resolveGatewayService } from "../daemon/service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "../daemon/service.js"; import { renderSystemdUnavailableHints } from "../daemon/systemd-hints.js"; import { isSystemdUserServiceAvailable } from "../daemon/systemd.js"; import { formatPortDiagnostics, inspectPortUsage } from "../infra/ports.js"; @@ -235,11 +235,16 @@ export async function maybeRepairGatewayDaemon(params: { initialValue: true, }); if (start) { - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); - await sleep(1500); + const restartStatus = describeGatewayServiceRestart("Gateway", restartResult); + if (!restartStatus.scheduled) { + await sleep(1500); + } else { + note(restartStatus.message, "Gateway"); + } } } @@ -257,10 +262,15 @@ export async function maybeRepairGatewayDaemon(params: { initialValue: true, }); if (restart) { - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); + const restartStatus = describeGatewayServiceRestart("Gateway", restartResult); + if (restartStatus.scheduled) { + 
note(restartStatus.message, "Gateway"); + return; + } await sleep(1500); try { await healthCommand({ json: false, timeoutMs: 10_000 }, params.runtime); diff --git a/src/commands/doctor.warns-state-directory-is-missing.e2e.test.ts b/src/commands/doctor.warns-state-directory-is-missing.e2e.test.ts index 69c9da9d579..68d865996d2 100644 --- a/src/commands/doctor.warns-state-directory-is-missing.e2e.test.ts +++ b/src/commands/doctor.warns-state-directory-is-missing.e2e.test.ts @@ -41,6 +41,10 @@ describe("doctor command", () => { api: "openai-completions", baseUrl: "https://opencode.ai/zen/v1", }, + "opencode-go": { + api: "openai-completions", + baseUrl: "https://opencode.ai/zen/go/v1", + }, }, }, }, @@ -53,7 +57,9 @@ describe("doctor command", () => { const warned = note.mock.calls.some( ([message, title]) => - title === "OpenCode Zen" && String(message).includes("models.providers.opencode"), + title === "OpenCode" && + String(message).includes("models.providers.opencode") && + String(message).includes("models.providers.opencode-go"), ); expect(warned).toBe(true); }); diff --git a/src/commands/message-format.ts b/src/commands/message-format.ts index aafe570287c..8f4fe9bd08c 100644 --- a/src/commands/message-format.ts +++ b/src/commands/message-format.ts @@ -4,7 +4,7 @@ import type { OutboundDeliveryResult } from "../infra/outbound/deliver.js"; import { formatGatewaySummary, formatOutboundDeliverySummary } from "../infra/outbound/format.js"; import type { MessageActionRunResult } from "../infra/outbound/message-action-runner.js"; import { formatTargetDisplay } from "../infra/outbound/target-resolver.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { isRich, theme } from "../terminal/theme.js"; import { shortenText } from "./text-format.js"; @@ -257,7 +257,7 @@ export function formatMessageCliText(result: MessageActionRunResult): string[] { const muted = (text: string) => (rich ? 
theme.muted(text) : text); const heading = (text: string) => (rich ? theme.heading(text) : text); - const width = Math.max(60, (process.stdout.columns ?? 120) - 1); + const width = getTerminalTableWidth(); const opts: FormatOpts = { width }; if (result.handledBy === "dry-run") { diff --git a/src/commands/models/list.status-command.ts b/src/commands/models/list.status-command.ts index 59614e3f866..156860bb960 100644 --- a/src/commands/models/list.status-command.ts +++ b/src/commands/models/list.status-command.ts @@ -38,7 +38,7 @@ import { } from "../../infra/provider-usage.js"; import { getShellEnvAppliedKeys, shouldEnableShellEnvFallback } from "../../infra/shell-env.js"; import type { RuntimeEnv } from "../../runtime.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { colorize, theme } from "../../terminal/theme.js"; import { shortenHomePath } from "../../utils.js"; import { resolveProviderAuthOverview } from "./list.auth-overview.js"; @@ -631,7 +631,7 @@ export async function modelsStatusCommand( if (probeSummary.results.length === 0) { runtime.log(colorize(rich, theme.muted, "- none")); } else { - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const sorted = sortProbeResults(probeSummary.results); const statusColor = (status: string) => { if (status === "ok") { diff --git a/src/commands/ollama-setup.test.ts b/src/commands/ollama-setup.test.ts new file mode 100644 index 00000000000..124254c53b2 --- /dev/null +++ b/src/commands/ollama-setup.test.ts @@ -0,0 +1,444 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import type { RuntimeEnv } from "../runtime.js"; +import type { WizardPrompter } from "../wizard/prompts.js"; +import { + configureOllamaNonInteractive, + ensureOllamaModelPulled, + promptAndConfigureOllama, +} from "./ollama-setup.js"; + +const upsertAuthProfileWithLock = vi.hoisted(() => vi.fn(async () => {})); +vi.mock("../agents/auth-profiles.js", () => ({ + upsertAuthProfileWithLock, +})); + +const openUrlMock = vi.hoisted(() => vi.fn(async () => false)); +vi.mock("./onboard-helpers.js", async (importOriginal) => { + const original = await importOriginal(); + return { ...original, openUrl: openUrlMock }; +}); + +const isRemoteEnvironmentMock = vi.hoisted(() => vi.fn(() => false)); +vi.mock("./oauth-env.js", () => ({ + isRemoteEnvironment: isRemoteEnvironmentMock, +})); + +function jsonResponse(body: unknown, status = 200): Response { + return new Response(JSON.stringify(body), { + status, + headers: { "Content-Type": "application/json" }, + }); +} + +function requestUrl(input: string | URL | Request): string { + if (typeof input === "string") { + return input; + } + if (input instanceof URL) { + return input.toString(); + } + return input.url; +} + +function requestBody(body: BodyInit | null | undefined): string { + return typeof body === "string" ? body : "{}"; +} + +function createOllamaFetchMock(params: { + tags?: string[]; + show?: Record; + meResponses?: Response[]; + pullResponse?: Response; + tagsError?: Error; +}) { + const meResponses = [...(params.meResponses ?? 
[])]; + return vi.fn(async (input: string | URL | Request, init?: RequestInit) => { + const url = requestUrl(input); + if (url.endsWith("/api/tags")) { + if (params.tagsError) { + throw params.tagsError; + } + return jsonResponse({ models: (params.tags ?? []).map((name) => ({ name })) }); + } + if (url.endsWith("/api/show")) { + const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + const contextWindow = body.name ? params.show?.[body.name] : undefined; + return contextWindow + ? jsonResponse({ model_info: { "llama.context_length": contextWindow } }) + : jsonResponse({}); + } + if (url.endsWith("/api/me")) { + return meResponses.shift() ?? jsonResponse({ username: "testuser" }); + } + if (url.endsWith("/api/pull")) { + return params.pullResponse ?? new Response('{"status":"success"}\n', { status: 200 }); + } + throw new Error(`Unexpected fetch: ${url}`); + }); +} + +describe("ollama setup", () => { + afterEach(() => { + vi.unstubAllGlobals(); + upsertAuthProfileWithLock.mockClear(); + openUrlMock.mockClear(); + isRemoteEnvironmentMock.mockReset().mockReturnValue(false); + }); + + it("returns suggested default model for local mode", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("local"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(result.defaultModelId).toBe("glm-4.7-flash"); + }); + + it("returns suggested default model for remote mode", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("remote"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); + 
vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(result.defaultModelId).toBe("kimi-k2.5:cloud"); + }); + + it("mode selection affects model ordering (local)", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("local"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b", "glm-4.7-flash"] }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(result.defaultModelId).toBe("glm-4.7-flash"); + const modelIds = result.config.models?.providers?.ollama?.models?.map((m) => m.id); + expect(modelIds?.[0]).toBe("glm-4.7-flash"); + expect(modelIds).toContain("llama3:8b"); + }); + + it("cloud+local mode triggers /api/me check and opens sign-in URL", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("remote"), + confirm: vi.fn().mockResolvedValueOnce(true), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + meResponses: [ + jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), + jsonResponse({ username: "testuser" }), + ], + }); + vi.stubGlobal("fetch", fetchMock); + + await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(openUrlMock).toHaveBeenCalledWith("https://ollama.com/signin"); + expect(prompter.confirm).toHaveBeenCalled(); + }); + + it("cloud+local mode does not open browser in remote environment", async () => { + isRemoteEnvironmentMock.mockReturnValue(true); + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("remote"), + confirm: 
vi.fn().mockResolvedValueOnce(true), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + meResponses: [ + jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), + jsonResponse({ username: "testuser" }), + ], + }); + vi.stubGlobal("fetch", fetchMock); + + await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(openUrlMock).not.toHaveBeenCalled(); + }); + + it("local mode does not trigger cloud auth", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("local"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); + vi.stubGlobal("fetch", fetchMock); + + await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(fetchMock).toHaveBeenCalledTimes(2); + expect(fetchMock.mock.calls[0]?.[0]).toContain("/api/tags"); + expect(fetchMock.mock.calls.some((call) => requestUrl(call[0]).includes("/api/me"))).toBe( + false, + ); + }); + + it("suggested models appear first in model list (cloud+local)", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("remote"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b", "glm-4.7-flash", "deepseek-r1:14b"], + }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + const modelIds = result.config.models?.providers?.ollama?.models?.map((m) => m.id); + + expect(modelIds).toEqual([ + "kimi-k2.5:cloud", + "minimax-m2.5:cloud", + "glm-5:cloud", + "llama3:8b", + "glm-4.7-flash", + "deepseek-r1:14b", + ]); + }); + + it("uses /api/show context windows when building Ollama model configs", async () => { + 
const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("local"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + show: { "llama3:8b": 65536 }, + }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + const model = result.config.models?.providers?.ollama?.models?.find( + (m) => m.id === "llama3:8b", + ); + + expect(model?.contextWindow).toBe(65536); + }); + + describe("ensureOllamaModelPulled", () => { + it("pulls model when not available locally", async () => { + const progress = { update: vi.fn(), stop: vi.fn() }; + const prompter = { + progress: vi.fn(() => progress), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + pullResponse: new Response('{"status":"success"}\n', { status: 200 }), + }); + vi.stubGlobal("fetch", fetchMock); + + await ensureOllamaModelPulled({ + config: { + agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } }, + models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, + }, + prompter, + }); + + expect(fetchMock).toHaveBeenCalledTimes(2); + expect(fetchMock.mock.calls[1][0]).toContain("/api/pull"); + }); + + it("skips pull when model is already available", async () => { + const prompter = {} as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ tags: ["glm-4.7-flash"] }); + vi.stubGlobal("fetch", fetchMock); + + await ensureOllamaModelPulled({ + config: { + agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } }, + models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, + }, + prompter, + }); + + expect(fetchMock).toHaveBeenCalledTimes(1); + }); + + it("skips pull for cloud models", async () => { + const prompter = {} as unknown as WizardPrompter; + const 
fetchMock = vi.fn(); + vi.stubGlobal("fetch", fetchMock); + + await ensureOllamaModelPulled({ + config: { + agents: { defaults: { model: { primary: "ollama/kimi-k2.5:cloud" } } }, + models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, + }, + prompter, + }); + + expect(fetchMock).not.toHaveBeenCalled(); + }); + + it("skips when model is not an ollama model", async () => { + const prompter = {} as unknown as WizardPrompter; + const fetchMock = vi.fn(); + vi.stubGlobal("fetch", fetchMock); + + await ensureOllamaModelPulled({ + config: { + agents: { defaults: { model: { primary: "openai/gpt-4o" } } }, + }, + prompter, + }); + + expect(fetchMock).not.toHaveBeenCalled(); + }); + }); + + it("uses discovered model when requested non-interactive download fails", async () => { + const fetchMock = createOllamaFetchMock({ + tags: ["qwen2.5-coder:7b"], + pullResponse: new Response('{"error":"disk full"}\n', { status: 200 }), + }); + vi.stubGlobal("fetch", fetchMock); + + const runtime = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown as RuntimeEnv; + + const result = await configureOllamaNonInteractive({ + nextConfig: { + agents: { + defaults: { + model: { + primary: "openai/gpt-4o-mini", + fallbacks: ["anthropic/claude-sonnet-4-5"], + }, + }, + }, + }, + opts: { + customBaseUrl: "http://127.0.0.1:11434", + customModelId: "missing-model", + }, + runtime, + }); + + expect(runtime.error).toHaveBeenCalledWith("Download failed: disk full"); + expect(result.agents?.defaults?.model).toEqual({ + primary: "ollama/qwen2.5-coder:7b", + fallbacks: ["anthropic/claude-sonnet-4-5"], + }); + }); + + it("normalizes ollama/ prefix in non-interactive custom model download", async () => { + const fetchMock = createOllamaFetchMock({ + tags: [], + pullResponse: new Response('{"status":"success"}\n', { status: 200 }), + }); + vi.stubGlobal("fetch", fetchMock); + + const runtime = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown 
as RuntimeEnv; + + const result = await configureOllamaNonInteractive({ + nextConfig: {}, + opts: { + customBaseUrl: "http://127.0.0.1:11434", + customModelId: "ollama/llama3.2:latest", + }, + runtime, + }); + + const pullRequest = fetchMock.mock.calls[1]?.[1]; + expect(JSON.parse(requestBody(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); + expect(result.agents?.defaults?.model).toEqual( + expect.objectContaining({ primary: "ollama/llama3.2:latest" }), + ); + }); + + it("accepts cloud models in non-interactive mode without pulling", async () => { + const fetchMock = createOllamaFetchMock({ tags: [] }); + vi.stubGlobal("fetch", fetchMock); + + const runtime = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown as RuntimeEnv; + + const result = await configureOllamaNonInteractive({ + nextConfig: {}, + opts: { + customBaseUrl: "http://127.0.0.1:11434", + customModelId: "kimi-k2.5:cloud", + }, + runtime, + }); + + expect(fetchMock).toHaveBeenCalledTimes(1); + expect(result.models?.providers?.ollama?.models?.map((model) => model.id)).toContain( + "kimi-k2.5:cloud", + ); + expect(result.agents?.defaults?.model).toEqual( + expect.objectContaining({ primary: "ollama/kimi-k2.5:cloud" }), + ); + }); + + it("exits when Ollama is unreachable", async () => { + const fetchMock = createOllamaFetchMock({ + tagsError: new Error("connect ECONNREFUSED"), + }); + vi.stubGlobal("fetch", fetchMock); + + const runtime = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown as RuntimeEnv; + const nextConfig = {}; + + const result = await configureOllamaNonInteractive({ + nextConfig, + opts: { + customBaseUrl: "http://127.0.0.1:11435", + customModelId: "llama3.2:latest", + }, + runtime, + }); + + expect(runtime.error).toHaveBeenCalledWith( + expect.stringContaining("Ollama could not be reached at http://127.0.0.1:11435."), + ); + expect(runtime.exit).toHaveBeenCalledWith(1); + expect(result).toBe(nextConfig); + }); +}); diff --git 
a/src/commands/ollama-setup.ts b/src/commands/ollama-setup.ts new file mode 100644 index 00000000000..f6aec85dafc --- /dev/null +++ b/src/commands/ollama-setup.ts @@ -0,0 +1,534 @@ +import { upsertAuthProfileWithLock } from "../agents/auth-profiles.js"; +import { + OLLAMA_DEFAULT_BASE_URL, + buildOllamaModelDefinition, + enrichOllamaModelsWithContext, + fetchOllamaModels, + resolveOllamaApiBase, + type OllamaModelWithContext, +} from "../agents/ollama-models.js"; +import type { OpenClawConfig } from "../config/config.js"; +import type { RuntimeEnv } from "../runtime.js"; +import { WizardCancelledError, type WizardPrompter } from "../wizard/prompts.js"; +import { isRemoteEnvironment } from "./oauth-env.js"; +import { applyAgentDefaultModelPrimary } from "./onboard-auth.config-shared.js"; +import { openUrl } from "./onboard-helpers.js"; +import type { OnboardMode, OnboardOptions } from "./onboard-types.js"; + +export { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-models.js"; +export const OLLAMA_DEFAULT_MODEL = "glm-4.7-flash"; + +const OLLAMA_SUGGESTED_MODELS_LOCAL = ["glm-4.7-flash"]; +const OLLAMA_SUGGESTED_MODELS_CLOUD = ["kimi-k2.5:cloud", "minimax-m2.5:cloud", "glm-5:cloud"]; + +function normalizeOllamaModelName(value: string | undefined): string | undefined { + const trimmed = value?.trim(); + if (!trimmed) { + return undefined; + } + if (trimmed.toLowerCase().startsWith("ollama/")) { + const withoutPrefix = trimmed.slice("ollama/".length).trim(); + return withoutPrefix || undefined; + } + return trimmed; +} + +function isOllamaCloudModel(modelName: string | undefined): boolean { + return Boolean(modelName?.trim().toLowerCase().endsWith(":cloud")); +} + +function formatOllamaPullStatus(status: string): { text: string; hidePercent: boolean } { + const trimmed = status.trim(); + const partStatusMatch = trimmed.match(/^([a-z-]+)\s+(?:sha256:)?[a-f0-9]{8,}$/i); + if (partStatusMatch) { + return { text: `${partStatusMatch[1]} part`, hidePercent: false }; + } + 
if (/^verifying\b.*\bdigest\b/i.test(trimmed)) { + return { text: "verifying digest", hidePercent: true }; + } + return { text: trimmed, hidePercent: false }; +} + +type OllamaCloudAuthResult = { + signedIn: boolean; + signinUrl?: string; +}; + +/** Check if the user is signed in to Ollama cloud via /api/me. */ +async function checkOllamaCloudAuth(baseUrl: string): Promise { + try { + const apiBase = resolveOllamaApiBase(baseUrl); + const response = await fetch(`${apiBase}/api/me`, { + method: "POST", + signal: AbortSignal.timeout(5000), + }); + if (response.status === 401) { + // 401 body contains { error, signin_url } + const data = (await response.json()) as { signin_url?: string }; + return { signedIn: false, signinUrl: data.signin_url }; + } + if (!response.ok) { + return { signedIn: false }; + } + return { signedIn: true }; + } catch { + // /api/me not supported or unreachable — fail closed so cloud mode + // doesn't silently skip auth; the caller handles the fallback. + return { signedIn: false }; + } +} + +type OllamaPullChunk = { + status?: string; + total?: number; + completed?: number; + error?: string; +}; + +type OllamaPullFailureKind = "http" | "no-body" | "chunk-error" | "network"; +type OllamaPullResult = + | { ok: true } + | { + ok: false; + kind: OllamaPullFailureKind; + message: string; + }; + +async function pullOllamaModelCore(params: { + baseUrl: string; + modelName: string; + onStatus?: (status: string, percent: number | null) => void; +}): Promise { + const { onStatus } = params; + const baseUrl = resolveOllamaApiBase(params.baseUrl); + const modelName = normalizeOllamaModelName(params.modelName) ?? 
params.modelName.trim(); + try { + const response = await fetch(`${baseUrl}/api/pull`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ name: modelName }), + }); + if (!response.ok) { + return { + ok: false, + kind: "http", + message: `Failed to download ${modelName} (HTTP ${response.status})`, + }; + } + if (!response.body) { + return { + ok: false, + kind: "no-body", + message: `Failed to download ${modelName} (no response body)`, + }; + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + const layers = new Map(); + + const parseLine = (line: string): OllamaPullResult => { + const trimmed = line.trim(); + if (!trimmed) { + return { ok: true }; + } + try { + const chunk = JSON.parse(trimmed) as OllamaPullChunk; + if (chunk.error) { + return { + ok: false, + kind: "chunk-error", + message: `Download failed: ${chunk.error}`, + }; + } + if (!chunk.status) { + return { ok: true }; + } + if (chunk.total && chunk.completed !== undefined) { + layers.set(chunk.status, { total: chunk.total, completed: chunk.completed }); + let totalSum = 0; + let completedSum = 0; + for (const layer of layers.values()) { + totalSum += layer.total; + completedSum += layer.completed; + } + const percent = totalSum > 0 ? Math.round((completedSum / totalSum) * 100) : null; + onStatus?.(chunk.status, percent); + } else { + onStatus?.(chunk.status, null); + } + } catch { + // Ignore malformed lines from streaming output. + } + return { ok: true }; + }; + + for (;;) { + const { done, value } = await reader.read(); + if (done) { + break; + } + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() ?? 
""; + for (const line of lines) { + const parsed = parseLine(line); + if (!parsed.ok) { + return parsed; + } + } + } + + const trailing = buffer.trim(); + if (trailing) { + const parsed = parseLine(trailing); + if (!parsed.ok) { + return parsed; + } + } + + return { ok: true }; + } catch (err) { + const reason = err instanceof Error ? err.message : String(err); + return { + ok: false, + kind: "network", + message: `Failed to download ${modelName}: ${reason}`, + }; + } +} + +/** Pull a model from Ollama, streaming progress updates. */ +async function pullOllamaModel( + baseUrl: string, + modelName: string, + prompter: WizardPrompter, +): Promise { + const spinner = prompter.progress(`Downloading ${modelName}...`); + const result = await pullOllamaModelCore({ + baseUrl, + modelName, + onStatus: (status, percent) => { + const displayStatus = formatOllamaPullStatus(status); + if (displayStatus.hidePercent) { + spinner.update(`Downloading ${modelName} - ${displayStatus.text}`); + } else { + spinner.update(`Downloading ${modelName} - ${displayStatus.text} - ${percent ?? 
0}%`); + } + }, + }); + if (!result.ok) { + spinner.stop(result.message); + return false; + } + spinner.stop(`Downloaded ${modelName}`); + return true; +} + +async function pullOllamaModelNonInteractive( + baseUrl: string, + modelName: string, + runtime: RuntimeEnv, +): Promise { + runtime.log(`Downloading ${modelName}...`); + const result = await pullOllamaModelCore({ baseUrl, modelName }); + if (!result.ok) { + runtime.error(result.message); + return false; + } + runtime.log(`Downloaded ${modelName}`); + return true; +} + +function buildOllamaModelsConfig( + modelNames: string[], + discoveredModelsByName?: Map, +) { + return modelNames.map((name) => + buildOllamaModelDefinition(name, discoveredModelsByName?.get(name)?.contextWindow), + ); +} + +function applyOllamaProviderConfig( + cfg: OpenClawConfig, + baseUrl: string, + modelNames: string[], + discoveredModelsByName?: Map, +): OpenClawConfig { + return { + ...cfg, + models: { + ...cfg.models, + mode: cfg.models?.mode ?? "merge", + providers: { + ...cfg.models?.providers, + ollama: { + baseUrl, + api: "ollama", + apiKey: "OLLAMA_API_KEY", // pragma: allowlist secret + models: buildOllamaModelsConfig(modelNames, discoveredModelsByName), + }, + }, + }, + }; +} + +async function storeOllamaCredential(agentDir?: string): Promise { + await upsertAuthProfileWithLock({ + profileId: "ollama:default", + credential: { type: "api_key", provider: "ollama", key: "ollama-local" }, + agentDir, + }); +} + +/** + * Interactive: prompt for base URL, discover models, configure provider. + * Model selection is handled by the standard model picker downstream. + */ +export async function promptAndConfigureOllama(params: { + cfg: OpenClawConfig; + prompter: WizardPrompter; + agentDir?: string; +}): Promise<{ config: OpenClawConfig; defaultModelId: string }> { + const { prompter } = params; + + // 1. 
Prompt base URL + const baseUrlRaw = await prompter.text({ + message: "Ollama base URL", + initialValue: OLLAMA_DEFAULT_BASE_URL, + placeholder: OLLAMA_DEFAULT_BASE_URL, + validate: (value) => (value?.trim() ? undefined : "Required"), + }); + const configuredBaseUrl = String(baseUrlRaw ?? "") + .trim() + .replace(/\/+$/, ""); + const baseUrl = resolveOllamaApiBase(configuredBaseUrl); + + // 2. Check reachability + const { reachable, models } = await fetchOllamaModels(baseUrl); + + if (!reachable) { + await prompter.note( + [ + `Ollama could not be reached at ${baseUrl}.`, + "Download it at https://ollama.com/download", + "", + "Start Ollama and re-run onboarding.", + ].join("\n"), + "Ollama", + ); + throw new WizardCancelledError("Ollama not reachable"); + } + + const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50)); + const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model])); + const modelNames = models.map((m) => m.name); + + // 3. Mode selection + const mode = (await prompter.select({ + message: "Ollama mode", + options: [ + { value: "remote", label: "Cloud + Local", hint: "Ollama cloud models + local models" }, + { value: "local", label: "Local", hint: "Local models only" }, + ], + })) as OnboardMode; + + // 4. 
Cloud auth — check /api/me upfront for remote (cloud+local) mode + let cloudAuthVerified = false; + if (mode === "remote") { + const authResult = await checkOllamaCloudAuth(baseUrl); + if (!authResult.signedIn) { + if (authResult.signinUrl) { + if (!isRemoteEnvironment()) { + await openUrl(authResult.signinUrl); + } + await prompter.note( + ["Sign in to Ollama Cloud:", authResult.signinUrl].join("\n"), + "Ollama Cloud", + ); + const confirmed = await prompter.confirm({ + message: "Have you signed in?", + }); + if (!confirmed) { + throw new WizardCancelledError("Ollama cloud sign-in cancelled"); + } + // Re-check after user claims sign-in + const recheck = await checkOllamaCloudAuth(baseUrl); + if (!recheck.signedIn) { + throw new WizardCancelledError("Ollama cloud sign-in required"); + } + cloudAuthVerified = true; + } else { + // No signin URL available (older server, unreachable /api/me, or custom gateway). + await prompter.note( + [ + "Could not verify Ollama Cloud authentication.", + "Cloud models may not work until you sign in at https://ollama.com.", + ].join("\n"), + "Ollama Cloud", + ); + const continueAnyway = await prompter.confirm({ + message: "Continue without cloud auth?", + }); + if (!continueAnyway) { + throw new WizardCancelledError("Ollama cloud auth could not be verified"); + } + // Cloud auth unverified — fall back to local defaults so the model + // picker doesn't steer toward cloud models that may fail. + } + } else { + cloudAuthVerified = true; + } + } + + // 5. Model ordering — suggested models first. + // Use cloud defaults only when auth was actually verified; otherwise fall + // back to local defaults so the user isn't steered toward cloud models + // that may fail at runtime. + const suggestedModels = + mode === "local" || !cloudAuthVerified + ? 
OLLAMA_SUGGESTED_MODELS_LOCAL + : OLLAMA_SUGGESTED_MODELS_CLOUD; + const orderedModelNames = [ + ...suggestedModels, + ...modelNames.filter((name) => !suggestedModels.includes(name)), + ]; + + await storeOllamaCredential(params.agentDir); + + const defaultModelId = suggestedModels[0] ?? OLLAMA_DEFAULT_MODEL; + const config = applyOllamaProviderConfig( + params.cfg, + baseUrl, + orderedModelNames, + discoveredModelsByName, + ); + return { config, defaultModelId }; +} + +/** Non-interactive: auto-discover models and configure provider. */ +export async function configureOllamaNonInteractive(params: { + nextConfig: OpenClawConfig; + opts: OnboardOptions; + runtime: RuntimeEnv; +}): Promise { + const { opts, runtime } = params; + const configuredBaseUrl = (opts.customBaseUrl?.trim() || OLLAMA_DEFAULT_BASE_URL).replace( + /\/+$/, + "", + ); + const baseUrl = resolveOllamaApiBase(configuredBaseUrl); + + const { reachable, models } = await fetchOllamaModels(baseUrl); + const explicitModel = normalizeOllamaModelName(opts.customModelId); + + if (!reachable) { + runtime.error( + [ + `Ollama could not be reached at ${baseUrl}.`, + "Download it at https://ollama.com/download", + ].join("\n"), + ); + runtime.exit(1); + return params.nextConfig; + } + + await storeOllamaCredential(); + + const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50)); + const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model])); + const modelNames = models.map((m) => m.name); + + // Apply local suggested model ordering. + const suggestedModels = OLLAMA_SUGGESTED_MODELS_LOCAL; + const orderedModelNames = [ + ...suggestedModels, + ...modelNames.filter((name) => !suggestedModels.includes(name)), + ]; + + const requestedDefaultModelId = explicitModel ?? 
suggestedModels[0]; + let pulledRequestedModel = false; + const availableModelNames = new Set(modelNames); + const requestedCloudModel = isOllamaCloudModel(requestedDefaultModelId); + + if (requestedCloudModel) { + availableModelNames.add(requestedDefaultModelId); + } + + // Pull if model not in discovered list and Ollama is reachable + if (!requestedCloudModel && !modelNames.includes(requestedDefaultModelId)) { + pulledRequestedModel = await pullOllamaModelNonInteractive( + baseUrl, + requestedDefaultModelId, + runtime, + ); + if (pulledRequestedModel) { + availableModelNames.add(requestedDefaultModelId); + } + } + + let allModelNames = orderedModelNames; + let defaultModelId = requestedDefaultModelId; + if ( + (pulledRequestedModel || requestedCloudModel) && + !allModelNames.includes(requestedDefaultModelId) + ) { + allModelNames = [...allModelNames, requestedDefaultModelId]; + } + if (!availableModelNames.has(requestedDefaultModelId)) { + if (availableModelNames.size > 0) { + const firstAvailableModel = + allModelNames.find((name) => availableModelNames.has(name)) ?? + Array.from(availableModelNames)[0]; + defaultModelId = firstAvailableModel; + runtime.log( + `Ollama model ${requestedDefaultModelId} was not available; using ${defaultModelId} instead.`, + ); + } else { + runtime.error( + [ + `No Ollama models are available at ${baseUrl}.`, + "Pull a model first, then re-run onboarding.", + ].join("\n"), + ); + runtime.exit(1); + return params.nextConfig; + } + } + + const config = applyOllamaProviderConfig( + params.nextConfig, + baseUrl, + allModelNames, + discoveredModelsByName, + ); + const modelRef = `ollama/${defaultModelId}`; + runtime.log(`Default Ollama model: ${defaultModelId}`); + return applyAgentDefaultModelPrimary(config, modelRef); +} + +/** Pull the configured default Ollama model if it isn't already available locally. 
*/ +export async function ensureOllamaModelPulled(params: { + config: OpenClawConfig; + prompter: WizardPrompter; +}): Promise { + const modelCfg = params.config.agents?.defaults?.model; + const modelId = typeof modelCfg === "string" ? modelCfg : modelCfg?.primary; + if (!modelId?.startsWith("ollama/")) { + return; + } + const baseUrl = params.config.models?.providers?.ollama?.baseUrl ?? OLLAMA_DEFAULT_BASE_URL; + const modelName = modelId.slice("ollama/".length); + if (isOllamaCloudModel(modelName)) { + return; + } + const { models } = await fetchOllamaModels(baseUrl); + if (models.some((m) => m.name === modelName)) { + return; + } + const pulled = await pullOllamaModel(baseUrl, modelName, params.prompter); + if (!pulled) { + throw new WizardCancelledError("Failed to download selected Ollama model"); + } +} diff --git a/src/commands/onboard-auth.config-opencode-go.ts b/src/commands/onboard-auth.config-opencode-go.ts new file mode 100644 index 00000000000..25be5ffa18f --- /dev/null +++ b/src/commands/onboard-auth.config-opencode-go.ts @@ -0,0 +1,36 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { applyAgentDefaultModelPrimary } from "./onboard-auth.config-shared.js"; +import { OPENCODE_GO_DEFAULT_MODEL_REF } from "./opencode-go-model-default.js"; + +const OPENCODE_GO_ALIAS_DEFAULTS: Record = { + "opencode-go/kimi-k2.5": "Kimi", + "opencode-go/glm-5": "GLM", + "opencode-go/minimax-m2.5": "MiniMax", +}; + +export function applyOpencodeGoProviderConfig(cfg: OpenClawConfig): OpenClawConfig { + // Use the built-in opencode-go provider from pi-ai; only seed allowlist aliases. + const models = { ...cfg.agents?.defaults?.models }; + for (const [modelRef, alias] of Object.entries(OPENCODE_GO_ALIAS_DEFAULTS)) { + models[modelRef] = { + ...models[modelRef], + alias: models[modelRef]?.alias ?? 
alias, + }; + } + + return { + ...cfg, + agents: { + ...cfg.agents, + defaults: { + ...cfg.agents?.defaults, + models, + }, + }, + }; +} + +export function applyOpencodeGoConfig(cfg: OpenClawConfig): OpenClawConfig { + const next = applyOpencodeGoProviderConfig(cfg); + return applyAgentDefaultModelPrimary(next, OPENCODE_GO_DEFAULT_MODEL_REF); +} diff --git a/src/commands/onboard-auth.credentials.test.ts b/src/commands/onboard-auth.credentials.test.ts index 5ff2c57461d..e844ac501c2 100644 --- a/src/commands/onboard-auth.credentials.test.ts +++ b/src/commands/onboard-auth.credentials.test.ts @@ -3,6 +3,7 @@ import { setByteplusApiKey, setCloudflareAiGatewayConfig, setMoonshotApiKey, + setOpencodeZenApiKey, setOpenaiApiKey, setVolcengineApiKey, } from "./onboard-auth.js"; @@ -22,6 +23,7 @@ describe("onboard auth credentials secret refs", () => { "CLOUDFLARE_AI_GATEWAY_API_KEY", "VOLCANO_ENGINE_API_KEY", "BYTEPLUS_API_KEY", + "OPENCODE_API_KEY", ]); afterEach(async () => { @@ -207,4 +209,25 @@ describe("onboard auth credentials secret refs", () => { }); expect(parsed.profiles?.["byteplus:default"]?.key).toBeUndefined(); }); + + it("stores shared OpenCode credentials for both runtime providers", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-opencode-"); + lifecycle.setStateDir(env.stateDir); + process.env.OPENCODE_API_KEY = "sk-opencode-env"; // pragma: allowlist secret + + await setOpencodeZenApiKey("sk-opencode-env", env.agentDir, { + secretInputMode: "ref", // pragma: allowlist secret + }); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + + expect(parsed.profiles?.["opencode:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "OPENCODE_API_KEY" }, + }); + expect(parsed.profiles?.["opencode-go:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "OPENCODE_API_KEY" }, + }); + }); }); diff --git 
a/src/commands/onboard-auth.credentials.ts b/src/commands/onboard-auth.credentials.ts index c83861b5685..92e1170b010 100644 --- a/src/commands/onboard-auth.credentials.ts +++ b/src/commands/onboard-auth.credentials.ts @@ -433,11 +433,30 @@ export async function setOpencodeZenApiKey( agentDir?: string, options?: ApiKeyStorageOptions, ) { - upsertAuthProfile({ - profileId: "opencode:default", - credential: buildApiKeyCredential("opencode", key, undefined, options), - agentDir: resolveAuthAgentDir(agentDir), - }); + await setSharedOpencodeApiKey(key, agentDir, options); +} + +export async function setOpencodeGoApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { + await setSharedOpencodeApiKey(key, agentDir, options); +} + +async function setSharedOpencodeApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { + const resolvedAgentDir = resolveAuthAgentDir(agentDir); + for (const provider of ["opencode", "opencode-go"] as const) { + upsertAuthProfile({ + profileId: `${provider}:default`, + credential: buildApiKeyCredential(provider, key, undefined, options), + agentDir: resolvedAgentDir, + }); + } } export async function setTogetherApiKey( diff --git a/src/commands/onboard-auth.test.ts b/src/commands/onboard-auth.test.ts index a79eb1d970a..fa2c9f4f10d 100644 --- a/src/commands/onboard-auth.test.ts +++ b/src/commands/onboard-auth.test.ts @@ -16,6 +16,8 @@ import { applyMistralProviderConfig, applyMinimaxApiConfig, applyMinimaxApiProviderConfig, + applyOpencodeGoConfig, + applyOpencodeGoProviderConfig, applyOpencodeZenConfig, applyOpencodeZenProviderConfig, applyOpenrouterConfig, @@ -675,6 +677,11 @@ describe("allowlist provider helpers", () => { modelRef: "opencode/claude-opus-4-6", alias: "My Opus", }, + { + applyConfig: applyOpencodeGoProviderConfig, + modelRef: "opencode-go/kimi-k2.5", + alias: "Kimi", + }, { applyConfig: applyOpenrouterProviderConfig, modelRef: OPENROUTER_DEFAULT_MODEL_REF, @@ 
-729,6 +736,10 @@ describe("default-model config helpers", () => { applyConfig: applyOpencodeZenConfig, primaryModel: "opencode/claude-opus-4-6", }, + { + applyConfig: applyOpencodeGoConfig, + primaryModel: "opencode-go/kimi-k2.5", + }, { applyConfig: applyOpenrouterConfig, primaryModel: OPENROUTER_DEFAULT_MODEL_REF, diff --git a/src/commands/onboard-auth.ts b/src/commands/onboard-auth.ts index 22946567fae..cda460b6c19 100644 --- a/src/commands/onboard-auth.ts +++ b/src/commands/onboard-auth.ts @@ -60,6 +60,10 @@ export { applyOpencodeZenConfig, applyOpencodeZenProviderConfig, } from "./onboard-auth.config-opencode.js"; +export { + applyOpencodeGoConfig, + applyOpencodeGoProviderConfig, +} from "./onboard-auth.config-opencode-go.js"; export { CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF, KILOCODE_DEFAULT_MODEL_REF, @@ -77,6 +81,7 @@ export { setMinimaxApiKey, setMistralApiKey, setMoonshotApiKey, + setOpencodeGoApiKey, setOpencodeZenApiKey, setOpenrouterApiKey, setSyntheticApiKey, diff --git a/src/commands/onboard-custom.test.ts b/src/commands/onboard-custom.test.ts index b04f7bc08ab..bc1a1927bdc 100644 --- a/src/commands/onboard-custom.test.ts +++ b/src/commands/onboard-custom.test.ts @@ -1,5 +1,6 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js"; +import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-models.js"; import type { OpenClawConfig } from "../config/config.js"; import { defaultRuntime } from "../runtime.js"; import { @@ -133,6 +134,23 @@ describe("promptCustomApiConfig", () => { expect(result.config.agents?.defaults?.models?.["custom/llama3"]?.alias).toBe("local"); }); + it("defaults custom onboarding to the native Ollama base URL", async () => { + const prompter = createTestPrompter({ + text: ["http://localhost:11434", "", "llama3", "custom", ""], + select: ["plaintext", "openai"], + }); + stubFetchSequence([{ ok: true }]); + + await 
runPromptCustomApi(prompter); + + expect(prompter.text).toHaveBeenCalledWith( + expect.objectContaining({ + message: "API Base URL", + initialValue: OLLAMA_DEFAULT_BASE_URL, + }), + ); + }); + it("retries when verification fails", async () => { const prompter = createTestPrompter({ text: ["http://localhost:11434/v1", "", "bad-model", "good-model", "custom", ""], diff --git a/src/commands/onboard-custom.ts b/src/commands/onboard-custom.ts index a05922aafe0..874018a74ea 100644 --- a/src/commands/onboard-custom.ts +++ b/src/commands/onboard-custom.ts @@ -1,6 +1,7 @@ import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js"; import { DEFAULT_PROVIDER } from "../agents/defaults.js"; import { buildModelAliasIndex, modelKey } from "../agents/model-selection.js"; +import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-models.js"; import type { OpenClawConfig } from "../config/config.js"; import type { ModelProviderConfig } from "../config/types.models.js"; import { isSecretRef, type SecretInput } from "../config/types.secrets.js"; @@ -16,7 +17,6 @@ import { applyPrimaryModel } from "./model-picker.js"; import { normalizeAlias } from "./models/shared.js"; import type { SecretInputMode } from "./onboard-types.js"; -const DEFAULT_OLLAMA_BASE_URL = "http://127.0.0.1:11434/v1"; const DEFAULT_CONTEXT_WINDOW = CONTEXT_WINDOW_HARD_MIN_TOKENS; const DEFAULT_MAX_TOKENS = 4096; const VERIFY_TIMEOUT_MS = 30_000; @@ -389,7 +389,7 @@ async function promptBaseUrlAndKey(params: { }): Promise<{ baseUrl: string; apiKey?: SecretInput; resolvedApiKey: string }> { const baseUrlInput = await params.prompter.text({ message: "API Base URL", - initialValue: params.initialBaseUrl ?? DEFAULT_OLLAMA_BASE_URL, + initialValue: params.initialBaseUrl ?? 
OLLAMA_DEFAULT_BASE_URL, placeholder: "https://api.example.com/v1", validate: (val) => { try { diff --git a/src/commands/onboard-non-interactive.provider-auth.test.ts b/src/commands/onboard-non-interactive.provider-auth.test.ts index 3f5ccee1755..9606b70259f 100644 --- a/src/commands/onboard-non-interactive.provider-auth.test.ts +++ b/src/commands/onboard-non-interactive.provider-auth.test.ts @@ -42,11 +42,6 @@ let upsertAuthProfile: typeof import("../agents/auth-profiles.js").upsertAuthPro type ProviderAuthConfigSnapshot = { auth?: { profiles?: Record }; agents?: { defaults?: { model?: { primary?: string } } }; - talk?: { - provider?: string; - apiKey?: string | { source?: string; id?: string }; - providers?: Record; - }; models?: { providers?: Record< string, @@ -362,38 +357,6 @@ describe("onboard (non-interactive): provider auth", () => { }); }); - it("does not persist talk fallback secrets when OpenAI ref onboarding starts from an empty config", async () => { - await withOnboardEnv("openclaw-onboard-openai-ref-no-talk-leak-", async (env) => { - await withEnvAsync( - { - OPENAI_API_KEY: "sk-openai-env-key", // pragma: allowlist secret - ELEVENLABS_API_KEY: "elevenlabs-env-key", // pragma: allowlist secret - }, - async () => { - const cfg = await runOnboardingAndReadConfig(env, { - authChoice: "openai-api-key", - secretInputMode: "ref", // pragma: allowlist secret - }); - - expect(cfg.agents?.defaults?.model?.primary).toBe(OPENAI_DEFAULT_MODEL); - expect(cfg.talk).toBeUndefined(); - - const store = ensureAuthProfileStore(); - const profile = store.profiles["openai:default"]; - expect(profile?.type).toBe("api_key"); - if (profile?.type === "api_key") { - expect(profile.key).toBeUndefined(); - expect(profile.keyRef).toEqual({ - source: "env", - provider: "default", - id: "OPENAI_API_KEY", - }); - } - }, - ); - }); - }); - it.each([ { name: "anthropic", @@ -479,7 +442,7 @@ describe("onboard (non-interactive): provider auth", () => { }, ); - it("stores the detected 
env alias as keyRef for opencode ref mode", async () => { + it("stores the detected env alias as keyRef for both OpenCode runtime providers", async () => { await withOnboardEnv("openclaw-onboard-ref-opencode-alias-", async ({ runtime }) => { await withEnvAsync( { @@ -494,15 +457,17 @@ describe("onboard (non-interactive): provider auth", () => { }); const store = ensureAuthProfileStore(); - const profile = store.profiles["opencode:default"]; - expect(profile?.type).toBe("api_key"); - if (profile?.type === "api_key") { - expect(profile.key).toBeUndefined(); - expect(profile.keyRef).toEqual({ - source: "env", - provider: "default", - id: "OPENCODE_ZEN_API_KEY", - }); + for (const profileId of ["opencode:default", "opencode-go:default"]) { + const profile = store.profiles[profileId]; + expect(profile?.type).toBe("api_key"); + if (profile?.type === "api_key") { + expect(profile.key).toBeUndefined(); + expect(profile.keyRef).toEqual({ + source: "env", + provider: "default", + id: "OPENCODE_ZEN_API_KEY", + }); + } } }, ); diff --git a/src/commands/onboard-non-interactive/local/auth-choice-inference.ts b/src/commands/onboard-non-interactive/local/auth-choice-inference.ts index a49be3ad2c8..212bb9dd890 100644 --- a/src/commands/onboard-non-interactive/local/auth-choice-inference.ts +++ b/src/commands/onboard-non-interactive/local/auth-choice-inference.ts @@ -27,6 +27,7 @@ type AuthChoiceFlagOptions = Pick< | "xiaomiApiKey" | "minimaxApiKey" | "opencodeZenApiKey" + | "opencodeGoApiKey" | "xaiApiKey" | "litellmApiKey" | "qianfanApiKey" diff --git a/src/commands/onboard-non-interactive/local/auth-choice.ts b/src/commands/onboard-non-interactive/local/auth-choice.ts index 9739f57ce2e..af119c12efe 100644 --- a/src/commands/onboard-non-interactive/local/auth-choice.ts +++ b/src/commands/onboard-non-interactive/local/auth-choice.ts @@ -10,6 +10,7 @@ import { normalizeSecretInputModeInput } from "../../auth-choice.apply-helpers.j import { buildTokenProfileId, 
validateAnthropicSetupToken } from "../../auth-token.js"; import { applyGoogleGeminiModelDefault } from "../../google-gemini-model-default.js"; import { applyPrimaryModel } from "../../model-picker.js"; +import { configureOllamaNonInteractive } from "../../ollama-setup.js"; import { applyAuthProfileConfig, applyCloudflareAiGatewayConfig, @@ -23,6 +24,7 @@ import { applyMinimaxConfig, applyMoonshotConfig, applyMoonshotConfigCn, + applyOpencodeGoConfig, applyOpencodeZenConfig, applyOpenrouterConfig, applySyntheticConfig, @@ -48,6 +50,7 @@ import { setMinimaxApiKey, setMoonshotApiKey, setOpenaiApiKey, + setOpencodeGoApiKey, setOpencodeZenApiKey, setOpenrouterApiKey, setSyntheticApiKey, @@ -172,6 +175,10 @@ export async function applyNonInteractiveAuthChoice(params: { return null; } + if (authChoice === "ollama") { + return configureOllamaNonInteractive({ nextConfig, opts, runtime }); + } + if (authChoice === "apiKey") { const resolved = await resolveApiKey({ provider: "anthropic", @@ -926,6 +933,33 @@ export async function applyNonInteractiveAuthChoice(params: { return applyOpencodeZenConfig(nextConfig); } + if (authChoice === "opencode-go") { + const resolved = await resolveApiKey({ + provider: "opencode-go", + cfg: baseConfig, + flagValue: opts.opencodeGoApiKey, + flagName: "--opencode-go-api-key", + envVar: "OPENCODE_API_KEY", + runtime, + }); + if (!resolved) { + return null; + } + if ( + !(await maybeSetResolvedApiKey(resolved, (value) => + setOpencodeGoApiKey(value, undefined, apiKeyStorageOptions), + )) + ) { + return null; + } + nextConfig = applyAuthProfileConfig(nextConfig, { + profileId: "opencode-go:default", + provider: "opencode-go", + mode: "api_key", + }); + return applyOpencodeGoConfig(nextConfig); + } + if (authChoice === "together-api-key") { const resolved = await resolveApiKey({ provider: "together", diff --git a/src/commands/onboard-provider-auth-flags.ts b/src/commands/onboard-provider-auth-flags.ts index 43c552f99fb..7610727097f 100644 --- 
a/src/commands/onboard-provider-auth-flags.ts +++ b/src/commands/onboard-provider-auth-flags.ts @@ -20,6 +20,7 @@ type OnboardProviderAuthOptionKey = keyof Pick< | "togetherApiKey" | "huggingfaceApiKey" | "opencodeZenApiKey" + | "opencodeGoApiKey" | "xaiApiKey" | "litellmApiKey" | "qianfanApiKey" @@ -163,7 +164,14 @@ export const ONBOARD_PROVIDER_AUTH_FLAGS: ReadonlyArray authChoice: "opencode-zen", cliFlag: "--opencode-zen-api-key", cliOption: "--opencode-zen-api-key ", - description: "OpenCode Zen API key", + description: "OpenCode API key (Zen catalog)", + }, + { + optionKey: "opencodeGoApiKey", + authChoice: "opencode-go", + cliFlag: "--opencode-go-api-key", + cliOption: "--opencode-go-api-key ", + description: "OpenCode API key (Go catalog)", }, { optionKey: "xaiApiKey", diff --git a/src/commands/onboard-types.ts b/src/commands/onboard-types.ts index 44f4660321e..40a02e85c15 100644 --- a/src/commands/onboard-types.ts +++ b/src/commands/onboard-types.ts @@ -10,6 +10,7 @@ export type AuthChoice = | "token" | "chutes" | "vllm" + | "ollama" | "openai-codex" | "openai-api-key" | "openrouter-api-key" @@ -41,6 +42,7 @@ export type AuthChoice = | "minimax-api-lightning" | "minimax-portal" | "opencode-zen" + | "opencode-go" | "github-copilot" | "copilot-proxy" | "qwen-portal" @@ -58,6 +60,7 @@ export type AuthChoiceGroupId = | "anthropic" | "chutes" | "vllm" + | "ollama" | "google" | "copilot" | "openrouter" @@ -68,7 +71,7 @@ export type AuthChoiceGroupId = | "moonshot" | "zai" | "xiaomi" - | "opencode-zen" + | "opencode" | "minimax" | "synthetic" | "venice" @@ -134,6 +137,7 @@ export type OnboardOptions = { togetherApiKey?: string; huggingfaceApiKey?: string; opencodeZenApiKey?: string; + opencodeGoApiKey?: string; xaiApiKey?: string; volcengineApiKey?: string; byteplusApiKey?: string; diff --git a/src/commands/opencode-go-model-default.ts b/src/commands/opencode-go-model-default.ts new file mode 100644 index 00000000000..c959f23ff2e --- /dev/null +++ 
b/src/commands/opencode-go-model-default.ts @@ -0,0 +1,11 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { applyAgentDefaultPrimaryModel } from "./model-default.js"; + +export const OPENCODE_GO_DEFAULT_MODEL_REF = "opencode-go/kimi-k2.5"; + +export function applyOpencodeGoModelDefault(cfg: OpenClawConfig): { + next: OpenClawConfig; + changed: boolean; +} { + return applyAgentDefaultPrimaryModel({ cfg, model: OPENCODE_GO_DEFAULT_MODEL_REF }); +} diff --git a/src/commands/status-all/report-lines.ts b/src/commands/status-all/report-lines.ts index 152918029b5..751237360b4 100644 --- a/src/commands/status-all/report-lines.ts +++ b/src/commands/status-all/report-lines.ts @@ -1,5 +1,5 @@ import type { ProgressReporter } from "../../cli/progress.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { isRich, theme } from "../../terminal/theme.js"; import { groupChannelIssuesByChannel } from "./channel-issues.js"; import { appendStatusAllDiagnosis } from "./diagnosis.js"; @@ -57,7 +57,7 @@ export async function buildStatusAllReportLines(params: { const fail = (text: string) => (rich ? theme.error(text) : text); const muted = (text: string) => (rich ? theme.muted(text) : text); - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const overview = renderTable({ width: tableWidth, diff --git a/src/commands/status.command.ts b/src/commands/status.command.ts index 0d412c9715a..7e68424c5a9 100644 --- a/src/commands/status.command.ts +++ b/src/commands/status.command.ts @@ -16,7 +16,7 @@ import { } from "../memory/status-format.js"; import type { RuntimeEnv } from "../runtime.js"; import { runSecurityAudit } from "../security/audit.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { formatHealthChannelLines, type HealthSummary } from "./health.js"; import { resolveControlUiLinks } from "./onboard-helpers.js"; @@ -229,7 +229,7 @@ export async function statusCommand( runtime.log(""); } - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); if (secretDiagnostics.length > 0) { runtime.log(theme.warn("Secret diagnostics:")); diff --git a/src/commands/status.service-summary.test.ts b/src/commands/status.service-summary.test.ts index fb51d8036e4..f1a688ea092 100644 --- a/src/commands/status.service-summary.test.ts +++ b/src/commands/status.service-summary.test.ts @@ -10,7 +10,7 @@ function createService(overrides: Partial): GatewayService { install: vi.fn(async () => {}), uninstall: vi.fn(async () => {}), stop: vi.fn(async () => {}), - restart: vi.fn(async () => {}), + restart: vi.fn(async () => ({ outcome: "completed" as const })), isLoaded: vi.fn(async () => false), readCommand: vi.fn(async () => null), readRuntime: vi.fn(async () => ({ status: "stopped" as const })), diff --git a/src/commands/status.summary.redaction.test.ts b/src/commands/status.summary.redaction.test.ts index 02eaecbcb35..26e28887560 100644 --- a/src/commands/status.summary.redaction.test.ts +++ b/src/commands/status.summary.redaction.test.ts @@ -22,6 +22,7 @@ function 
createRecentSessionRow() { describe("redactSensitiveStatusSummary", () => { it("removes sensitive session and path details while preserving summary structure", () => { const input: StatusSummary = { + runtimeVersion: "2026.3.8", heartbeat: { defaultAgentId: "main", agents: [{ agentId: "main", enabled: true, every: "5m", everyMs: 300_000 }], @@ -50,6 +51,7 @@ describe("redactSensitiveStatusSummary", () => { expect(redacted.sessions.recent).toEqual([]); expect(redacted.sessions.byAgent[0]?.path).toBe("[redacted]"); expect(redacted.sessions.byAgent[0]?.recent).toEqual([]); + expect(redacted.runtimeVersion).toBe("2026.3.8"); expect(redacted.heartbeat).toEqual(input.heartbeat); expect(redacted.channelSummary).toEqual(input.channelSummary); }); diff --git a/src/commands/status.summary.test.ts b/src/commands/status.summary.test.ts new file mode 100644 index 00000000000..addda823a23 --- /dev/null +++ b/src/commands/status.summary.test.ts @@ -0,0 +1,85 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +vi.mock("../agents/context.js", () => ({ + resolveContextTokensForModel: vi.fn(() => 200_000), +})); + +vi.mock("../agents/defaults.js", () => ({ + DEFAULT_CONTEXT_TOKENS: 200_000, + DEFAULT_MODEL: "gpt-5.2", + DEFAULT_PROVIDER: "openai", +})); + +vi.mock("../agents/model-selection.js", () => ({ + resolveConfiguredModelRef: vi.fn(() => ({ + provider: "openai", + model: "gpt-5.2", + })), +})); + +vi.mock("../config/config.js", () => ({ + loadConfig: vi.fn(() => ({})), +})); + +vi.mock("../config/sessions.js", () => ({ + loadSessionStore: vi.fn(() => ({})), + resolveFreshSessionTotalTokens: vi.fn(() => undefined), + resolveMainSessionKey: vi.fn(() => "main"), + resolveStorePath: vi.fn(() => "/tmp/sessions.json"), +})); + +vi.mock("../gateway/session-utils.js", () => ({ + classifySessionKey: vi.fn(() => "direct"), + listAgentsForGateway: vi.fn(() => ({ + defaultId: "main", + agents: [{ id: "main" }], + })), + resolveSessionModelRef: vi.fn(() => ({ + provider: 
"openai", + model: "gpt-5.2", + })), +})); + +vi.mock("../infra/channel-summary.js", () => ({ + buildChannelSummary: vi.fn(async () => ["ok"]), +})); + +vi.mock("../infra/heartbeat-runner.js", () => ({ + resolveHeartbeatSummaryForAgent: vi.fn(() => ({ + enabled: true, + every: "5m", + everyMs: 300_000, + })), +})); + +vi.mock("../infra/system-events.js", () => ({ + peekSystemEvents: vi.fn(() => []), +})); + +vi.mock("../routing/session-key.js", () => ({ + parseAgentSessionKey: vi.fn(() => null), +})); + +vi.mock("../version.js", () => ({ + resolveRuntimeServiceVersion: vi.fn(() => "2026.3.8"), +})); + +vi.mock("./status.link-channel.js", () => ({ + resolveLinkChannelContext: vi.fn(async () => undefined), +})); + +describe("getStatusSummary", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("includes runtimeVersion in the status payload", async () => { + const { getStatusSummary } = await import("./status.summary.js"); + + const summary = await getStatusSummary(); + + expect(summary.runtimeVersion).toBe("2026.3.8"); + expect(summary.heartbeat.defaultAgentId).toBe("main"); + expect(summary.channelSummary).toEqual(["ok"]); + }); +}); diff --git a/src/commands/status.summary.ts b/src/commands/status.summary.ts index 3a71464973f..79a51f0d9d3 100644 --- a/src/commands/status.summary.ts +++ b/src/commands/status.summary.ts @@ -19,6 +19,7 @@ import { buildChannelSummary } from "../infra/channel-summary.js"; import { resolveHeartbeatSummaryForAgent } from "../infra/heartbeat-runner.js"; import { peekSystemEvents } from "../infra/system-events.js"; import { parseAgentSessionKey } from "../routing/session-key.js"; +import { resolveRuntimeServiceVersion } from "../version.js"; import { resolveLinkChannelContext } from "./status.link-channel.js"; import type { HeartbeatStatus, SessionStatus, StatusSummary } from "./status.types.js"; @@ -210,6 +211,7 @@ export async function getStatusSummary( const totalSessions = allSessions.length; const summary: StatusSummary = 
{ + runtimeVersion: resolveRuntimeServiceVersion(process.env), linkChannel: linkContext ? { id: linkContext.plugin.id, diff --git a/src/commands/status.types.ts b/src/commands/status.types.ts index a3e0a5ca8e2..ec157b3488a 100644 --- a/src/commands/status.types.ts +++ b/src/commands/status.types.ts @@ -34,6 +34,7 @@ export type HeartbeatStatus = { }; export type StatusSummary = { + runtimeVersion?: string | null; linkChannel?: { id: ChannelId; label: string; diff --git a/src/config/config.discord.test.ts b/src/config/config.discord.test.ts index 8afde31b9e3..0bf5484dbe3 100644 --- a/src/config/config.discord.test.ts +++ b/src/config/config.discord.test.ts @@ -36,7 +36,7 @@ describe("config discord", () => { requireMention: false, users: ["steipete"], channels: { - general: { allow: true }, + general: { allow: true, autoThread: true }, }, }, }, @@ -54,6 +54,7 @@ describe("config discord", () => { expect(cfg.channels?.discord?.actions?.channels).toBe(true); expect(cfg.channels?.discord?.guilds?.["123"]?.slug).toBe("friends-of-openclaw"); expect(cfg.channels?.discord?.guilds?.["123"]?.channels?.general?.allow).toBe(true); + expect(cfg.channels?.discord?.guilds?.["123"]?.channels?.general?.autoThread).toBe(true); }, ); }); diff --git a/src/config/config.plugin-validation.test.ts b/src/config/config.plugin-validation.test.ts index 02eab6789ea..464a5f37ced 100644 --- a/src/config/config.plugin-validation.test.ts +++ b/src/config/config.plugin-validation.test.ts @@ -38,12 +38,15 @@ describe("config plugin validation", () => { let enumPluginDir = ""; let bluebubblesPluginDir = ""; let voiceCallSchemaPluginDir = ""; - const envSnapshot = { - OPENCLAW_STATE_DIR: process.env.OPENCLAW_STATE_DIR, - OPENCLAW_PLUGIN_MANIFEST_CACHE_MS: process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS, - }; + const suiteEnv = () => + ({ + ...process.env, + OPENCLAW_STATE_DIR: path.join(suiteHome, ".openclaw"), + OPENCLAW_PLUGIN_MANIFEST_CACHE_MS: "10000", + }) satisfies NodeJS.ProcessEnv; - const 
validateInSuite = (raw: unknown) => validateConfigObjectWithPlugins(raw); + const validateInSuite = (raw: unknown) => + validateConfigObjectWithPlugins(raw, { env: suiteEnv() }); beforeAll(async () => { fixtureRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-config-plugin-validation-")); @@ -102,8 +105,6 @@ describe("config plugin validation", () => { id: "voice-call-schema-fixture", schema: voiceCallManifest.configSchema, }); - process.env.OPENCLAW_STATE_DIR = path.join(suiteHome, ".openclaw"); - process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS = "10000"; clearPluginManifestRegistryCache(); // Warm the plugin manifest cache once so path-based validations can reuse // parsed manifests across test cases. @@ -118,16 +119,6 @@ describe("config plugin validation", () => { afterAll(async () => { await fs.rm(fixtureRoot, { recursive: true, force: true }); clearPluginManifestRegistryCache(); - if (envSnapshot.OPENCLAW_STATE_DIR === undefined) { - delete process.env.OPENCLAW_STATE_DIR; - } else { - process.env.OPENCLAW_STATE_DIR = envSnapshot.OPENCLAW_STATE_DIR; - } - if (envSnapshot.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS === undefined) { - delete process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS; - } else { - process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS = envSnapshot.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS; - } }); it("reports missing plugin refs across load paths, entries, and allowlist surfaces", async () => { @@ -279,6 +270,31 @@ describe("config plugin validation", () => { expect(res.ok).toBe(true); }); + it("accepts voice-call OpenAI TTS speed, instructions, and baseUrl config fields", async () => { + const res = validateInSuite({ + agents: { list: [{ id: "pi" }] }, + plugins: { + enabled: true, + load: { paths: [voiceCallSchemaPluginDir] }, + entries: { + "voice-call-schema-fixture": { + config: { + tts: { + openai: { + baseUrl: "http://localhost:8880/v1", + voice: "alloy", + speed: 1.5, + instructions: "Speak in a cheerful tone", + }, + }, + }, + }, + }, + }, + }); + 
expect(res.ok).toBe(true); + }); + it("accepts known plugin ids and valid channel/heartbeat enums", async () => { const res = validateInSuite({ agents: { diff --git a/src/config/config.schema-regressions.test.ts b/src/config/config.schema-regressions.test.ts index 4125cb1b3d4..3e605e06c35 100644 --- a/src/config/config.schema-regressions.test.ts +++ b/src/config/config.schema-regressions.test.ts @@ -184,4 +184,31 @@ describe("config schema regressions", () => { expect(res.ok).toBe(false); }); + + it("accepts signal accountUuid for loop protection", () => { + const res = validateConfigObject({ + channels: { + signal: { + accountUuid: "a1b2c3d4-e5f6-7890-abcd-ef1234567890", + }, + }, + }); + + expect(res.ok).toBe(true); + }); + + it("accepts telegram actions editMessage and createForumTopic", () => { + const res = validateConfigObject({ + channels: { + telegram: { + actions: { + editMessage: true, + createForumTopic: false, + }, + }, + }, + }); + + expect(res.ok).toBe(true); + }); }); diff --git a/src/config/io.ts b/src/config/io.ts index 2b542bba755..fba17f253aa 100644 --- a/src/config/io.ts +++ b/src/config/io.ts @@ -164,6 +164,32 @@ function hashConfigRaw(raw: string | null): string { .digest("hex"); } +async function tightenStateDirPermissionsIfNeeded(params: { + configPath: string; + env: NodeJS.ProcessEnv; + homedir: () => string; + fsModule: typeof fs; +}): Promise { + if (process.platform === "win32") { + return; + } + const stateDir = resolveStateDir(params.env, params.homedir); + const configDir = path.dirname(params.configPath); + if (path.resolve(configDir) !== path.resolve(stateDir)) { + return; + } + try { + const stat = await params.fsModule.promises.stat(configDir); + const mode = stat.mode & 0o777; + if ((mode & 0o077) === 0) { + return; + } + await params.fsModule.promises.chmod(configDir, 0o700); + } catch { + // Best-effort hardening only; callers still need the config write to proceed. 
+ } +} + function formatConfigValidationFailure(pathLabel: string, issueMessage: string): string { const match = issueMessage.match(OPEN_DM_POLICY_ALLOW_FROM_RE); const policyPath = match?.groups?.policyPath?.trim(); @@ -1136,6 +1162,12 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) { const dir = path.dirname(configPath); await deps.fs.promises.mkdir(dir, { recursive: true, mode: 0o700 }); + await tightenStateDirPermissionsIfNeeded({ + configPath, + env: deps.env, + homedir: deps.homedir, + fsModule: deps.fs, + }); const outputConfigBase = envRefMap && changedPaths ? (restoreEnvRefsFromMap(cfgToWrite, "", envRefMap, changedPaths) as OpenClawConfig) diff --git a/src/config/io.write-config.test.ts b/src/config/io.write-config.test.ts index 6b73b9fbd30..68709725d83 100644 --- a/src/config/io.write-config.test.ts +++ b/src/config/io.write-config.test.ts @@ -142,6 +142,28 @@ describe("config io write", () => { }); }); + it.runIf(process.platform !== "win32")( + "tightens world-writable state dir when writing the default config", + async () => { + await withSuiteHome(async (home) => { + const stateDir = path.join(home, ".openclaw"); + await fs.mkdir(stateDir, { recursive: true, mode: 0o777 }); + await fs.chmod(stateDir, 0o777); + + const io = createConfigIO({ + env: {} as NodeJS.ProcessEnv, + homedir: () => home, + logger: silentLogger, + }); + + await io.writeConfigFile({ gateway: { mode: "local" } }); + + const stat = await fs.stat(stateDir); + expect(stat.mode & 0o777).toBe(0o700); + }); + }, + ); + it('shows actionable guidance for dmPolicy="open" without wildcard allowFrom', async () => { await withSuiteHome(async (home) => { const io = createConfigIO({ diff --git a/src/config/schema.help.quality.test.ts b/src/config/schema.help.quality.test.ts index 04d5200bfbb..965eed0e55d 100644 --- a/src/config/schema.help.quality.test.ts +++ b/src/config/schema.help.quality.test.ts @@ -72,6 +72,10 @@ const TARGET_KEYS = [ 
"agents.defaults.memorySearch.fallback", "agents.defaults.memorySearch.sources", "agents.defaults.memorySearch.extraPaths", + "agents.defaults.memorySearch.multimodal", + "agents.defaults.memorySearch.multimodal.enabled", + "agents.defaults.memorySearch.multimodal.modalities", + "agents.defaults.memorySearch.multimodal.maxFileBytes", "agents.defaults.memorySearch.experimental.sessionMemory", "agents.defaults.memorySearch.remote.baseUrl", "agents.defaults.memorySearch.remote.apiKey", @@ -83,6 +87,7 @@ const TARGET_KEYS = [ "agents.defaults.memorySearch.remote.batch.timeoutMinutes", "agents.defaults.memorySearch.local.modelPath", "agents.defaults.memorySearch.store.path", + "agents.defaults.memorySearch.outputDimensionality", "agents.defaults.memorySearch.store.vector.enabled", "agents.defaults.memorySearch.store.vector.extensionPath", "agents.defaults.memorySearch.query.hybrid.enabled", diff --git a/src/config/schema.help.ts b/src/config/schema.help.ts index 908829cbf33..3db7f40fe73 100644 --- a/src/config/schema.help.ts +++ b/src/config/schema.help.ts @@ -778,13 +778,23 @@ export const FIELD_HELP: Record = { "agents.defaults.memorySearch.sources": 'Chooses which sources are indexed: "memory" reads MEMORY.md + memory files, and "sessions" includes transcript history. Keep ["memory"] unless you need recall from prior chat transcripts.', "agents.defaults.memorySearch.extraPaths": - "Adds extra directories or .md files to the memory index beyond default memory files. Use this when key reference docs live elsewhere in your repo; keep paths small and intentional to avoid noisy recall.", + "Adds extra directories or .md files to the memory index beyond default memory files. 
Use this when key reference docs live elsewhere in your repo; when multimodal memory is enabled, matching image/audio files under these paths are also eligible for indexing.", + "agents.defaults.memorySearch.multimodal": + 'Optional multimodal memory settings for indexing image and audio files from configured extra paths. Keep this off unless your embedding model explicitly supports cross-modal embeddings, and set `memorySearch.fallback` to "none" while it is enabled. Matching files are uploaded to the configured remote embedding provider during indexing.', + "agents.defaults.memorySearch.multimodal.enabled": + "Enables image/audio memory indexing from extraPaths. This currently requires Gemini embedding-2, keeps the default memory roots Markdown-only, disables memory-search fallback providers, and uploads matching binary content to the configured remote embedding provider.", + "agents.defaults.memorySearch.multimodal.modalities": + 'Selects which multimodal file types are indexed from extraPaths: "image", "audio", or "all". Keep this narrow to avoid indexing large binary corpora unintentionally.', + "agents.defaults.memorySearch.multimodal.maxFileBytes": + "Sets the maximum bytes allowed per multimodal file before it is skipped during memory indexing. Use this to cap upload cost and indexing latency, or raise it for short high-quality audio clips.", "agents.defaults.memorySearch.experimental.sessionMemory": "Indexes session transcripts into memory search so responses can reference prior chat turns. Keep this off unless transcript recall is needed, because indexing cost and storage usage both increase.", "agents.defaults.memorySearch.provider": 'Selects the embedding backend used to build/query memory vectors: "openai", "gemini", "voyage", "mistral", "ollama", or "local". 
Keep your most reliable provider here and configure fallback for resilience.', "agents.defaults.memorySearch.model": "Embedding model override used by the selected memory provider when a non-default model is required. Set this only when you need explicit recall quality/cost tuning beyond provider defaults.", + "agents.defaults.memorySearch.outputDimensionality": + "Gemini embedding-2 only: chooses the output vector size for memory embeddings. Use 768, 1536, or 3072 (default), and expect a full reindex when you change it because stored vector dimensions must stay consistent.", "agents.defaults.memorySearch.remote.baseUrl": "Overrides the embedding API endpoint, such as an OpenAI-compatible proxy or custom Gemini base URL. Use this only when routing through your own gateway or vendor endpoint; keep provider defaults otherwise.", "agents.defaults.memorySearch.remote.apiKey": diff --git a/src/config/schema.labels.ts b/src/config/schema.labels.ts index c643cf91cd9..01b8d0f57dd 100644 --- a/src/config/schema.labels.ts +++ b/src/config/schema.labels.ts @@ -319,6 +319,10 @@ export const FIELD_LABELS: Record = { "agents.defaults.memorySearch.enabled": "Enable Memory Search", "agents.defaults.memorySearch.sources": "Memory Search Sources", "agents.defaults.memorySearch.extraPaths": "Extra Memory Paths", + "agents.defaults.memorySearch.multimodal": "Memory Search Multimodal", + "agents.defaults.memorySearch.multimodal.enabled": "Enable Memory Search Multimodal", + "agents.defaults.memorySearch.multimodal.modalities": "Memory Search Multimodal Modalities", + "agents.defaults.memorySearch.multimodal.maxFileBytes": "Memory Search Multimodal Max File Bytes", "agents.defaults.memorySearch.experimental.sessionMemory": "Memory Search Session Index (Experimental)", "agents.defaults.memorySearch.provider": "Memory Search Provider", @@ -331,6 +335,7 @@ export const FIELD_LABELS: Record = { "agents.defaults.memorySearch.remote.batch.pollIntervalMs": "Remote Batch Poll Interval (ms)", 
"agents.defaults.memorySearch.remote.batch.timeoutMinutes": "Remote Batch Timeout (min)", "agents.defaults.memorySearch.model": "Memory Search Model", + "agents.defaults.memorySearch.outputDimensionality": "Memory Search Output Dimensionality", "agents.defaults.memorySearch.fallback": "Memory Search Fallback", "agents.defaults.memorySearch.local.modelPath": "Local Embedding Model Path", "agents.defaults.memorySearch.store.path": "Memory Search Index Path", diff --git a/src/config/types.discord.ts b/src/config/types.discord.ts index 2d2e674f6b6..2d005dd7d7a 100644 --- a/src/config/types.discord.ts +++ b/src/config/types.discord.ts @@ -52,6 +52,8 @@ export type DiscordGuildChannelConfig = { systemPrompt?: string; /** If false, omit thread starter context for this channel (default: true). */ includeThreadStarter?: boolean; + /** If true, automatically create a thread for each new message in this channel. */ + autoThread?: boolean; }; export type DiscordReactionNotificationMode = "off" | "own" | "all" | "allowlist"; diff --git a/src/config/types.gateway.ts b/src/config/types.gateway.ts index 58b061682a1..422bbc82eed 100644 --- a/src/config/types.gateway.ts +++ b/src/config/types.gateway.ts @@ -186,6 +186,8 @@ export type GatewayTailscaleConfig = { }; export type GatewayRemoteConfig = { + /** Whether remote gateway surfaces are enabled. Default: true when absent. */ + enabled?: boolean; /** Remote Gateway WebSocket URL (ws:// or wss://). */ url?: string; /** Transport for macOS remote connections (ssh tunnel or direct WS). */ diff --git a/src/config/types.tools.ts b/src/config/types.tools.ts index e352f858c39..aaf6cb33e79 100644 --- a/src/config/types.tools.ts +++ b/src/config/types.tools.ts @@ -319,6 +319,15 @@ export type MemorySearchConfig = { sources?: Array<"memory" | "sessions">; /** Extra paths to include in memory search (directories or .md files). */ extraPaths?: string[]; + /** Optional multimodal file indexing for selected extra paths. 
*/ + multimodal?: { + /** Enable image/audio embeddings from extraPaths. */ + enabled?: boolean; + /** Which non-text file types to index. */ + modalities?: Array<"image" | "audio" | "all">; + /** Max bytes allowed per multimodal file before it is skipped. */ + maxFileBytes?: number; + }; /** Experimental memory search settings. */ experimental?: { /** Enable session transcript indexing (experimental, default: false). */ @@ -347,6 +356,11 @@ export type MemorySearchConfig = { fallback?: "openai" | "gemini" | "local" | "voyage" | "mistral" | "ollama" | "none"; /** Embedding model id (remote) or alias (local). */ model?: string; + /** + * Gemini embedding-2 models only: output vector dimensions. + * Supported values today are 768, 1536, and 3072. + */ + outputDimensionality?: number; /** Local embedding settings (node-llama-cpp). */ local?: { /** GGUF model path or hf: URI. */ diff --git a/src/config/types.tts.ts b/src/config/types.tts.ts index 3d898ff9c57..a6232f9de5a 100644 --- a/src/config/types.tts.ts +++ b/src/config/types.tts.ts @@ -61,6 +61,10 @@ export type TtsConfig = { baseUrl?: string; model?: string; voice?: string; + /** Playback speed (0.25–4.0, default 1.0). */ + speed?: number; + /** System-level instructions for the TTS model (gpt-4o-mini-tts only). */ + instructions?: string; }; /** Microsoft Edge (node-edge-tts) configuration. 
*/ edge?: { diff --git a/src/config/validation.ts b/src/config/validation.ts index 90d733e0818..686dbb0ed43 100644 --- a/src/config/validation.ts +++ b/src/config/validation.ts @@ -297,17 +297,23 @@ type ValidateConfigWithPluginsResult = warnings: ConfigValidationIssue[]; }; -export function validateConfigObjectWithPlugins(raw: unknown): ValidateConfigWithPluginsResult { - return validateConfigObjectWithPluginsBase(raw, { applyDefaults: true }); +export function validateConfigObjectWithPlugins( + raw: unknown, + params?: { env?: NodeJS.ProcessEnv }, +): ValidateConfigWithPluginsResult { + return validateConfigObjectWithPluginsBase(raw, { applyDefaults: true, env: params?.env }); } -export function validateConfigObjectRawWithPlugins(raw: unknown): ValidateConfigWithPluginsResult { - return validateConfigObjectWithPluginsBase(raw, { applyDefaults: false }); +export function validateConfigObjectRawWithPlugins( + raw: unknown, + params?: { env?: NodeJS.ProcessEnv }, +): ValidateConfigWithPluginsResult { + return validateConfigObjectWithPluginsBase(raw, { applyDefaults: false, env: params?.env }); } function validateConfigObjectWithPluginsBase( raw: unknown, - opts: { applyDefaults: boolean }, + opts: { applyDefaults: boolean; env?: NodeJS.ProcessEnv }, ): ValidateConfigWithPluginsResult { const base = opts.applyDefaults ? validateConfigObject(raw) : validateConfigObjectRaw(raw); if (!base.ok) { @@ -345,6 +351,7 @@ function validateConfigObjectWithPluginsBase( const registry = loadPluginManifestRegistry({ config, workspaceDir: workspaceDir ?? 
undefined, + env: opts.env, }); for (const diag of registry.diagnostics) { diff --git a/src/config/zod-schema.agent-runtime.ts b/src/config/zod-schema.agent-runtime.ts index 3ede7218b80..d5b9eeedb16 100644 --- a/src/config/zod-schema.agent-runtime.ts +++ b/src/config/zod-schema.agent-runtime.ts @@ -553,6 +553,16 @@ export const MemorySearchSchema = z enabled: z.boolean().optional(), sources: z.array(z.union([z.literal("memory"), z.literal("sessions")])).optional(), extraPaths: z.array(z.string()).optional(), + multimodal: z + .object({ + enabled: z.boolean().optional(), + modalities: z + .array(z.union([z.literal("image"), z.literal("audio"), z.literal("all")])) + .optional(), + maxFileBytes: z.number().int().positive().optional(), + }) + .strict() + .optional(), experimental: z .object({ sessionMemory: z.boolean().optional(), @@ -599,6 +609,7 @@ export const MemorySearchSchema = z ]) .optional(), model: z.string().optional(), + outputDimensionality: z.number().int().positive().optional(), local: z .object({ modelPath: z.string().optional(), diff --git a/src/config/zod-schema.core.ts b/src/config/zod-schema.core.ts index 066a33f0f4f..305efab4b26 100644 --- a/src/config/zod-schema.core.ts +++ b/src/config/zod-schema.core.ts @@ -404,6 +404,8 @@ export const TtsConfigSchema = z baseUrl: z.string().optional(), model: z.string().optional(), voice: z.string().optional(), + speed: z.number().min(0.25).max(4).optional(), + instructions: z.string().optional(), }) .strict() .optional(), diff --git a/src/config/zod-schema.providers-core.ts b/src/config/zod-schema.providers-core.ts index 0bb676fa5ad..d68ac63759c 100644 --- a/src/config/zod-schema.providers-core.ts +++ b/src/config/zod-schema.providers-core.ts @@ -244,7 +244,9 @@ export const TelegramAccountSchemaBase = z sendMessage: z.boolean().optional(), poll: z.boolean().optional(), deleteMessage: z.boolean().optional(), + editMessage: z.boolean().optional(), sticker: z.boolean().optional(), + createForumTopic: 
z.boolean().optional(), }) .strict() .optional(), @@ -977,6 +979,7 @@ export const SignalAccountSchemaBase = z enabled: z.boolean().optional(), configWrites: z.boolean().optional(), account: z.string().optional(), + accountUuid: z.string().optional(), httpUrl: z.string().optional(), httpHost: z.string().optional(), httpPort: z.number().int().positive().optional(), diff --git a/src/config/zod-schema.tts.test.ts b/src/config/zod-schema.tts.test.ts new file mode 100644 index 00000000000..70398e81054 --- /dev/null +++ b/src/config/zod-schema.tts.test.ts @@ -0,0 +1,36 @@ +import { describe, expect, it } from "vitest"; +import { TtsConfigSchema } from "./zod-schema.core.js"; + +describe("TtsConfigSchema openai speed and instructions", () => { + it("accepts speed and instructions in openai section", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + voice: "alloy", + speed: 1.5, + instructions: "Speak in a cheerful tone", + }, + }), + ).not.toThrow(); + }); + + it("rejects out-of-range openai speed", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + speed: 5.0, + }, + }), + ).toThrow(); + }); + + it("rejects openai speed below minimum", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + speed: 0.1, + }, + }), + ).toThrow(); + }); +}); diff --git a/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts b/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts index 9da88bbb4a3..2c7eb20a3c6 100644 --- a/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts +++ b/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts @@ -217,6 +217,9 @@ describe("dispatchCronDelivery — double-announce guard", () => { payloads: [{ text: "Detailed child result, everything finished successfully." 
}], }), ); + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ skipQueue: true }), + ); }); it("normal text delivery sends exactly once and sets deliveryAttempted=true", async () => { @@ -304,4 +307,69 @@ describe("dispatchCronDelivery — double-announce guard", () => { expect(deliverOutboundPayloads).not.toHaveBeenCalled(); expect(state.deliveryAttempted).toBe(false); }); + + it("text delivery always bypasses the write-ahead queue", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]); + + const params = makeBaseParams({ synthesizedText: "Daily digest ready." }); + const state = await dispatchCronDelivery(params); + + expect(state.delivered).toBe(true); + expect(state.deliveryAttempted).toBe(true); + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); + + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "telegram", + to: "123456", + payloads: [{ text: "Daily digest ready." }], + skipQueue: true, + }), + ); + }); + + it("structured/thread delivery also bypasses the write-ahead queue", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]); + + const params = makeBaseParams({ synthesizedText: "Report attached." 
}); + // Simulate structured content so useDirectDelivery path is taken (no retryTransient) + (params as Record).deliveryPayloadHasStructuredContent = true; + await dispatchCronDelivery(params); + + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ skipQueue: true }), + ); + }); + + it("transient retry delivers exactly once with skipQueue on both attempts", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + + // First call throws a transient error, second call succeeds. + vi.mocked(deliverOutboundPayloads) + .mockRejectedValueOnce(new Error("gateway timeout")) + .mockResolvedValueOnce([{ ok: true } as never]); + + vi.stubEnv("OPENCLAW_TEST_FAST", "1"); + try { + const params = makeBaseParams({ synthesizedText: "Retry test." }); + const state = await dispatchCronDelivery(params); + + expect(state.delivered).toBe(true); + expect(state.deliveryAttempted).toBe(true); + // Two calls total: first failed transiently, second succeeded. + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(2); + + const calls = vi.mocked(deliverOutboundPayloads).mock.calls; + expect(calls[0][0]).toEqual(expect.objectContaining({ skipQueue: true })); + expect(calls[1][0]).toEqual(expect.objectContaining({ skipQueue: true })); + } finally { + vi.unstubAllEnvs(); + } + }); }); diff --git a/src/cron/isolated-agent/delivery-dispatch.ts b/src/cron/isolated-agent/delivery-dispatch.ts index fa9a295a777..a5dc0190b72 100644 --- a/src/cron/isolated-agent/delivery-dispatch.ts +++ b/src/cron/isolated-agent/delivery-dispatch.ts @@ -157,7 +157,9 @@ function isTransientDirectCronDeliveryError(error: unknown): boolean { } function resolveDirectCronRetryDelaysMs(): readonly number[] { - return process.env.OPENCLAW_TEST_FAST === "1" ? 
[8, 16, 32] : [5_000, 10_000, 20_000]; + return process.env.NODE_ENV === "test" && process.env.OPENCLAW_TEST_FAST === "1" + ? [8, 16, 32] + : [5_000, 10_000, 20_000]; } async function retryTransientDirectCronDelivery(params: { @@ -256,6 +258,12 @@ export async function dispatchCronDelivery( bestEffort: params.deliveryBestEffort, deps: createOutboundSendDeps(params.deps), abortSignal: params.abortSignal, + // Isolated cron direct delivery uses its own transient retry loop. + // Keep all attempts out of the write-ahead delivery queue so a + // late-successful first send cannot leave behind a failed queue + // entry that replays on the next restart. + // See: https://github.com/openclaw/openclaw/issues/40545 + skipQueue: true, }); const deliveryResults = options?.retryTransient ? await retryTransientDirectCronDelivery({ diff --git a/src/daemon/launchd-restart-handoff.test.ts b/src/daemon/launchd-restart-handoff.test.ts new file mode 100644 index 00000000000..d685e64d851 --- /dev/null +++ b/src/daemon/launchd-restart-handoff.test.ts @@ -0,0 +1,43 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; + +const spawnMock = vi.hoisted(() => vi.fn()); +const unrefMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + spawn: (...args: unknown[]) => spawnMock(...args), +})); + +import { scheduleDetachedLaunchdRestartHandoff } from "./launchd-restart-handoff.js"; + +afterEach(() => { + spawnMock.mockReset(); + unrefMock.mockReset(); + spawnMock.mockReturnValue({ pid: 4242, unref: unrefMock }); +}); + +describe("scheduleDetachedLaunchdRestartHandoff", () => { + it("waits for the caller pid before kickstarting launchd", () => { + const env = { + HOME: "/Users/test", + OPENCLAW_PROFILE: "default", + }; + spawnMock.mockReturnValue({ pid: 4242, unref: unrefMock }); + + const result = scheduleDetachedLaunchdRestartHandoff({ + env, + mode: "kickstart", + waitForPid: 9876, + }); + + expect(result).toEqual({ ok: true, pid: 4242 }); + 
expect(spawnMock).toHaveBeenCalledTimes(1); + const [, args] = spawnMock.mock.calls[0] as [string, string[]]; + expect(args[0]).toBe("-c"); + expect(args[2]).toBe("openclaw-launchd-restart-handoff"); + expect(args[6]).toBe("9876"); + expect(args[1]).toContain('while kill -0 "$wait_pid" >/dev/null 2>&1; do'); + expect(args[1]).toContain('launchctl kickstart -k "$service_target" >/dev/null 2>&1'); + expect(args[1]).not.toContain("sleep 1"); + expect(unrefMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/src/daemon/launchd-restart-handoff.ts b/src/daemon/launchd-restart-handoff.ts new file mode 100644 index 00000000000..ff2fa9dc612 --- /dev/null +++ b/src/daemon/launchd-restart-handoff.ts @@ -0,0 +1,138 @@ +import { spawn } from "node:child_process"; +import os from "node:os"; +import path from "node:path"; +import { resolveGatewayLaunchAgentLabel } from "./constants.js"; + +export type LaunchdRestartHandoffMode = "kickstart" | "start-after-exit"; + +export type LaunchdRestartHandoffResult = { + ok: boolean; + pid?: number; + detail?: string; +}; + +export type LaunchdRestartTarget = { + domain: string; + label: string; + plistPath: string; + serviceTarget: string; +}; + +function resolveGuiDomain(): string { + if (typeof process.getuid !== "function") { + return "gui/501"; + } + return `gui/${process.getuid()}`; +} + +function resolveLaunchAgentLabel(env?: Record): string { + const envLabel = env?.OPENCLAW_LAUNCHD_LABEL?.trim(); + if (envLabel) { + return envLabel; + } + return resolveGatewayLaunchAgentLabel(env?.OPENCLAW_PROFILE); +} + +export function resolveLaunchdRestartTarget( + env: Record = process.env, +): LaunchdRestartTarget { + const domain = resolveGuiDomain(); + const label = resolveLaunchAgentLabel(env); + const home = env.HOME?.trim() || os.homedir(); + const plistPath = path.join(home, "Library", "LaunchAgents", `${label}.plist`); + return { + domain, + label, + plistPath, + serviceTarget: `${domain}/${label}`, + }; +} + +export function 
isCurrentProcessLaunchdServiceLabel( + label: string, + env: NodeJS.ProcessEnv = process.env, +): boolean { + const launchdLabel = + env.LAUNCH_JOB_LABEL?.trim() || env.LAUNCH_JOB_NAME?.trim() || env.XPC_SERVICE_NAME?.trim(); + if (launchdLabel) { + return launchdLabel === label; + } + const configuredLabel = env.OPENCLAW_LAUNCHD_LABEL?.trim(); + return Boolean(configuredLabel && configuredLabel === label); +} + +function buildLaunchdRestartScript(mode: LaunchdRestartHandoffMode): string { + const waitForCallerPid = `wait_pid="$4" +if [ -n "$wait_pid" ] && [ "$wait_pid" -gt 1 ] 2>/dev/null; then + while kill -0 "$wait_pid" >/dev/null 2>&1; do + sleep 0.1 + done +fi +`; + + if (mode === "kickstart") { + return `service_target="$1" +domain="$2" +plist_path="$3" +${waitForCallerPid} +if ! launchctl kickstart -k "$service_target" >/dev/null 2>&1; then + launchctl enable "$service_target" >/dev/null 2>&1 + if launchctl bootstrap "$domain" "$plist_path" >/dev/null 2>&1; then + launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + fi +fi +`; + } + + return `service_target="$1" +domain="$2" +plist_path="$3" +${waitForCallerPid} +if ! launchctl start "$service_target" >/dev/null 2>&1; then + launchctl enable "$service_target" >/dev/null 2>&1 + if launchctl bootstrap "$domain" "$plist_path" >/dev/null 2>&1; then + launchctl start "$service_target" >/dev/null 2>&1 || launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + else + launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + fi +fi +`; +} + +export function scheduleDetachedLaunchdRestartHandoff(params: { + env?: Record; + mode: LaunchdRestartHandoffMode; + waitForPid?: number; +}): LaunchdRestartHandoffResult { + const target = resolveLaunchdRestartTarget(params.env); + const waitForPid = + typeof params.waitForPid === "number" && Number.isFinite(params.waitForPid) + ? 
Math.floor(params.waitForPid) + : 0; + try { + const child = spawn( + "/bin/sh", + [ + "-c", + buildLaunchdRestartScript(params.mode), + "openclaw-launchd-restart-handoff", + target.serviceTarget, + target.domain, + target.plistPath, + String(waitForPid), + ], + { + detached: true, + stdio: "ignore", + env: { ...process.env, ...params.env }, + }, + ); + child.unref(); + return { ok: true, pid: child.pid ?? undefined }; + } catch (err) { + return { + ok: false, + detail: err instanceof Error ? err.message : String(err), + }; + } +} diff --git a/src/daemon/launchd.test.ts b/src/daemon/launchd.test.ts index 99e5e1f933e..3acd239afe1 100644 --- a/src/daemon/launchd.test.ts +++ b/src/daemon/launchd.test.ts @@ -18,11 +18,17 @@ const state = vi.hoisted(() => ({ listOutput: "", printOutput: "", bootstrapError: "", + kickstartError: "", + kickstartFailuresRemaining: 0, dirs: new Set(), dirModes: new Map(), files: new Map(), fileModes: new Map(), })); +const launchdRestartHandoffState = vi.hoisted(() => ({ + isCurrentProcessLaunchdServiceLabel: vi.fn<(label: string) => boolean>(() => false), + scheduleDetachedLaunchdRestartHandoff: vi.fn((_params: unknown) => ({ ok: true, pid: 7331 })), +})); const defaultProgramArguments = ["node", "-e", "process.exit(0)"]; function normalizeLaunchctlArgs(file: string, args: string[]): string[] { @@ -49,10 +55,21 @@ vi.mock("./exec-file.js", () => ({ if (call[0] === "bootstrap" && state.bootstrapError) { return { stdout: "", stderr: state.bootstrapError, code: 1 }; } + if (call[0] === "kickstart" && state.kickstartError && state.kickstartFailuresRemaining > 0) { + state.kickstartFailuresRemaining -= 1; + return { stdout: "", stderr: state.kickstartError, code: 1 }; + } return { stdout: "", stderr: "", code: 0 }; }), })); +vi.mock("./launchd-restart-handoff.js", () => ({ + isCurrentProcessLaunchdServiceLabel: (label: string) => + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel(label), + scheduleDetachedLaunchdRestartHandoff: 
(params: unknown) => + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff(params), +})); + vi.mock("node:fs/promises", async (importOriginal) => { const actual = await importOriginal(); const wrapped = { @@ -109,10 +126,19 @@ beforeEach(() => { state.listOutput = ""; state.printOutput = ""; state.bootstrapError = ""; + state.kickstartError = ""; + state.kickstartFailuresRemaining = 0; state.dirs.clear(); state.dirModes.clear(); state.files.clear(); state.fileModes.clear(); + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReset(); + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReturnValue(false); + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff.mockReset(); + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff.mockReturnValue({ + ok: true, + pid: 7331, + }); vi.clearAllMocks(); }); @@ -304,9 +330,28 @@ describe("launchd install", () => { expect(state.fileModes.get(plistPath)).toBe(0o644); }); - it("restarts LaunchAgent with bootout-enable-bootstrap-kickstart order", async () => { + it("restarts LaunchAgent with kickstart and no bootout", async () => { const env = createDefaultLaunchdEnv(); - await restartLaunchAgent({ + const result = await restartLaunchAgent({ + env, + stdout: new PassThrough(), + }); + + const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; + const label = "ai.openclaw.gateway"; + const serviceId = `${domain}/${label}`; + expect(result).toEqual({ outcome: "completed" }); + expect(state.launchctlCalls).toContainEqual(["kickstart", "-k", serviceId]); + expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); + expect(state.launchctlCalls.some((call) => call[0] === "bootstrap")).toBe(false); + }); + + it("falls back to bootstrap when kickstart cannot find the service", async () => { + const env = createDefaultLaunchdEnv(); + state.kickstartError = "Could not find service"; + state.kickstartFailuresRemaining = 1; + + const result = await restartLaunchAgent({ env, stdout: new PassThrough(), }); @@ -315,8 +360,8 @@ describe("launchd install", () => { const label = "ai.openclaw.gateway"; const plistPath = resolveLaunchAgentPlistPath(env); const serviceId = `${domain}/${label}`; - const bootoutIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootout" && c[1] === serviceId, + const kickstartCalls = state.launchctlCalls.filter( + (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); const enableIndex = state.launchctlCalls.findIndex( (c) => c[0] === "enable" && c[1] === serviceId, @@ -324,53 +369,46 @@ describe("launchd install", () => { const bootstrapIndex = state.launchctlCalls.findIndex( (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, ); - const kickstartIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, - ); - expect(bootoutIndex).toBeGreaterThanOrEqual(0); + expect(result).toEqual({ outcome: "completed" }); + expect(kickstartCalls).toHaveLength(2); expect(enableIndex).toBeGreaterThanOrEqual(0); expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(kickstartIndex).toBeGreaterThanOrEqual(0); - expect(bootoutIndex).toBeLessThan(enableIndex); - expect(enableIndex).toBeLessThan(bootstrapIndex); - 
expect(bootstrapIndex).toBeLessThan(kickstartIndex); + expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); }); - it("waits for previous launchd pid to exit before bootstrapping", async () => { + it("surfaces the original kickstart failure when the service is still loaded", async () => { const env = createDefaultLaunchdEnv(); - state.printOutput = ["state = running", "pid = 4242"].join("\n"); - const killSpy = vi.spyOn(process, "kill"); - killSpy - .mockImplementationOnce(() => true) - .mockImplementationOnce(() => { - const err = new Error("no such process") as NodeJS.ErrnoException; - err.code = "ESRCH"; - throw err; - }); + state.kickstartError = "Input/output error"; + state.kickstartFailuresRemaining = 1; - vi.useFakeTimers(); - try { - const restartPromise = restartLaunchAgent({ + await expect( + restartLaunchAgent({ env, stdout: new PassThrough(), - }); - await vi.advanceTimersByTimeAsync(250); - await restartPromise; - expect(killSpy).toHaveBeenCalledWith(4242, 0); - const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const bootoutIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootout" && c[1] === `${domain}/${label}`, - ); - const bootstrapIndex = state.launchctlCalls.findIndex((c) => c[0] === "bootstrap"); - expect(bootoutIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(bootoutIndex).toBeLessThan(bootstrapIndex); - } finally { - vi.useRealTimers(); - killSpy.mockRestore(); - } + }), + ).rejects.toThrow("launchctl kickstart failed: Input/output error"); + + expect(state.launchctlCalls.some((call) => call[0] === "enable")).toBe(false); + expect(state.launchctlCalls.some((call) => call[0] === "bootstrap")).toBe(false); + }); + + it("hands restart off to a detached helper when invoked from the current LaunchAgent", async () => { + const env = createDefaultLaunchdEnv(); + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReturnValue(true); + + const result = await restartLaunchAgent({ + env, + stdout: new PassThrough(), + }); + + expect(result).toEqual({ outcome: "scheduled" }); + expect(launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff).toHaveBeenCalledWith({ + env, + mode: "kickstart", + waitForPid: process.pid, + }); + expect(state.launchctlCalls).toEqual([]); }); it("shows actionable guidance when launchctl gui domain does not support bootstrap", async () => { diff --git a/src/daemon/launchd.ts b/src/daemon/launchd.ts index 492eb2e4d6e..68ae1b43edd 100644 --- a/src/daemon/launchd.ts +++ b/src/daemon/launchd.ts @@ -12,6 +12,10 @@ import { buildLaunchAgentPlist as buildLaunchAgentPlistImpl, readLaunchAgentProgramArgumentsFromFile, } from "./launchd-plist.js"; +import { + isCurrentProcessLaunchdServiceLabel, + scheduleDetachedLaunchdRestartHandoff, +} from "./launchd-restart-handoff.js"; import { formatLine, toPosixPath, writeFormattedLines } from "./output.js"; import { resolveGatewayStateDir, resolveHomeDir } 
from "./paths.js"; import { parseKeyValueOutput } from "./runtime-parse.js"; @@ -23,6 +27,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; const LAUNCH_AGENT_DIR_MODE = 0o755; @@ -352,34 +357,6 @@ function isUnsupportedGuiDomain(detail: string): boolean { ); } -const RESTART_PID_WAIT_TIMEOUT_MS = 10_000; -const RESTART_PID_WAIT_INTERVAL_MS = 200; - -async function sleepMs(ms: number): Promise { - await new Promise((resolve) => { - setTimeout(resolve, ms); - }); -} - -async function waitForPidExit(pid: number): Promise { - if (!Number.isFinite(pid) || pid <= 1) { - return; - } - const deadline = Date.now() + RESTART_PID_WAIT_TIMEOUT_MS; - while (Date.now() < deadline) { - try { - process.kill(pid, 0); - } catch (err) { - const code = (err as NodeJS.ErrnoException).code; - if (code === "ESRCH" || code === "EPERM") { - return; - } - return; - } - await sleepMs(RESTART_PID_WAIT_INTERVAL_MS); - } -} - export async function stopLaunchAgent({ stdout, env }: GatewayServiceControlArgs): Promise { const domain = resolveGuiDomain(); const label = resolveLaunchAgentLabel({ env }); @@ -471,29 +448,53 @@ export async function installLaunchAgent({ export async function restartLaunchAgent({ stdout, env, -}: GatewayServiceControlArgs): Promise { +}: GatewayServiceControlArgs): Promise { const serviceEnv = env ?? (process.env as GatewayServiceEnv); const domain = resolveGuiDomain(); const label = resolveLaunchAgentLabel({ env: serviceEnv }); const plistPath = resolveLaunchAgentPlistPath(serviceEnv); + const serviceTarget = `${domain}/${label}`; - const runtime = await execLaunchctl(["print", `${domain}/${label}`]); - const previousPid = - runtime.code === 0 - ? 
parseLaunchctlPrint(runtime.stdout || runtime.stderr || "").pid - : undefined; - - const stop = await execLaunchctl(["bootout", `${domain}/${label}`]); - if (stop.code !== 0 && !isLaunchctlNotLoaded(stop)) { - throw new Error(`launchctl bootout failed: ${stop.stderr || stop.stdout}`.trim()); - } - if (typeof previousPid === "number") { - await waitForPidExit(previousPid); + // Restart requests issued from inside the managed gateway process tree need a + // detached handoff. A direct `kickstart -k` would terminate the caller before + // it can finish the restart command. + if (isCurrentProcessLaunchdServiceLabel(label)) { + const handoff = scheduleDetachedLaunchdRestartHandoff({ + env: serviceEnv, + mode: "kickstart", + waitForPid: process.pid, + }); + if (!handoff.ok) { + throw new Error(`launchd restart handoff failed: ${handoff.detail ?? "unknown error"}`); + } + try { + stdout.write(`${formatLine("Scheduled LaunchAgent restart", serviceTarget)}\n`); + } catch (err: unknown) { + if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { + throw err; + } + } + return { outcome: "scheduled" }; } - // launchd can persist "disabled" state after bootout; clear it before bootstrap - // (matches the same guard in installLaunchAgent). - await execLaunchctl(["enable", `${domain}/${label}`]); + const start = await execLaunchctl(["kickstart", "-k", serviceTarget]); + if (start.code === 0) { + try { + stdout.write(`${formatLine("Restarted LaunchAgent", serviceTarget)}\n`); + } catch (err: unknown) { + if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { + throw err; + } + } + return { outcome: "completed" }; + } + + if (!isLaunchctlNotLoaded(start)) { + throw new Error(`launchctl kickstart failed: ${start.stderr || start.stdout}`.trim()); + } + + // If the service was previously booted out, re-register the plist and retry. 
+ await execLaunchctl(["enable", serviceTarget]); const boot = await execLaunchctl(["bootstrap", domain, plistPath]); if (boot.code !== 0) { const detail = (boot.stderr || boot.stdout).trim(); @@ -511,15 +512,16 @@ export async function restartLaunchAgent({ throw new Error(`launchctl bootstrap failed: ${detail}`); } - const start = await execLaunchctl(["kickstart", "-k", `${domain}/${label}`]); - if (start.code !== 0) { - throw new Error(`launchctl kickstart failed: ${start.stderr || start.stdout}`.trim()); + const retry = await execLaunchctl(["kickstart", "-k", serviceTarget]); + if (retry.code !== 0) { + throw new Error(`launchctl kickstart failed: ${retry.stderr || retry.stdout}`.trim()); } try { - stdout.write(`${formatLine("Restarted LaunchAgent", `${domain}/${label}`)}\n`); + stdout.write(`${formatLine("Restarted LaunchAgent", serviceTarget)}\n`); } catch (err: unknown) { if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { throw err; } } + return { outcome: "completed" }; } diff --git a/src/daemon/schtasks.ts b/src/daemon/schtasks.ts index af09d2ca564..ddca704f6a4 100644 --- a/src/daemon/schtasks.ts +++ b/src/daemon/schtasks.ts @@ -16,6 +16,7 @@ import type { GatewayServiceInstallArgs, GatewayServiceManageArgs, GatewayServiceRenderArgs, + GatewayServiceRestartResult, } from "./service-types.js"; function resolveTaskName(env: GatewayServiceEnv): string { @@ -316,7 +317,7 @@ export async function stopScheduledTask({ stdout, env }: GatewayServiceControlAr export async function restartScheduledTask({ stdout, env, -}: GatewayServiceControlArgs): Promise { +}: GatewayServiceControlArgs): Promise { await assertSchtasksAvailable(); const taskName = resolveTaskName(env ?? 
(process.env as GatewayServiceEnv)); await execSchtasks(["/End", "/TN", taskName]); @@ -325,6 +326,7 @@ export async function restartScheduledTask({ throw new Error(`schtasks run failed: ${res.stderr || res.stdout}`.trim()); } stdout.write(`${formatLine("Restarted Scheduled Task", taskName)}\n`); + return { outcome: "completed" }; } export async function isScheduledTaskInstalled(args: GatewayServiceEnvArgs): Promise { diff --git a/src/daemon/service-types.ts b/src/daemon/service-types.ts index ae7d8d1a28f..202930bd6ce 100644 --- a/src/daemon/service-types.ts +++ b/src/daemon/service-types.ts @@ -19,6 +19,8 @@ export type GatewayServiceControlArgs = { env?: GatewayServiceEnv; }; +export type GatewayServiceRestartResult = { outcome: "completed" } | { outcome: "scheduled" }; + export type GatewayServiceEnvArgs = { env?: GatewayServiceEnv; }; diff --git a/src/daemon/service.test.ts b/src/daemon/service.test.ts index 19811e49699..ea2c53e8e1a 100644 --- a/src/daemon/service.test.ts +++ b/src/daemon/service.test.ts @@ -1,5 +1,5 @@ import { afterEach, describe, expect, it } from "vitest"; -import { resolveGatewayService } from "./service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "./service.js"; const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); @@ -37,4 +37,13 @@ describe("resolveGatewayService", () => { setPlatform("aix"); expect(() => resolveGatewayService()).toThrow("Gateway service install not supported on aix"); }); + + it("describes scheduled restart handoffs consistently", () => { + expect(describeGatewayServiceRestart("Gateway", { outcome: "scheduled" })).toEqual({ + scheduled: true, + daemonActionResult: "scheduled", + message: "restart scheduled, gateway will restart momentarily", + progressMessage: "Gateway service restart scheduled.", + }); + }); }); diff --git a/src/daemon/service.ts b/src/daemon/service.ts index 9685ed1ece5..8083ce4b5e1 100644 --- a/src/daemon/service.ts +++ 
b/src/daemon/service.ts @@ -24,6 +24,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; import { installSystemdService, @@ -41,6 +42,7 @@ export type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; function ignoreInstallResult( @@ -58,12 +60,37 @@ export type GatewayService = { install: (args: GatewayServiceInstallArgs) => Promise; uninstall: (args: GatewayServiceManageArgs) => Promise; stop: (args: GatewayServiceControlArgs) => Promise; - restart: (args: GatewayServiceControlArgs) => Promise; + restart: (args: GatewayServiceControlArgs) => Promise; isLoaded: (args: GatewayServiceEnvArgs) => Promise; readCommand: (env: GatewayServiceEnv) => Promise; readRuntime: (env: GatewayServiceEnv) => Promise; }; +export function describeGatewayServiceRestart( + serviceNoun: string, + result: GatewayServiceRestartResult, +): { + scheduled: boolean; + daemonActionResult: "restarted" | "scheduled"; + message: string; + progressMessage: string; +} { + if (result.outcome === "scheduled") { + return { + scheduled: true, + daemonActionResult: "scheduled", + message: `restart scheduled, ${serviceNoun.toLowerCase()} will restart momentarily`, + progressMessage: `${serviceNoun} service restart scheduled.`, + }; + } + return { + scheduled: false, + daemonActionResult: "restarted", + message: `${serviceNoun} service restarted.`, + progressMessage: `${serviceNoun} service restarted.`, + }; +} + type SupportedGatewayServicePlatform = "darwin" | "linux" | "win32"; const GATEWAY_SERVICE_REGISTRY: Record = { diff --git a/src/daemon/systemd.ts b/src/daemon/systemd.ts index bce7593e24e..62ab2dfa146 100644 --- a/src/daemon/systemd.ts +++ b/src/daemon/systemd.ts @@ -20,6 +20,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from 
"./service-types.js"; import { enableSystemdUserLinger, @@ -570,13 +571,14 @@ export async function stopSystemdService({ export async function restartSystemdService({ stdout, env, -}: GatewayServiceControlArgs): Promise { +}: GatewayServiceControlArgs): Promise { await runSystemdServiceAction({ stdout, env, action: "restart", label: "Restarted systemd service", }); + return { outcome: "completed" }; } export async function isSystemdServiceEnabled(args: GatewayServiceEnvArgs): Promise { diff --git a/src/discord/monitor.test.ts b/src/discord/monitor.test.ts index 10c7dc66747..9471a3fe6bc 100644 --- a/src/discord/monitor.test.ts +++ b/src/discord/monitor.test.ts @@ -38,6 +38,7 @@ const makeEntries = ( requireMention: value.requireMention, reactionNotifications: value.reactionNotifications, users: value.users, + roles: value.roles, channels: value.channels, }; } @@ -730,6 +731,17 @@ describe("discord reaction notification gating", () => { }, expected: true, }, + { + name: "all mode blocks non-allowlisted guild member", + input: { + mode: "all" as const, + botId: "bot-1", + messageAuthorId: "user-1", + userId: "user-2", + guildInfo: { users: ["trusted-user"] }, + }, + expected: false, + }, { name: "own mode with bot-authored message", input: { @@ -750,6 +762,17 @@ describe("discord reaction notification gating", () => { }, expected: false, }, + { + name: "own mode still blocks member outside users allowlist", + input: { + mode: "own" as const, + botId: "bot-1", + messageAuthorId: "bot-1", + userId: "user-3", + guildInfo: { users: ["trusted-user"] }, + }, + expected: false, + }, { name: "allowlist mode without match", input: { @@ -769,7 +792,7 @@ describe("discord reaction notification gating", () => { messageAuthorId: "user-1", userId: "123", userName: "steipete", - allowlist: ["123", "other"] as string[], + guildInfo: { users: ["123", "other"] }, }, expected: true, }, @@ -781,7 +804,7 @@ describe("discord reaction notification gating", () => { messageAuthorId: 
"user-1", userId: "999", userName: "trusted-user", - allowlist: ["trusted-user"] as string[], + guildInfo: { users: ["trusted-user"] }, }, expected: false, }, @@ -793,21 +816,29 @@ describe("discord reaction notification gating", () => { messageAuthorId: "user-1", userId: "999", userName: "trusted-user", - allowlist: ["trusted-user"] as string[], + guildInfo: { users: ["trusted-user"] }, allowNameMatching: true, }, expected: true, }, + { + name: "allowlist mode matches allowed role", + input: { + mode: "allowlist" as const, + botId: "bot-1", + messageAuthorId: "user-1", + userId: "999", + guildInfo: { roles: ["role:trusted-role"] }, + memberRoleIds: ["trusted-role"], + }, + expected: true, + }, ]); for (const testCase of cases) { expect( shouldEmitDiscordReactionNotification({ ...testCase.input, - allowlist: - "allowlist" in testCase.input && testCase.input.allowlist - ? [...testCase.input.allowlist] - : undefined, }), testCase.name, ).toBe(testCase.expected); @@ -863,6 +894,7 @@ function makeReactionEvent(overrides?: { messageAuthorId?: string; messageFetch?: ReturnType; guild?: { name?: string; id?: string }; + memberRoleIds?: string[]; }) { const userId = overrides?.userId ?? "user-1"; const messageId = overrides?.messageId ?? "msg-1"; @@ -882,6 +914,7 @@ function makeReactionEvent(overrides?: { message_id: messageId, emoji: { name: overrides?.emojiName ?? "👍", id: null }, guild: overrides?.guild, + rawMember: overrides?.memberRoleIds ? 
{ roles: overrides.memberRoleIds } : undefined, user: { id: userId, bot: false, @@ -1059,7 +1092,31 @@ describe("discord DM reaction handling", () => { expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); }); - it("still processes guild reactions (no regression)", async () => { + it("blocks guild reactions for sender outside users allowlist", async () => { + const data = makeReactionEvent({ + guildId: "guild-123", + userId: "attacker-user", + botAsAuthor: true, + guild: { id: "guild-123", name: "Test Guild" }, + }); + const client = makeReactionClient({ channelType: ChannelType.GuildText }); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ + guildEntries: makeEntries({ + "guild-123": { + users: ["user:trusted-user"], + }, + }), + }), + ); + + await listener.handle(data, client); + + expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); + expect(resolveAgentRouteMock).not.toHaveBeenCalled(); + }); + + it("allows guild reactions for sender in channel role allowlist override", async () => { resolveAgentRouteMock.mockReturnValueOnce({ agentId: "default", channel: "discord", @@ -1069,11 +1126,27 @@ describe("discord DM reaction handling", () => { const data = makeReactionEvent({ guildId: "guild-123", + userId: "member-user", botAsAuthor: true, - guild: { name: "Test Guild" }, + guild: { id: "guild-123", name: "Test Guild" }, + memberRoleIds: ["trusted-role"], }); const client = makeReactionClient({ channelType: ChannelType.GuildText }); - const listener = new DiscordReactionListener(makeReactionListenerParams()); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ + guildEntries: makeEntries({ + "guild-123": { + roles: ["role:blocked-role"], + channels: { + "channel-1": { + allow: true, + roles: ["role:trusted-role"], + }, + }, + }, + }), + }), + ); await listener.handle(data, client); diff --git a/src/discord/monitor/allow-list.ts b/src/discord/monitor/allow-list.ts index b736928e276..7c1250cb8ef 100644 --- 
a/src/discord/monitor/allow-list.ts +++ b/src/discord/monitor/allow-list.ts @@ -556,6 +556,9 @@ export function shouldEmitDiscordReactionNotification(params: { userId: string; userName?: string; userTag?: string; + channelConfig?: DiscordChannelConfigResolved | null; + guildInfo?: DiscordGuildEntryResolved | null; + memberRoleIds?: string[]; allowlist?: string[]; allowNameMatching?: boolean; }) { @@ -563,26 +566,31 @@ export function shouldEmitDiscordReactionNotification(params: { if (mode === "off") { return false; } + const accessGuildInfo = + params.guildInfo ?? + (params.allowlist ? ({ users: params.allowlist } satisfies DiscordGuildEntryResolved) : null); + const { hasAccessRestrictions, memberAllowed } = resolveDiscordMemberAccessState({ + channelConfig: params.channelConfig, + guildInfo: accessGuildInfo, + memberRoleIds: params.memberRoleIds ?? [], + sender: { + id: params.userId, + name: params.userName, + tag: params.userTag, + }, + allowNameMatching: params.allowNameMatching, + }); + if (mode === "allowlist") { + return hasAccessRestrictions && memberAllowed; + } + if (hasAccessRestrictions && !memberAllowed) { + return false; + } if (mode === "all") { return true; } if (mode === "own") { return Boolean(params.botId && params.messageAuthorId === params.botId); } - if (mode === "allowlist") { - const list = normalizeDiscordAllowList(params.allowlist, ["discord:", "user:", "pk:"]); - if (!list) { - return false; - } - return allowListMatches( - list, - { - id: params.userId, - name: params.userName, - tag: params.userTag, - }, - { allowNameMatching: params.allowNameMatching }, - ); - } return false; } diff --git a/src/discord/monitor/listeners.ts b/src/discord/monitor/listeners.ts index 056a1ad7116..824cb5fb19a 100644 --- a/src/discord/monitor/listeners.ts +++ b/src/discord/monitor/listeners.ts @@ -24,6 +24,7 @@ import { normalizeDiscordSlug, resolveDiscordAllowListMatch, resolveDiscordChannelConfigWithFallback, + resolveDiscordMemberAccessState, 
resolveGroupDmAllow, resolveDiscordGuildEntry, shouldEmitDiscordReactionNotification, @@ -294,6 +295,7 @@ async function runDiscordReactionHandler(params: { type DiscordReactionIngressAuthorizationParams = { accountId: string; user: User; + memberRoleIds: string[]; isDirectMessage: boolean; isGroupDm: boolean; isGuildMessage: boolean; @@ -308,7 +310,7 @@ type DiscordReactionIngressAuthorizationParams = { groupPolicy: "open" | "allowlist" | "disabled"; allowNameMatching: boolean; guildInfo: import("./allow-list.js").DiscordGuildEntryResolved | null; - channelConfig?: { allowed?: boolean } | null; + channelConfig?: import("./allow-list.js").DiscordChannelConfigResolved | null; }; async function authorizeDiscordReactionIngress( @@ -383,6 +385,20 @@ async function authorizeDiscordReactionIngress( if (params.channelConfig?.allowed === false) { return { allowed: false, reason: "guild-channel-denied" }; } + const { hasAccessRestrictions, memberAllowed } = resolveDiscordMemberAccessState({ + channelConfig: params.channelConfig, + guildInfo: params.guildInfo, + memberRoleIds: params.memberRoleIds, + sender: { + id: params.user.id, + name: params.user.username, + tag: formatDiscordUserTag(params.user), + }, + allowNameMatching: params.allowNameMatching, + }); + if (hasAccessRestrictions && !memberAllowed) { + return { allowed: false, reason: "guild-member-denied" }; + } return { allowed: true }; } @@ -434,9 +450,13 @@ async function handleDiscordReactionEvent( channelType === ChannelType.PublicThread || channelType === ChannelType.PrivateThread || channelType === ChannelType.AnnouncementThread; + const memberRoleIds = Array.isArray(data.rawMember?.roles) + ? 
data.rawMember.roles.map((roleId: string) => String(roleId)) + : []; const reactionIngressBase: Omit = { accountId: params.accountId, user, + memberRoleIds, isDirectMessage, isGroupDm, isGuildMessage, @@ -452,17 +472,18 @@ async function handleDiscordReactionEvent( allowNameMatching: params.allowNameMatching, guildInfo, }; - const ingressAccess = await authorizeDiscordReactionIngress(reactionIngressBase); - if (!ingressAccess.allowed) { - logVerbose(`discord reaction blocked sender=${user.id} (reason=${ingressAccess.reason})`); - return; + // Guild reactions need resolved channel/thread config before member access + // can mirror the normal message preflight path. + if (!isGuildMessage) { + const ingressAccess = await authorizeDiscordReactionIngress(reactionIngressBase); + if (!ingressAccess.allowed) { + logVerbose(`discord reaction blocked sender=${user.id} (reason=${ingressAccess.reason})`); + return; + } } let parentId = "parentId" in channel ? (channel.parentId ?? undefined) : undefined; let parentName: string | undefined; let parentSlug = ""; - const memberRoleIds = Array.isArray(data.rawMember?.roles) - ? 
data.rawMember.roles.map((roleId: string) => String(roleId)) - : []; let reactionBase: { baseText: string; contextKey: string } | null = null; const resolveReactionBase = () => { if (reactionBase) { @@ -507,6 +528,7 @@ async function handleDiscordReactionEvent( const shouldNotifyReaction = (options: { mode: "off" | "own" | "all" | "allowlist"; messageAuthorId?: string; + channelConfig?: ReturnType; }) => shouldEmitDiscordReactionNotification({ mode: options.mode, @@ -515,7 +537,9 @@ async function handleDiscordReactionEvent( userId: user.id, userName: user.username, userTag: formatDiscordUserTag(user), - allowlist: guildInfo?.users, + channelConfig: options.channelConfig, + guildInfo, + memberRoleIds, allowNameMatching: params.allowNameMatching, }); const emitReactionWithAuthor = (message: { author?: User } | null) => { @@ -550,10 +574,12 @@ async function handleDiscordReactionEvent( ...reactionIngressBase, channelConfig, }); - const authorizeThreadChannelAccess = async (channelInfo: { parentId?: string } | null) => { + const resolveThreadChannelAccess = async (channelInfo: { parentId?: string } | null) => { parentId = channelInfo?.parentId; await loadThreadParentInfo(); - return await authorizeReactionIngressForChannel(resolveThreadChannelConfig()); + const channelConfig = resolveThreadChannelConfig(); + const access = await authorizeReactionIngressForChannel(channelConfig); + return { access, channelConfig }; }; // Parallelize async operations for thread channels @@ -572,16 +598,18 @@ async function handleDiscordReactionEvent( // Fast path: for "all" and "allowlist" modes, we don't need to fetch the message if (reactionMode === "all" || reactionMode === "allowlist") { const channelInfo = await channelInfoPromise; - const threadAccess = await authorizeThreadChannelAccess(channelInfo); + const { access: threadAccess, channelConfig: threadChannelConfig } = + await resolveThreadChannelAccess(channelInfo); if (!threadAccess.allowed) { return; } - - // For allowlist 
mode, check if user is in allowlist first - if (reactionMode === "allowlist") { - if (!shouldNotifyReaction({ mode: reactionMode })) { - return; - } + if ( + !shouldNotifyReaction({ + mode: reactionMode, + channelConfig: threadChannelConfig, + }) + ) { + return; } const { baseText } = resolveReactionBase(); @@ -593,13 +621,20 @@ async function handleDiscordReactionEvent( const messagePromise = data.message.fetch().catch(() => null); const [channelInfo, message] = await Promise.all([channelInfoPromise, messagePromise]); - const threadAccess = await authorizeThreadChannelAccess(channelInfo); + const { access: threadAccess, channelConfig: threadChannelConfig } = + await resolveThreadChannelAccess(channelInfo); if (!threadAccess.allowed) { return; } const messageAuthorId = message?.author?.id ?? undefined; - if (!shouldNotifyReaction({ mode: reactionMode, messageAuthorId })) { + if ( + !shouldNotifyReaction({ + mode: reactionMode, + messageAuthorId, + channelConfig: threadChannelConfig, + }) + ) { return; } @@ -634,11 +669,8 @@ async function handleDiscordReactionEvent( // Fast path: for "all" and "allowlist" modes, we don't need to fetch the message if (reactionMode === "all" || reactionMode === "allowlist") { - // For allowlist mode, check if user is in allowlist first - if (reactionMode === "allowlist") { - if (!shouldNotifyReaction({ mode: reactionMode })) { - return; - } + if (!shouldNotifyReaction({ mode: reactionMode, channelConfig })) { + return; } const { baseText } = resolveReactionBase(); @@ -649,7 +681,7 @@ async function handleDiscordReactionEvent( // For "own" mode, we need to fetch the message to check the author const message = await data.message.fetch().catch(() => null); const messageAuthorId = message?.author?.id ?? 
undefined; - if (!shouldNotifyReaction({ mode: reactionMode, messageAuthorId })) { + if (!shouldNotifyReaction({ mode: reactionMode, messageAuthorId, channelConfig })) { return; } diff --git a/src/gateway/chat-abort.ts b/src/gateway/chat-abort.ts index 0210f9223f7..4be479153f6 100644 --- a/src/gateway/chat-abort.ts +++ b/src/gateway/chat-abort.ts @@ -6,6 +6,8 @@ export type ChatAbortControllerEntry = { sessionKey: string; startedAtMs: number; expiresAtMs: number; + ownerConnId?: string; + ownerDeviceId?: string; }; export function isChatStopCommandText(text: string): boolean { diff --git a/src/gateway/server-methods.ts b/src/gateway/server-methods.ts index 483914b9bf5..f6f052f8cc2 100644 --- a/src/gateway/server-methods.ts +++ b/src/gateway/server-methods.ts @@ -153,5 +153,5 @@ export async function handleGatewayRequest( // All handlers run inside a request scope so that plugin runtime // subagent methods (e.g. context engine tools spawning sub-agents // during tool execution) can dispatch back into the gateway. 
- await withPluginRuntimeGatewayRequestScope({ context, isWebchatConnect }, invokeHandler); + await withPluginRuntimeGatewayRequestScope({ context, client, isWebchatConnect }, invokeHandler); } diff --git a/src/gateway/server-methods/chat.abort-authorization.test.ts b/src/gateway/server-methods/chat.abort-authorization.test.ts new file mode 100644 index 00000000000..6fbf0478df3 --- /dev/null +++ b/src/gateway/server-methods/chat.abort-authorization.test.ts @@ -0,0 +1,147 @@ +import { describe, expect, it, vi } from "vitest"; +import { chatHandlers } from "./chat.js"; + +function createActiveRun(sessionKey: string, owner?: { connId?: string; deviceId?: string }) { + const now = Date.now(); + return { + controller: new AbortController(), + sessionId: `${sessionKey}-session`, + sessionKey, + startedAtMs: now, + expiresAtMs: now + 30_000, + ownerConnId: owner?.connId, + ownerDeviceId: owner?.deviceId, + }; +} + +function createContext(overrides: Record = {}) { + return { + chatAbortControllers: new Map(), + chatRunBuffers: new Map(), + chatDeltaSentAt: new Map(), + chatAbortedRuns: new Map(), + removeChatRun: vi + .fn() + .mockImplementation((run: string) => ({ sessionKey: "main", clientRunId: run })), + agentRunSeq: new Map(), + broadcast: vi.fn(), + nodeSendToSession: vi.fn(), + logGateway: { warn: vi.fn() }, + ...overrides, + }; +} + +async function invokeChatAbort(params: { + context: ReturnType; + request: { sessionKey: string; runId?: string }; + client?: { + connId?: string; + connect?: { + device?: { id?: string }; + scopes?: string[]; + }; + } | null; +}) { + const respond = vi.fn(); + await chatHandlers["chat.abort"]({ + params: params.request, + respond: respond as never, + context: params.context as never, + req: {} as never, + client: (params.client ?? 
null) as never, + isWebchatConnect: () => false, + }); + return respond; +} + +describe("chat.abort authorization", () => { + it("rejects explicit run aborts from other clients", async () => { + const context = createContext({ + chatAbortControllers: new Map([ + ["run-1", createActiveRun("main", { connId: "conn-owner", deviceId: "dev-owner" })], + ]), + }); + + const respond = await invokeChatAbort({ + context, + request: { sessionKey: "main", runId: "run-1" }, + client: { + connId: "conn-other", + connect: { device: { id: "dev-other" }, scopes: ["operator.write"] }, + }, + }); + + const [ok, payload, error] = respond.mock.calls.at(-1) ?? []; + expect(ok).toBe(false); + expect(payload).toBeUndefined(); + expect(error).toMatchObject({ code: "INVALID_REQUEST", message: "unauthorized" }); + expect(context.chatAbortControllers.has("run-1")).toBe(true); + }); + + it("allows the same paired device to abort after reconnecting", async () => { + const context = createContext({ + chatAbortControllers: new Map([ + ["run-1", createActiveRun("main", { connId: "conn-old", deviceId: "dev-1" })], + ]), + }); + + const respond = await invokeChatAbort({ + context, + request: { sessionKey: "main", runId: "run-1" }, + client: { + connId: "conn-new", + connect: { device: { id: "dev-1" }, scopes: ["operator.write"] }, + }, + }); + + const [ok, payload] = respond.mock.calls.at(-1) ?? 
[]; + expect(ok).toBe(true); + expect(payload).toMatchObject({ aborted: true, runIds: ["run-1"] }); + expect(context.chatAbortControllers.has("run-1")).toBe(false); + }); + + it("only aborts session-scoped runs owned by the requester", async () => { + const context = createContext({ + chatAbortControllers: new Map([ + ["run-mine", createActiveRun("main", { deviceId: "dev-1" })], + ["run-other", createActiveRun("main", { deviceId: "dev-2" })], + ]), + }); + + const respond = await invokeChatAbort({ + context, + request: { sessionKey: "main" }, + client: { + connId: "conn-1", + connect: { device: { id: "dev-1" }, scopes: ["operator.write"] }, + }, + }); + + const [ok, payload] = respond.mock.calls.at(-1) ?? []; + expect(ok).toBe(true); + expect(payload).toMatchObject({ aborted: true, runIds: ["run-mine"] }); + expect(context.chatAbortControllers.has("run-mine")).toBe(false); + expect(context.chatAbortControllers.has("run-other")).toBe(true); + }); + + it("allows operator.admin clients to bypass owner checks", async () => { + const context = createContext({ + chatAbortControllers: new Map([ + ["run-1", createActiveRun("main", { connId: "conn-owner", deviceId: "dev-owner" })], + ]), + }); + + const respond = await invokeChatAbort({ + context, + request: { sessionKey: "main", runId: "run-1" }, + client: { + connId: "conn-admin", + connect: { device: { id: "dev-admin" }, scopes: ["operator.admin"] }, + }, + }); + + const [ok, payload] = respond.mock.calls.at(-1) ?? 
[]; + expect(ok).toBe(true); + expect(payload).toMatchObject({ aborted: true, runIds: ["run-1"] }); + }); +}); diff --git a/src/gateway/server-methods/chat.ts b/src/gateway/server-methods/chat.ts index 71669080382..13f3b997892 100644 --- a/src/gateway/server-methods/chat.ts +++ b/src/gateway/server-methods/chat.ts @@ -25,7 +25,6 @@ import { } from "../../utils/message-channel.js"; import { abortChatRunById, - abortChatRunsForSessionKey, type ChatAbortControllerEntry, type ChatAbortOps, isChatStopCommandText, @@ -33,6 +32,7 @@ import { } from "../chat-abort.js"; import { type ChatImageContent, parseMessageWithAttachments } from "../chat-attachments.js"; import { stripEnvelopeFromMessage, stripEnvelopeFromMessages } from "../chat-sanitize.js"; +import { ADMIN_SCOPE } from "../method-scopes.js"; import { GATEWAY_CLIENT_CAPS, GATEWAY_CLIENT_MODES, @@ -83,6 +83,12 @@ type AbortedPartialSnapshot = { abortOrigin: AbortOrigin; }; +type ChatAbortRequester = { + connId?: string; + deviceId?: string; + isAdmin: boolean; +}; + const CHAT_HISTORY_TEXT_MAX_CHARS = 12_000; const CHAT_HISTORY_MAX_SINGLE_MESSAGE_BYTES = 128 * 1024; const CHAT_HISTORY_OVERSIZED_PLACEHOLDER = "[chat.history omitted: message too large]"; @@ -314,6 +320,68 @@ function sanitizeChatHistoryContentBlock(block: unknown): { block: unknown; chan return { block: changed ? entry : block, changed }; } +/** + * Validate that a value is a finite number, returning undefined otherwise. + */ +function toFiniteNumber(x: unknown): number | undefined { + return typeof x === "number" && Number.isFinite(x) ? x : undefined; +} + +/** + * Sanitize usage metadata to ensure only finite numeric fields are included. + * Prevents UI crashes from malformed transcript JSON. 
+ */ +function sanitizeUsage(raw: unknown): Record | undefined { + if (!raw || typeof raw !== "object") { + return undefined; + } + const u = raw as Record; + const out: Record = {}; + + // Whitelist known usage fields and validate they're finite numbers + const knownFields = [ + "input", + "output", + "totalTokens", + "inputTokens", + "outputTokens", + "cacheRead", + "cacheWrite", + "cache_read_input_tokens", + "cache_creation_input_tokens", + ]; + + for (const k of knownFields) { + const n = toFiniteNumber(u[k]); + if (n !== undefined) { + out[k] = n; + } + } + + // Preserve nested usage.cost when present + if ("cost" in u && u.cost != null && typeof u.cost === "object") { + const sanitizedCost = sanitizeCost(u.cost); + if (sanitizedCost) { + (out as Record).cost = sanitizedCost; + } + } + + return Object.keys(out).length > 0 ? out : undefined; +} + +/** + * Sanitize cost metadata to ensure only finite numeric fields are included. + * Prevents UI crashes from calling .toFixed() on non-numbers. + */ +function sanitizeCost(raw: unknown): { total?: number } | undefined { + if (!raw || typeof raw !== "object") { + return undefined; + } + const c = raw as Record; + const total = toFiniteNumber(c.total); + return total !== undefined ? { total } : undefined; +} + function sanitizeChatHistoryMessage(message: unknown): { message: unknown; changed: boolean } { if (!message || typeof message !== "object") { return { message, changed: false }; @@ -325,13 +393,38 @@ function sanitizeChatHistoryMessage(message: unknown): { message: unknown; chang delete entry.details; changed = true; } - if ("usage" in entry) { - delete entry.usage; - changed = true; - } - if ("cost" in entry) { - delete entry.cost; - changed = true; + + // Keep usage/cost so the chat UI can render per-message token and cost badges. + // Only retain usage/cost on assistant messages and validate numeric fields to prevent UI crashes. 
+ if (entry.role !== "assistant") { + if ("usage" in entry) { + delete entry.usage; + changed = true; + } + if ("cost" in entry) { + delete entry.cost; + changed = true; + } + } else { + // Validate and sanitize usage/cost for assistant messages + if ("usage" in entry) { + const sanitized = sanitizeUsage(entry.usage); + if (sanitized) { + entry.usage = sanitized; + } else { + delete entry.usage; + } + changed = true; + } + if ("cost" in entry) { + const sanitized = sanitizeCost(entry.cost); + if (sanitized) { + entry.cost = sanitized; + } else { + delete entry.cost; + } + changed = true; + } } if (typeof entry.content === "string") { @@ -597,12 +690,12 @@ function appendAssistantTranscriptMessage(params: { function collectSessionAbortPartials(params: { chatAbortControllers: Map; chatRunBuffers: Map; - sessionKey: string; + runIds: ReadonlySet; abortOrigin: AbortOrigin; }): AbortedPartialSnapshot[] { const out: AbortedPartialSnapshot[] = []; for (const [runId, active] of params.chatAbortControllers) { - if (active.sessionKey !== params.sessionKey) { + if (!params.runIds.has(runId)) { continue; } const text = params.chatRunBuffers.get(runId); @@ -664,23 +757,104 @@ function createChatAbortOps(context: GatewayRequestContext): ChatAbortOps { }; } +function normalizeOptionalText(value?: string | null): string | undefined { + const trimmed = value?.trim(); + return trimmed || undefined; +} + +function resolveChatAbortRequester( + client: GatewayRequestHandlerOptions["client"], +): ChatAbortRequester { + const scopes = Array.isArray(client?.connect?.scopes) ? 
client.connect.scopes : []; + return { + connId: normalizeOptionalText(client?.connId), + deviceId: normalizeOptionalText(client?.connect?.device?.id), + isAdmin: scopes.includes(ADMIN_SCOPE), + }; +} + +function canRequesterAbortChatRun( + entry: ChatAbortControllerEntry, + requester: ChatAbortRequester, +): boolean { + if (requester.isAdmin) { + return true; + } + const ownerDeviceId = normalizeOptionalText(entry.ownerDeviceId); + const ownerConnId = normalizeOptionalText(entry.ownerConnId); + if (!ownerDeviceId && !ownerConnId) { + return true; + } + if (ownerDeviceId && requester.deviceId && ownerDeviceId === requester.deviceId) { + return true; + } + if (ownerConnId && requester.connId && ownerConnId === requester.connId) { + return true; + } + return false; +} + +function resolveAuthorizedRunIdsForSession(params: { + chatAbortControllers: Map; + sessionKey: string; + requester: ChatAbortRequester; +}) { + const authorizedRunIds: string[] = []; + let matchedSessionRuns = 0; + for (const [runId, active] of params.chatAbortControllers) { + if (active.sessionKey !== params.sessionKey) { + continue; + } + matchedSessionRuns += 1; + if (canRequesterAbortChatRun(active, params.requester)) { + authorizedRunIds.push(runId); + } + } + return { + matchedSessionRuns, + authorizedRunIds, + }; +} + function abortChatRunsForSessionKeyWithPartials(params: { context: GatewayRequestContext; ops: ChatAbortOps; sessionKey: string; abortOrigin: AbortOrigin; stopReason?: string; + requester: ChatAbortRequester; }) { + const { matchedSessionRuns, authorizedRunIds } = resolveAuthorizedRunIdsForSession({ + chatAbortControllers: params.context.chatAbortControllers, + sessionKey: params.sessionKey, + requester: params.requester, + }); + if (authorizedRunIds.length === 0) { + return { + aborted: false, + runIds: [], + unauthorized: matchedSessionRuns > 0, + }; + } + const authorizedRunIdSet = new Set(authorizedRunIds); const snapshots = collectSessionAbortPartials({ 
chatAbortControllers: params.context.chatAbortControllers, chatRunBuffers: params.context.chatRunBuffers, - sessionKey: params.sessionKey, + runIds: authorizedRunIdSet, abortOrigin: params.abortOrigin, }); - const res = abortChatRunsForSessionKey(params.ops, { - sessionKey: params.sessionKey, - stopReason: params.stopReason, - }); + const runIds: string[] = []; + for (const runId of authorizedRunIds) { + const res = abortChatRunById(params.ops, { + runId, + sessionKey: params.sessionKey, + stopReason: params.stopReason, + }); + if (res.aborted) { + runIds.push(runId); + } + } + const res = { aborted: runIds.length > 0, runIds, unauthorized: false }; if (res.aborted) { persistAbortedPartials({ context: params.context, @@ -802,7 +976,7 @@ export const chatHandlers: GatewayRequestHandlers = { verboseLevel, }); }, - "chat.abort": ({ params, respond, context }) => { + "chat.abort": ({ params, respond, context, client }) => { if (!validateChatAbortParams(params)) { respond( false, @@ -820,6 +994,7 @@ export const chatHandlers: GatewayRequestHandlers = { }; const ops = createChatAbortOps(context); + const requester = resolveChatAbortRequester(client); if (!runId) { const res = abortChatRunsForSessionKeyWithPartials({ @@ -828,7 +1003,12 @@ export const chatHandlers: GatewayRequestHandlers = { sessionKey: rawSessionKey, abortOrigin: "rpc", stopReason: "rpc", + requester, }); + if (res.unauthorized) { + respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized")); + return; + } respond(true, { ok: true, aborted: res.aborted, runIds: res.runIds }); return; } @@ -846,6 +1026,10 @@ export const chatHandlers: GatewayRequestHandlers = { ); return; } + if (!canRequesterAbortChatRun(active, requester)) { + respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized")); + return; + } const partialText = context.chatRunBuffers.get(runId); const res = abortChatRunById(ops, { @@ -987,7 +1171,12 @@ export const chatHandlers: GatewayRequestHandlers 
= { sessionKey: rawSessionKey, abortOrigin: "stop-command", stopReason: "stop", + requester: resolveChatAbortRequester(client), }); + if (res.unauthorized) { + respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized")); + return; + } respond(true, { ok: true, aborted: res.aborted, runIds: res.runIds }); return; } @@ -1017,6 +1206,8 @@ export const chatHandlers: GatewayRequestHandlers = { sessionKey: rawSessionKey, startedAtMs: now, expiresAtMs: resolveChatRunExpiresAtMs({ now, timeoutMs }), + ownerConnId: normalizeOptionalText(client?.connId), + ownerDeviceId: normalizeOptionalText(client?.connect?.device?.id), }); const ackPayload = { runId: clientRunId, diff --git a/src/gateway/server-methods/config.ts b/src/gateway/server-methods/config.ts index 9b57a126e5f..1d3d1c85977 100644 --- a/src/gateway/server-methods/config.ts +++ b/src/gateway/server-methods/config.ts @@ -10,6 +10,7 @@ import { validateConfigObjectWithPlugins, writeConfigFile, } from "../../config/config.js"; +import { formatConfigIssueLines } from "../../config/issue-format.js"; import { applyLegacyMigrations } from "../../config/legacy.js"; import { applyMergePatch } from "../../config/merge-patch.js"; import { @@ -23,7 +24,7 @@ import { type ConfigSchemaResponse, } from "../../config/schema.js"; import { extractDeliveryInfo } from "../../config/sessions.js"; -import type { OpenClawConfig } from "../../config/types.openclaw.js"; +import type { ConfigValidationIssue, OpenClawConfig } from "../../config/types.openclaw.js"; import { formatDoctorNonInteractiveHint, type RestartSentinelPayload, @@ -54,6 +55,8 @@ import { parseRestartRequestParams } from "./restart-request.js"; import type { GatewayRequestHandlers, RespondFn } from "./types.js"; import { assertValidParams } from "./validation.js"; +const MAX_CONFIG_ISSUES_IN_ERROR_MESSAGE = 3; + function requireConfigBaseHash( params: unknown, snapshot: Awaited>, @@ -158,7 +161,7 @@ function parseValidateConfigFromRawOrRespond( 
respond( false, undefined, - errorShape(ErrorCodes.INVALID_REQUEST, "invalid config", { + errorShape(ErrorCodes.INVALID_REQUEST, summarizeConfigValidationIssues(validated.issues), { details: { issues: validated.issues }, }), ); @@ -167,6 +170,20 @@ function parseValidateConfigFromRawOrRespond( return { config: validated.config, schema }; } +function summarizeConfigValidationIssues(issues: ReadonlyArray): string { + const trimmed = issues.slice(0, MAX_CONFIG_ISSUES_IN_ERROR_MESSAGE); + const lines = formatConfigIssueLines(trimmed, "", { normalizeRoot: true }) + .map((line) => line.trim()) + .filter(Boolean); + if (lines.length === 0) { + return "invalid config"; + } + const hiddenCount = Math.max(0, issues.length - lines.length); + return `invalid config: ${lines.join("; ")}${ + hiddenCount > 0 ? ` (+${hiddenCount} more issue${hiddenCount === 1 ? "" : "s"})` : "" + }`; +} + function resolveConfigRestartRequest(params: unknown): { sessionKey: string | undefined; note: string | undefined; @@ -398,7 +415,7 @@ export const configHandlers: GatewayRequestHandlers = { respond( false, undefined, - errorShape(ErrorCodes.INVALID_REQUEST, "invalid config", { + errorShape(ErrorCodes.INVALID_REQUEST, summarizeConfigValidationIssues(validated.issues), { details: { issues: validated.issues }, }), ); diff --git a/src/gateway/server-methods/devices.ts b/src/gateway/server-methods/devices.ts index 98c4938b22c..a068b2dfac5 100644 --- a/src/gateway/server-methods/devices.ts +++ b/src/gateway/server-methods/devices.ts @@ -1,5 +1,6 @@ import { approveDevicePairing, + getPairedDevice, listDevicePairing, removePairedDevice, type DeviceAuthToken, @@ -8,6 +9,8 @@ import { rotateDeviceToken, summarizeDeviceTokens, } from "../../infra/device-pairing.js"; +import { normalizeDeviceAuthScopes } from "../../shared/device-auth.js"; +import { roleScopesAllow } from "../../shared/operator-scope-compat.js"; import { ErrorCodes, errorShape, @@ -31,6 +34,25 @@ function redactPairedDevice( }; } +function 
resolveMissingRequestedScope(params: { + role: string; + requestedScopes: readonly string[]; + callerScopes: readonly string[]; +}): string | null { + for (const scope of params.requestedScopes) { + if ( + !roleScopesAllow({ + role: params.role, + requestedScopes: [scope], + allowedScopes: params.callerScopes, + }) + ) { + return scope; + } + } + return null; +} + export const deviceHandlers: GatewayRequestHandlers = { "device.pair.list": async ({ params, respond }) => { if (!validateDevicePairListParams(params)) { @@ -146,7 +168,7 @@ export const deviceHandlers: GatewayRequestHandlers = { context.logGateway.info(`device pairing removed device=${removed.deviceId}`); respond(true, removed, undefined); }, - "device.token.rotate": async ({ params, respond, context }) => { + "device.token.rotate": async ({ params, respond, context, client }) => { if (!validateDeviceTokenRotateParams(params)) { respond( false, @@ -165,6 +187,28 @@ export const deviceHandlers: GatewayRequestHandlers = { role: string; scopes?: string[]; }; + const pairedDevice = await getPairedDevice(deviceId); + if (!pairedDevice) { + respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unknown deviceId/role")); + return; + } + const callerScopes = Array.isArray(client?.connect?.scopes) ? client.connect.scopes : []; + const requestedScopes = normalizeDeviceAuthScopes( + scopes ?? pairedDevice.tokens?.[role.trim()]?.scopes ?? 
pairedDevice.scopes, + ); + const missingScope = resolveMissingRequestedScope({ + role, + requestedScopes, + callerScopes, + }); + if (missingScope) { + respond( + false, + undefined, + errorShape(ErrorCodes.INVALID_REQUEST, `missing scope: ${missingScope}`), + ); + return; + } const entry = await rotateDeviceToken({ deviceId, role, scopes }); if (!entry) { respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unknown deviceId/role")); diff --git a/src/gateway/server-methods/exec-approval.ts b/src/gateway/server-methods/exec-approval.ts index 07dd8546c3f..81d479cbbd6 100644 --- a/src/gateway/server-methods/exec-approval.ts +++ b/src/gateway/server-methods/exec-approval.ts @@ -1,3 +1,4 @@ +import { sanitizeExecApprovalDisplayText } from "../../infra/exec-approval-command-display.js"; import type { ExecApprovalForwarder } from "../../infra/exec-approval-forwarder.js"; import { DEFAULT_EXEC_APPROVAL_TIMEOUT_MS, @@ -125,8 +126,11 @@ export function createExecApprovalHandlers( return; } const request = { - command: effectiveCommandText, - commandPreview: host === "node" ? undefined : approvalContext.commandPreview, + command: sanitizeExecApprovalDisplayText(effectiveCommandText), + commandPreview: + host === "node" || !approvalContext.commandPreview + ? undefined + : sanitizeExecApprovalDisplayText(approvalContext.commandPreview), commandArgv: host === "node" ? undefined : effectiveCommandArgv, envKeys: systemRunBinding?.envKeys?.length ? systemRunBinding.envKeys : undefined, systemRunBinding: systemRunBinding?.binding ?? 
null, diff --git a/src/gateway/server-methods/server-methods.test.ts b/src/gateway/server-methods/server-methods.test.ts index 51da6927f5e..424511370cd 100644 --- a/src/gateway/server-methods/server-methods.test.ts +++ b/src/gateway/server-methods/server-methods.test.ts @@ -641,6 +641,34 @@ describe("exec approval handlers", () => { ); }); + it("sanitizes invisible Unicode format chars in approval display text without changing node bindings", async () => { + const { handlers, broadcasts, respond, context } = createExecApprovalFixture(); + await requestExecApproval({ + handlers, + respond, + context, + params: { + timeoutMs: 10, + command: "bash safe\u200B.sh", + commandArgv: ["bash", "safe\u200B.sh"], + systemRunPlan: { + argv: ["bash", "safe\u200B.sh"], + cwd: "/real/cwd", + commandText: "bash safe\u200B.sh", + agentId: "main", + sessionKey: "agent:main:main", + }, + }, + }); + const requested = broadcasts.find((entry) => entry.event === "exec.approval.requested"); + expect(requested).toBeTruthy(); + const request = (requested?.payload as { request?: Record })?.request ?? {}; + expect(request["command"]).toBe("bash safe\\u{200B}.sh"); + expect((request["systemRunPlan"] as { commandText?: string }).commandText).toBe( + "bash safe\u200B.sh", + ); + }); + it("accepts resolve during broadcast", async () => { const manager = new ExecApprovalManager(); const handlers = createExecApprovalHandlers(manager); diff --git a/src/gateway/server-plugins.ts b/src/gateway/server-plugins.ts index dde23f703a6..7d8b2a8a051 100644 --- a/src/gateway/server-plugins.ts +++ b/src/gateway/server-plugins.ts @@ -85,7 +85,7 @@ async function dispatchGatewayMethod( method, params, }, - client: createSyntheticOperatorClient(), + client: scope?.client ?? 
createSyntheticOperatorClient(), isWebchatConnect, respond: (ok, payload, error) => { if (!result) { diff --git a/src/gateway/server.auth.browser-hardening.test.ts b/src/gateway/server.auth.browser-hardening.test.ts index e9550a8b1aa..c4060716bd4 100644 --- a/src/gateway/server.auth.browser-hardening.test.ts +++ b/src/gateway/server.auth.browser-hardening.test.ts @@ -12,6 +12,7 @@ import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-cha import { buildDeviceAuthPayload } from "./device-auth.js"; import { connectReq, + connectOk, installGatewayTestHooks, readConnectChallengeNonce, testState, @@ -27,6 +28,7 @@ const TEST_OPERATOR_CLIENT = { platform: "test", mode: GATEWAY_CLIENT_MODES.TEST, }; +const ALLOWED_BROWSER_ORIGIN = "https://control.example.com"; const originForPort = (port: number) => `http://127.0.0.1:${port}`; @@ -73,6 +75,127 @@ async function createSignedDevice(params: { } describe("gateway auth browser hardening", () => { + test("rejects trusted-proxy browser connects from origins outside the allowlist", async () => { + const { writeConfigFile } = await import("../config/config.js"); + await writeConfigFile({ + gateway: { + auth: { + mode: "trusted-proxy", + trustedProxy: { + userHeader: "x-forwarded-user", + requiredHeaders: ["x-forwarded-proto"], + }, + }, + trustedProxies: ["127.0.0.1"], + controlUi: { + allowedOrigins: [ALLOWED_BROWSER_ORIGIN], + }, + }, + }); + + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, { + origin: "https://evil.example", + "x-forwarded-for": "203.0.113.50", + "x-forwarded-proto": "https", + "x-forwarded-user": "operator@example.com", + }); + try { + const res = await connectReq(ws, { + client: TEST_OPERATOR_CLIENT, + device: null, + }); + expect(res.ok).toBe(false); + expect(res.error?.message ?? 
"").toContain("origin not allowed"); + } finally { + ws.close(); + } + }); + }); + + test("accepts trusted-proxy browser connects from allowed origins", async () => { + const { writeConfigFile } = await import("../config/config.js"); + await writeConfigFile({ + gateway: { + auth: { + mode: "trusted-proxy", + trustedProxy: { + userHeader: "x-forwarded-user", + requiredHeaders: ["x-forwarded-proto"], + }, + }, + trustedProxies: ["127.0.0.1"], + controlUi: { + allowedOrigins: [ALLOWED_BROWSER_ORIGIN], + }, + }, + }); + + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, { + origin: ALLOWED_BROWSER_ORIGIN, + "x-forwarded-for": "203.0.113.50", + "x-forwarded-proto": "https", + "x-forwarded-user": "operator@example.com", + }); + try { + const payload = await connectOk(ws, { + client: TEST_OPERATOR_CLIENT, + device: null, + }); + expect(payload.type).toBe("hello-ok"); + } finally { + ws.close(); + } + }); + }); + + test.each([ + { + name: "rejects disallowed origins", + origin: "https://evil.example", + ok: false, + expectedMessage: "origin not allowed", + }, + { + name: "accepts allowed origins", + origin: ALLOWED_BROWSER_ORIGIN, + ok: true, + }, + ])( + "keeps non-proxy browser-origin behavior unchanged: $name", + async ({ origin, ok, expectedMessage }) => { + const { writeConfigFile } = await import("../config/config.js"); + testState.gatewayAuth = { mode: "token", token: "secret" }; + await writeConfigFile({ + gateway: { + controlUi: { + allowedOrigins: [ALLOWED_BROWSER_ORIGIN], + }, + }, + }); + + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, { origin }); + try { + const res = await connectReq(ws, { + token: "secret", + client: TEST_OPERATOR_CLIENT, + device: null, + }); + expect(res.ok).toBe(ok); + if (ok) { + expect((res.payload as { type?: string } | undefined)?.type).toBe("hello-ok"); + } else { + expect(res.error?.message ?? "").toContain(expectedMessage ?? 
""); + } + } finally { + ws.close(); + } + }); + }, + ); + test("rejects non-local browser origins for non-control-ui clients", async () => { testState.gatewayAuth = { mode: "token", token: "secret" }; await withGatewayServer(async ({ port }) => { diff --git a/src/gateway/server.chat.gateway-server-chat-b.test.ts b/src/gateway/server.chat.gateway-server-chat-b.test.ts index 2e76e1a5de1..ca1e2c09402 100644 --- a/src/gateway/server.chat.gateway-server-chat-b.test.ts +++ b/src/gateway/server.chat.gateway-server-chat-b.test.ts @@ -273,6 +273,37 @@ describe("gateway server chat", () => { }); }); + test("chat.history preserves usage and cost metadata for assistant messages", async () => { + await withGatewayChatHarness(async ({ ws, createSessionDir }) => { + await connectOk(ws); + + const sessionDir = await createSessionDir(); + await writeMainSessionStore(); + + await writeMainSessionTranscript(sessionDir, [ + JSON.stringify({ + message: { + role: "assistant", + timestamp: Date.now(), + content: [{ type: "text", text: "hello" }], + usage: { input: 12, output: 5, totalTokens: 17 }, + cost: { total: 0.0123 }, + details: { debug: true }, + }, + }), + ]); + + const messages = await fetchHistoryMessages(ws); + expect(messages).toHaveLength(1); + expect(messages[0]).toMatchObject({ + role: "assistant", + usage: { input: 12, output: 5, totalTokens: 17 }, + cost: { total: 0.0123 }, + }); + expect(messages[0]).not.toHaveProperty("details"); + }); + }); + test("chat.history strips inline directives from displayed message text", async () => { await withGatewayChatHarness(async ({ ws, createSessionDir }) => { await connectOk(ws); diff --git a/src/gateway/server.config-patch.test.ts b/src/gateway/server.config-patch.test.ts index 1f2d465b4da..67efe9b79be 100644 --- a/src/gateway/server.config-patch.test.ts +++ b/src/gateway/server.config-patch.test.ts @@ -72,6 +72,38 @@ describe("gateway config methods", () => { expect(res.payload?.config).toBeTruthy(); }); + it("returns config.set 
validation details in the top-level error message", async () => { + const current = await rpcReq<{ + hash?: string; + }>(requireWs(), "config.get", {}); + expect(current.ok).toBe(true); + expect(typeof current.payload?.hash).toBe("string"); + + const res = await rpcReq<{ + ok?: boolean; + error?: { + message?: string; + }; + }>(requireWs(), "config.set", { + raw: JSON.stringify({ gateway: { bind: 123 } }), + baseHash: current.payload?.hash, + }); + const error = res.error as + | { + message?: string; + details?: { + issues?: Array<{ path?: string; message?: string }>; + }; + } + | undefined; + + expect(res.ok).toBe(false); + expect(error?.message ?? "").toContain("invalid config:"); + expect(error?.message ?? "").toContain("gateway.bind"); + expect(error?.message ?? "").toContain("allowed:"); + expect(error?.details?.issues?.[0]?.path).toBe("gateway.bind"); + }); + it("returns a path-scoped config schema lookup", async () => { const res = await rpcReq<{ path: string; diff --git a/src/gateway/server.device-token-rotate-authz.test.ts b/src/gateway/server.device-token-rotate-authz.test.ts new file mode 100644 index 00000000000..9f3ecdaf719 --- /dev/null +++ b/src/gateway/server.device-token-rotate-authz.test.ts @@ -0,0 +1,284 @@ +import os from "node:os"; +import path from "node:path"; +import { describe, expect, test } from "vitest"; +import { WebSocket } from "ws"; +import { + loadOrCreateDeviceIdentity, + publicKeyRawBase64UrlFromPem, + type DeviceIdentity, +} from "../infra/device-identity.js"; +import { + approveDevicePairing, + getPairedDevice, + requestDevicePairing, + rotateDeviceToken, +} from "../infra/device-pairing.js"; +import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js"; +import { GatewayClient } from "./client.js"; +import { + connectOk, + installGatewayTestHooks, + rpcReq, + startServerWithClient, + trackConnectChallengeNonce, +} from "./test-helpers.js"; + +installGatewayTestHooks({ scope: "suite" }); + +function 
resolveDeviceIdentityPath(name: string): string { + const root = process.env.OPENCLAW_STATE_DIR ?? process.env.HOME ?? os.tmpdir(); + return path.join(root, "test-device-identities", `${name}.json`); +} + +function loadDeviceIdentity(name: string): { + identityPath: string; + identity: DeviceIdentity; + publicKey: string; +} { + const identityPath = resolveDeviceIdentityPath(name); + const identity = loadOrCreateDeviceIdentity(identityPath); + return { + identityPath, + identity, + publicKey: publicKeyRawBase64UrlFromPem(identity.publicKeyPem), + }; +} + +async function pairDevice(params: { + name: string; + role: "node" | "operator"; + scopes: string[]; + clientId?: string; + clientMode?: string; +}): Promise<{ + identityPath: string; + identity: DeviceIdentity; +}> { + const loaded = loadDeviceIdentity(params.name); + const request = await requestDevicePairing({ + deviceId: loaded.identity.deviceId, + publicKey: loaded.publicKey, + role: params.role, + scopes: params.scopes, + clientId: params.clientId, + clientMode: params.clientMode, + }); + await approveDevicePairing(request.request.requestId); + return { + identityPath: loaded.identityPath, + identity: loaded.identity, + }; +} + +async function issuePairingScopedTokenForAdminApprovedDevice(name: string): Promise<{ + deviceId: string; + identityPath: string; + pairingToken: string; +}> { + const paired = await pairDevice({ + name, + role: "operator", + scopes: ["operator.admin"], + clientId: GATEWAY_CLIENT_NAMES.TEST, + clientMode: GATEWAY_CLIENT_MODES.TEST, + }); + const rotated = await rotateDeviceToken({ + deviceId: paired.identity.deviceId, + role: "operator", + scopes: ["operator.pairing"], + }); + expect(rotated?.token).toBeTruthy(); + return { + deviceId: paired.identity.deviceId, + identityPath: paired.identityPath, + pairingToken: String(rotated?.token ?? 
""), + }; +} + +async function openTrackedWs(port: number): Promise { + const ws = new WebSocket(`ws://127.0.0.1:${port}`); + trackConnectChallengeNonce(ws); + await new Promise((resolve, reject) => { + const timer = setTimeout(() => reject(new Error("timeout waiting for ws open")), 5_000); + ws.once("open", () => { + clearTimeout(timer); + resolve(); + }); + ws.once("error", (error) => { + clearTimeout(timer); + reject(error); + }); + }); + return ws; +} + +async function connectPairingScopedOperator(params: { + port: number; + identityPath: string; + deviceToken: string; +}): Promise { + const ws = await openTrackedWs(params.port); + await connectOk(ws, { + skipDefaultAuth: true, + deviceToken: params.deviceToken, + deviceIdentityPath: params.identityPath, + scopes: ["operator.pairing"], + }); + return ws; +} + +async function connectApprovedNode(params: { + port: number; + name: string; + onInvoke: (payload: unknown) => void; +}): Promise { + const paired = await pairDevice({ + name: params.name, + role: "node", + scopes: [], + clientId: GATEWAY_CLIENT_NAMES.NODE_HOST, + clientMode: GATEWAY_CLIENT_MODES.NODE, + }); + + let readyResolve: (() => void) | null = null; + const ready = new Promise((resolve) => { + readyResolve = resolve; + }); + + const client = new GatewayClient({ + url: `ws://127.0.0.1:${params.port}`, + connectDelayMs: 2_000, + token: "secret", + role: "node", + clientName: GATEWAY_CLIENT_NAMES.NODE_HOST, + clientVersion: "1.0.0", + platform: "linux", + mode: GATEWAY_CLIENT_MODES.NODE, + scopes: [], + commands: ["system.run"], + deviceIdentity: paired.identity, + onHelloOk: () => readyResolve?.(), + onEvent: (event) => { + if (event.event !== "node.invoke.request") { + return; + } + params.onInvoke(event.payload); + const payload = event.payload as { id?: string; nodeId?: string }; + if (!payload.id || !payload.nodeId) { + return; + } + void client.request("node.invoke.result", { + id: payload.id, + nodeId: payload.nodeId, + ok: true, + 
payloadJSON: JSON.stringify({ ok: true }),
+      });
+    },
+  });
+  client.start();
+  await Promise.race([
+    ready,
+    new Promise((_, reject) => {
+      setTimeout(() => reject(new Error("timeout waiting for node hello")), 5_000);
+    }),
+  ]);
+  return client;
+}
+
+async function getConnectedNodeId(ws: WebSocket): Promise<string> {
+  const nodes = await rpcReq<{ nodes?: Array<{ nodeId: string; connected?: boolean }> }>(
+    ws,
+    "node.list",
+    {},
+  );
+  expect(nodes.ok).toBe(true);
+  const nodeId = nodes.payload?.nodes?.find((node) => node.connected)?.nodeId ?? "";
+  expect(nodeId).toBeTruthy();
+  return nodeId;
+}
+
+async function waitForMacrotasks(): Promise<void> {
+  await new Promise((resolve) => setImmediate(resolve));
+  await new Promise((resolve) => setImmediate(resolve));
+}
+
+describe("gateway device.token.rotate caller scope guard", () => {
+  test("rejects rotating an admin-approved device token above the caller session scopes", async () => {
+    const started = await startServerWithClient("secret");
+    const attacker = await issuePairingScopedTokenForAdminApprovedDevice("rotate-attacker");
+
+    let pairingWs: WebSocket | undefined;
+    try {
+      pairingWs = await connectPairingScopedOperator({
+        port: started.port,
+        identityPath: attacker.identityPath,
+        deviceToken: attacker.pairingToken,
+      });
+
+      const rotate = await rpcReq(pairingWs, "device.token.rotate", {
+        deviceId: attacker.deviceId,
+        role: "operator",
+        scopes: ["operator.admin"],
+      });
+      expect(rotate.ok).toBe(false);
+      expect(rotate.error?.message).toBe("missing scope: operator.admin");
+
+      const paired = await getPairedDevice(attacker.deviceId);
+      expect(paired?.tokens?.operator?.scopes).toEqual(["operator.pairing"]);
+      expect(paired?.approvedScopes).toEqual(["operator.admin"]);
+    } finally {
+      pairingWs?.close();
+      started.ws.close();
+      await started.server.close();
+      started.envSnapshot.restore();
+    }
+  });
+
+  test("blocks the pairing-token to admin-node-invoke escalation chain", async () => {
+    const started = 
await startServerWithClient("secret"); + const attacker = await issuePairingScopedTokenForAdminApprovedDevice("rotate-rce-attacker"); + + let sawInvoke = false; + let pairingWs: WebSocket | undefined; + let nodeClient: GatewayClient | undefined; + + try { + await connectOk(started.ws); + nodeClient = await connectApprovedNode({ + port: started.port, + name: "rotate-rce-node", + onInvoke: () => { + sawInvoke = true; + }, + }); + await getConnectedNodeId(started.ws); + + pairingWs = await connectPairingScopedOperator({ + port: started.port, + identityPath: attacker.identityPath, + deviceToken: attacker.pairingToken, + }); + + const rotate = await rpcReq<{ token?: string }>(pairingWs, "device.token.rotate", { + deviceId: attacker.deviceId, + role: "operator", + scopes: ["operator.admin"], + }); + + expect(rotate.ok).toBe(false); + expect(rotate.error?.message).toBe("missing scope: operator.admin"); + await waitForMacrotasks(); + expect(sawInvoke).toBe(false); + + const paired = await getPairedDevice(attacker.deviceId); + expect(paired?.tokens?.operator?.scopes).toEqual(["operator.pairing"]); + expect(paired?.tokens?.operator?.token).toBe(attacker.pairingToken); + } finally { + pairingWs?.close(); + nodeClient?.stop(); + started.ws.close(); + await started.server.close(); + started.envSnapshot.restore(); + } + }); +}); diff --git a/src/gateway/server.talk-config.test.ts b/src/gateway/server.talk-config.test.ts index f430edfc185..ad9027f36fc 100644 --- a/src/gateway/server.talk-config.test.ts +++ b/src/gateway/server.talk-config.test.ts @@ -6,6 +6,7 @@ import { publicKeyRawBase64UrlFromPem, signDevicePayload, } from "../infra/device-identity.js"; +import { withEnvAsync } from "../test-utils/env.js"; import { buildDeviceAuthPayload } from "./device-auth.js"; import { validateTalkConfigResult } from "./protocol/index.js"; import { @@ -150,45 +151,47 @@ describe("gateway talk.config", () => { }, }); - await withServer(async (ws) => { - await connectOperator(ws, 
["operator.read", "operator.write", "operator.talk.secrets"]); - const res = await rpcReq<{ - config?: { - talk?: { - apiKey?: { source?: string; provider?: string; id?: string }; - providers?: { - elevenlabs?: { - apiKey?: { source?: string; provider?: string; id?: string }; + await withEnvAsync({ ELEVENLABS_API_KEY: "env-elevenlabs-key" }, async () => { + await withServer(async (ws) => { + await connectOperator(ws, ["operator.read", "operator.write", "operator.talk.secrets"]); + const res = await rpcReq<{ + config?: { + talk?: { + apiKey?: { source?: string; provider?: string; id?: string }; + providers?: { + elevenlabs?: { + apiKey?: { source?: string; provider?: string; id?: string }; + }; }; - }; - resolved?: { - provider?: string; - config?: { - apiKey?: { source?: string; provider?: string; id?: string }; + resolved?: { + provider?: string; + config?: { + apiKey?: { source?: string; provider?: string; id?: string }; + }; }; }; }; - }; - }>(ws, "talk.config", { - includeSecrets: true, - }); - expect(res.ok).toBe(true); - expect(validateTalkConfigResult(res.payload)).toBe(true); - expect(res.payload?.config?.talk?.apiKey).toEqual({ - source: "env", - provider: "default", - id: "ELEVENLABS_API_KEY", - }); - expect(res.payload?.config?.talk?.providers?.elevenlabs?.apiKey).toEqual({ - source: "env", - provider: "default", - id: "ELEVENLABS_API_KEY", - }); - expect(res.payload?.config?.talk?.resolved?.provider).toBe("elevenlabs"); - expect(res.payload?.config?.talk?.resolved?.config?.apiKey).toEqual({ - source: "env", - provider: "default", - id: "ELEVENLABS_API_KEY", + }>(ws, "talk.config", { + includeSecrets: true, + }); + expect(res.ok).toBe(true); + expect(validateTalkConfigResult(res.payload)).toBe(true); + expect(res.payload?.config?.talk?.apiKey).toEqual({ + source: "env", + provider: "default", + id: "ELEVENLABS_API_KEY", + }); + expect(res.payload?.config?.talk?.providers?.elevenlabs?.apiKey).toEqual({ + source: "env", + provider: "default", + id: 
"ELEVENLABS_API_KEY", + }); + expect(res.payload?.config?.talk?.resolved?.provider).toBe("elevenlabs"); + expect(res.payload?.config?.talk?.resolved?.config?.apiKey).toEqual({ + source: "env", + provider: "default", + id: "ELEVENLABS_API_KEY", + }); }); }); }); diff --git a/src/gateway/server/plugins-http.test.ts b/src/gateway/server/plugins-http.test.ts index 391792b0022..476f76f8850 100644 --- a/src/gateway/server/plugins-http.test.ts +++ b/src/gateway/server/plugins-http.test.ts @@ -1,5 +1,7 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import { describe, expect, it, vi } from "vitest"; +import type { PluginRuntime } from "../../plugins/runtime/types.js"; +import type { GatewayRequestContext, GatewayRequestOptions } from "../server-methods/types.js"; import { makeMockHttpResponse } from "../test-http-response.js"; import { createTestRegistry } from "./__tests__/test-utils.js"; import { @@ -8,6 +10,22 @@ import { shouldEnforceGatewayAuthForPluginPath, } from "./plugins-http.js"; +const loadOpenClawPlugins = vi.hoisted(() => vi.fn()); +type HandleGatewayRequestOptions = GatewayRequestOptions & { + extraHandlers?: Record; +}; +const handleGatewayRequest = vi.hoisted(() => + vi.fn(async (_opts: HandleGatewayRequestOptions) => {}), +); + +vi.mock("../../plugins/loader.js", () => ({ + loadOpenClawPlugins, +})); + +vi.mock("../server-methods.js", () => ({ + handleGatewayRequest, +})); + type PluginHandlerLog = Parameters[0]["log"]; function createPluginLog(): PluginHandlerLog { @@ -39,7 +57,85 @@ function buildRepeatedEncodedSlash(depth: number): string { return encodedSlash; } +function createSubagentRuntimeRegistry() { + return createTestRegistry(); +} + +async function createSubagentRuntime(): Promise { + const serverPlugins = await import("../server-plugins.js"); + loadOpenClawPlugins.mockReturnValue(createSubagentRuntimeRegistry()); + serverPlugins.loadGatewayPlugins({ + cfg: {}, + workspaceDir: "/tmp", + log: { + info: vi.fn(), + warn: 
vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, + coreGatewayHandlers: {}, + baseMethods: [], + }); + serverPlugins.setFallbackGatewayContext({} as GatewayRequestContext); + const call = loadOpenClawPlugins.mock.calls.at(-1)?.[0] as + | { runtimeOptions?: { subagent?: PluginRuntime["subagent"] } } + | undefined; + if (!call?.runtimeOptions?.subagent) { + throw new Error("Expected subagent runtime from loadGatewayPlugins"); + } + return call.runtimeOptions.subagent; +} + describe("createGatewayPluginRequestHandler", () => { + it("caps unauthenticated plugin routes to non-admin subagent scopes", async () => { + loadOpenClawPlugins.mockReset(); + handleGatewayRequest.mockReset(); + handleGatewayRequest.mockImplementation(async (opts: HandleGatewayRequestOptions) => { + const scopes = opts.client?.connect.scopes ?? []; + if (opts.req.method === "sessions.delete" && !scopes.includes("operator.admin")) { + opts.respond(false, undefined, { + code: "invalid_request", + message: "missing scope: operator.admin", + }); + return; + } + opts.respond(true, {}); + }); + + const subagent = await createSubagentRuntime(); + const log = createPluginLog(); + const handler = createGatewayPluginRequestHandler({ + registry: createTestRegistry({ + httpRoutes: [ + createRoute({ + path: "/hook", + auth: "plugin", + handler: async (_req, _res) => { + await subagent.deleteSession({ sessionKey: "agent:main:subagent:child" }); + return true; + }, + }), + ], + }), + log, + }); + + const { res, setHeader, end } = makeMockHttpResponse(); + const handled = await handler({ url: "/hook" } as IncomingMessage, res, undefined, { + gatewayAuthSatisfied: false, + }); + + expect(handled).toBe(true); + expect(handleGatewayRequest).toHaveBeenCalledTimes(1); + expect(handleGatewayRequest.mock.calls[0]?.[0]?.client?.connect.scopes).toEqual([ + "operator.write", + ]); + expect(res.statusCode).toBe(500); + expect(setHeader).toHaveBeenCalledWith("Content-Type", "text/plain; charset=utf-8"); + 
expect(end).toHaveBeenCalledWith("Internal Server Error"); + expect(log.warn).toHaveBeenCalledWith(expect.stringContaining("missing scope: operator.admin")); + }); + it("returns false when no routes are registered", async () => { const log = createPluginLog(); const handler = createGatewayPluginRequestHandler({ diff --git a/src/gateway/server/plugins-http.ts b/src/gateway/server/plugins-http.ts index 50114a33af6..6147e1bee99 100644 --- a/src/gateway/server/plugins-http.ts +++ b/src/gateway/server/plugins-http.ts @@ -1,6 +1,11 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import type { createSubsystemLogger } from "../../logging/subsystem.js"; import type { PluginRegistry } from "../../plugins/registry.js"; +import { withPluginRuntimeGatewayRequestScope } from "../../plugins/runtime/gateway-request-scope.js"; +import { ADMIN_SCOPE, APPROVALS_SCOPE, PAIRING_SCOPE, WRITE_SCOPE } from "../method-scopes.js"; +import { GATEWAY_CLIENT_IDS, GATEWAY_CLIENT_MODES } from "../protocol/client-info.js"; +import { PROTOCOL_VERSION } from "../protocol/index.js"; +import type { GatewayRequestOptions } from "../server-methods/types.js"; import { resolvePluginRoutePathContext, type PluginRoutePathContext, @@ -21,6 +26,32 @@ export { shouldEnforceGatewayAuthForPluginPath } from "./plugins-http/route-auth type SubsystemLogger = ReturnType; +function createPluginRouteRuntimeClient(params: { + requiresGatewayAuth: boolean; + gatewayAuthSatisfied?: boolean; +}): GatewayRequestOptions["client"] { + // Plugin-authenticated webhooks can still use non-admin subagent helpers, + // but they must not inherit admin-only gateway methods by default. + const scopes = + params.requiresGatewayAuth && params.gatewayAuthSatisfied !== false + ? 
[ADMIN_SCOPE, APPROVALS_SCOPE, PAIRING_SCOPE] + : [WRITE_SCOPE]; + return { + connect: { + minProtocol: PROTOCOL_VERSION, + maxProtocol: PROTOCOL_VERSION, + client: { + id: GATEWAY_CLIENT_IDS.GATEWAY_CLIENT, + version: "internal", + platform: "node", + mode: GATEWAY_CLIENT_MODES.BACKEND, + }, + role: "operator", + scopes, + }, + }; +} + export type PluginHttpRequestHandler = ( req: IncomingMessage, res: ServerResponse, @@ -49,30 +80,40 @@ export function createGatewayPluginRequestHandler(params: { if (matchedRoutes.length === 0) { return false; } - if ( - matchedPluginRoutesRequireGatewayAuth(matchedRoutes) && - dispatchContext?.gatewayAuthSatisfied === false - ) { + const requiresGatewayAuth = matchedPluginRoutesRequireGatewayAuth(matchedRoutes); + if (requiresGatewayAuth && dispatchContext?.gatewayAuthSatisfied === false) { log.warn(`plugin http route blocked without gateway auth (${pathContext.canonicalPath})`); return false; } + const runtimeClient = createPluginRouteRuntimeClient({ + requiresGatewayAuth, + gatewayAuthSatisfied: dispatchContext?.gatewayAuthSatisfied, + }); - for (const route of matchedRoutes) { - try { - const handled = await route.handler(req, res); - if (handled !== false) { - return true; + return await withPluginRuntimeGatewayRequestScope( + { + client: runtimeClient, + isWebchatConnect: () => false, + }, + async () => { + for (const route of matchedRoutes) { + try { + const handled = await route.handler(req, res); + if (handled !== false) { + return true; + } + } catch (err) { + log.warn(`plugin http route failed (${route.pluginId ?? "unknown"}): ${String(err)}`); + if (!res.headersSent) { + res.statusCode = 500; + res.setHeader("Content-Type", "text/plain; charset=utf-8"); + res.end("Internal Server Error"); + } + return true; + } } - } catch (err) { - log.warn(`plugin http route failed (${route.pluginId ?? 
"unknown"}): ${String(err)}`); - if (!res.headersSent) { - res.statusCode = 500; - res.setHeader("Content-Type", "text/plain; charset=utf-8"); - res.end("Internal Server Error"); - } - return true; - } - } - return false; + return false; + }, + ); }; } diff --git a/src/gateway/server/ws-connection/message-handler.ts b/src/gateway/server/ws-connection/message-handler.ts index 83d1b5f12a3..0897b51e937 100644 --- a/src/gateway/server/ws-connection/message-handler.ts +++ b/src/gateway/server/ws-connection/message-handler.ts @@ -114,7 +114,7 @@ function resolveHandshakeBrowserSecurityContext(params: { ); return { hasBrowserOriginHeader, - enforceOriginCheckForAnyClient: hasBrowserOriginHeader && !params.hasProxyHeaders, + enforceOriginCheckForAnyClient: hasBrowserOriginHeader, rateLimitClientIp: hasBrowserOriginHeader && isLoopbackAddress(params.clientIp) ? BROWSER_ORIGIN_LOOPBACK_RATE_LIMIT_IP diff --git a/src/gateway/session-utils.ts b/src/gateway/session-utils.ts index 969c60c378c..e16777f4f2c 100644 --- a/src/gateway/session-utils.ts +++ b/src/gateway/session-utils.ts @@ -810,6 +810,7 @@ export function listSessionsFromStore(params: { const model = resolvedModel.model ?? 
DEFAULT_MODEL;
   return {
     key,
+    spawnedBy: entry?.spawnedBy,
     entry,
     kind: classifySessionKey(key, entry),
     label: entry?.label,
diff --git a/src/gateway/session-utils.types.ts b/src/gateway/session-utils.types.ts
index 711a1997f22..80873b0000c 100644
--- a/src/gateway/session-utils.types.ts
+++ b/src/gateway/session-utils.types.ts
@@ -15,6 +15,7 @@ export type GatewaySessionsDefaults = {
 
 export type GatewaySessionRow = {
   key: string;
+  spawnedBy?: string;
   kind: "direct" | "group" | "global" | "unknown";
   label?: string;
   displayName?: string;
diff --git a/src/gateway/tools-invoke-http.test.ts b/src/gateway/tools-invoke-http.test.ts
index 66a68bf5d9f..36b05c00d50 100644
--- a/src/gateway/tools-invoke-http.test.ts
+++ b/src/gateway/tools-invoke-http.test.ts
@@ -1,8 +1,22 @@
 import { createServer, type IncomingMessage, type ServerResponse } from "node:http";
 import type { AddressInfo } from "node:net";
 import { afterAll, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
+import type { runBeforeToolCallHook as runBeforeToolCallHookType } from "../agents/pi-tools.before-tool-call.js";
+
+type RunBeforeToolCallHook = typeof runBeforeToolCallHookType;
+type RunBeforeToolCallHookArgs = Parameters<RunBeforeToolCallHook>[0];
+type RunBeforeToolCallHookResult = Awaited<ReturnType<RunBeforeToolCallHook>>;
 
 const TEST_GATEWAY_TOKEN = "test-gateway-token-1234567890";
 
+const hookMocks = vi.hoisted(() => ({
+  resolveToolLoopDetectionConfig: vi.fn(() => ({ warnAt: 3 })),
+  runBeforeToolCallHook: vi.fn(
+    async (args: RunBeforeToolCallHookArgs): Promise<RunBeforeToolCallHookResult> => ({
+      blocked: false,
+      params: args.params,
+    }),
+  ),
+}));
+
 let cfg: Record<string, unknown> = {};
 let lastCreateOpenClawToolsContext: Record<string, unknown> | undefined;
@@ -152,6 +166,14 @@ vi.mock("../agents/openclaw-tools.js", () => {
   };
 });
 
+vi.mock("../agents/pi-tools.js", () => ({
+  resolveToolLoopDetectionConfig: hookMocks.resolveToolLoopDetectionConfig,
+}));
+
+vi.mock("../agents/pi-tools.before-tool-call.js", () => ({
+  runBeforeToolCallHook: hookMocks.runBeforeToolCallHook,
+}));
+
 const { 
handleToolsInvokeHttpRequest } = await import("./tools-invoke-http.js"); let pluginHttpHandlers: Array<(req: IncomingMessage, res: ServerResponse) => Promise> = []; @@ -206,6 +228,15 @@ beforeEach(() => { pluginHttpHandlers = []; cfg = {}; lastCreateOpenClawToolsContext = undefined; + hookMocks.resolveToolLoopDetectionConfig.mockClear(); + hookMocks.resolveToolLoopDetectionConfig.mockImplementation(() => ({ warnAt: 3 })); + hookMocks.runBeforeToolCallHook.mockClear(); + hookMocks.runBeforeToolCallHook.mockImplementation( + async (args: RunBeforeToolCallHookArgs): Promise => ({ + blocked: false, + params: args.params, + }), + ); }); const resolveGatewayToken = (): string => TEST_GATEWAY_TOKEN; @@ -336,6 +367,56 @@ describe("POST /tools/invoke", () => { expect(body.ok).toBe(true); expect(body).toHaveProperty("result"); expect(lastCreateOpenClawToolsContext?.allowMediaInvokeCommands).toBe(true); + expect(hookMocks.runBeforeToolCallHook).toHaveBeenCalledWith( + expect.objectContaining({ + toolName: "agents_list", + ctx: expect.objectContaining({ + agentId: "main", + sessionKey: "agent:main:main", + loopDetection: { warnAt: 3 }, + }), + }), + ); + }); + + it("blocks tool execution when before_tool_call rejects the invoke", async () => { + setMainAllowedTools({ allow: ["tools_invoke_test"] }); + hookMocks.runBeforeToolCallHook.mockResolvedValueOnce({ + blocked: true, + reason: "blocked by test hook", + }); + + const res = await invokeToolAuthed({ + tool: "tools_invoke_test", + args: { mode: "ok" }, + sessionKey: "main", + }); + + expect(res.status).toBe(403); + await expect(res.json()).resolves.toMatchObject({ + ok: false, + error: { + type: "tool_call_blocked", + message: "blocked by test hook", + }, + }); + }); + + it("uses before_tool_call adjusted params for HTTP tool execution", async () => { + setMainAllowedTools({ allow: ["tools_invoke_test"] }); + hookMocks.runBeforeToolCallHook.mockImplementationOnce(async () => ({ + blocked: false, + params: { mode: "rewritten" 
}, + })); + + const res = await invokeToolAuthed({ + tool: "tools_invoke_test", + args: { mode: "input" }, + sessionKey: "main", + }); + + const body = await expectOkInvokeResponse(res); + expect(body.result).toMatchObject({ ok: true }); }); it("supports tools.alsoAllow in profile and implicit modes", async () => { diff --git a/src/gateway/tools-invoke-http.ts b/src/gateway/tools-invoke-http.ts index 88cea7b3845..0cccafce999 100644 --- a/src/gateway/tools-invoke-http.ts +++ b/src/gateway/tools-invoke-http.ts @@ -1,5 +1,7 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import { createOpenClawTools } from "../agents/openclaw-tools.js"; +import { runBeforeToolCallHook } from "../agents/pi-tools.before-tool-call.js"; +import { resolveToolLoopDetectionConfig } from "../agents/pi-tools.js"; import { resolveEffectiveToolPolicy, resolveGroupToolPolicy, @@ -311,14 +313,32 @@ export async function handleToolsInvokeHttpRequest( } try { + const toolCallId = `http-${Date.now()}`; const toolArgs = mergeActionIntoArgsIfSupported({ // oxlint-disable-next-line typescript/no-explicit-any toolSchema: (tool as any).parameters, action, args, }); + const hookResult = await runBeforeToolCallHook({ + toolName, + params: toolArgs, + toolCallId, + ctx: { + agentId, + sessionKey, + loopDetection: resolveToolLoopDetectionConfig({ cfg, agentId }), + }, + }); + if (hookResult.blocked) { + sendJson(res, 403, { + ok: false, + error: { type: "tool_call_blocked", message: hookResult.reason }, + }); + return true; + } // oxlint-disable-next-line typescript/no-explicit-any - const result = await (tool as any).execute?.(`http-${Date.now()}`, toolArgs); + const result = await (tool as any).execute?.(toolCallId, hookResult.params); sendJson(res, 200, { ok: true, result }); } catch (err) { const inputStatus = resolveToolInputErrorStatus(err); diff --git a/src/imessage/monitor/inbound-processing.test.ts b/src/imessage/monitor/inbound-processing.test.ts index fab878a4cc7..b18012b9f1f 
100644 --- a/src/imessage/monitor/inbound-processing.test.ts +++ b/src/imessage/monitor/inbound-processing.test.ts @@ -1,9 +1,11 @@ import { describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../../config/config.js"; +import { sanitizeTerminalText } from "../../terminal/safe-text.js"; import { describeIMessageEchoDropLog, resolveIMessageInboundDecision, } from "./inbound-processing.js"; +import { createSelfChatCache } from "./self-chat-cache.js"; describe("resolveIMessageInboundDecision echo detection", () => { const cfg = {} as OpenClawConfig; @@ -46,6 +48,324 @@ describe("resolveIMessageInboundDecision echo detection", () => { }), ); }); + + it("drops reflected self-chat duplicates after seeing the from-me copy", () => { + const selfChatCache = createSelfChatCache(); + const createdAt = "2026-03-02T20:58:10.649Z"; + + expect( + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9641, + sender: "+15555550123", + text: "Do you want to report this issue?", + created_at: createdAt, + is_from_me: true, + is_group: false, + }, + opts: undefined, + messageText: "Do you want to report this issue?", + bodyText: "Do you want to report this issue?", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }), + ).toEqual({ kind: "drop", reason: "from me" }); + + expect( + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9642, + sender: "+15555550123", + text: "Do you want to report this issue?", + created_at: createdAt, + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: "Do you want to report this issue?", + bodyText: "Do you want to report this issue?", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: 
new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }), + ).toEqual({ kind: "drop", reason: "self-chat echo" }); + }); + + it("does not drop same-text messages when created_at differs", () => { + const selfChatCache = createSelfChatCache(); + + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9641, + sender: "+15555550123", + text: "ok", + created_at: "2026-03-02T20:58:10.649Z", + is_from_me: true, + is_group: false, + }, + opts: undefined, + messageText: "ok", + bodyText: "ok", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }); + + const decision = resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9642, + sender: "+15555550123", + text: "ok", + created_at: "2026-03-02T20:58:11.649Z", + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: "ok", + bodyText: "ok", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }); + + expect(decision.kind).toBe("dispatch"); + }); + + it("keeps self-chat cache scoped to configured group threads", () => { + const selfChatCache = createSelfChatCache(); + const groupedCfg = { + channels: { + imessage: { + groups: { + "123": {}, + "456": {}, + }, + }, + }, + } as OpenClawConfig; + const createdAt = "2026-03-02T20:58:10.649Z"; + + expect( + resolveIMessageInboundDecision({ + cfg: groupedCfg, + accountId: "default", + message: { + id: 9701, + chat_id: 123, + sender: "+15555550123", + text: "same text", + created_at: createdAt, + is_from_me: true, + is_group: false, + }, + opts: undefined, + messageText: "same text", + bodyText: "same text", + allowFrom: [], + groupAllowFrom: [], + 
groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }), + ).toEqual({ kind: "drop", reason: "from me" }); + + const decision = resolveIMessageInboundDecision({ + cfg: groupedCfg, + accountId: "default", + message: { + id: 9702, + chat_id: 456, + sender: "+15555550123", + text: "same text", + created_at: createdAt, + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: "same text", + bodyText: "same text", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }); + + expect(decision.kind).toBe("dispatch"); + }); + + it("does not drop other participants in the same group thread", () => { + const selfChatCache = createSelfChatCache(); + const createdAt = "2026-03-02T20:58:10.649Z"; + + expect( + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9751, + chat_id: 123, + sender: "+15555550123", + text: "same text", + created_at: createdAt, + is_from_me: true, + is_group: true, + }, + opts: undefined, + messageText: "same text", + bodyText: "same text", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }), + ).toEqual({ kind: "drop", reason: "from me" }); + + const decision = resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9752, + chat_id: 123, + sender: "+15555550999", + text: "same text", + created_at: createdAt, + is_from_me: false, + is_group: true, + }, + opts: undefined, + messageText: "same text", + bodyText: "same text", + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: 
[], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose: undefined, + }); + + expect(decision.kind).toBe("dispatch"); + }); + + it("sanitizes reflected duplicate previews before logging", () => { + const selfChatCache = createSelfChatCache(); + const logVerbose = vi.fn(); + const createdAt = "2026-03-02T20:58:10.649Z"; + const bodyText = "line-1\nline-2\t\u001b[31mred"; + + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9801, + sender: "+15555550123", + text: bodyText, + created_at: createdAt, + is_from_me: true, + is_group: false, + }, + opts: undefined, + messageText: bodyText, + bodyText, + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose, + }); + + resolveIMessageInboundDecision({ + cfg, + accountId: "default", + message: { + id: 9802, + sender: "+15555550123", + text: bodyText, + created_at: createdAt, + is_from_me: false, + is_group: false, + }, + opts: undefined, + messageText: bodyText, + bodyText, + allowFrom: [], + groupAllowFrom: [], + groupPolicy: "open", + dmPolicy: "open", + storeAllowFrom: [], + historyLimit: 0, + groupHistories: new Map(), + echoCache: undefined, + selfChatCache, + logVerbose, + }); + + expect(logVerbose).toHaveBeenCalledWith( + `imessage: dropping self-chat reflected duplicate: "${sanitizeTerminalText(bodyText)}"`, + ); + }); }); describe("describeIMessageEchoDropLog", () => { diff --git a/src/imessage/monitor/inbound-processing.ts b/src/imessage/monitor/inbound-processing.ts index d042f1f1a0f..b3fc10c1e7b 100644 --- a/src/imessage/monitor/inbound-processing.ts +++ b/src/imessage/monitor/inbound-processing.ts @@ -24,6 +24,7 @@ import { DM_GROUP_ACCESS_REASON, resolveDmGroupAccessWithLists, } from "../../security/dm-policy-shared.js"; +import { sanitizeTerminalText } from 
"../../terminal/safe-text.js"; import { truncateUtf16Safe } from "../../utils.js"; import { formatIMessageChatTarget, @@ -31,6 +32,7 @@ import { normalizeIMessageHandle, } from "../targets.js"; import { detectReflectedContent } from "./reflection-guard.js"; +import type { SelfChatCache } from "./self-chat-cache.js"; import type { MonitorIMessageOpts, IMessagePayload } from "./types.js"; type IMessageReplyContext = { @@ -101,6 +103,7 @@ export function resolveIMessageInboundDecision(params: { historyLimit: number; groupHistories: Map; echoCache?: { has: (scope: string, lookup: { text?: string; messageId?: string }) => boolean }; + selfChatCache?: SelfChatCache; logVerbose?: (msg: string) => void; }): IMessageInboundDecision { const senderRaw = params.message.sender ?? ""; @@ -109,13 +112,10 @@ export function resolveIMessageInboundDecision(params: { return { kind: "drop", reason: "missing sender" }; } const senderNormalized = normalizeIMessageHandle(sender); - if (params.message.is_from_me) { - return { kind: "drop", reason: "from me" }; - } - const chatId = params.message.chat_id ?? undefined; const chatGuid = params.message.chat_guid ?? undefined; const chatIdentifier = params.message.chat_identifier ?? undefined; + const createdAt = params.message.created_at ? Date.parse(params.message.created_at) : undefined; const groupIdCandidate = chatId !== undefined ? 
String(chatId) : undefined; const groupListPolicy = groupIdCandidate @@ -138,6 +138,18 @@ export function resolveIMessageInboundDecision(params: { groupIdCandidate && groupListPolicy.allowlistEnabled && groupListPolicy.groupConfig, ); const isGroup = Boolean(params.message.is_group) || treatAsGroupByConfig; + const selfChatLookup = { + accountId: params.accountId, + isGroup, + chatId, + sender, + text: params.bodyText, + createdAt, + }; + if (params.message.is_from_me) { + params.selfChatCache?.remember(selfChatLookup); + return { kind: "drop", reason: "from me" }; + } if (isGroup && !chatId) { return { kind: "drop", reason: "group without chat_id" }; } @@ -215,6 +227,17 @@ export function resolveIMessageInboundDecision(params: { return { kind: "drop", reason: "empty body" }; } + if ( + params.selfChatCache?.has({ + ...selfChatLookup, + text: bodyText, + }) + ) { + const preview = sanitizeTerminalText(truncateUtf16Safe(bodyText, 50)); + params.logVerbose?.(`imessage: dropping self-chat reflected duplicate: "${preview}"`); + return { kind: "drop", reason: "self-chat echo" }; + } + // Echo detection: check if the received message matches a recently sent message. // Scope by conversation so same text in different chats is not conflated. const inboundMessageId = params.message.id != null ? String(params.message.id) : undefined; @@ -250,7 +273,6 @@ export function resolveIMessageInboundDecision(params: { } const replyContext = describeReplyContext(params.message); - const createdAt = params.message.created_at ? Date.parse(params.message.created_at) : undefined; const historyKey = isGroup ? String(chatId ?? chatGuid ?? chatIdentifier ?? 
"unknown") : undefined; diff --git a/src/imessage/monitor/monitor-provider.ts b/src/imessage/monitor/monitor-provider.ts index 1ea35b60d95..1324529cbff 100644 --- a/src/imessage/monitor/monitor-provider.ts +++ b/src/imessage/monitor/monitor-provider.ts @@ -53,6 +53,7 @@ import { import { createLoopRateLimiter } from "./loop-rate-limiter.js"; import { parseIMessageNotification } from "./parse-notification.js"; import { normalizeAllowList, resolveRuntime } from "./runtime.js"; +import { createSelfChatCache } from "./self-chat-cache.js"; import type { IMessagePayload, MonitorIMessageOpts } from "./types.js"; /** @@ -99,6 +100,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P ); const groupHistories = new Map(); const sentMessageCache = createSentMessageCache(); + const selfChatCache = createSelfChatCache(); const loopRateLimiter = createLoopRateLimiter(); const textLimit = resolveTextChunkLimit(cfg, "imessage", accountInfo.accountId); const allowFrom = normalizeAllowList(opts.allowFrom ?? imessageCfg.allowFrom); @@ -252,6 +254,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P historyLimit, groupHistories, echoCache: sentMessageCache, + selfChatCache, logVerbose, }); @@ -267,6 +270,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P // are normal and should not escalate. 
const isLoopDrop = decision.reason === "echo" || + decision.reason === "self-chat echo" || decision.reason === "reflected assistant content" || decision.reason === "from me"; if (isLoopDrop) { diff --git a/src/imessage/monitor/self-chat-cache.test.ts b/src/imessage/monitor/self-chat-cache.test.ts new file mode 100644 index 00000000000..cf3a245ba30 --- /dev/null +++ b/src/imessage/monitor/self-chat-cache.test.ts @@ -0,0 +1,76 @@ +import { describe, expect, it, vi } from "vitest"; +import { createSelfChatCache } from "./self-chat-cache.js"; + +describe("createSelfChatCache", () => { + const directLookup = { + accountId: "default", + sender: "+15555550123", + isGroup: false, + } as const; + + it("matches repeated lookups for the same scope, timestamp, and text", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const cache = createSelfChatCache(); + cache.remember({ + ...directLookup, + text: " hello\r\nworld ", + createdAt: 123, + }); + + expect( + cache.has({ + ...directLookup, + text: "hello\nworld", + createdAt: 123, + }), + ).toBe(true); + }); + + it("expires entries after the ttl window", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const cache = createSelfChatCache(); + cache.remember({ ...directLookup, text: "hello", createdAt: 123 }); + + vi.advanceTimersByTime(11_001); + + expect(cache.has({ ...directLookup, text: "hello", createdAt: 123 })).toBe(false); + }); + + it("evicts older entries when the cache exceeds its cap", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const cache = createSelfChatCache(); + for (let i = 0; i < 513; i += 1) { + cache.remember({ + ...directLookup, + text: `message-${i}`, + createdAt: i, + }); + vi.advanceTimersByTime(1_001); + } + + expect(cache.has({ ...directLookup, text: "message-0", createdAt: 0 })).toBe(false); + expect(cache.has({ ...directLookup, text: "message-512", createdAt: 512 })).toBe(true); + 
}); + + it("does not collide long texts that differ only in the middle", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const cache = createSelfChatCache(); + const prefix = "a".repeat(256); + const suffix = "b".repeat(256); + const longTextA = `${prefix}${"x".repeat(300)}${suffix}`; + const longTextB = `${prefix}${"y".repeat(300)}${suffix}`; + + cache.remember({ ...directLookup, text: longTextA, createdAt: 123 }); + + expect(cache.has({ ...directLookup, text: longTextA, createdAt: 123 })).toBe(true); + expect(cache.has({ ...directLookup, text: longTextB, createdAt: 123 })).toBe(false); + }); +}); diff --git a/src/imessage/monitor/self-chat-cache.ts b/src/imessage/monitor/self-chat-cache.ts new file mode 100644 index 00000000000..a2c4c31ccd9 --- /dev/null +++ b/src/imessage/monitor/self-chat-cache.ts @@ -0,0 +1,103 @@ +import { createHash } from "node:crypto"; +import { formatIMessageChatTarget } from "../targets.js"; + +type SelfChatCacheKeyParts = { + accountId: string; + sender: string; + isGroup: boolean; + chatId?: number; +}; + +export type SelfChatLookup = SelfChatCacheKeyParts & { + text?: string; + createdAt?: number; +}; + +export type SelfChatCache = { + remember: (lookup: SelfChatLookup) => void; + has: (lookup: SelfChatLookup) => boolean; +}; + +const SELF_CHAT_TTL_MS = 10_000; +const MAX_SELF_CHAT_CACHE_ENTRIES = 512; +const CLEANUP_MIN_INTERVAL_MS = 1_000; + +function normalizeText(text: string | undefined): string | null { + if (!text) { + return null; + } + const normalized = text.replace(/\r\n?/g, "\n").trim(); + return normalized ? 
normalized : null; +} + +function isUsableTimestamp(createdAt: number | undefined): createdAt is number { + return typeof createdAt === "number" && Number.isFinite(createdAt); +} + +function digestText(text: string): string { + return createHash("sha256").update(text).digest("hex"); +} + +function buildScope(parts: SelfChatCacheKeyParts): string { + if (!parts.isGroup) { + return `${parts.accountId}:imessage:${parts.sender}`; + } + const chatTarget = formatIMessageChatTarget(parts.chatId) || "chat_id:unknown"; + return `${parts.accountId}:${chatTarget}:imessage:${parts.sender}`; +} + +class DefaultSelfChatCache implements SelfChatCache { + private cache = new Map(); + private lastCleanupAt = 0; + + private buildKey(lookup: SelfChatLookup): string | null { + const text = normalizeText(lookup.text); + if (!text || !isUsableTimestamp(lookup.createdAt)) { + return null; + } + return `${buildScope(lookup)}:${lookup.createdAt}:${digestText(text)}`; + } + + remember(lookup: SelfChatLookup): void { + const key = this.buildKey(lookup); + if (!key) { + return; + } + this.cache.set(key, Date.now()); + this.maybeCleanup(); + } + + has(lookup: SelfChatLookup): boolean { + this.maybeCleanup(); + const key = this.buildKey(lookup); + if (!key) { + return false; + } + const timestamp = this.cache.get(key); + return typeof timestamp === "number" && Date.now() - timestamp <= SELF_CHAT_TTL_MS; + } + + private maybeCleanup(): void { + const now = Date.now(); + if (now - this.lastCleanupAt < CLEANUP_MIN_INTERVAL_MS) { + return; + } + this.lastCleanupAt = now; + for (const [key, timestamp] of this.cache.entries()) { + if (now - timestamp > SELF_CHAT_TTL_MS) { + this.cache.delete(key); + } + } + while (this.cache.size > MAX_SELF_CHAT_CACHE_ENTRIES) { + const oldestKey = this.cache.keys().next().value; + if (typeof oldestKey !== "string") { + break; + } + this.cache.delete(oldestKey); + } + } +} + +export function createSelfChatCache(): SelfChatCache { + return new DefaultSelfChatCache(); 
+} diff --git a/src/infra/device-pairing.test.ts b/src/infra/device-pairing.test.ts index c76b44b323d..17f03df089a 100644 --- a/src/infra/device-pairing.test.ts +++ b/src/infra/device-pairing.test.ts @@ -1,16 +1,19 @@ -import { mkdtemp } from "node:fs/promises"; +import { mkdtemp, readFile, writeFile } from "node:fs/promises"; import { tmpdir } from "node:os"; import { join } from "node:path"; import { describe, expect, test } from "vitest"; import { approveDevicePairing, clearDevicePairing, + ensureDeviceToken, getPairedDevice, removePairedDevice, requestDevicePairing, rotateDeviceToken, verifyDeviceToken, + type PairedDevice, } from "./device-pairing.js"; +import { resolvePairingPaths } from "./pairing-files.js"; async function setupPairedOperatorDevice(baseDir: string, scopes: string[]) { const request = await requestDevicePairing( @@ -51,6 +54,43 @@ function requireToken(token: string | undefined): string { return token; } +async function overwritePairedOperatorTokenScopes(baseDir: string, scopes: string[]) { + const { pairedPath } = resolvePairingPaths(baseDir, "devices"); + const pairedByDeviceId = JSON.parse(await readFile(pairedPath, "utf8")) as Record< + string, + PairedDevice + >; + const device = pairedByDeviceId["device-1"]; + expect(device?.tokens?.operator).toBeDefined(); + if (!device?.tokens?.operator) { + throw new Error("expected paired operator token"); + } + device.tokens.operator.scopes = scopes; + await writeFile(pairedPath, JSON.stringify(pairedByDeviceId, null, 2)); +} + +async function mutatePairedOperatorDevice(baseDir: string, mutate: (device: PairedDevice) => void) { + const { pairedPath } = resolvePairingPaths(baseDir, "devices"); + const pairedByDeviceId = JSON.parse(await readFile(pairedPath, "utf8")) as Record< + string, + PairedDevice + >; + const device = pairedByDeviceId["device-1"]; + expect(device).toBeDefined(); + if (!device) { + throw new Error("expected paired operator device"); + } + mutate(device); + await 
writeFile(pairedPath, JSON.stringify(pairedByDeviceId, null, 2)); +} + +async function clearPairedOperatorApprovalBaseline(baseDir: string) { + await mutatePairedOperatorDevice(baseDir, (device) => { + delete device.approvedScopes; + delete device.scopes; + }); +} + describe("device pairing tokens", () => { test("reuses existing pending requests for the same device", async () => { const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); @@ -180,6 +220,26 @@ describe("device pairing tokens", () => { expect(after?.approvedScopes).toEqual(["operator.read"]); }); + test("rejects scope escalation when ensuring a token and leaves state unchanged", async () => { + const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); + await setupPairedOperatorDevice(baseDir, ["operator.read"]); + const before = await getPairedDevice("device-1", baseDir); + + const ensured = await ensureDeviceToken({ + deviceId: "device-1", + role: "operator", + scopes: ["operator.admin"], + baseDir, + }); + expect(ensured).toBeNull(); + + const after = await getPairedDevice("device-1", baseDir); + expect(after?.tokens?.operator?.token).toEqual(before?.tokens?.operator?.token); + expect(after?.tokens?.operator?.scopes).toEqual(["operator.read"]); + expect(after?.scopes).toEqual(["operator.read"]); + expect(after?.approvedScopes).toEqual(["operator.read"]); + }); + test("verifies token and rejects mismatches", async () => { const { baseDir, token } = await setupOperatorToken(["operator.read"]); @@ -199,6 +259,32 @@ describe("device pairing tokens", () => { expect(mismatch.reason).toBe("token-mismatch"); }); + test("rejects persisted tokens whose scopes exceed the approved scope baseline", async () => { + const { baseDir, token } = await setupOperatorToken(["operator.read"]); + await overwritePairedOperatorTokenScopes(baseDir, ["operator.admin"]); + + await expect( + verifyOperatorToken({ + baseDir, + token, + scopes: ["operator.admin"], + }), + ).resolves.toEqual({ 
ok: false, reason: "scope-mismatch" }); + }); + + test("fails closed when the paired device approval baseline is missing during verification", async () => { + const { baseDir, token } = await setupOperatorToken(["operator.read"]); + await clearPairedOperatorApprovalBaseline(baseDir); + + await expect( + verifyOperatorToken({ + baseDir, + token, + scopes: ["operator.read"], + }), + ).resolves.toEqual({ ok: false, reason: "scope-mismatch" }); + }); + test("accepts operator.read/operator.write requests with an operator.admin token scope", async () => { const { baseDir, token } = await setupOperatorToken(["operator.admin"]); @@ -217,6 +303,57 @@ describe("device pairing tokens", () => { expect(writeOk.ok).toBe(true); }); + test("accepts custom operator scopes under an operator.admin approval baseline", async () => { + const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); + await setupPairedOperatorDevice(baseDir, ["operator.admin"]); + + const rotated = await rotateDeviceToken({ + deviceId: "device-1", + role: "operator", + scopes: ["operator.talk.secrets"], + baseDir, + }); + expect(rotated?.scopes).toEqual(["operator.talk.secrets"]); + + await expect( + verifyOperatorToken({ + baseDir, + token: requireToken(rotated?.token), + scopes: ["operator.talk.secrets"], + }), + ).resolves.toEqual({ ok: true }); + }); + + test("fails closed when the paired device approval baseline is missing during ensure", async () => { + const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); + await setupPairedOperatorDevice(baseDir, ["operator.admin"]); + await clearPairedOperatorApprovalBaseline(baseDir); + + await expect( + ensureDeviceToken({ + deviceId: "device-1", + role: "operator", + scopes: ["operator.admin"], + baseDir, + }), + ).resolves.toBeNull(); + }); + + test("fails closed when the paired device approval baseline is missing during rotation", async () => { + const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-")); + 
await setupPairedOperatorDevice(baseDir, ["operator.admin"]); + await clearPairedOperatorApprovalBaseline(baseDir); + + await expect( + rotateDeviceToken({ + deviceId: "device-1", + role: "operator", + scopes: ["operator.admin"], + baseDir, + }), + ).resolves.toBeNull(); + }); + test("treats multibyte same-length token input as mismatch without throwing", async () => { const { baseDir, token } = await setupOperatorToken(["operator.read"]); const multibyteToken = "é".repeat(token.length); diff --git a/src/infra/device-pairing.ts b/src/infra/device-pairing.ts index 591a9d70888..5bd2909a56e 100644 --- a/src/infra/device-pairing.ts +++ b/src/infra/device-pairing.ts @@ -181,44 +181,6 @@ function mergePendingDevicePairingRequest( }; } -function scopesAllow(requested: string[], allowed: string[]): boolean { - if (requested.length === 0) { - return true; - } - if (allowed.length === 0) { - return false; - } - const allowedSet = new Set(allowed); - return requested.every((scope) => allowedSet.has(scope)); -} - -const DEVICE_SCOPE_IMPLICATIONS: Readonly> = { - "operator.admin": ["operator.read", "operator.write", "operator.approvals", "operator.pairing"], - "operator.write": ["operator.read"], -}; - -function expandScopeImplications(scopes: string[]): string[] { - const expanded = new Set(scopes); - const queue = [...scopes]; - while (queue.length > 0) { - const scope = queue.pop(); - if (!scope) { - continue; - } - for (const impliedScope of DEVICE_SCOPE_IMPLICATIONS[scope] ?? 
[]) { - if (!expanded.has(impliedScope)) { - expanded.add(impliedScope); - queue.push(impliedScope); - } - } - } - return [...expanded]; -} - -function scopesAllowWithImplications(requested: string[], allowed: string[]): boolean { - return scopesAllow(expandScopeImplications(requested), expandScopeImplications(allowed)); -} - function newToken() { return generatePairingToken(); } @@ -252,6 +214,29 @@ function buildDeviceAuthToken(params: { }; } +function resolveApprovedDeviceScopeBaseline(device: PairedDevice): string[] | null { + const baseline = device.approvedScopes ?? device.scopes; + if (!Array.isArray(baseline)) { + return null; + } + return normalizeDeviceAuthScopes(baseline); +} + +function scopesWithinApprovedDeviceBaseline(params: { + role: string; + scopes: readonly string[]; + approvedScopes: readonly string[] | null; +}): boolean { + if (!params.approvedScopes) { + return false; + } + return roleScopesAllow({ + role: params.role, + requestedScopes: params.scopes, + allowedScopes: params.approvedScopes, + }); +} + export async function listDevicePairing(baseDir?: string): Promise { const state = await loadState(baseDir); const pending = Object.values(state.pendingById).toSorted((a, b) => b.ts - a.ts); @@ -494,6 +479,16 @@ export async function verifyDeviceToken(params: { if (!verifyPairingToken(params.token, entry.token)) { return { ok: false, reason: "token-mismatch" }; } + const approvedScopes = resolveApprovedDeviceScopeBaseline(device); + if ( + !scopesWithinApprovedDeviceBaseline({ + role, + scopes: entry.scopes, + approvedScopes, + }) + ) { + return { ok: false, reason: "scope-mismatch" }; + } const requestedScopes = normalizeDeviceAuthScopes(params.scopes); if (!roleScopesAllow({ role, requestedScopes, allowedScopes: entry.scopes })) { return { ok: false, reason: "scope-mismatch" }; @@ -525,8 +520,26 @@ export async function ensureDeviceToken(params: { return null; } const { device, role, tokens, existing } = context; + const approvedScopes = 
resolveApprovedDeviceScopeBaseline(device); + if ( + !scopesWithinApprovedDeviceBaseline({ + role, + scopes: requestedScopes, + approvedScopes, + }) + ) { + return null; + } if (existing && !existing.revokedAtMs) { - if (roleScopesAllow({ role, requestedScopes, allowedScopes: existing.scopes })) { + const existingWithinApproved = scopesWithinApprovedDeviceBaseline({ + role, + scopes: existing.scopes, + approvedScopes, + }); + if ( + existingWithinApproved && + roleScopesAllow({ role, requestedScopes, allowedScopes: existing.scopes }) + ) { return existing; } } @@ -589,10 +602,14 @@ export async function rotateDeviceToken(params: { const requestedScopes = normalizeDeviceAuthScopes( params.scopes ?? existing?.scopes ?? device.scopes, ); - const approvedScopes = normalizeDeviceAuthScopes( - device.approvedScopes ?? device.scopes ?? existing?.scopes, - ); - if (!scopesAllowWithImplications(requestedScopes, approvedScopes)) { + const approvedScopes = resolveApprovedDeviceScopeBaseline(device); + if ( + !scopesWithinApprovedDeviceBaseline({ + role, + scopes: requestedScopes, + approvedScopes, + }) + ) { return null; } const now = Date.now(); diff --git a/src/infra/exec-allowlist-pattern.test.ts b/src/infra/exec-allowlist-pattern.test.ts new file mode 100644 index 00000000000..2c45e12627f --- /dev/null +++ b/src/infra/exec-allowlist-pattern.test.ts @@ -0,0 +1,14 @@ +import { describe, expect, it } from "vitest"; +import { matchesExecAllowlistPattern } from "./exec-allowlist-pattern.js"; + +describe("matchesExecAllowlistPattern", () => { + it("does not let ? 
cross path separators", () => { + expect(matchesExecAllowlistPattern("/tmp/a?b", "/tmp/a/b")).toBe(false); + expect(matchesExecAllowlistPattern("/tmp/a?b", "/tmp/acb")).toBe(true); + }); + + it.runIf(process.platform !== "win32")("preserves case sensitivity on POSIX", () => { + expect(matchesExecAllowlistPattern("/tmp/Allowed-Tool", "/tmp/allowed-tool")).toBe(false); + expect(matchesExecAllowlistPattern("/tmp/Allowed-Tool", "/tmp/Allowed-Tool")).toBe(true); + }); +}); diff --git a/src/infra/exec-allowlist-pattern.ts b/src/infra/exec-allowlist-pattern.ts index df05a2ae1d9..cdf84dfc51e 100644 --- a/src/infra/exec-allowlist-pattern.ts +++ b/src/infra/exec-allowlist-pattern.ts @@ -9,7 +9,7 @@ function normalizeMatchTarget(value: string): string { const stripped = value.replace(/^\\\\[?.]\\/, ""); return stripped.replace(/\\/g, "/").toLowerCase(); } - return value.replace(/\\\\/g, "/").toLowerCase(); + return value.replace(/\\\\/g, "/"); } function tryRealpath(value: string): string | null { @@ -46,7 +46,7 @@ function compileGlobRegex(pattern: string): RegExp { continue; } if (ch === "?") { - regex += "."; + regex += "[^/]"; i += 1; continue; } @@ -55,7 +55,7 @@ function compileGlobRegex(pattern: string): RegExp { } regex += "$"; - const compiled = new RegExp(regex, "i"); + const compiled = new RegExp(regex, process.platform === "win32" ? "i" : ""); if (globRegexCache.size >= GLOB_REGEX_CACHE_LIMIT) { globRegexCache.clear(); } diff --git a/src/infra/exec-approval-command-display.ts b/src/infra/exec-approval-command-display.ts index b5b00625ef2..9ab62e55669 100644 --- a/src/infra/exec-approval-command-display.ts +++ b/src/infra/exec-approval-command-display.ts @@ -1,8 +1,22 @@ import type { ExecApprovalRequestPayload } from "./exec-approvals.js"; +const UNICODE_FORMAT_CHAR_REGEX = /\p{Cf}/gu; + +function formatCodePointEscape(char: string): string { + return `\\u{${char.codePointAt(0)?.toString(16).toUpperCase() ?? 
"FFFD"}}`; +} + +export function sanitizeExecApprovalDisplayText(commandText: string): string { + return commandText.replace(UNICODE_FORMAT_CHAR_REGEX, formatCodePointEscape); +} + function normalizePreview(commandText: string, commandPreview?: string | null): string | null { - const preview = commandPreview?.trim() ?? ""; - if (!preview || preview === commandText) { + const previewRaw = commandPreview?.trim() ?? ""; + if (!previewRaw) { + return null; + } + const preview = sanitizeExecApprovalDisplayText(previewRaw); + if (preview === commandText) { return null; } return preview; @@ -12,17 +26,15 @@ export function resolveExecApprovalCommandDisplay(request: ExecApprovalRequestPa commandText: string; commandPreview: string | null; } { - if (request.host === "node" && request.systemRunPlan) { - return { - commandText: request.systemRunPlan.commandText, - commandPreview: normalizePreview( - request.systemRunPlan.commandText, - request.systemRunPlan.commandPreview, - ), - }; - } + const commandTextSource = + request.command || + (request.host === "node" && request.systemRunPlan ? request.systemRunPlan.commandText : ""); + const commandText = sanitizeExecApprovalDisplayText(commandTextSource); + const previewSource = + request.commandPreview ?? + (request.host === "node" ? (request.systemRunPlan?.commandPreview ?? 
null) : null); return { - commandText: request.command, - commandPreview: normalizePreview(request.command, request.commandPreview), + commandText, + commandPreview: normalizePreview(commandText, previewSource), }; } diff --git a/src/infra/exec-approval-forwarder.test.ts b/src/infra/exec-approval-forwarder.test.ts index 8ae1b53cc57..ca4d81e012e 100644 --- a/src/infra/exec-approval-forwarder.test.ts +++ b/src/infra/exec-approval-forwarder.test.ts @@ -294,6 +294,24 @@ describe("exec approval forwarder", () => { expect(text).toContain("Reply with: /approve allow-once|allow-always|deny"); }); + it("renders invisible Unicode format chars as visible escapes", async () => { + vi.useFakeTimers(); + const { deliver, forwarder } = createForwarder({ cfg: TARGETS_CFG }); + + await expect( + forwarder.handleRequested({ + ...baseRequest, + request: { + ...baseRequest.request, + command: "bash safe\u200B.sh", + }, + }), + ).resolves.toBe(true); + await Promise.resolve(); + + expect(getFirstDeliveryText(deliver)).toContain("Command: `bash safe\\u{200B}.sh`"); + }); + it("formats complex commands as fenced code blocks", async () => { vi.useFakeTimers(); const { deliver, forwarder } = createForwarder({ cfg: TARGETS_CFG }); diff --git a/src/infra/git-commit.test.ts b/src/infra/git-commit.test.ts index d00c50fbf6f..c0ddb136e85 100644 --- a/src/infra/git-commit.test.ts +++ b/src/infra/git-commit.test.ts @@ -198,7 +198,7 @@ describe("git commit resolution", () => { await fs.mkdir(path.join(packageRoot, "dist"), { recursive: true }); await fs.writeFile( path.join(packageRoot, "package.json"), - JSON.stringify({ name: "openclaw", version: "2026.3.9" }), + JSON.stringify({ name: "openclaw", version: "2026.3.10" }), "utf-8", ); const moduleUrl = pathToFileURL(path.join(packageRoot, "dist", "entry.js")).href; diff --git a/src/infra/host-env-security-policy.json b/src/infra/host-env-security-policy.json index 8b8f3cf3333..9e3ad27581e 100644 --- a/src/infra/host-env-security-policy.json +++ 
b/src/infra/host-env-security-policy.json @@ -11,6 +11,7 @@ "BASH_ENV", "ENV", "GIT_EXTERNAL_DIFF", + "GIT_EXEC_PATH", "SHELL", "SHELLOPTS", "PS4", diff --git a/src/infra/host-env-security.test.ts b/src/infra/host-env-security.test.ts index 4e7bcdb9ed9..08f1a3d65fb 100644 --- a/src/infra/host-env-security.test.ts +++ b/src/infra/host-env-security.test.ts @@ -18,6 +18,7 @@ describe("isDangerousHostEnvVarName", () => { expect(isDangerousHostEnvVarName("bash_env")).toBe(true); expect(isDangerousHostEnvVarName("SHELL")).toBe(true); expect(isDangerousHostEnvVarName("GIT_EXTERNAL_DIFF")).toBe(true); + expect(isDangerousHostEnvVarName("git_exec_path")).toBe(true); expect(isDangerousHostEnvVarName("SHELLOPTS")).toBe(true); expect(isDangerousHostEnvVarName("ps4")).toBe(true); expect(isDangerousHostEnvVarName("DYLD_INSERT_LIBRARIES")).toBe(true); @@ -60,6 +61,7 @@ describe("sanitizeHostExecEnv", () => { ZDOTDIR: "/tmp/evil-zdotdir", BASH_ENV: "/tmp/pwn.sh", GIT_SSH_COMMAND: "touch /tmp/pwned", + GIT_EXEC_PATH: "/tmp/git-exec-path", EDITOR: "/tmp/editor", NPM_CONFIG_USERCONFIG: "/tmp/npmrc", GIT_CONFIG_GLOBAL: "/tmp/gitconfig", @@ -73,6 +75,7 @@ describe("sanitizeHostExecEnv", () => { expect(env.OPENCLAW_CLI).toBe(OPENCLAW_CLI_ENV_VALUE); expect(env.BASH_ENV).toBeUndefined(); expect(env.GIT_SSH_COMMAND).toBeUndefined(); + expect(env.GIT_EXEC_PATH).toBeUndefined(); expect(env.EDITOR).toBeUndefined(); expect(env.NPM_CONFIG_USERCONFIG).toBeUndefined(); expect(env.GIT_CONFIG_GLOBAL).toBeUndefined(); @@ -211,6 +214,65 @@ describe("shell wrapper exploit regression", () => { }); describe("git env exploit regression", () => { + it("blocks inherited GIT_EXEC_PATH so git cannot execute helper payloads", async () => { + if (process.platform === "win32") { + return; + } + const gitPath = "/usr/bin/git"; + if (!fs.existsSync(gitPath)) { + return; + } + + const helperDir = fs.mkdtempSync( + path.join(os.tmpdir(), `openclaw-git-exec-path-${process.pid}-${Date.now()}-`), + ); + const 
helperPath = path.join(helperDir, "git-remote-https"); + const marker = path.join( + os.tmpdir(), + `openclaw-git-exec-path-marker-${process.pid}-${Date.now()}`, + ); + try { + try { + fs.unlinkSync(marker); + } catch { + // no-op + } + fs.writeFileSync(helperPath, `#!/bin/sh\ntouch ${JSON.stringify(marker)}\nexit 1\n`, "utf8"); + fs.chmodSync(helperPath, 0o755); + + const target = "https://127.0.0.1:1/does-not-matter"; + const unsafeEnv = { + PATH: process.env.PATH ?? "/usr/bin:/bin", + GIT_EXEC_PATH: helperDir, + GIT_TERMINAL_PROMPT: "0", + }; + + await new Promise((resolve) => { + const child = spawn(gitPath, ["ls-remote", target], { env: unsafeEnv, stdio: "ignore" }); + child.once("error", () => resolve()); + child.once("close", () => resolve()); + }); + + expect(fs.existsSync(marker)).toBe(true); + fs.unlinkSync(marker); + + const safeEnv = sanitizeHostExecEnv({ + baseEnv: unsafeEnv, + }); + + await new Promise((resolve) => { + const child = spawn(gitPath, ["ls-remote", target], { env: safeEnv, stdio: "ignore" }); + child.once("error", () => resolve()); + child.once("close", () => resolve()); + }); + + expect(fs.existsSync(marker)).toBe(false); + } finally { + fs.rmSync(helperDir, { recursive: true, force: true }); + fs.rmSync(marker, { force: true }); + } + }); + it("blocks GIT_SSH_COMMAND override so git cannot execute helper payloads", async () => { if (process.platform === "win32") { return; diff --git a/src/infra/net/proxy-env.test.ts b/src/infra/net/proxy-env.test.ts new file mode 100644 index 00000000000..37b910f1769 --- /dev/null +++ b/src/infra/net/proxy-env.test.ts @@ -0,0 +1,42 @@ +import { describe, expect, it } from "vitest"; +import { hasEnvHttpProxyConfigured, resolveEnvHttpProxyUrl } from "./proxy-env.js"; + +describe("resolveEnvHttpProxyUrl", () => { + it("uses lower-case https_proxy before upper-case HTTPS_PROXY", () => { + const env = { + https_proxy: "http://lower.test:8080", + HTTPS_PROXY: "http://upper.test:8080", + } as 
NodeJS.ProcessEnv; + + expect(resolveEnvHttpProxyUrl("https", env)).toBe("http://lower.test:8080"); + }); + + it("treats empty lower-case https_proxy as authoritative over upper-case HTTPS_PROXY", () => { + const env = { + https_proxy: "", + HTTPS_PROXY: "http://upper.test:8080", + } as NodeJS.ProcessEnv; + + expect(resolveEnvHttpProxyUrl("https", env)).toBeUndefined(); + expect(hasEnvHttpProxyConfigured("https", env)).toBe(false); + }); + + it("treats empty lower-case http_proxy as authoritative over upper-case HTTP_PROXY", () => { + const env = { + http_proxy: " ", + HTTP_PROXY: "http://upper-http.test:8080", + } as NodeJS.ProcessEnv; + + expect(resolveEnvHttpProxyUrl("http", env)).toBeUndefined(); + expect(hasEnvHttpProxyConfigured("http", env)).toBe(false); + }); + + it("falls back from HTTPS proxy vars to HTTP proxy vars for https requests", () => { + const env = { + HTTP_PROXY: "http://upper-http.test:8080", + } as NodeJS.ProcessEnv; + + expect(resolveEnvHttpProxyUrl("https", env)).toBe("http://upper-http.test:8080"); + expect(hasEnvHttpProxyConfigured("https", env)).toBe(true); + }); +}); diff --git a/src/infra/net/proxy-env.ts b/src/infra/net/proxy-env.ts index 01401074678..c0c332c7301 100644 --- a/src/infra/net/proxy-env.ts +++ b/src/infra/net/proxy-env.ts @@ -16,3 +16,40 @@ export function hasProxyEnvConfigured(env: NodeJS.ProcessEnv = process.env): boo } return false; } + +function normalizeProxyEnvValue(value: string | undefined): string | null | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed.length > 0 ? 
trimmed : null; +} + +/** + * Match undici EnvHttpProxyAgent semantics for env-based HTTP/S proxy selection: + * - lower-case vars take precedence over upper-case + * - HTTPS requests prefer https_proxy/HTTPS_PROXY, then fall back to http_proxy/HTTP_PROXY + * - ALL_PROXY is ignored by EnvHttpProxyAgent + */ +export function resolveEnvHttpProxyUrl( + protocol: "http" | "https", + env: NodeJS.ProcessEnv = process.env, +): string | undefined { + const lowerHttpProxy = normalizeProxyEnvValue(env.http_proxy); + const lowerHttpsProxy = normalizeProxyEnvValue(env.https_proxy); + const httpProxy = + lowerHttpProxy !== undefined ? lowerHttpProxy : normalizeProxyEnvValue(env.HTTP_PROXY); + const httpsProxy = + lowerHttpsProxy !== undefined ? lowerHttpsProxy : normalizeProxyEnvValue(env.HTTPS_PROXY); + if (protocol === "https") { + return httpsProxy ?? httpProxy ?? undefined; + } + return httpProxy ?? undefined; +} + +export function hasEnvHttpProxyConfigured( + protocol: "http" | "https" = "https", + env: NodeJS.ProcessEnv = process.env, +): boolean { + return resolveEnvHttpProxyUrl(protocol, env) !== undefined; +} diff --git a/src/infra/net/proxy-fetch.test.ts b/src/infra/net/proxy-fetch.test.ts index a10c83d1a07..331cd1ac6ea 100644 --- a/src/infra/net/proxy-fetch.test.ts +++ b/src/infra/net/proxy-fetch.test.ts @@ -73,11 +73,7 @@ describe("resolveProxyFetchFromEnv", () => { }); it("returns proxy fetch using EnvHttpProxyAgent when HTTPS_PROXY is set", async () => { - // Stub empty vars first — on Windows, process.env is case-insensitive so - // HTTPS_PROXY and https_proxy share the same slot. Value must be set LAST. 
vi.stubEnv("HTTP_PROXY", ""); - vi.stubEnv("https_proxy", ""); - vi.stubEnv("http_proxy", ""); vi.stubEnv("HTTPS_PROXY", "http://proxy.test:8080"); undiciFetch.mockResolvedValue({ ok: true }); @@ -94,8 +90,6 @@ describe("resolveProxyFetchFromEnv", () => { it("returns proxy fetch when HTTP_PROXY is set", () => { vi.stubEnv("HTTPS_PROXY", ""); - vi.stubEnv("https_proxy", ""); - vi.stubEnv("http_proxy", ""); vi.stubEnv("HTTP_PROXY", "http://fallback.test:3128"); const fetchFn = resolveProxyFetchFromEnv(); diff --git a/src/infra/net/proxy-fetch.ts b/src/infra/net/proxy-fetch.ts index 391387f3cca..7305cbfcc5c 100644 --- a/src/infra/net/proxy-fetch.ts +++ b/src/infra/net/proxy-fetch.ts @@ -1,5 +1,6 @@ import { EnvHttpProxyAgent, ProxyAgent, fetch as undiciFetch } from "undici"; import { logWarn } from "../../logger.js"; +import { hasEnvHttpProxyConfigured } from "./proxy-env.js"; export const PROXY_FETCH_PROXY_URL = Symbol.for("openclaw.proxyFetch.proxyUrl"); type ProxyFetchWithMetadata = typeof fetch & { @@ -51,12 +52,7 @@ export function getProxyUrlFromFetch(fetchImpl?: typeof fetch): string | undefin * Gracefully returns undefined if the proxy URL is malformed. 
*/ export function resolveProxyFetchFromEnv(): typeof fetch | undefined { - const proxyUrl = - process.env.HTTPS_PROXY || - process.env.HTTP_PROXY || - process.env.https_proxy || - process.env.http_proxy; - if (!proxyUrl?.trim()) { + if (!hasEnvHttpProxyConfigured("https")) { return undefined; } try { diff --git a/src/infra/net/undici-global-dispatcher.test.ts b/src/infra/net/undici-global-dispatcher.test.ts index 0c4d5793b57..8b14c4084fc 100644 --- a/src/infra/net/undici-global-dispatcher.test.ts +++ b/src/infra/net/undici-global-dispatcher.test.ts @@ -57,8 +57,14 @@ vi.mock("node:net", () => ({ getDefaultAutoSelectFamily, })); +vi.mock("./proxy-env.js", () => ({ + hasEnvHttpProxyConfigured: vi.fn(() => false), +})); + +import { hasEnvHttpProxyConfigured } from "./proxy-env.js"; import { DEFAULT_UNDICI_STREAM_TIMEOUT_MS, + ensureGlobalUndiciEnvProxyDispatcher, ensureGlobalUndiciStreamTimeouts, resetGlobalUndiciStreamTimeoutsForTests, } from "./undici-global-dispatcher.js"; @@ -69,6 +75,7 @@ describe("ensureGlobalUndiciStreamTimeouts", () => { resetGlobalUndiciStreamTimeoutsForTests(); setCurrentDispatcher(new Agent()); getDefaultAutoSelectFamily.mockReturnValue(undefined); + vi.mocked(hasEnvHttpProxyConfigured).mockReturnValue(false); }); it("replaces default Agent dispatcher with extended stream timeouts", () => { @@ -136,3 +143,66 @@ describe("ensureGlobalUndiciStreamTimeouts", () => { }); }); }); + +describe("ensureGlobalUndiciEnvProxyDispatcher", () => { + beforeEach(() => { + vi.clearAllMocks(); + resetGlobalUndiciStreamTimeoutsForTests(); + setCurrentDispatcher(new Agent()); + vi.mocked(hasEnvHttpProxyConfigured).mockReturnValue(false); + }); + + it("installs EnvHttpProxyAgent when env HTTP proxy is configured on a default Agent", () => { + vi.mocked(hasEnvHttpProxyConfigured).mockReturnValue(true); + + ensureGlobalUndiciEnvProxyDispatcher(); + + expect(setGlobalDispatcher).toHaveBeenCalledTimes(1); + 
expect(getCurrentDispatcher()).toBeInstanceOf(EnvHttpProxyAgent); + }); + + it("does not override unsupported custom proxy dispatcher types", () => { + vi.mocked(hasEnvHttpProxyConfigured).mockReturnValue(true); + setCurrentDispatcher(new ProxyAgent("http://proxy.test:8080")); + + ensureGlobalUndiciEnvProxyDispatcher(); + + expect(setGlobalDispatcher).not.toHaveBeenCalled(); + }); + + it("retries proxy bootstrap after an unsupported dispatcher later becomes a default Agent", () => { + vi.mocked(hasEnvHttpProxyConfigured).mockReturnValue(true); + setCurrentDispatcher(new ProxyAgent("http://proxy.test:8080")); + + ensureGlobalUndiciEnvProxyDispatcher(); + expect(setGlobalDispatcher).not.toHaveBeenCalled(); + + setCurrentDispatcher(new Agent()); + ensureGlobalUndiciEnvProxyDispatcher(); + + expect(setGlobalDispatcher).toHaveBeenCalledTimes(1); + expect(getCurrentDispatcher()).toBeInstanceOf(EnvHttpProxyAgent); + }); + + it("is idempotent after proxy bootstrap succeeds", () => { + vi.mocked(hasEnvHttpProxyConfigured).mockReturnValue(true); + + ensureGlobalUndiciEnvProxyDispatcher(); + ensureGlobalUndiciEnvProxyDispatcher(); + + expect(setGlobalDispatcher).toHaveBeenCalledTimes(1); + }); + + it("reinstalls env proxy if an external change later reverts the dispatcher to Agent", () => { + vi.mocked(hasEnvHttpProxyConfigured).mockReturnValue(true); + + ensureGlobalUndiciEnvProxyDispatcher(); + expect(setGlobalDispatcher).toHaveBeenCalledTimes(1); + + setCurrentDispatcher(new Agent()); + ensureGlobalUndiciEnvProxyDispatcher(); + + expect(setGlobalDispatcher).toHaveBeenCalledTimes(2); + expect(getCurrentDispatcher()).toBeInstanceOf(EnvHttpProxyAgent); + }); +}); diff --git a/src/infra/net/undici-global-dispatcher.ts b/src/infra/net/undici-global-dispatcher.ts index b63ff5688bb..994af564777 100644 --- a/src/infra/net/undici-global-dispatcher.ts +++ b/src/infra/net/undici-global-dispatcher.ts @@ -1,11 +1,13 @@ import * as net from "node:net"; import { Agent, EnvHttpProxyAgent, 
getGlobalDispatcher, setGlobalDispatcher } from "undici"; +import { hasEnvHttpProxyConfigured } from "./proxy-env.js"; export const DEFAULT_UNDICI_STREAM_TIMEOUT_MS = 30 * 60 * 1000; const AUTO_SELECT_FAMILY_ATTEMPT_TIMEOUT_MS = 300; -let lastAppliedDispatcherKey: string | null = null; +let lastAppliedTimeoutKey: string | null = null; +let lastAppliedProxyBootstrap = false; type DispatcherKind = "agent" | "env-proxy" | "unsupported"; @@ -59,28 +61,59 @@ function resolveDispatcherKey(params: { return `${params.kind}:${params.timeoutMs}:${autoSelectToken}`; } +function resolveCurrentDispatcherKind(): DispatcherKind | null { + let dispatcher: unknown; + try { + dispatcher = getGlobalDispatcher(); + } catch { + return null; + } + + const currentKind = resolveDispatcherKind(dispatcher); + return currentKind === "unsupported" ? null : currentKind; +} + +export function ensureGlobalUndiciEnvProxyDispatcher(): void { + const shouldUseEnvProxy = hasEnvHttpProxyConfigured("https"); + if (!shouldUseEnvProxy) { + return; + } + if (lastAppliedProxyBootstrap) { + if (resolveCurrentDispatcherKind() === "env-proxy") { + return; + } + lastAppliedProxyBootstrap = false; + } + const currentKind = resolveCurrentDispatcherKind(); + if (currentKind === null) { + return; + } + if (currentKind === "env-proxy") { + lastAppliedProxyBootstrap = true; + return; + } + try { + setGlobalDispatcher(new EnvHttpProxyAgent()); + lastAppliedProxyBootstrap = true; + } catch { + // Best-effort bootstrap only. + } +} + export function ensureGlobalUndiciStreamTimeouts(opts?: { timeoutMs?: number }): void { const timeoutMsRaw = opts?.timeoutMs ?? 
DEFAULT_UNDICI_STREAM_TIMEOUT_MS; const timeoutMs = Math.max(1, Math.floor(timeoutMsRaw)); if (!Number.isFinite(timeoutMsRaw)) { return; } - - let dispatcher: unknown; - try { - dispatcher = getGlobalDispatcher(); - } catch { - return; - } - - const kind = resolveDispatcherKind(dispatcher); - if (kind === "unsupported") { + const kind = resolveCurrentDispatcherKind(); + if (kind === null) { return; } const autoSelectFamily = resolveAutoSelectFamily(); const nextKey = resolveDispatcherKey({ kind, timeoutMs, autoSelectFamily }); - if (lastAppliedDispatcherKey === nextKey) { + if (lastAppliedTimeoutKey === nextKey) { return; } @@ -102,12 +135,13 @@ export function ensureGlobalUndiciStreamTimeouts(opts?: { timeoutMs?: number }): }), ); } - lastAppliedDispatcherKey = nextKey; + lastAppliedTimeoutKey = nextKey; } catch { // Best-effort hardening only. } } export function resetGlobalUndiciStreamTimeoutsForTests(): void { - lastAppliedDispatcherKey = null; + lastAppliedTimeoutKey = null; + lastAppliedProxyBootstrap = false; } diff --git a/src/infra/process-respawn.test.ts b/src/infra/process-respawn.test.ts index 7b9a9df1252..bacf4e1b24b 100644 --- a/src/infra/process-respawn.test.ts +++ b/src/infra/process-respawn.test.ts @@ -4,6 +4,7 @@ import { SUPERVISOR_HINT_ENV_VARS } from "./supervisor-markers.js"; const spawnMock = vi.hoisted(() => vi.fn()); const triggerOpenClawRestartMock = vi.hoisted(() => vi.fn()); +const scheduleDetachedLaunchdRestartHandoffMock = vi.hoisted(() => vi.fn()); vi.mock("node:child_process", () => ({ spawn: (...args: unknown[]) => spawnMock(...args), @@ -11,6 +12,10 @@ vi.mock("node:child_process", () => ({ vi.mock("./restart.js", () => ({ triggerOpenClawRestart: (...args: unknown[]) => triggerOpenClawRestartMock(...args), })); +vi.mock("../daemon/launchd-restart-handoff.js", () => ({ + scheduleDetachedLaunchdRestartHandoff: (...args: unknown[]) => + scheduleDetachedLaunchdRestartHandoffMock(...args), +})); import { 
restartGatewayProcessWithFreshPid } from "./process-respawn.js"; @@ -35,6 +40,8 @@ afterEach(() => { process.execArgv = [...originalExecArgv]; spawnMock.mockClear(); triggerOpenClawRestartMock.mockClear(); + scheduleDetachedLaunchdRestartHandoffMock.mockReset(); + scheduleDetachedLaunchdRestartHandoffMock.mockReturnValue({ ok: true, pid: 8123 }); if (originalPlatformDescriptor) { Object.defineProperty(process, "platform", originalPlatformDescriptor); } @@ -54,6 +61,11 @@ function expectLaunchdSupervisedWithoutKickstart(params?: { launchJobLabel?: str process.env.OPENCLAW_LAUNCHD_LABEL = "ai.openclaw.gateway"; const result = restartGatewayProcessWithFreshPid(); expect(result.mode).toBe("supervised"); + expect(scheduleDetachedLaunchdRestartHandoffMock).toHaveBeenCalledWith({ + env: process.env, + mode: "start-after-exit", + waitForPid: process.pid, + }); expect(triggerOpenClawRestartMock).not.toHaveBeenCalled(); expect(spawnMock).not.toHaveBeenCalled(); } @@ -72,6 +84,12 @@ describe("restartGatewayProcessWithFreshPid", () => { process.env.LAUNCH_JOB_LABEL = "ai.openclaw.gateway"; const result = restartGatewayProcessWithFreshPid(); expect(result.mode).toBe("supervised"); + expect(result.detail).toContain("launchd restart handoff"); + expect(scheduleDetachedLaunchdRestartHandoffMock).toHaveBeenCalledWith({ + env: process.env, + mode: "start-after-exit", + waitForPid: process.pid, + }); expect(triggerOpenClawRestartMock).not.toHaveBeenCalled(); expect(spawnMock).not.toHaveBeenCalled(); }); @@ -96,6 +114,25 @@ describe("restartGatewayProcessWithFreshPid", () => { expect(triggerOpenClawRestartMock).not.toHaveBeenCalled(); }); + it("falls back to plain supervised exit when launchd handoff scheduling fails", () => { + clearSupervisorHints(); + setPlatform("darwin"); + process.env.XPC_SERVICE_NAME = "ai.openclaw.gateway"; + scheduleDetachedLaunchdRestartHandoffMock.mockReturnValue({ + ok: false, + detail: "spawn failed", + }); + + const result = 
restartGatewayProcessWithFreshPid(); + + expect(result).toEqual({ + mode: "supervised", + detail: "launchd exit fallback (spawn failed)", + }); + expect(triggerOpenClawRestartMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + }); + it("does not schedule kickstart on non-darwin platforms", () => { setPlatform("linux"); process.env.INVOCATION_ID = "abc123"; diff --git a/src/infra/process-respawn.ts b/src/infra/process-respawn.ts index 8bf1503b18f..473319f86fb 100644 --- a/src/infra/process-respawn.ts +++ b/src/infra/process-respawn.ts @@ -1,4 +1,5 @@ import { spawn } from "node:child_process"; +import { scheduleDetachedLaunchdRestartHandoff } from "../daemon/launchd-restart-handoff.js"; import { triggerOpenClawRestart } from "./restart.js"; import { detectRespawnSupervisor } from "./supervisor-markers.js"; @@ -30,10 +31,25 @@ export function restartGatewayProcessWithFreshPid(): GatewayRespawnResult { } const supervisor = detectRespawnSupervisor(process.env); if (supervisor) { - // launchd: exit(0) is sufficient — KeepAlive=true restarts the service. - // Self-issued `kickstart -k` races with launchd's bootout state machine - // and can leave the LaunchAgent permanently unloaded. - // See: https://github.com/openclaw/openclaw/issues/39760 + // Hand off launchd restarts to a detached helper before exiting so config + // reloads and SIGUSR1-driven restarts do not depend on exit/respawn timing. + if (supervisor === "launchd") { + const handoff = scheduleDetachedLaunchdRestartHandoff({ + env: process.env, + mode: "start-after-exit", + waitForPid: process.pid, + }); + if (!handoff.ok) { + return { + mode: "supervised", + detail: `launchd exit fallback (${handoff.detail ?? "restart handoff failed"})`, + }; + } + return { + mode: "supervised", + detail: `launchd restart handoff pid ${handoff.pid ?? 
"unknown"}`, + }; + } if (supervisor === "schtasks") { const restart = triggerOpenClawRestart(); if (!restart.ok) { diff --git a/src/install-sh-version.test.ts b/src/install-sh-version.test.ts index 824a5366efd..12336b803d6 100644 --- a/src/install-sh-version.test.ts +++ b/src/install-sh-version.test.ts @@ -73,10 +73,10 @@ describe("install.sh version resolution", () => { it.runIf(process.platform !== "win32")( "extracts the semantic version from decorated CLI output", () => { - const fixture = withFakeCli("OpenClaw 2026.3.9 (abcdef0)"); + const fixture = withFakeCli("OpenClaw 2026.3.10 (abcdef0)"); tempRoots.push(fixture.root); - expect(resolveVersionFromInstaller(fixture.cliPath)).toBe("2026.3.9"); + expect(resolveVersionFromInstaller(fixture.cliPath)).toBe("2026.3.10"); }, ); @@ -93,7 +93,7 @@ describe("install.sh version resolution", () => { it.runIf(process.platform !== "win32")( "does not source version helpers from cwd when installer runs via stdin", () => { - const fixture = withFakeCli("OpenClaw 2026.3.9 (abcdef0)"); + const fixture = withFakeCli("OpenClaw 2026.3.10 (abcdef0)"); tempRoots.push(fixture.root); const hostileCwd = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-install-stdin-")); @@ -115,7 +115,7 @@ extract_openclaw_semver() { "utf-8", ); - expect(resolveVersionFromInstallerViaStdin(fixture.cliPath, hostileCwd)).toBe("2026.3.9"); + expect(resolveVersionFromInstallerViaStdin(fixture.cliPath, hostileCwd)).toBe("2026.3.10"); }, ); }); diff --git a/src/media/mime.ts b/src/media/mime.ts index e551350c057..4c9b6bf1db3 100644 --- a/src/media/mime.ts +++ b/src/media/mime.ts @@ -12,6 +12,10 @@ const EXT_BY_MIME: Record = { "image/gif": ".gif", "audio/ogg": ".ogg", "audio/mpeg": ".mp3", + "audio/wav": ".wav", + "audio/flac": ".flac", + "audio/aac": ".aac", + "audio/opus": ".opus", "audio/x-m4a": ".m4a", "audio/mp4": ".m4a", "video/mp4": ".mp4", diff --git a/src/memory/batch-gemini.test.ts b/src/memory/batch-gemini.test.ts new file mode 100644 index 
00000000000..0cbada7293b --- /dev/null +++ b/src/memory/batch-gemini.test.ts @@ -0,0 +1,102 @@ +import { afterEach, beforeAll, describe, expect, it, vi } from "vitest"; +import type { GeminiEmbeddingClient } from "./embeddings-gemini.js"; + +function magnitude(values: number[]) { + return Math.sqrt(values.reduce((sum, value) => sum + value * value, 0)); +} + +describe("runGeminiEmbeddingBatches", () => { + let runGeminiEmbeddingBatches: typeof import("./batch-gemini.js").runGeminiEmbeddingBatches; + + beforeAll(async () => { + ({ runGeminiEmbeddingBatches } = await import("./batch-gemini.js")); + }); + + afterEach(() => { + vi.resetAllMocks(); + vi.unstubAllGlobals(); + }); + + const mockClient: GeminiEmbeddingClient = { + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + headers: {}, + model: "gemini-embedding-2-preview", + modelPath: "models/gemini-embedding-2-preview", + apiKeys: ["test-key"], + outputDimensionality: 1536, + }; + + it("includes outputDimensionality in batch upload requests", async () => { + const fetchMock = vi.fn(async (input: RequestInfo | URL, init?: RequestInit) => { + const url = + typeof input === "string" ? input : input instanceof URL ? 
input.toString() : input.url; + if (url.includes("/upload/v1beta/files?uploadType=multipart")) { + const body = init?.body; + if (!(body instanceof Blob)) { + throw new Error("expected multipart blob body"); + } + const text = await body.text(); + expect(text).toContain('"taskType":"RETRIEVAL_DOCUMENT"'); + expect(text).toContain('"outputDimensionality":1536'); + return new Response(JSON.stringify({ name: "files/file-123" }), { + status: 200, + headers: { "Content-Type": "application/json" }, + }); + } + if (url.endsWith(":asyncBatchEmbedContent")) { + return new Response( + JSON.stringify({ + name: "batches/batch-1", + state: "COMPLETED", + outputConfig: { file: "files/output-1" }, + }), + { + status: 200, + headers: { "Content-Type": "application/json" }, + }, + ); + } + if (url.endsWith("/files/output-1:download")) { + return new Response( + JSON.stringify({ + key: "req-1", + response: { embedding: { values: [3, 4] } }, + }), + { + status: 200, + headers: { "Content-Type": "application/jsonl" }, + }, + ); + } + throw new Error(`unexpected fetch ${url}`); + }); + + vi.stubGlobal("fetch", fetchMock); + + const results = await runGeminiEmbeddingBatches({ + gemini: mockClient, + agentId: "main", + requests: [ + { + custom_id: "req-1", + request: { + content: { parts: [{ text: "hello world" }] }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 1536, + }, + }, + ], + wait: true, + pollIntervalMs: 1, + timeoutMs: 1000, + concurrency: 1, + }); + + const embedding = results.get("req-1"); + expect(embedding).toBeDefined(); + expect(embedding?.[0]).toBeCloseTo(0.6, 5); + expect(embedding?.[1]).toBeCloseTo(0.8, 5); + expect(magnitude(embedding ?? 
[])).toBeCloseTo(1, 5); + expect(fetchMock).toHaveBeenCalledTimes(3); + }); +}); diff --git a/src/memory/batch-gemini.ts b/src/memory/batch-gemini.ts index 998f283b676..4bdc9fa055e 100644 --- a/src/memory/batch-gemini.ts +++ b/src/memory/batch-gemini.ts @@ -4,15 +4,15 @@ import { type EmbeddingBatchExecutionParams, } from "./batch-runner.js"; import { buildBatchHeaders, normalizeBatchBaseUrl } from "./batch-utils.js"; +import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js"; import { debugEmbeddingsLog } from "./embeddings-debug.js"; -import type { GeminiEmbeddingClient } from "./embeddings-gemini.js"; +import type { GeminiEmbeddingClient, GeminiTextEmbeddingRequest } from "./embeddings-gemini.js"; import { hashText } from "./internal.js"; import { withRemoteHttpResponse } from "./remote-http.js"; export type GeminiBatchRequest = { custom_id: string; - content: { parts: Array<{ text: string }> }; - taskType: "RETRIEVAL_DOCUMENT" | "RETRIEVAL_QUERY"; + request: GeminiTextEmbeddingRequest; }; export type GeminiBatchStatus = { @@ -82,10 +82,7 @@ async function submitGeminiBatch(params: { .map((request) => JSON.stringify({ key: request.custom_id, - request: { - content: request.content, - task_type: request.taskType, - }, + request: request.request, }), ) .join("\n"); @@ -350,7 +347,9 @@ export async function runGeminiEmbeddingBatches( errors.push(`${customId}: ${line.response.error.message}`); continue; } - const embedding = line.embedding?.values ?? line.response?.embedding?.values ?? []; + const embedding = sanitizeAndNormalizeEmbedding( + line.embedding?.values ?? line.response?.embedding?.values ?? 
[], + ); if (embedding.length === 0) { errors.push(`${customId}: empty embedding`); continue; diff --git a/src/memory/embedding-chunk-limits.ts b/src/memory/embedding-chunk-limits.ts index 033b30a84a3..5c8cf9020f3 100644 --- a/src/memory/embedding-chunk-limits.ts +++ b/src/memory/embedding-chunk-limits.ts @@ -1,4 +1,5 @@ import { estimateUtf8Bytes, splitTextToUtf8ByteLimit } from "./embedding-input-limits.js"; +import { hasNonTextEmbeddingParts } from "./embedding-inputs.js"; import { resolveEmbeddingMaxInputTokens } from "./embedding-model-limits.js"; import type { EmbeddingProvider } from "./embeddings.js"; import { hashText, type MemoryChunk } from "./internal.js"; @@ -16,6 +17,10 @@ export function enforceEmbeddingMaxInputTokens( const out: MemoryChunk[] = []; for (const chunk of chunks) { + if (hasNonTextEmbeddingParts(chunk.embeddingInput)) { + out.push(chunk); + continue; + } if (estimateUtf8Bytes(chunk.text) <= maxInputTokens) { out.push(chunk); continue; @@ -27,6 +32,7 @@ export function enforceEmbeddingMaxInputTokens( endLine: chunk.endLine, text, hash: hashText(text), + embeddingInput: { text }, }); } } diff --git a/src/memory/embedding-input-limits.ts b/src/memory/embedding-input-limits.ts index dad83bb7aa7..4eadf1bf48d 100644 --- a/src/memory/embedding-input-limits.ts +++ b/src/memory/embedding-input-limits.ts @@ -1,3 +1,5 @@ +import type { EmbeddingInput } from "./embedding-inputs.js"; + // Helpers for enforcing embedding model input size limits. // // We use UTF-8 byte length as a conservative upper bound for tokenizer output. 
@@ -11,6 +13,22 @@ export function estimateUtf8Bytes(text: string): number { return Buffer.byteLength(text, "utf8"); } +export function estimateStructuredEmbeddingInputBytes(input: EmbeddingInput): number { + if (!input.parts?.length) { + return estimateUtf8Bytes(input.text); + } + let total = 0; + for (const part of input.parts) { + if (part.type === "text") { + total += estimateUtf8Bytes(part.text); + continue; + } + total += estimateUtf8Bytes(part.mimeType); + total += estimateUtf8Bytes(part.data); + } + return total; +} + export function splitTextToUtf8ByteLimit(text: string, maxUtf8Bytes: number): string[] { if (maxUtf8Bytes <= 0) { return [text]; diff --git a/src/memory/embedding-inputs.ts b/src/memory/embedding-inputs.ts new file mode 100644 index 00000000000..767a463f740 --- /dev/null +++ b/src/memory/embedding-inputs.ts @@ -0,0 +1,34 @@ +export type EmbeddingInputTextPart = { + type: "text"; + text: string; +}; + +export type EmbeddingInputInlineDataPart = { + type: "inline-data"; + mimeType: string; + data: string; +}; + +export type EmbeddingInputPart = EmbeddingInputTextPart | EmbeddingInputInlineDataPart; + +export type EmbeddingInput = { + text: string; + parts?: EmbeddingInputPart[]; +}; + +export function buildTextEmbeddingInput(text: string): EmbeddingInput { + return { text }; +} + +export function isInlineDataEmbeddingInputPart( + part: EmbeddingInputPart, +): part is EmbeddingInputInlineDataPart { + return part.type === "inline-data"; +} + +export function hasNonTextEmbeddingParts(input: EmbeddingInput | undefined): boolean { + if (!input?.parts?.length) { + return false; + } + return input.parts.some((part) => isInlineDataEmbeddingInputPart(part)); +} diff --git a/src/memory/embedding-model-limits.ts b/src/memory/embedding-model-limits.ts index b9960009606..0819686b905 100644 --- a/src/memory/embedding-model-limits.ts +++ b/src/memory/embedding-model-limits.ts @@ -8,6 +8,8 @@ const KNOWN_EMBEDDING_MAX_INPUT_TOKENS: Record = { 
"openai:text-embedding-3-large": 8192, "openai:text-embedding-ada-002": 8191, "gemini:text-embedding-004": 2048, + "gemini:gemini-embedding-001": 2048, + "gemini:gemini-embedding-2-preview": 8192, "voyage:voyage-3": 32000, "voyage:voyage-3-lite": 16000, "voyage:voyage-code-3": 32000, diff --git a/src/memory/embedding-vectors.ts b/src/memory/embedding-vectors.ts new file mode 100644 index 00000000000..d589f61390d --- /dev/null +++ b/src/memory/embedding-vectors.ts @@ -0,0 +1,8 @@ +export function sanitizeAndNormalizeEmbedding(vec: number[]): number[] { + const sanitized = vec.map((value) => (Number.isFinite(value) ? value : 0)); + const magnitude = Math.sqrt(sanitized.reduce((sum, value) => sum + value * value, 0)); + if (magnitude < 1e-10) { + return sanitized; + } + return sanitized.map((value) => value / magnitude); +} diff --git a/src/memory/embeddings-gemini.test.ts b/src/memory/embeddings-gemini.test.ts new file mode 100644 index 00000000000..f97cc6cb142 --- /dev/null +++ b/src/memory/embeddings-gemini.test.ts @@ -0,0 +1,609 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import * as authModule from "../agents/model-auth.js"; +import { + buildGeminiEmbeddingRequest, + buildGeminiTextEmbeddingRequest, + createGeminiEmbeddingProvider, + DEFAULT_GEMINI_EMBEDDING_MODEL, + GEMINI_EMBEDDING_2_MODELS, + isGeminiEmbedding2Model, + resolveGeminiOutputDimensionality, +} from "./embeddings-gemini.js"; + +vi.mock("../agents/model-auth.js", async () => { + const { createModelAuthMockModule } = await import("../test-utils/model-auth-mock.js"); + return createModelAuthMockModule(); +}); + +const createGeminiFetchMock = (embeddingValues = [1, 2, 3]) => + vi.fn(async (_input?: unknown, _init?: unknown) => ({ + ok: true, + status: 200, + json: async () => ({ embedding: { values: embeddingValues } }), + })); + +const createGeminiBatchFetchMock = (count: number, embeddingValues = [1, 2, 3]) => + vi.fn(async (_input?: unknown, _init?: unknown) => ({ + ok: true, 
+ status: 200, + json: async () => ({ + embeddings: Array.from({ length: count }, () => ({ values: embeddingValues })), + }), + })); + +function readFirstFetchRequest(fetchMock: { mock: { calls: unknown[][] } }) { + const [url, init] = fetchMock.mock.calls[0] ?? []; + return { url, init: init as RequestInit | undefined }; +} + +function parseFetchBody(fetchMock: { mock: { calls: unknown[][] } }, callIndex = 0) { + const init = fetchMock.mock.calls[callIndex]?.[1] as RequestInit | undefined; + return JSON.parse((init?.body as string) ?? "{}") as Record; +} + +function magnitude(values: number[]) { + return Math.sqrt(values.reduce((sum, value) => sum + value * value, 0)); +} + +afterEach(() => { + vi.resetAllMocks(); + vi.unstubAllGlobals(); +}); + +function mockResolvedProviderKey(apiKey = "test-key") { + vi.mocked(authModule.resolveApiKeyForProvider).mockResolvedValue({ + apiKey, + mode: "api-key", + source: "test", + }); +} + +describe("buildGeminiTextEmbeddingRequest", () => { + it("builds a text embedding request with optional model and dimensions", () => { + expect( + buildGeminiTextEmbeddingRequest({ + text: "hello", + taskType: "RETRIEVAL_DOCUMENT", + modelPath: "models/gemini-embedding-2-preview", + outputDimensionality: 1536, + }), + ).toEqual({ + model: "models/gemini-embedding-2-preview", + content: { parts: [{ text: "hello" }] }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 1536, + }); + }); +}); + +describe("buildGeminiEmbeddingRequest", () => { + it("builds a multimodal request from structured input parts", () => { + expect( + buildGeminiEmbeddingRequest({ + input: { + text: "Image file: diagram.png", + parts: [ + { type: "text", text: "Image file: diagram.png" }, + { type: "inline-data", mimeType: "image/png", data: "abc123" }, + ], + }, + taskType: "RETRIEVAL_DOCUMENT", + modelPath: "models/gemini-embedding-2-preview", + outputDimensionality: 1536, + }), + ).toEqual({ + model: "models/gemini-embedding-2-preview", + content: { + parts: [ 
+ { text: "Image file: diagram.png" }, + { inlineData: { mimeType: "image/png", data: "abc123" } }, + ], + }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 1536, + }); + }); +}); + +// ---------- Model detection ---------- + +describe("isGeminiEmbedding2Model", () => { + it("returns true for gemini-embedding-2-preview", () => { + expect(isGeminiEmbedding2Model("gemini-embedding-2-preview")).toBe(true); + }); + + it("returns false for gemini-embedding-001", () => { + expect(isGeminiEmbedding2Model("gemini-embedding-001")).toBe(false); + }); + + it("returns false for text-embedding-004", () => { + expect(isGeminiEmbedding2Model("text-embedding-004")).toBe(false); + }); +}); + +describe("GEMINI_EMBEDDING_2_MODELS", () => { + it("contains gemini-embedding-2-preview", () => { + expect(GEMINI_EMBEDDING_2_MODELS.has("gemini-embedding-2-preview")).toBe(true); + }); +}); + +// ---------- Dimension resolution ---------- + +describe("resolveGeminiOutputDimensionality", () => { + it("returns undefined for non-v2 models", () => { + expect(resolveGeminiOutputDimensionality("gemini-embedding-001")).toBeUndefined(); + expect(resolveGeminiOutputDimensionality("text-embedding-004")).toBeUndefined(); + }); + + it("returns 3072 by default for v2 models", () => { + expect(resolveGeminiOutputDimensionality("gemini-embedding-2-preview")).toBe(3072); + }); + + it("accepts valid dimension values", () => { + expect(resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 768)).toBe(768); + expect(resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 1536)).toBe(1536); + expect(resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 3072)).toBe(3072); + }); + + it("throws for invalid dimension values", () => { + expect(() => resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 512)).toThrow( + /Invalid outputDimensionality 512/, + ); + expect(() => resolveGeminiOutputDimensionality("gemini-embedding-2-preview", 1024)).toThrow( + /Valid values: 
768, 1536, 3072/, + ); + }); +}); + +// ---------- Provider: gemini-embedding-001 (backward compat) ---------- + +describe("gemini-embedding-001 provider (backward compat)", () => { + it("does NOT include outputDimensionality in embedQuery", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-001", + fallback: "none", + }); + + await provider.embedQuery("test query"); + + const body = parseFetchBody(fetchMock); + expect(body).not.toHaveProperty("outputDimensionality"); + expect(body.taskType).toBe("RETRIEVAL_QUERY"); + expect(body.content).toEqual({ parts: [{ text: "test query" }] }); + }); + + it("does NOT include outputDimensionality in embedBatch", async () => { + const fetchMock = createGeminiBatchFetchMock(2); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-001", + fallback: "none", + }); + + await provider.embedBatch(["text1", "text2"]); + + const body = parseFetchBody(fetchMock); + expect(body).not.toHaveProperty("outputDimensionality"); + }); +}); + +// ---------- Provider: gemini-embedding-2-preview ---------- + +describe("gemini-embedding-2-preview provider", () => { + it("includes outputDimensionality in embedQuery request", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test query"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(3072); + expect(body.taskType).toBe("RETRIEVAL_QUERY"); 
+ expect(body.content).toEqual({ parts: [{ text: "test query" }] }); + }); + + it("normalizes embedQuery response vectors", async () => { + const fetchMock = createGeminiFetchMock([3, 4]); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const embedding = await provider.embedQuery("test query"); + + expect(embedding[0]).toBeCloseTo(0.6, 5); + expect(embedding[1]).toBeCloseTo(0.8, 5); + expect(magnitude(embedding)).toBeCloseTo(1, 5); + }); + + it("includes outputDimensionality in embedBatch request", async () => { + const fetchMock = createGeminiBatchFetchMock(2); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedBatch(["text1", "text2"]); + + const body = parseFetchBody(fetchMock); + expect(body.requests).toEqual([ + { + model: "models/gemini-embedding-2-preview", + content: { parts: [{ text: "text1" }] }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 3072, + }, + { + model: "models/gemini-embedding-2-preview", + content: { parts: [{ text: "text2" }] }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 3072, + }, + ]); + }); + + it("normalizes embedBatch response vectors", async () => { + const fetchMock = createGeminiBatchFetchMock(2, [3, 4]); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const embeddings = await provider.embedBatch(["text1", "text2"]); + + expect(embeddings).toHaveLength(2); + for (const embedding of embeddings) { + 
expect(embedding[0]).toBeCloseTo(0.6, 5); + expect(embedding[1]).toBeCloseTo(0.8, 5); + expect(magnitude(embedding)).toBeCloseTo(1, 5); + } + }); + + it("respects custom outputDimensionality", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + outputDimensionality: 768, + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(768); + }); + + it("sanitizes and normalizes embedQuery responses", async () => { + const fetchMock = createGeminiFetchMock([3, 4, Number.NaN]); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await expect(provider.embedQuery("test")).resolves.toEqual([0.6, 0.8, 0]); + }); + + it("uses custom outputDimensionality for each embedBatch request", async () => { + const fetchMock = createGeminiBatchFetchMock(2); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + outputDimensionality: 768, + }); + + await provider.embedBatch(["text1", "text2"]); + + const body = parseFetchBody(fetchMock); + expect(body.requests).toEqual([ + expect.objectContaining({ outputDimensionality: 768 }), + expect.objectContaining({ outputDimensionality: 768 }), + ]); + }); + + it("sanitizes and normalizes structured batch responses", async () => { + const fetchMock = createGeminiBatchFetchMock(1, [0, Number.POSITIVE_INFINITY, 5]); + vi.stubGlobal("fetch", fetchMock); + 
mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await expect( + provider.embedBatchInputs?.([ + { + text: "Image file: diagram.png", + parts: [ + { type: "text", text: "Image file: diagram.png" }, + { type: "inline-data", mimeType: "image/png", data: "img" }, + ], + }, + ]), + ).resolves.toEqual([[0, 0, 1]]); + }); + + it("supports multimodal embedBatchInputs requests", async () => { + const fetchMock = createGeminiBatchFetchMock(2); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + expect(provider.embedBatchInputs).toBeDefined(); + await provider.embedBatchInputs?.([ + { + text: "Image file: diagram.png", + parts: [ + { type: "text", text: "Image file: diagram.png" }, + { type: "inline-data", mimeType: "image/png", data: "img" }, + ], + }, + { + text: "Audio file: note.wav", + parts: [ + { type: "text", text: "Audio file: note.wav" }, + { type: "inline-data", mimeType: "audio/wav", data: "aud" }, + ], + }, + ]); + + const body = parseFetchBody(fetchMock); + expect(body.requests).toEqual([ + { + model: "models/gemini-embedding-2-preview", + content: { + parts: [ + { text: "Image file: diagram.png" }, + { inlineData: { mimeType: "image/png", data: "img" } }, + ], + }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 3072, + }, + { + model: "models/gemini-embedding-2-preview", + content: { + parts: [ + { text: "Audio file: note.wav" }, + { inlineData: { mimeType: "audio/wav", data: "aud" } }, + ], + }, + taskType: "RETRIEVAL_DOCUMENT", + outputDimensionality: 3072, + }, + ]); + }); + + it("throws for invalid outputDimensionality", async () => { + mockResolvedProviderKey(); + + await expect( + 
createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + outputDimensionality: 512, + }), + ).rejects.toThrow(/Invalid outputDimensionality 512/); + }); + + it("sanitizes non-finite values before normalization", async () => { + const fetchMock = createGeminiFetchMock([ + 1, + Number.NaN, + Number.POSITIVE_INFINITY, + Number.NEGATIVE_INFINITY, + ]); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const embedding = await provider.embedQuery("test"); + + expect(embedding).toEqual([1, 0, 0, 0]); + }); + + it("uses correct endpoint URL", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test"); + + const { url } = readFirstFetchRequest(fetchMock); + expect(url).toBe( + "https://generativelanguage.googleapis.com/v1beta/models/gemini-embedding-2-preview:embedContent", + ); + }); + + it("allows taskType override via options", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + taskType: "SEMANTIC_SIMILARITY", + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.taskType).toBe("SEMANTIC_SIMILARITY"); + }); +}); + +// ---------- Model normalization ---------- + +describe("gemini model normalization", () => { + it("handles models/ prefix for v2 
model", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "models/gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(3072); + }); + + it("handles gemini/ prefix for v2 model", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini/gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(3072); + }); + + it("handles google/ prefix for v2 model", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "google/gemini-embedding-2-preview", + fallback: "none", + }); + + await provider.embedQuery("test"); + + const body = parseFetchBody(fetchMock); + expect(body.outputDimensionality).toBe(3072); + }); + + it("defaults to gemini-embedding-001 when model is empty", async () => { + const fetchMock = createGeminiFetchMock(); + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + + const { provider, client } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "", + fallback: "none", + }); + + expect(client.model).toBe(DEFAULT_GEMINI_EMBEDDING_MODEL); + expect(provider.model).toBe(DEFAULT_GEMINI_EMBEDDING_MODEL); + }); + + it("returns empty array for blank query text", async () => { + mockResolvedProviderKey(); + + 
const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const result = await provider.embedQuery(" "); + expect(result).toEqual([]); + }); + + it("returns empty array for empty batch", async () => { + mockResolvedProviderKey(); + + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + }); + + const result = await provider.embedBatch([]); + expect(result).toEqual([]); + }); +}); diff --git a/src/memory/embeddings-gemini.ts b/src/memory/embeddings-gemini.ts index 1d5cc5876ea..ab028241ed8 100644 --- a/src/memory/embeddings-gemini.ts +++ b/src/memory/embeddings-gemini.ts @@ -5,6 +5,8 @@ import { import { requireApiKey, resolveApiKeyForProvider } from "../agents/model-auth.js"; import { parseGeminiAuth } from "../infra/gemini-auth.js"; import type { SsrFPolicy } from "../infra/net/ssrf.js"; +import type { EmbeddingInput } from "./embedding-inputs.js"; +import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js"; import { debugEmbeddingsLog } from "./embeddings-debug.js"; import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js"; import { buildRemoteBaseUrlPolicy, withRemoteHttpResponse } from "./remote-http.js"; @@ -17,6 +19,7 @@ export type GeminiEmbeddingClient = { model: string; modelPath: string; apiKeys: string[]; + outputDimensionality?: number; }; const DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta"; @@ -24,6 +27,111 @@ export const DEFAULT_GEMINI_EMBEDDING_MODEL = "gemini-embedding-001"; const GEMINI_MAX_INPUT_TOKENS: Record = { "text-embedding-004": 2048, }; + +// --- gemini-embedding-2-preview support --- + +export const GEMINI_EMBEDDING_2_MODELS = new Set([ + "gemini-embedding-2-preview", + // Add the GA model name here once released. 
+]); + +const GEMINI_EMBEDDING_2_DEFAULT_DIMENSIONS = 3072; +const GEMINI_EMBEDDING_2_VALID_DIMENSIONS = [768, 1536, 3072] as const; + +export type GeminiTaskType = + | "RETRIEVAL_QUERY" + | "RETRIEVAL_DOCUMENT" + | "SEMANTIC_SIMILARITY" + | "CLASSIFICATION" + | "CLUSTERING" + | "QUESTION_ANSWERING" + | "FACT_VERIFICATION"; + +export type GeminiTextPart = { text: string }; +export type GeminiInlinePart = { + inlineData: { mimeType: string; data: string }; +}; +export type GeminiPart = GeminiTextPart | GeminiInlinePart; +export type GeminiEmbeddingRequest = { + content: { parts: GeminiPart[] }; + taskType: GeminiTaskType; + outputDimensionality?: number; + model?: string; +}; +export type GeminiTextEmbeddingRequest = GeminiEmbeddingRequest; + +/** Builds the text-only Gemini embedding request shape used across direct and batch APIs. */ +export function buildGeminiTextEmbeddingRequest(params: { + text: string; + taskType: GeminiTaskType; + outputDimensionality?: number; + modelPath?: string; +}): GeminiTextEmbeddingRequest { + return buildGeminiEmbeddingRequest({ + input: { text: params.text }, + taskType: params.taskType, + outputDimensionality: params.outputDimensionality, + modelPath: params.modelPath, + }); +} + +export function buildGeminiEmbeddingRequest(params: { + input: EmbeddingInput; + taskType: GeminiTaskType; + outputDimensionality?: number; + modelPath?: string; +}): GeminiEmbeddingRequest { + const request: GeminiEmbeddingRequest = { + content: { + parts: params.input.parts?.map((part) => + part.type === "text" + ? ({ text: part.text } satisfies GeminiTextPart) + : ({ + inlineData: { mimeType: part.mimeType, data: part.data }, + } satisfies GeminiInlinePart), + ) ?? 
[{ text: params.input.text }], + }, + taskType: params.taskType, + }; + if (params.modelPath) { + request.model = params.modelPath; + } + if (params.outputDimensionality != null) { + request.outputDimensionality = params.outputDimensionality; + } + return request; +} + +/** + * Returns true if the given model name is a gemini-embedding-2 variant that + * supports `outputDimensionality` and extended task types. + */ +export function isGeminiEmbedding2Model(model: string): boolean { + return GEMINI_EMBEDDING_2_MODELS.has(model); +} + +/** + * Validate and return the `outputDimensionality` for gemini-embedding-2 models. + * Returns `undefined` for older models (they don't support the param). + */ +export function resolveGeminiOutputDimensionality( + model: string, + requested?: number, +): number | undefined { + if (!isGeminiEmbedding2Model(model)) { + return undefined; + } + if (requested == null) { + return GEMINI_EMBEDDING_2_DEFAULT_DIMENSIONS; + } + const valid: readonly number[] = GEMINI_EMBEDDING_2_VALID_DIMENSIONS; + if (!valid.includes(requested)) { + throw new Error( + `Invalid outputDimensionality ${requested} for ${model}. 
Valid values: ${valid.join(", ")}`, + ); + } + return requested; +} function resolveRemoteApiKey(remoteApiKey: unknown): string | undefined { const trimmed = resolveMemorySecretInputString({ value: remoteApiKey, @@ -38,7 +146,7 @@ function resolveRemoteApiKey(remoteApiKey: unknown): string | undefined { return trimmed; } -function normalizeGeminiModel(model: string): string { +export function normalizeGeminiModel(model: string): string { const trimmed = model.trim(); if (!trimmed) { return DEFAULT_GEMINI_EMBEDDING_MODEL; @@ -53,6 +161,46 @@ function normalizeGeminiModel(model: string): string { return withoutPrefix; } +async function fetchGeminiEmbeddingPayload(params: { + client: GeminiEmbeddingClient; + endpoint: string; + body: unknown; +}): Promise<{ + embedding?: { values?: number[] }; + embeddings?: Array<{ values?: number[] }>; +}> { + return await executeWithApiKeyRotation({ + provider: "google", + apiKeys: params.client.apiKeys, + execute: async (apiKey) => { + const authHeaders = parseGeminiAuth(apiKey); + const headers = { + ...authHeaders.headers, + ...params.client.headers, + }; + return await withRemoteHttpResponse({ + url: params.endpoint, + ssrfPolicy: params.client.ssrfPolicy, + init: { + method: "POST", + headers, + body: JSON.stringify(params.body), + }, + onResponse: async (res) => { + if (!res.ok) { + const text = await res.text(); + throw new Error(`gemini embeddings failed: ${res.status} ${text}`); + } + return (await res.json()) as { + embedding?: { values?: number[] }; + embeddings?: Array<{ values?: number[] }>; + }; + }, + }); + }, + }); +} + function normalizeGeminiBaseUrl(raw: string): string { const trimmed = raw.replace(/\/+$/, ""); const openAiIndex = trimmed.indexOf("/openai"); @@ -73,70 +221,53 @@ export async function createGeminiEmbeddingProvider( const baseUrl = client.baseUrl.replace(/\/$/, ""); const embedUrl = `${baseUrl}/${client.modelPath}:embedContent`; const batchUrl = `${baseUrl}/${client.modelPath}:batchEmbedContents`; 
- - const fetchWithGeminiAuth = async (apiKey: string, endpoint: string, body: unknown) => { - const authHeaders = parseGeminiAuth(apiKey); - const headers = { - ...authHeaders.headers, - ...client.headers, - }; - const payload = await withRemoteHttpResponse({ - url: endpoint, - ssrfPolicy: client.ssrfPolicy, - init: { - method: "POST", - headers, - body: JSON.stringify(body), - }, - onResponse: async (res) => { - if (!res.ok) { - const text = await res.text(); - throw new Error(`gemini embeddings failed: ${res.status} ${text}`); - } - return (await res.json()) as { - embedding?: { values?: number[] }; - embeddings?: Array<{ values?: number[] }>; - }; - }, - }); - return payload; - }; + const isV2 = isGeminiEmbedding2Model(client.model); + const outputDimensionality = client.outputDimensionality; const embedQuery = async (text: string): Promise => { if (!text.trim()) { return []; } - const payload = await executeWithApiKeyRotation({ - provider: "google", - apiKeys: client.apiKeys, - execute: (apiKey) => - fetchWithGeminiAuth(apiKey, embedUrl, { - content: { parts: [{ text }] }, - taskType: "RETRIEVAL_QUERY", - }), + const payload = await fetchGeminiEmbeddingPayload({ + client, + endpoint: embedUrl, + body: buildGeminiTextEmbeddingRequest({ + text, + taskType: options.taskType ?? "RETRIEVAL_QUERY", + outputDimensionality: isV2 ? outputDimensionality : undefined, + }), }); - return payload.embedding?.values ?? []; + return sanitizeAndNormalizeEmbedding(payload.embedding?.values ?? []); + }; + + const embedBatchInputs = async (inputs: EmbeddingInput[]): Promise => { + if (inputs.length === 0) { + return []; + } + const payload = await fetchGeminiEmbeddingPayload({ + client, + endpoint: batchUrl, + body: { + requests: inputs.map((input) => + buildGeminiEmbeddingRequest({ + input, + modelPath: client.modelPath, + taskType: options.taskType ?? "RETRIEVAL_DOCUMENT", + outputDimensionality: isV2 ? 
outputDimensionality : undefined, + }), + ), + }, + }); + const embeddings = Array.isArray(payload.embeddings) ? payload.embeddings : []; + return inputs.map((_, index) => sanitizeAndNormalizeEmbedding(embeddings[index]?.values ?? [])); }; const embedBatch = async (texts: string[]): Promise => { - if (texts.length === 0) { - return []; - } - const requests = texts.map((text) => ({ - model: client.modelPath, - content: { parts: [{ text }] }, - taskType: "RETRIEVAL_DOCUMENT", - })); - const payload = await executeWithApiKeyRotation({ - provider: "google", - apiKeys: client.apiKeys, - execute: (apiKey) => - fetchWithGeminiAuth(apiKey, batchUrl, { - requests, - }), - }); - const embeddings = Array.isArray(payload.embeddings) ? payload.embeddings : []; - return texts.map((_, index) => embeddings[index]?.values ?? []); + return await embedBatchInputs( + texts.map((text) => ({ + text, + })), + ); }; return { @@ -146,6 +277,7 @@ export async function createGeminiEmbeddingProvider( maxInputTokens: GEMINI_MAX_INPUT_TOKENS[client.model], embedQuery, embedBatch, + embedBatchInputs, }, client, }; @@ -183,13 +315,18 @@ export async function resolveGeminiEmbeddingClient( }); const model = normalizeGeminiModel(options.model); const modelPath = buildGeminiModelPath(model); + const outputDimensionality = resolveGeminiOutputDimensionality( + model, + options.outputDimensionality, + ); debugEmbeddingsLog("memory embeddings: gemini client", { rawBaseUrl, baseUrl, model, modelPath, + outputDimensionality, embedEndpoint: `${baseUrl}/${modelPath}:embedContent`, batchEndpoint: `${baseUrl}/${modelPath}:batchEmbedContents`, }); - return { baseUrl, headers, ssrfPolicy, model, modelPath, apiKeys }; + return { baseUrl, headers, ssrfPolicy, model, modelPath, apiKeys, outputDimensionality }; } diff --git a/src/memory/embeddings-ollama.ts b/src/memory/embeddings-ollama.ts index 4c9326df874..7bd2bcf7428 100644 --- a/src/memory/embeddings-ollama.ts +++ b/src/memory/embeddings-ollama.ts @@ -1,7 +1,9 
@@ import { resolveEnvApiKey } from "../agents/model-auth.js"; +import { resolveOllamaApiBase } from "../agents/ollama-models.js"; import { formatErrorMessage } from "../infra/errors.js"; import type { SsrFPolicy } from "../infra/net/ssrf.js"; import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js"; +import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js"; import { normalizeEmbeddingModelWithPrefixes } from "./embeddings-model-normalize.js"; import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js"; import { buildRemoteBaseUrlPolicy, withRemoteHttpResponse } from "./remote-http.js"; @@ -17,16 +19,6 @@ export type OllamaEmbeddingClient = { type OllamaEmbeddingClientConfig = Omit; export const DEFAULT_OLLAMA_EMBEDDING_MODEL = "nomic-embed-text"; -const DEFAULT_OLLAMA_BASE_URL = "http://127.0.0.1:11434"; - -function sanitizeAndNormalizeEmbedding(vec: number[]): number[] { - const sanitized = vec.map((value) => (Number.isFinite(value) ? 
value : 0)); - const magnitude = Math.sqrt(sanitized.reduce((sum, value) => sum + value * value, 0)); - if (magnitude < 1e-10) { - return sanitized; - } - return sanitized.map((value) => value / magnitude); -} function normalizeOllamaModel(model: string): string { return normalizeEmbeddingModelWithPrefixes({ @@ -36,14 +28,6 @@ function normalizeOllamaModel(model: string): string { }); } -function resolveOllamaApiBase(configuredBaseUrl?: string): string { - if (!configuredBaseUrl) { - return DEFAULT_OLLAMA_BASE_URL; - } - const trimmed = configuredBaseUrl.replace(/\/+$/, ""); - return trimmed.replace(/\/v1$/i, ""); -} - function resolveOllamaApiKey(options: EmbeddingProviderOptions): string | undefined { const remoteApiKey = resolveMemorySecretInputString({ value: options.remote?.apiKey, diff --git a/src/memory/embeddings.ts b/src/memory/embeddings.ts index ca6b4046e2c..f9cc76eb19d 100644 --- a/src/memory/embeddings.ts +++ b/src/memory/embeddings.ts @@ -4,7 +4,13 @@ import type { OpenClawConfig } from "../config/config.js"; import type { SecretInput } from "../config/types.secrets.js"; import { formatErrorMessage } from "../infra/errors.js"; import { resolveUserPath } from "../utils.js"; -import { createGeminiEmbeddingProvider, type GeminiEmbeddingClient } from "./embeddings-gemini.js"; +import type { EmbeddingInput } from "./embedding-inputs.js"; +import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js"; +import { + createGeminiEmbeddingProvider, + type GeminiEmbeddingClient, + type GeminiTaskType, +} from "./embeddings-gemini.js"; import { createMistralEmbeddingProvider, type MistralEmbeddingClient, @@ -14,15 +20,6 @@ import { createOpenAiEmbeddingProvider, type OpenAiEmbeddingClient } from "./emb import { createVoyageEmbeddingProvider, type VoyageEmbeddingClient } from "./embeddings-voyage.js"; import { importNodeLlamaCpp } from "./node-llama.js"; -function sanitizeAndNormalizeEmbedding(vec: number[]): number[] { - const sanitized = vec.map((value) 
=> (Number.isFinite(value) ? value : 0)); - const magnitude = Math.sqrt(sanitized.reduce((sum, value) => sum + value * value, 0)); - if (magnitude < 1e-10) { - return sanitized; - } - return sanitized.map((value) => value / magnitude); -} - export type { GeminiEmbeddingClient } from "./embeddings-gemini.js"; export type { MistralEmbeddingClient } from "./embeddings-mistral.js"; export type { OpenAiEmbeddingClient } from "./embeddings-openai.js"; @@ -35,6 +32,7 @@ export type EmbeddingProvider = { maxInputTokens?: number; embedQuery: (text: string) => Promise; embedBatch: (texts: string[]) => Promise; + embedBatchInputs?: (inputs: EmbeddingInput[]) => Promise; }; export type EmbeddingProviderId = "openai" | "local" | "gemini" | "voyage" | "mistral" | "ollama"; @@ -74,6 +72,10 @@ export type EmbeddingProviderOptions = { modelPath?: string; modelCacheDir?: string; }; + /** Gemini embedding-2: output vector dimensions (768, 1536, or 3072). */ + outputDimensionality?: number; + /** Gemini: override the default task type sent with embedding requests. 
*/ + taskType?: GeminiTaskType; }; export const DEFAULT_LOCAL_MODEL = diff --git a/src/memory/index.test.ts b/src/memory/index.test.ts index 43ebcca58c2..23371056b18 100644 --- a/src/memory/index.test.ts +++ b/src/memory/index.test.ts @@ -1,3 +1,4 @@ +import { randomUUID } from "node:crypto"; import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; @@ -6,27 +7,86 @@ import { getMemorySearchManager, type MemoryIndexManager } from "./index.js"; import "./test-runtime-mocks.js"; let embedBatchCalls = 0; +let embedBatchInputCalls = 0; +let providerCalls: Array<{ provider?: string; model?: string; outputDimensionality?: number }> = []; vi.mock("./embeddings.js", () => { const embedText = (text: string) => { const lower = text.toLowerCase(); const alpha = lower.split("alpha").length - 1; const beta = lower.split("beta").length - 1; - return [alpha, beta]; + const image = lower.split("image").length - 1; + const audio = lower.split("audio").length - 1; + return [alpha, beta, image, audio]; }; return { - createEmbeddingProvider: async (options: { model?: string }) => ({ - requestedProvider: "openai", - provider: { - id: "mock", - model: options.model ?? "mock-embed", - embedQuery: async (text: string) => embedText(text), - embedBatch: async (texts: string[]) => { - embedBatchCalls += 1; - return texts.map(embedText); + createEmbeddingProvider: async (options: { + provider?: string; + model?: string; + outputDimensionality?: number; + }) => { + providerCalls.push({ + provider: options.provider, + model: options.model, + outputDimensionality: options.outputDimensionality, + }); + const providerId = options.provider === "gemini" ? "gemini" : "mock"; + const model = options.model ?? "mock-embed"; + return { + requestedProvider: options.provider ?? 
"openai", + provider: { + id: providerId, + model, + embedQuery: async (text: string) => embedText(text), + embedBatch: async (texts: string[]) => { + embedBatchCalls += 1; + return texts.map(embedText); + }, + ...(providerId === "gemini" + ? { + embedBatchInputs: async ( + inputs: Array<{ + text: string; + parts?: Array< + | { type: "text"; text: string } + | { type: "inline-data"; mimeType: string; data: string } + >; + }>, + ) => { + embedBatchInputCalls += 1; + return inputs.map((input) => { + const inlineData = input.parts?.find((part) => part.type === "inline-data"); + if (inlineData?.type === "inline-data" && inlineData.data.length > 9000) { + throw new Error("payload too large"); + } + const mimeType = + inlineData?.type === "inline-data" ? inlineData.mimeType : undefined; + if (mimeType?.startsWith("image/")) { + return [0, 0, 1, 0]; + } + if (mimeType?.startsWith("audio/")) { + return [0, 0, 0, 1]; + } + return embedText(input.text); + }); + }, + } + : {}), }, - }, - }), + ...(providerId === "gemini" + ? 
{ + gemini: { + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + headers: {}, + model, + modelPath: `models/${model}`, + apiKeys: ["test-key"], + outputDimensionality: options.outputDimensionality, + }, + } + : {}), + }; + }, }; }); @@ -38,6 +98,7 @@ describe("memory index", () => { let indexVectorPath = ""; let indexMainPath = ""; let indexExtraPath = ""; + let indexMultimodalPath = ""; let indexStatusPath = ""; let indexSourceChangePath = ""; let indexModelPath = ""; @@ -71,6 +132,7 @@ describe("memory index", () => { indexMainPath = path.join(workspaceDir, "index-main.sqlite"); indexVectorPath = path.join(workspaceDir, "index-vector.sqlite"); indexExtraPath = path.join(workspaceDir, "index-extra.sqlite"); + indexMultimodalPath = path.join(workspaceDir, "index-multimodal.sqlite"); indexStatusPath = path.join(workspaceDir, "index-status.sqlite"); indexSourceChangePath = path.join(workspaceDir, "index-source-change.sqlite"); indexModelPath = path.join(workspaceDir, "index-model-change.sqlite"); @@ -93,6 +155,8 @@ describe("memory index", () => { // Keep atomic reindex tests on the safe path. vi.stubEnv("OPENCLAW_TEST_MEMORY_UNSAFE_REINDEX", "1"); embedBatchCalls = 0; + embedBatchInputCalls = 0; + providerCalls = []; // Keep the workspace stable to allow manager reuse across tests. await fs.mkdir(memoryDir, { recursive: true }); @@ -119,7 +183,14 @@ describe("memory index", () => { extraPaths?: string[]; sources?: Array<"memory" | "sessions">; sessionMemory?: boolean; + provider?: "openai" | "gemini"; model?: string; + outputDimensionality?: number; + multimodal?: { + enabled?: boolean; + modalities?: Array<"image" | "audio" | "all">; + maxFileBytes?: number; + }; vectorEnabled?: boolean; cacheEnabled?: boolean; minScore?: number; @@ -130,8 +201,9 @@ describe("memory index", () => { defaults: { workspace: workspaceDir, memorySearch: { - provider: "openai", + provider: params.provider ?? "openai", model: params.model ?? 
"mock-embed", + outputDimensionality: params.outputDimensionality, store: { path: params.storePath, vector: { enabled: params.vectorEnabled ?? false } }, // Perf: keep test indexes to a single chunk to reduce sqlite work. chunking: { tokens: 4000, overlap: 0 }, @@ -142,6 +214,7 @@ describe("memory index", () => { }, cache: params.cacheEnabled ? { enabled: true } : undefined, extraPaths: params.extraPaths, + multimodal: params.multimodal, sources: params.sources, experimental: { sessionMemory: params.sessionMemory ?? false }, }, @@ -217,6 +290,103 @@ describe("memory index", () => { ); }); + it("indexes multimodal image and audio files from extra paths with Gemini structured inputs", async () => { + const mediaDir = path.join(workspaceDir, "media-memory"); + await fs.mkdir(mediaDir, { recursive: true }); + await fs.writeFile(path.join(mediaDir, "diagram.png"), Buffer.from("png")); + await fs.writeFile(path.join(mediaDir, "meeting.wav"), Buffer.from("wav")); + + const cfg = createCfg({ + storePath: indexMultimodalPath, + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + multimodal: { enabled: true, modalities: ["image", "audio"] }, + }); + const manager = await getPersistentManager(cfg); + await manager.sync({ reason: "test" }); + + expect(embedBatchInputCalls).toBeGreaterThan(0); + + const imageResults = await manager.search("image"); + expect(imageResults.some((result) => result.path.endsWith("diagram.png"))).toBe(true); + + const audioResults = await manager.search("audio"); + expect(audioResults.some((result) => result.path.endsWith("meeting.wav"))).toBe(true); + }); + + it("skips oversized multimodal inputs without aborting sync", async () => { + const mediaDir = path.join(workspaceDir, "media-oversize"); + await fs.mkdir(mediaDir, { recursive: true }); + await fs.writeFile(path.join(mediaDir, "huge.png"), Buffer.alloc(7000, 1)); + + const cfg = createCfg({ + storePath: path.join(workspaceDir, 
`index-oversize-${randomUUID()}.sqlite`), + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + multimodal: { enabled: true, modalities: ["image"] }, + }); + const manager = requireManager(await getMemorySearchManager({ cfg, agentId: "main" })); + await manager.sync({ reason: "test" }); + + expect(embedBatchInputCalls).toBeGreaterThan(0); + const imageResults = await manager.search("image"); + expect(imageResults.some((result) => result.path.endsWith("huge.png"))).toBe(false); + + const alphaResults = await manager.search("alpha"); + expect(alphaResults.some((result) => result.path.endsWith("memory/2026-01-12.md"))).toBe(true); + + await manager.close?.(); + }); + + it("reindexes a multimodal file after a transient mid-sync disappearance", async () => { + const mediaDir = path.join(workspaceDir, "media-race"); + const imagePath = path.join(mediaDir, "diagram.png"); + await fs.mkdir(mediaDir, { recursive: true }); + await fs.writeFile(imagePath, Buffer.from("png")); + + const cfg = createCfg({ + storePath: path.join(workspaceDir, `index-race-${randomUUID()}.sqlite`), + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + multimodal: { enabled: true, modalities: ["image"] }, + }); + const manager = requireManager(await getMemorySearchManager({ cfg, agentId: "main" })); + const realReadFile = fs.readFile.bind(fs); + let imageReads = 0; + const readSpy = vi.spyOn(fs, "readFile").mockImplementation(async (...args) => { + const [targetPath] = args; + if (typeof targetPath === "string" && targetPath === imagePath) { + imageReads += 1; + if (imageReads === 2) { + const err = Object.assign( + new Error(`ENOENT: no such file or directory, open '${imagePath}'`), + { + code: "ENOENT", + }, + ) as NodeJS.ErrnoException; + throw err; + } + } + return await realReadFile(...args); + }); + + await manager.sync({ reason: "test" }); + readSpy.mockRestore(); + + const callsAfterFirstSync = embedBatchInputCalls; + 
(manager as unknown as { dirty: boolean }).dirty = true; + await manager.sync({ reason: "test" }); + + expect(embedBatchInputCalls).toBeGreaterThan(callsAfterFirstSync); + const results = await manager.search("image"); + expect(results.some((result) => result.path.endsWith("diagram.png"))).toBe(true); + + await manager.close?.(); + }); + it("keeps dirty false in status-only manager after prior indexing", async () => { const cfg = createCfg({ storePath: indexStatusPath }); @@ -342,6 +512,143 @@ describe("memory index", () => { await secondManager.close?.(); }); + it("passes Gemini outputDimensionality from config into the provider", async () => { + const cfg = createCfg({ + storePath: indexMainPath, + provider: "gemini", + model: "gemini-embedding-2-preview", + outputDimensionality: 1536, + }); + + const result = await getMemorySearchManager({ cfg, agentId: "main" }); + const manager = requireManager(result); + + expect( + providerCalls.some( + (call) => + call.provider === "gemini" && + call.model === "gemini-embedding-2-preview" && + call.outputDimensionality === 1536, + ), + ).toBe(true); + await manager.close?.(); + }); + + it("reindexes when Gemini outputDimensionality changes", async () => { + const base = createCfg({ + storePath: indexModelPath, + provider: "gemini", + model: "gemini-embedding-2-preview", + outputDimensionality: 3072, + }); + const baseAgents = base.agents!; + const baseDefaults = baseAgents.defaults!; + const baseMemorySearch = baseDefaults.memorySearch!; + + const first = await getMemorySearchManager({ cfg: base, agentId: "main" }); + const firstManager = requireManager(first); + await firstManager.sync?.({ reason: "test" }); + const callsAfterFirstSync = embedBatchCalls; + await firstManager.close?.(); + + const second = await getMemorySearchManager({ + cfg: { + ...base, + agents: { + ...baseAgents, + defaults: { + ...baseDefaults, + memorySearch: { + ...baseMemorySearch, + outputDimensionality: 768, + }, + }, + }, + }, + agentId: "main", 
+ }); + const secondManager = requireManager(second); + await secondManager.sync?.({ reason: "test" }); + expect(embedBatchCalls).toBeGreaterThan(callsAfterFirstSync); + await secondManager.close?.(); + }); + + it("reindexes when extraPaths change", async () => { + const storePath = path.join(workspaceDir, `index-scope-extra-${randomUUID()}.sqlite`); + const firstExtraDir = path.join(workspaceDir, "scope-extra-a"); + const secondExtraDir = path.join(workspaceDir, "scope-extra-b"); + await fs.rm(firstExtraDir, { recursive: true, force: true }); + await fs.rm(secondExtraDir, { recursive: true, force: true }); + await fs.mkdir(firstExtraDir, { recursive: true }); + await fs.mkdir(secondExtraDir, { recursive: true }); + await fs.writeFile(path.join(firstExtraDir, "a.md"), "alpha only"); + await fs.writeFile(path.join(secondExtraDir, "b.md"), "beta only"); + + const first = await getMemorySearchManager({ + cfg: createCfg({ + storePath, + extraPaths: [firstExtraDir], + }), + agentId: "main", + }); + const firstManager = requireManager(first); + await firstManager.sync?.({ reason: "test" }); + await firstManager.close?.(); + + const second = await getMemorySearchManager({ + cfg: createCfg({ + storePath, + extraPaths: [secondExtraDir], + }), + agentId: "main", + }); + const secondManager = requireManager(second); + await secondManager.sync?.({ reason: "test" }); + const results = await secondManager.search("beta"); + expect(results.some((result) => result.path.endsWith("scope-extra-b/b.md"))).toBe(true); + expect(results.some((result) => result.path.endsWith("scope-extra-a/a.md"))).toBe(false); + await secondManager.close?.(); + }); + + it("reindexes when multimodal settings change", async () => { + const storePath = path.join(workspaceDir, `index-scope-multimodal-${randomUUID()}.sqlite`); + const mediaDir = path.join(workspaceDir, "scope-media"); + await fs.rm(mediaDir, { recursive: true, force: true }); + await fs.mkdir(mediaDir, { recursive: true }); + await 
fs.writeFile(path.join(mediaDir, "diagram.png"), Buffer.from("png")); + + const first = await getMemorySearchManager({ + cfg: createCfg({ + storePath, + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + }), + agentId: "main", + }); + const firstManager = requireManager(first); + await firstManager.sync?.({ reason: "test" }); + const multimodalCallsAfterFirstSync = embedBatchInputCalls; + await firstManager.close?.(); + + const second = await getMemorySearchManager({ + cfg: createCfg({ + storePath, + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: [mediaDir], + multimodal: { enabled: true, modalities: ["image"] }, + }), + agentId: "main", + }); + const secondManager = requireManager(second); + await secondManager.sync?.({ reason: "test" }); + expect(embedBatchInputCalls).toBeGreaterThan(multimodalCallsAfterFirstSync); + const results = await secondManager.search("image"); + expect(results.some((result) => result.path.endsWith("scope-media/diagram.png"))).toBe(true); + await secondManager.close?.(); + }); + it("reuses cached embeddings on forced reindex", async () => { const cfg = createCfg({ storePath: indexMainPath, cacheEnabled: true }); const manager = await getPersistentManager(cfg); diff --git a/src/memory/internal.test.ts b/src/memory/internal.test.ts index 0f17843a88d..d18120b413a 100644 --- a/src/memory/internal.test.ts +++ b/src/memory/internal.test.ts @@ -3,12 +3,17 @@ import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it } from "vitest"; import { + buildMultimodalChunkForIndexing, buildFileEntry, chunkMarkdown, listMemoryFiles, normalizeExtraMemoryPaths, remapChunkLines, } from "./internal.js"; +import { + DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES, + type MemoryMultimodalSettings, +} from "./multimodal.js"; function setupTempDirLifecycle(prefix: string): () => string { let tmpDir = ""; @@ -38,6 +43,11 @@ describe("normalizeExtraMemoryPaths", 
() => { describe("listMemoryFiles", () => { const getTmpDir = setupTempDirLifecycle("memory-test-"); + const multimodal: MemoryMultimodalSettings = { + enabled: true, + modalities: ["image", "audio"], + maxFileBytes: DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES, + }; it("includes files from additional paths (directory)", async () => { const tmpDir = getTmpDir(); @@ -131,10 +141,29 @@ describe("listMemoryFiles", () => { const memoryMatches = files.filter((file) => file.endsWith("MEMORY.md")); expect(memoryMatches).toHaveLength(1); }); + + it("includes image and audio files from extra paths when multimodal is enabled", async () => { + const tmpDir = getTmpDir(); + const extraDir = path.join(tmpDir, "media"); + await fs.mkdir(extraDir, { recursive: true }); + await fs.writeFile(path.join(extraDir, "diagram.png"), Buffer.from("png")); + await fs.writeFile(path.join(extraDir, "note.wav"), Buffer.from("wav")); + await fs.writeFile(path.join(extraDir, "ignore.bin"), Buffer.from("bin")); + + const files = await listMemoryFiles(tmpDir, [extraDir], multimodal); + expect(files.some((file) => file.endsWith("diagram.png"))).toBe(true); + expect(files.some((file) => file.endsWith("note.wav"))).toBe(true); + expect(files.some((file) => file.endsWith("ignore.bin"))).toBe(false); + }); }); describe("buildFileEntry", () => { const getTmpDir = setupTempDirLifecycle("memory-build-entry-"); + const multimodal: MemoryMultimodalSettings = { + enabled: true, + modalities: ["image", "audio"], + maxFileBytes: DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES, + }; it("returns null when the file disappears before reading", async () => { const tmpDir = getTmpDir(); @@ -154,6 +183,59 @@ describe("buildFileEntry", () => { expect(entry?.path).toBe("note.md"); expect(entry?.size).toBeGreaterThan(0); }); + + it("returns multimodal metadata for eligible image files", async () => { + const tmpDir = getTmpDir(); + const target = path.join(tmpDir, "diagram.png"); + await fs.writeFile(target, Buffer.from("png")); + 
+ const entry = await buildFileEntry(target, tmpDir, multimodal); + + expect(entry).toMatchObject({ + path: "diagram.png", + kind: "multimodal", + modality: "image", + mimeType: "image/png", + contentText: "Image file: diagram.png", + }); + }); + + it("builds a multimodal chunk lazily for indexing", async () => { + const tmpDir = getTmpDir(); + const target = path.join(tmpDir, "diagram.png"); + await fs.writeFile(target, Buffer.from("png")); + + const entry = await buildFileEntry(target, tmpDir, multimodal); + const built = await buildMultimodalChunkForIndexing(entry!); + + expect(built?.chunk.embeddingInput?.parts).toEqual([ + { type: "text", text: "Image file: diagram.png" }, + expect.objectContaining({ type: "inline-data", mimeType: "image/png" }), + ]); + expect(built?.structuredInputBytes).toBeGreaterThan(0); + }); + + it("skips lazy multimodal indexing when the file grows after discovery", async () => { + const tmpDir = getTmpDir(); + const target = path.join(tmpDir, "diagram.png"); + await fs.writeFile(target, Buffer.from("png")); + + const entry = await buildFileEntry(target, tmpDir, multimodal); + await fs.writeFile(target, Buffer.alloc(entry!.size + 32, 1)); + + await expect(buildMultimodalChunkForIndexing(entry!)).resolves.toBeNull(); + }); + + it("skips lazy multimodal indexing when file bytes change after discovery", async () => { + const tmpDir = getTmpDir(); + const target = path.join(tmpDir, "diagram.png"); + await fs.writeFile(target, Buffer.from("png")); + + const entry = await buildFileEntry(target, tmpDir, multimodal); + await fs.writeFile(target, Buffer.from("gif")); + + await expect(buildMultimodalChunkForIndexing(entry!)).resolves.toBeNull(); + }); }); describe("chunkMarkdown", () => { diff --git a/src/memory/internal.ts b/src/memory/internal.ts index d39e355d2c0..d1d7e9c2e96 100644 --- a/src/memory/internal.ts +++ b/src/memory/internal.ts @@ -2,8 +2,17 @@ import crypto from "node:crypto"; import fsSync from "node:fs"; import fs from 
"node:fs/promises"; import path from "node:path"; +import { detectMime } from "../media/mime.js"; import { runTasksWithConcurrency } from "../utils/run-with-concurrency.js"; +import { estimateStructuredEmbeddingInputBytes } from "./embedding-input-limits.js"; +import { buildTextEmbeddingInput, type EmbeddingInput } from "./embedding-inputs.js"; import { isFileMissingError } from "./fs-utils.js"; +import { + buildMemoryMultimodalLabel, + classifyMemoryMultimodalPath, + type MemoryMultimodalModality, + type MemoryMultimodalSettings, +} from "./multimodal.js"; export type MemoryFileEntry = { path: string; @@ -11,6 +20,11 @@ export type MemoryFileEntry = { mtimeMs: number; size: number; hash: string; + dataHash?: string; + kind?: "markdown" | "multimodal"; + contentText?: string; + modality?: MemoryMultimodalModality; + mimeType?: string; }; export type MemoryChunk = { @@ -18,6 +32,18 @@ export type MemoryChunk = { endLine: number; text: string; hash: string; + embeddingInput?: EmbeddingInput; +}; + +export type MultimodalMemoryChunk = { + chunk: MemoryChunk; + structuredInputBytes: number; +}; + +const DISABLED_MULTIMODAL_SETTINGS: MemoryMultimodalSettings = { + enabled: false, + modalities: [], + maxFileBytes: 0, }; export function ensureDir(dir: string): string { @@ -56,7 +82,16 @@ export function isMemoryPath(relPath: string): boolean { return normalized.startsWith("memory/"); } -async function walkDir(dir: string, files: string[]) { +function isAllowedMemoryFilePath(filePath: string, multimodal?: MemoryMultimodalSettings): boolean { + if (filePath.endsWith(".md")) { + return true; + } + return ( + classifyMemoryMultimodalPath(filePath, multimodal ?? 
DISABLED_MULTIMODAL_SETTINGS) !== null + ); +} + +async function walkDir(dir: string, files: string[], multimodal?: MemoryMultimodalSettings) { const entries = await fs.readdir(dir, { withFileTypes: true }); for (const entry of entries) { const full = path.join(dir, entry.name); @@ -64,13 +99,13 @@ async function walkDir(dir: string, files: string[]) { continue; } if (entry.isDirectory()) { - await walkDir(full, files); + await walkDir(full, files, multimodal); continue; } if (!entry.isFile()) { continue; } - if (!entry.name.endsWith(".md")) { + if (!isAllowedMemoryFilePath(full, multimodal)) { continue; } files.push(full); @@ -80,6 +115,7 @@ async function walkDir(dir: string, files: string[]) { export async function listMemoryFiles( workspaceDir: string, extraPaths?: string[], + multimodal?: MemoryMultimodalSettings, ): Promise { const result: string[] = []; const memoryFile = path.join(workspaceDir, "MEMORY.md"); @@ -117,10 +153,10 @@ export async function listMemoryFiles( continue; } if (stat.isDirectory()) { - await walkDir(inputPath, result); + await walkDir(inputPath, result, multimodal); continue; } - if (stat.isFile() && inputPath.endsWith(".md")) { + if (stat.isFile() && isAllowedMemoryFilePath(inputPath, multimodal)) { result.push(inputPath); } } catch {} @@ -152,6 +188,7 @@ export function hashText(value: string): string { export async function buildFileEntry( absPath: string, workspaceDir: string, + multimodal?: MemoryMultimodalSettings, ): Promise { let stat; try { @@ -162,6 +199,49 @@ export async function buildFileEntry( } throw err; } + const normalizedPath = path.relative(workspaceDir, absPath).replace(/\\/g, "/"); + const multimodalSettings = multimodal ?? 
DISABLED_MULTIMODAL_SETTINGS; + const modality = classifyMemoryMultimodalPath(absPath, multimodalSettings); + if (modality) { + if (stat.size > multimodalSettings.maxFileBytes) { + return null; + } + let buffer: Buffer; + try { + buffer = await fs.readFile(absPath); + } catch (err) { + if (isFileMissingError(err)) { + return null; + } + throw err; + } + const mimeType = await detectMime({ buffer: buffer.subarray(0, 512), filePath: absPath }); + if (!mimeType || !mimeType.startsWith(`${modality}/`)) { + return null; + } + const contentText = buildMemoryMultimodalLabel(modality, normalizedPath); + const dataHash = crypto.createHash("sha256").update(buffer).digest("hex"); + const chunkHash = hashText( + JSON.stringify({ + path: normalizedPath, + contentText, + mimeType, + dataHash, + }), + ); + return { + path: normalizedPath, + absPath, + mtimeMs: stat.mtimeMs, + size: stat.size, + hash: chunkHash, + dataHash, + kind: "multimodal", + contentText, + modality, + mimeType, + }; + } let content: string; try { content = await fs.readFile(absPath, "utf-8"); @@ -173,11 +253,81 @@ export async function buildFileEntry( } const hash = hashText(content); return { - path: path.relative(workspaceDir, absPath).replace(/\\/g, "/"), + path: normalizedPath, absPath, mtimeMs: stat.mtimeMs, size: stat.size, hash, + kind: "markdown", + }; +} + +async function loadMultimodalEmbeddingInput( + entry: Pick< + MemoryFileEntry, + "absPath" | "contentText" | "mimeType" | "kind" | "size" | "dataHash" + >, +): Promise { + if (entry.kind !== "multimodal" || !entry.contentText || !entry.mimeType) { + return null; + } + let stat; + try { + stat = await fs.stat(entry.absPath); + } catch (err) { + if (isFileMissingError(err)) { + return null; + } + throw err; + } + if (stat.size !== entry.size) { + return null; + } + let buffer: Buffer; + try { + buffer = await fs.readFile(entry.absPath); + } catch (err) { + if (isFileMissingError(err)) { + return null; + } + throw err; + } + const dataHash = 
crypto.createHash("sha256").update(buffer).digest("hex"); + if (entry.dataHash && entry.dataHash !== dataHash) { + return null; + } + return { + text: entry.contentText, + parts: [ + { type: "text", text: entry.contentText }, + { + type: "inline-data", + mimeType: entry.mimeType, + data: buffer.toString("base64"), + }, + ], + }; +} + +export async function buildMultimodalChunkForIndexing( + entry: Pick< + MemoryFileEntry, + "absPath" | "contentText" | "mimeType" | "kind" | "hash" | "size" | "dataHash" + >, +): Promise { + const embeddingInput = await loadMultimodalEmbeddingInput(entry); + if (!embeddingInput) { + return null; + } + return { + chunk: { + startLine: 1, + endLine: 1, + text: entry.contentText ?? embeddingInput.text, + hash: entry.hash, + embeddingInput, + }, + structuredInputBytes: estimateStructuredEmbeddingInputBytes(embeddingInput), }; } @@ -213,6 +363,7 @@ export function chunkMarkdown( endLine, text, hash: hashText(text), + embeddingInput: buildTextEmbeddingInput(text), }); }; diff --git a/src/memory/manager-embedding-ops.ts b/src/memory/manager-embedding-ops.ts index 965058c8a3b..49171d809cb 100644 --- a/src/memory/manager-embedding-ops.ts +++ b/src/memory/manager-embedding-ops.ts @@ -8,8 +8,14 @@ import { } from "./batch-openai.js"; import { type VoyageBatchRequest, runVoyageEmbeddingBatches } from "./batch-voyage.js"; import { enforceEmbeddingMaxInputTokens } from "./embedding-chunk-limits.js"; -import { estimateUtf8Bytes } from "./embedding-input-limits.js"; import { + estimateStructuredEmbeddingInputBytes, + estimateUtf8Bytes, +} from "./embedding-input-limits.js"; +import { type EmbeddingInput, hasNonTextEmbeddingParts } from "./embedding-inputs.js"; +import { buildGeminiEmbeddingRequest } from "./embeddings-gemini.js"; +import { + buildMultimodalChunkForIndexing, chunkMarkdown, hashText, parseEmbedding, @@ -52,7 +58,9 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { let currentTokens = 0; for (const chunk 
of chunks) { - const estimate = estimateUtf8Bytes(chunk.text); + const estimate = chunk.embeddingInput + ? estimateStructuredEmbeddingInputBytes(chunk.embeddingInput) + : estimateUtf8Bytes(chunk.text); const wouldExceed = current.length > 0 && currentTokens + estimate > EMBEDDING_BATCH_MAX_TOKENS; if (wouldExceed) { @@ -187,9 +195,22 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { const missingChunks = missing.map((m) => m.chunk); const batches = this.buildEmbeddingBatches(missingChunks); const toCache: Array<{ hash: string; embedding: number[] }> = []; + const provider = this.provider; + if (!provider) { + throw new Error("Cannot embed batch in FTS-only mode (no embedding provider)"); + } let cursor = 0; for (const batch of batches) { - const batchEmbeddings = await this.embedBatchWithRetry(batch.map((chunk) => chunk.text)); + const inputs = batch.map((chunk) => chunk.embeddingInput ?? { text: chunk.text }); + const hasStructuredInputs = inputs.some((input) => hasNonTextEmbeddingParts(input)); + if (hasStructuredInputs && !provider.embedBatchInputs) { + throw new Error( + `Embedding provider "${provider.id}" does not support multimodal memory inputs.`, + ); + } + const batchEmbeddings = hasStructuredInputs + ? await this.embedBatchInputsWithRetry(inputs) + : await this.embedBatchWithRetry(batch.map((chunk) => chunk.text)); for (let i = 0; i < batch.length; i += 1) { const item = missing[cursor + i]; const embedding = batchEmbeddings[i] ?? 
[]; @@ -236,6 +257,7 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { provider: "gemini", baseUrl: this.gemini.baseUrl, model: this.gemini.model, + outputDimensionality: this.gemini.outputDimensionality, headers: entries, }), ); @@ -474,6 +496,9 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { source: MemorySource, ): Promise { const gemini = this.gemini; + if (chunks.some((chunk) => hasNonTextEmbeddingParts(chunk.embeddingInput))) { + return await this.embedChunksInBatches(chunks); + } return await this.embedChunksWithProviderBatch({ chunks, entry, @@ -481,8 +506,12 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { provider: "gemini", enabled: Boolean(gemini), buildRequest: (chunk) => ({ - content: { parts: [{ text: chunk.text }] }, - taskType: "RETRIEVAL_DOCUMENT", + request: buildGeminiEmbeddingRequest({ + input: chunk.embeddingInput ?? { text: chunk.text }, + taskType: "RETRIEVAL_DOCUMENT", + modelPath: this.gemini?.modelPath, + outputDimensionality: this.gemini?.outputDimensionality, + }), }), runBatch: async (runnerOptions) => await runGeminiEmbeddingBatches({ @@ -531,6 +560,45 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { } } + protected async embedBatchInputsWithRetry(inputs: EmbeddingInput[]): Promise { + if (inputs.length === 0) { + return []; + } + if (!this.provider?.embedBatchInputs) { + return await this.embedBatchWithRetry(inputs.map((input) => input.text)); + } + let attempt = 0; + let delayMs = EMBEDDING_RETRY_BASE_DELAY_MS; + while (true) { + try { + const timeoutMs = this.resolveEmbeddingTimeout("batch"); + log.debug("memory embeddings: structured batch start", { + provider: this.provider.id, + items: inputs.length, + timeoutMs, + }); + return await this.withTimeout( + this.provider.embedBatchInputs(inputs), + timeoutMs, + `memory embeddings batch timed out after ${Math.round(timeoutMs / 1000)}s`, + ); + } catch 
(err) { + const message = err instanceof Error ? err.message : String(err); + if (!this.isRetryableEmbeddingError(message) || attempt >= EMBEDDING_RETRY_MAX_ATTEMPTS) { + throw err; + } + const waitMs = Math.min( + EMBEDDING_RETRY_MAX_DELAY_MS, + Math.round(delayMs * (1 + Math.random() * 0.2)), + ); + log.warn(`memory embeddings rate limited; retrying structured batch in ${waitMs}ms`); + await new Promise((resolve) => setTimeout(resolve, waitMs)); + delayMs *= 2; + attempt += 1; + } + } + } + private isRetryableEmbeddingError(message: string): boolean { return /(rate[_ ]limit|too many requests|429|resource has been exhausted|5\d\d|cloudflare|tokens per day)/i.test( message, @@ -690,6 +758,49 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { return this.batch.enabled ? this.batch.concurrency : EMBEDDING_INDEX_CONCURRENCY; } + private clearIndexedFileData(pathname: string, source: MemorySource): void { + if (this.vector.enabled) { + try { + this.db + .prepare( + `DELETE FROM ${VECTOR_TABLE} WHERE id IN (SELECT id FROM chunks WHERE path = ? AND source = ?)`, + ) + .run(pathname, source); + } catch {} + } + if (this.fts.enabled && this.fts.available && this.provider) { + try { + this.db + .prepare(`DELETE FROM ${FTS_TABLE} WHERE path = ? AND source = ? AND model = ?`) + .run(pathname, source, this.provider.model); + } catch {} + } + this.db.prepare(`DELETE FROM chunks WHERE path = ? AND source = ?`).run(pathname, source); + } + + private upsertFileRecord(entry: MemoryFileEntry | SessionFileEntry, source: MemorySource): void { + this.db + .prepare( + `INSERT INTO files (path, source, hash, mtime, size) VALUES (?, ?, ?, ?, ?) 
+ ON CONFLICT(path) DO UPDATE SET + source=excluded.source, + hash=excluded.hash, + mtime=excluded.mtime, + size=excluded.size`, + ) + .run(entry.path, source, entry.hash, entry.mtimeMs, entry.size); + } + + private deleteFileRecord(pathname: string, source: MemorySource): void { + this.db.prepare(`DELETE FROM files WHERE path = ? AND source = ?`).run(pathname, source); + } + + private isStructuredInputTooLargeError(message: string): boolean { + return /(413|payload too large|request too large|input too large|too many tokens|input limit|request size)/i.test( + message, + ); + } + protected async indexFile( entry: MemoryFileEntry | SessionFileEntry, options: { source: MemorySource; content?: string }, @@ -703,42 +814,59 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { return; } - const content = options.content ?? (await fs.readFile(entry.absPath, "utf-8")); - const chunks = enforceEmbeddingMaxInputTokens( - this.provider, - chunkMarkdown(content, this.settings.chunking).filter( - (chunk) => chunk.text.trim().length > 0, - ), - EMBEDDING_BATCH_MAX_TOKENS, - ); - if (options.source === "sessions" && "lineMap" in entry) { - remapChunkLines(chunks, entry.lineMap); + let chunks: MemoryChunk[]; + let structuredInputBytes: number | undefined; + if ("kind" in entry && entry.kind === "multimodal") { + const multimodalChunk = await buildMultimodalChunkForIndexing(entry); + if (!multimodalChunk) { + this.clearIndexedFileData(entry.path, options.source); + this.deleteFileRecord(entry.path, options.source); + return; + } + structuredInputBytes = multimodalChunk.structuredInputBytes; + chunks = [multimodalChunk.chunk]; + } else { + const content = options.content ?? 
(await fs.readFile(entry.absPath, "utf-8")); + chunks = enforceEmbeddingMaxInputTokens( + this.provider, + chunkMarkdown(content, this.settings.chunking).filter( + (chunk) => chunk.text.trim().length > 0, + ), + EMBEDDING_BATCH_MAX_TOKENS, + ); + if (options.source === "sessions" && "lineMap" in entry) { + remapChunkLines(chunks, entry.lineMap); + } + } + let embeddings: number[][]; + try { + embeddings = this.batch.enabled + ? await this.embedChunksWithBatch(chunks, entry, options.source) + : await this.embedChunksInBatches(chunks); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + if ( + "kind" in entry && + entry.kind === "multimodal" && + this.isStructuredInputTooLargeError(message) + ) { + log.warn("memory embeddings: skipping multimodal file rejected as too large", { + path: entry.path, + bytes: structuredInputBytes, + provider: this.provider.id, + model: this.provider.model, + error: message, + }); + this.clearIndexedFileData(entry.path, options.source); + this.upsertFileRecord(entry, options.source); + return; + } + throw err; } - const embeddings = this.batch.enabled - ? await this.embedChunksWithBatch(chunks, entry, options.source) - : await this.embedChunksInBatches(chunks); const sample = embeddings.find((embedding) => embedding.length > 0); const vectorReady = sample ? await this.ensureVectorReady(sample.length) : false; const now = Date.now(); - if (vectorReady) { - try { - this.db - .prepare( - `DELETE FROM ${VECTOR_TABLE} WHERE id IN (SELECT id FROM chunks WHERE path = ? AND source = ?)`, - ) - .run(entry.path, options.source); - } catch {} - } - if (this.fts.enabled && this.fts.available) { - try { - this.db - .prepare(`DELETE FROM ${FTS_TABLE} WHERE path = ? AND source = ? AND model = ?`) - .run(entry.path, options.source, this.provider.model); - } catch {} - } - this.db - .prepare(`DELETE FROM chunks WHERE path = ? 
AND source = ?`) - .run(entry.path, options.source); + this.clearIndexedFileData(entry.path, options.source); for (let i = 0; i < chunks.length; i++) { const chunk = chunks[i]; const embedding = embeddings[i] ?? []; @@ -793,15 +921,6 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { ); } } - this.db - .prepare( - `INSERT INTO files (path, source, hash, mtime, size) VALUES (?, ?, ?, ?, ?) - ON CONFLICT(path) DO UPDATE SET - source=excluded.source, - hash=excluded.hash, - mtime=excluded.mtime, - size=excluded.size`, - ) - .run(entry.path, options.source, entry.hash, entry.mtimeMs, entry.size); + this.upsertFileRecord(entry, options.source); } } diff --git a/src/memory/manager-sync-ops.ts b/src/memory/manager-sync-ops.ts index 1fe91599b34..6fd3e6bb9c0 100644 --- a/src/memory/manager-sync-ops.ts +++ b/src/memory/manager-sync-ops.ts @@ -29,12 +29,18 @@ import { isFileMissingError } from "./fs-utils.js"; import { buildFileEntry, ensureDir, + hashText, listMemoryFiles, normalizeExtraMemoryPaths, runWithConcurrency, } from "./internal.js"; import { type MemoryFileEntry } from "./internal.js"; import { ensureMemoryIndexSchema } from "./memory-schema.js"; +import { + buildCaseInsensitiveExtensionGlob, + classifyMemoryMultimodalPath, + getMemoryMultimodalExtensions, +} from "./multimodal.js"; import type { SessionFileEntry } from "./session-files.js"; import { buildSessionEntry, @@ -50,6 +56,7 @@ type MemoryIndexMeta = { provider: string; providerKey?: string; sources?: MemorySource[]; + scopeHash?: string; chunkTokens: number; chunkOverlap: number; vectorDims?: number; @@ -383,9 +390,22 @@ export abstract class MemoryManagerSyncOps { } if (stat.isDirectory()) { watchPaths.add(path.join(entry, "**", "*.md")); + if (this.settings.multimodal.enabled) { + for (const modality of this.settings.multimodal.modalities) { + for (const extension of getMemoryMultimodalExtensions(modality)) { + watchPaths.add( + path.join(entry, "**", 
buildCaseInsensitiveExtensionGlob(extension)), + ); + } + } + } continue; } - if (stat.isFile() && entry.toLowerCase().endsWith(".md")) { + if ( + stat.isFile() && + (entry.toLowerCase().endsWith(".md") || + classifyMemoryMultimodalPath(entry, this.settings.multimodal) !== null) + ) { watchPaths.add(entry); } } catch { @@ -649,9 +669,19 @@ export abstract class MemoryManagerSyncOps { return; } - const files = await listMemoryFiles(this.workspaceDir, this.settings.extraPaths); + const files = await listMemoryFiles( + this.workspaceDir, + this.settings.extraPaths, + this.settings.multimodal, + ); const fileEntries = ( - await Promise.all(files.map(async (file) => buildFileEntry(file, this.workspaceDir))) + await runWithConcurrency( + files.map( + (file) => async () => + await buildFileEntry(file, this.workspaceDir, this.settings.multimodal), + ), + this.getIndexConcurrency(), + ) ).filter((entry): entry is MemoryFileEntry => entry !== null); log.debug("memory sync: indexing memory files", { files: fileEntries.length, @@ -868,6 +898,7 @@ export abstract class MemoryManagerSyncOps { const vectorReady = await this.ensureVectorReady(); const meta = this.readMeta(); const configuredSources = this.resolveConfiguredSourcesForMeta(); + const configuredScopeHash = this.resolveConfiguredScopeHash(); const needsFullReindex = params?.force || !meta || @@ -875,6 +906,7 @@ export abstract class MemoryManagerSyncOps { (this.provider && meta.provider !== this.provider.id) || meta.providerKey !== this.providerKey || this.metaSourcesDiffer(meta, configuredSources) || + meta.scopeHash !== configuredScopeHash || meta.chunkTokens !== this.settings.chunking.tokens || meta.chunkOverlap !== this.settings.chunking.overlap || (vectorReady && !meta?.vectorDims); @@ -996,6 +1028,7 @@ export abstract class MemoryManagerSyncOps { provider: fallback, remote: this.settings.remote, model: fallbackModel, + outputDimensionality: this.settings.outputDimensionality, fallback: "none", local: 
this.settings.local, }); @@ -1087,6 +1120,7 @@ export abstract class MemoryManagerSyncOps { provider: this.provider?.id ?? "none", providerKey: this.providerKey!, sources: this.resolveConfiguredSourcesForMeta(), + scopeHash: this.resolveConfiguredScopeHash(), chunkTokens: this.settings.chunking.tokens, chunkOverlap: this.settings.chunking.overlap, }; @@ -1158,6 +1192,7 @@ export abstract class MemoryManagerSyncOps { provider: this.provider?.id ?? "none", providerKey: this.providerKey!, sources: this.resolveConfiguredSourcesForMeta(), + scopeHash: this.resolveConfiguredScopeHash(), chunkTokens: this.settings.chunking.tokens, chunkOverlap: this.settings.chunking.overlap, }; @@ -1235,6 +1270,22 @@ export abstract class MemoryManagerSyncOps { return normalized.length > 0 ? normalized : ["memory"]; } + private resolveConfiguredScopeHash(): string { + const extraPaths = normalizeExtraMemoryPaths(this.workspaceDir, this.settings.extraPaths) + .map((value) => value.replace(/\\/g, "/")) + .toSorted(); + return hashText( + JSON.stringify({ + extraPaths, + multimodal: { + enabled: this.settings.multimodal.enabled, + modalities: [...this.settings.multimodal.modalities].toSorted(), + maxFileBytes: this.settings.multimodal.maxFileBytes, + }, + }), + ); + } + private metaSourcesDiffer(meta: MemoryIndexMeta, configuredSources: MemorySource[]): boolean { const metaSources = this.normalizeMetaSources(meta); if (metaSources.length !== configuredSources.length) { diff --git a/src/memory/manager.ts b/src/memory/manager.ts index 9b1ff74e54c..e79f83c570a 100644 --- a/src/memory/manager.ts +++ b/src/memory/manager.ts @@ -157,6 +157,7 @@ export class MemoryIndexManager extends MemoryManagerEmbeddingOps implements Mem provider: settings.provider, remote: settings.remote, model: settings.model, + outputDimensionality: settings.outputDimensionality, fallback: settings.fallback, local: settings.local, }); diff --git a/src/memory/manager.watcher-config.test.ts 
b/src/memory/manager.watcher-config.test.ts index 77221df34b6..43682183676 100644 --- a/src/memory/manager.watcher-config.test.ts +++ b/src/memory/manager.watcher-config.test.ts @@ -106,4 +106,50 @@ describe("memory watcher config", () => { expect(ignored?.(path.join(workspaceDir, "memory", ".venv", "lib", "python.md"))).toBe(true); expect(ignored?.(path.join(workspaceDir, "memory", "project", "notes.md"))).toBe(false); }); + + it("watches multimodal extensions with case-insensitive globs", async () => { + workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-watch-")); + extraDir = path.join(workspaceDir, "extra"); + await fs.mkdir(path.join(workspaceDir, "memory"), { recursive: true }); + await fs.mkdir(extraDir, { recursive: true }); + await fs.writeFile(path.join(extraDir, "PHOTO.PNG"), "png"); + + const cfg = { + agents: { + defaults: { + workspace: workspaceDir, + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + store: { path: path.join(workspaceDir, "index.sqlite"), vector: { enabled: false } }, + sync: { watch: true, watchDebounceMs: 25, onSessionStart: false, onSearch: false }, + query: { minScore: 0, hybrid: { enabled: false } }, + extraPaths: [extraDir], + multimodal: { enabled: true, modalities: ["image", "audio"] }, + }, + }, + list: [{ id: "main", default: true }], + }, + } as OpenClawConfig; + + const result = await getMemorySearchManager({ cfg, agentId: "main" }); + expect(result.manager).not.toBeNull(); + if (!result.manager) { + throw new Error("manager missing"); + } + manager = result.manager as unknown as MemoryIndexManager; + + expect(watchMock).toHaveBeenCalledTimes(1); + const [watchedPaths] = watchMock.mock.calls[0] as unknown as [ + string[], + Record, + ]; + expect(watchedPaths).toEqual( + expect.arrayContaining([ + path.join(extraDir, "**", "*.[pP][nN][gG]"), + path.join(extraDir, "**", "*.[wW][aA][vV]"), + ]), + ); + }); }); diff --git a/src/memory/multimodal.ts 
b/src/memory/multimodal.ts new file mode 100644 index 00000000000..df72ed8c495 --- /dev/null +++ b/src/memory/multimodal.ts @@ -0,0 +1,118 @@ +const MEMORY_MULTIMODAL_SPECS = { + image: { + labelPrefix: "Image file", + extensions: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".heic", ".heif"], + }, + audio: { + labelPrefix: "Audio file", + extensions: [".mp3", ".wav", ".ogg", ".opus", ".m4a", ".aac", ".flac"], + }, +} as const; + +export type MemoryMultimodalModality = keyof typeof MEMORY_MULTIMODAL_SPECS; +export const MEMORY_MULTIMODAL_MODALITIES = Object.keys( + MEMORY_MULTIMODAL_SPECS, +) as MemoryMultimodalModality[]; +export type MemoryMultimodalSelection = MemoryMultimodalModality | "all"; + +export type MemoryMultimodalSettings = { + enabled: boolean; + modalities: MemoryMultimodalModality[]; + maxFileBytes: number; +}; + +export const DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES = 10 * 1024 * 1024; + +export function normalizeMemoryMultimodalModalities( + raw: MemoryMultimodalSelection[] | undefined, +): MemoryMultimodalModality[] { + if (raw === undefined || raw.includes("all")) { + return [...MEMORY_MULTIMODAL_MODALITIES]; + } + const normalized = new Set(); + for (const value of raw) { + if (value === "image" || value === "audio") { + normalized.add(value); + } + } + return Array.from(normalized); +} + +export function normalizeMemoryMultimodalSettings(raw: { + enabled?: boolean; + modalities?: MemoryMultimodalSelection[]; + maxFileBytes?: number; +}): MemoryMultimodalSettings { + const enabled = raw.enabled === true; + const maxFileBytes = + typeof raw.maxFileBytes === "number" && Number.isFinite(raw.maxFileBytes) + ? Math.max(1, Math.floor(raw.maxFileBytes)) + : DEFAULT_MEMORY_MULTIMODAL_MAX_FILE_BYTES; + return { + enabled, + modalities: enabled ? 
normalizeMemoryMultimodalModalities(raw.modalities) : [], + maxFileBytes, + }; +} + +export function isMemoryMultimodalEnabled(settings: MemoryMultimodalSettings): boolean { + return settings.enabled && settings.modalities.length > 0; +} + +export function getMemoryMultimodalExtensions( + modality: MemoryMultimodalModality, +): readonly string[] { + return MEMORY_MULTIMODAL_SPECS[modality].extensions; +} + +export function buildMemoryMultimodalLabel( + modality: MemoryMultimodalModality, + normalizedPath: string, +): string { + return `${MEMORY_MULTIMODAL_SPECS[modality].labelPrefix}: ${normalizedPath}`; +} + +export function buildCaseInsensitiveExtensionGlob(extension: string): string { + const normalized = extension.trim().replace(/^\./, "").toLowerCase(); + if (!normalized) { + return "*"; + } + const parts = Array.from(normalized, (char) => `[${char.toLowerCase()}${char.toUpperCase()}]`); + return `*.${parts.join("")}`; +} + +export function classifyMemoryMultimodalPath( + filePath: string, + settings: MemoryMultimodalSettings, +): MemoryMultimodalModality | null { + if (!isMemoryMultimodalEnabled(settings)) { + return null; + } + const lower = filePath.trim().toLowerCase(); + for (const modality of settings.modalities) { + for (const extension of getMemoryMultimodalExtensions(modality)) { + if (lower.endsWith(extension)) { + return modality; + } + } + } + return null; +} + +export function normalizeGeminiEmbeddingModelForMemory(model: string): string { + const trimmed = model.trim(); + if (!trimmed) { + return ""; + } + return trimmed.replace(/^models\//, "").replace(/^(gemini|google)\//, ""); +} + +export function supportsMemoryMultimodalEmbeddings(params: { + provider: string; + model: string; +}): boolean { + if (params.provider !== "gemini") { + return false; + } + return normalizeGeminiEmbeddingModelForMemory(params.model) === "gemini-embedding-2-preview"; +} diff --git a/src/node-host/invoke-system-run-plan.test.ts 
b/src/node-host/invoke-system-run-plan.test.ts index c192509197e..3e1736000aa 100644 --- a/src/node-host/invoke-system-run-plan.test.ts +++ b/src/node-host/invoke-system-run-plan.test.ts @@ -246,6 +246,38 @@ describe("hardenApprovedExecutionPaths", () => { initialBody: 'console.log("SAFE");\n', expectedArgvIndex: 1, }, + { + name: "tsx direct file", + binName: "tsx", + argv: ["tsx", "./run.ts"], + scriptName: "run.ts", + initialBody: 'console.log("SAFE");\n', + expectedArgvIndex: 1, + }, + { + name: "jiti direct file", + binName: "jiti", + argv: ["jiti", "./run.ts"], + scriptName: "run.ts", + initialBody: 'console.log("SAFE");\n', + expectedArgvIndex: 1, + }, + { + name: "ts-node direct file", + binName: "ts-node", + argv: ["ts-node", "./run.ts"], + scriptName: "run.ts", + initialBody: 'console.log("SAFE");\n', + expectedArgvIndex: 1, + }, + { + name: "vite-node direct file", + binName: "vite-node", + argv: ["vite-node", "./run.ts"], + scriptName: "run.ts", + initialBody: 'console.log("SAFE");\n', + expectedArgvIndex: 1, + }, { name: "bun direct file", binName: "bun", @@ -387,4 +419,26 @@ describe("hardenApprovedExecutionPaths", () => { }, }); }); + + it("rejects tsx eval invocations that do not bind a concrete file", () => { + withFakeRuntimeBin({ + binName: "tsx", + run: () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-tsx-eval-")); + try { + const prepared = buildSystemRunApprovalPlan({ + command: ["tsx", "--eval", "console.log('SAFE')"], + cwd: tmp, + }); + expect(prepared).toEqual({ + ok: false, + message: + "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", + }); + } finally { + fs.rmSync(tmp, { recursive: true, force: true }); + } + }, + }); + }); }); diff --git a/src/node-host/invoke-system-run-plan.ts b/src/node-host/invoke-system-run-plan.ts index 606d50e7653..1b46312c3a1 100644 --- a/src/node-host/invoke-system-run-plan.ts +++ b/src/node-host/invoke-system-run-plan.ts @@ -33,6 +33,15 @@ const 
MUTABLE_ARGV1_INTERPRETER_PATTERNS = [ /^ruby$/, ] as const; +const GENERIC_MUTABLE_SCRIPT_RUNNERS = new Set([ + "esno", + "jiti", + "ts-node", + "ts-node-esm", + "tsx", + "vite-node", +]); + const BUN_SUBCOMMANDS = new Set([ "add", "audit", @@ -409,6 +418,10 @@ function resolveDenoRunScriptOperandIndex(params: { }); } +function isMutableScriptRunner(executable: string): boolean { + return GENERIC_MUTABLE_SCRIPT_RUNNERS.has(executable) || isInterpreterLikeSafeBin(executable); +} + function resolveMutableFileOperandIndex(argv: string[], cwd: string | undefined): number | null { const unwrapped = unwrapArgvForMutableOperand(argv); const executable = normalizeExecutableToken(unwrapped.argv[0] ?? ""); @@ -443,7 +456,7 @@ function resolveMutableFileOperandIndex(argv: string[], cwd: string | undefined) return unwrapped.baseIndex + denoIndex; } } - if (!isInterpreterLikeSafeBin(executable)) { + if (!isMutableScriptRunner(executable)) { return null; } const genericIndex = resolveGenericInterpreterScriptOperandIndex({ @@ -468,10 +481,10 @@ function requiresStableInterpreterApprovalBindingWithShellCommand(params: { if ((POSIX_SHELL_WRAPPERS as ReadonlySet).has(executable)) { return false; } - return isInterpreterLikeSafeBin(executable); + return isMutableScriptRunner(executable); } -function resolveMutableFileOperandSnapshotSync(params: { +export function resolveMutableFileOperandSnapshotSync(params: { argv: string[]; cwd: string | undefined; shellCommand: string | null; diff --git a/src/node-host/invoke-system-run.test.ts b/src/node-host/invoke-system-run.test.ts index c4e5bc345f6..d183f9087c3 100644 --- a/src/node-host/invoke-system-run.test.ts +++ b/src/node-host/invoke-system-run.test.ts @@ -109,27 +109,50 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { }; } - function createRuntimeScriptOperandFixture(params: { tmp: string; runtime: "bun" | "deno" }): { + function createRuntimeScriptOperandFixture(params: { + tmp: string; + runtime: "bun" | "deno" 
| "jiti" | "tsx"; + }): { command: string[]; scriptPath: string; initialBody: string; changedBody: string; } { const scriptPath = path.join(params.tmp, "run.ts"); - if (params.runtime === "bun") { - return { - command: ["bun", "run", "./run.ts"], - scriptPath, - initialBody: 'console.log("SAFE");\n', - changedBody: 'console.log("PWNED");\n', - }; + const initialBody = 'console.log("SAFE");\n'; + const changedBody = 'console.log("PWNED");\n'; + switch (params.runtime) { + case "bun": + return { + command: ["bun", "run", "./run.ts"], + scriptPath, + initialBody, + changedBody, + }; + case "deno": + return { + command: ["deno", "run", "-A", "--allow-read", "--", "./run.ts"], + scriptPath, + initialBody, + changedBody, + }; + case "jiti": + return { + command: ["jiti", "./run.ts"], + scriptPath, + initialBody, + changedBody, + }; + case "tsx": + return { + command: ["tsx", "./run.ts"], + scriptPath, + initialBody, + changedBody, + }; } - return { - command: ["deno", "run", "-A", "--allow-read", "--", "./run.ts"], - scriptPath, - initialBody: 'console.log("SAFE");\n', - changedBody: 'console.log("PWNED");\n', - }; + const unsupportedRuntime: never = params.runtime; + throw new Error(`unsupported runtime fixture: ${String(unsupportedRuntime)}`); } function buildNestedEnvShellCommand(params: { depth: number; payload: string }): string[] { @@ -223,7 +246,7 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { } async function withFakeRuntimeOnPath(params: { - runtime: "bun" | "deno"; + runtime: "bun" | "deno" | "jiti" | "tsx"; run: () => Promise; }): Promise { const tmp = fs.mkdtempSync(path.join(os.tmpdir(), `openclaw-${params.runtime}-path-`)); @@ -842,7 +865,7 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { } }); - for (const runtime of ["bun", "deno"] as const) { + for (const runtime of ["bun", "deno", "tsx", "jiti"] as const) { it(`denies approval-based execution when a ${runtime} script operand changes after approval`, async () 
=> { await withFakeRuntimeOnPath({ runtime, @@ -926,6 +949,50 @@ describe("handleSystemRunInvoke mac app exec host routing", () => { }); } + it("denies approval-based execution when tsx is missing a required mutable script binding", async () => { + await withFakeRuntimeOnPath({ + runtime: "tsx", + run: async () => { + const tmp = fs.mkdtempSync( + path.join(os.tmpdir(), "openclaw-approval-tsx-missing-binding-"), + ); + const fixture = createRuntimeScriptOperandFixture({ tmp, runtime: "tsx" }); + fs.writeFileSync(fixture.scriptPath, fixture.initialBody); + try { + const prepared = buildSystemRunApprovalPlan({ + command: fixture.command, + cwd: tmp, + }); + expect(prepared.ok).toBe(true); + if (!prepared.ok) { + throw new Error("unreachable"); + } + + const planWithoutBinding = { ...prepared.plan }; + delete planWithoutBinding.mutableFileOperand; + const { runCommand, sendInvokeResult } = await runSystemInvoke({ + preferMacAppExecHost: false, + command: prepared.plan.argv, + rawCommand: prepared.plan.commandText, + systemRunPlan: planWithoutBinding, + cwd: prepared.plan.cwd ?? 
tmp, + approved: true, + security: "full", + ask: "off", + }); + + expect(runCommand).not.toHaveBeenCalled(); + expectInvokeErrorMessage(sendInvokeResult, { + message: "SYSTEM_RUN_DENIED: approval missing script operand binding", + exact: true, + }); + } finally { + fs.rmSync(tmp, { recursive: true, force: true }); + } + }, + }); + }); + it("denies ./sh wrapper spoof in allowlist on-miss mode before execution", async () => { const marker = path.join(os.tmpdir(), `openclaw-wrapper-spoof-${process.pid}-${Date.now()}`); const runCommand = vi.fn(async () => { diff --git a/src/node-host/invoke-system-run.ts b/src/node-host/invoke-system-run.ts index 3ed2a30d188..3730e3b2824 100644 --- a/src/node-host/invoke-system-run.ts +++ b/src/node-host/invoke-system-run.ts @@ -29,6 +29,7 @@ import { hardenApprovedExecutionPaths, revalidateApprovedCwdSnapshot, revalidateApprovedMutableFileOperand, + resolveMutableFileOperandSnapshotSync, type ApprovedCwdSnapshot, } from "./invoke-system-run-plan.js"; import type { @@ -98,6 +99,8 @@ type SystemRunPolicyPhase = SystemRunParsePhase & { const safeBinTrustedDirWarningCache = new Set(); const APPROVAL_CWD_DRIFT_DENIED_MESSAGE = "SYSTEM_RUN_DENIED: approval cwd changed before execution"; +const APPROVAL_SCRIPT_OPERAND_BINDING_DENIED_MESSAGE = + "SYSTEM_RUN_DENIED: approval missing script operand binding"; const APPROVAL_SCRIPT_OPERAND_DRIFT_DENIED_MESSAGE = "SYSTEM_RUN_DENIED: approval script operand changed before execution"; @@ -385,6 +388,29 @@ async function executeSystemRunPhase( }); return; } + const expectedMutableFileOperand = phase.approvalPlan + ? 
resolveMutableFileOperandSnapshotSync({ + argv: phase.argv, + cwd: phase.cwd, + shellCommand: phase.shellPayload, + }) + : null; + if (expectedMutableFileOperand && !expectedMutableFileOperand.ok) { + logWarn(`security: system.run approval script binding blocked (runId=${phase.runId})`); + await sendSystemRunDenied(opts, phase.execution, { + reason: "approval-required", + message: expectedMutableFileOperand.message, + }); + return; + } + if (expectedMutableFileOperand?.snapshot && !phase.approvalPlan?.mutableFileOperand) { + logWarn(`security: system.run approval script binding missing (runId=${phase.runId})`); + await sendSystemRunDenied(opts, phase.execution, { + reason: "approval-required", + message: APPROVAL_SCRIPT_OPERAND_BINDING_DENIED_MESSAGE, + }); + return; + } if ( phase.approvalPlan?.mutableFileOperand && !revalidateApprovedMutableFileOperand({ diff --git a/src/plugin-sdk/voice-call.ts b/src/plugin-sdk/voice-call.ts index da8a1f12613..c50b979a145 100644 --- a/src/plugin-sdk/voice-call.ts +++ b/src/plugin-sdk/voice-call.ts @@ -7,6 +7,7 @@ export { TtsModeSchema, TtsProviderSchema, } from "../config/zod-schema.core.js"; +export { resolveOpenAITtsInstructions } from "../tts/tts-core.js"; export type { GatewayRequestHandlerOptions } from "../gateway/server-methods/types.js"; export { isRequestBodyLimitError, diff --git a/src/plugins/bundled-dir.ts b/src/plugins/bundled-dir.ts index 4837ae59dc9..09f28bcdc19 100644 --- a/src/plugins/bundled-dir.ts +++ b/src/plugins/bundled-dir.ts @@ -2,8 +2,8 @@ import fs from "node:fs"; import path from "node:path"; import { fileURLToPath } from "node:url"; -export function resolveBundledPluginsDir(): string | undefined { - const override = process.env.OPENCLAW_BUNDLED_PLUGINS_DIR?.trim(); +export function resolveBundledPluginsDir(env: NodeJS.ProcessEnv = process.env): string | undefined { + const override = env.OPENCLAW_BUNDLED_PLUGINS_DIR?.trim(); if (override) { return override; } diff --git 
a/src/plugins/discovery.test.ts b/src/plugins/discovery.test.ts index aa33803c2ab..00430037b86 100644 --- a/src/plugins/discovery.test.ts +++ b/src/plugins/discovery.test.ts @@ -3,7 +3,6 @@ import fs from "node:fs"; import os from "node:os"; import path from "node:path"; import { afterEach, describe, expect, it } from "vitest"; -import { withEnvAsync } from "../test-utils/env.js"; import { clearPluginDiscoveryCache, discoverOpenClawPlugins } from "./discovery.js"; const tempDirs: string[] = []; @@ -15,24 +14,20 @@ function makeTempDir() { return dir; } -async function withStateDir(stateDir: string, fn: () => Promise) { - return await withEnvAsync( - { - OPENCLAW_STATE_DIR: stateDir, - CLAWDBOT_STATE_DIR: undefined, - OPENCLAW_BUNDLED_PLUGINS_DIR: "/nonexistent/bundled/plugins", - }, - fn, - ); +function buildDiscoveryEnv(stateDir: string): NodeJS.ProcessEnv { + return { + ...process.env, + OPENCLAW_STATE_DIR: stateDir, + CLAWDBOT_STATE_DIR: undefined, + OPENCLAW_BUNDLED_PLUGINS_DIR: "/nonexistent/bundled/plugins", + }; } async function discoverWithStateDir( stateDir: string, params: Parameters[0], ) { - return await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins(params); - }); + return discoverOpenClawPlugins({ ...params, env: buildDiscoveryEnv(stateDir) }); } function writePluginPackageManifest(params: { @@ -80,9 +75,7 @@ describe("discoverOpenClawPlugins", () => { fs.mkdirSync(workspaceExt, { recursive: true }); fs.writeFileSync(path.join(workspaceExt, "beta.ts"), "export default function () {}", "utf-8"); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({ workspaceDir }); - }); + const { candidates } = await discoverWithStateDir(stateDir, { workspaceDir }); const ids = candidates.map((c) => c.idHint); expect(ids).toContain("alpha"); @@ -110,9 +103,7 @@ describe("discoverOpenClawPlugins", () => { fs.mkdirSync(liveDir, { recursive: true }); fs.writeFileSync(path.join(liveDir, "index.ts"), 
"export default function () {}", "utf-8"); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates } = await discoverWithStateDir(stateDir, {}); const ids = candidates.map((candidate) => candidate.idHint); expect(ids).toContain("live"); @@ -142,9 +133,7 @@ describe("discoverOpenClawPlugins", () => { "utf-8", ); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates } = await discoverWithStateDir(stateDir, {}); const ids = candidates.map((c) => c.idHint); expect(ids).toContain("pack/one"); @@ -167,9 +156,7 @@ describe("discoverOpenClawPlugins", () => { "utf-8", ); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates } = await discoverWithStateDir(stateDir, {}); const ids = candidates.map((c) => c.idHint); expect(ids).toContain("voice-call"); @@ -187,9 +174,7 @@ describe("discoverOpenClawPlugins", () => { }); fs.writeFileSync(path.join(packDir, "index.js"), "module.exports = {}", "utf-8"); - const { candidates } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({ extraPaths: [packDir] }); - }); + const { candidates } = await discoverWithStateDir(stateDir, { extraPaths: [packDir] }); const ids = candidates.map((c) => c.idHint); expect(ids).toContain("demo-plugin-dir"); @@ -266,9 +251,7 @@ describe("discoverOpenClawPlugins", () => { extensions: ["./escape.ts"], }); - const { candidates, diagnostics } = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates, diagnostics } = await discoverWithStateDir(stateDir, {}); expect(candidates.some((candidate) => candidate.idHint === "pack")).toBe(false); expectEscapesPackageDiagnostic(diagnostics); @@ -303,9 +286,7 @@ describe("discoverOpenClawPlugins", () => { throw err; } - const { candidates } = await 
withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const { candidates } = await discoverWithStateDir(stateDir, {}); expect(candidates.some((candidate) => candidate.idHint === "pack")).toBe(false); }); @@ -318,9 +299,7 @@ describe("discoverOpenClawPlugins", () => { fs.writeFileSync(pluginPath, "export default function () {}", "utf-8"); fs.chmodSync(pluginPath, 0o777); - const result = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({}); - }); + const result = await discoverWithStateDir(stateDir, {}); expect(result.candidates).toHaveLength(0); expect(result.diagnostics.some((diag) => diag.message.includes("world-writable path"))).toBe( @@ -328,6 +307,35 @@ describe("discoverOpenClawPlugins", () => { ); }); + it.runIf(process.platform !== "win32")( + "repairs world-writable bundled plugin dirs before loading them", + async () => { + const stateDir = makeTempDir(); + const bundledDir = path.join(stateDir, "bundled"); + const packDir = path.join(bundledDir, "demo-pack"); + fs.mkdirSync(packDir, { recursive: true }); + fs.writeFileSync(path.join(packDir, "index.ts"), "export default function () {}", "utf-8"); + fs.chmodSync(packDir, 0o777); + + const result = discoverOpenClawPlugins({ + env: { + ...process.env, + OPENCLAW_STATE_DIR: stateDir, + CLAWDBOT_STATE_DIR: undefined, + OPENCLAW_BUNDLED_PLUGINS_DIR: bundledDir, + }, + }); + + expect(result.candidates.some((candidate) => candidate.idHint === "demo-pack")).toBe(true); + expect( + result.diagnostics.some( + (diag) => diag.source === packDir && diag.message.includes("world-writable path"), + ), + ).toBe(false); + expect(fs.statSync(packDir).mode & 0o777).toBe(0o755); + }, + ); + it.runIf(process.platform !== "win32" && typeof process.getuid === "function")( "blocks suspicious ownership when uid mismatch is detected", async () => { @@ -341,9 +349,7 @@ describe("discoverOpenClawPlugins", () => { ); const actualUid = (process as NodeJS.Process & { getuid: () => 
number }).getuid(); - const result = await withStateDir(stateDir, async () => { - return discoverOpenClawPlugins({ ownershipUid: actualUid + 1 }); - }); + const result = await discoverWithStateDir(stateDir, { ownershipUid: actualUid + 1 }); const shouldBlockForMismatch = actualUid !== 0; expect(result.candidates).toHaveLength(shouldBlockForMismatch ? 0 : 1); expect(result.diagnostics.some((diag) => diag.message.includes("suspicious ownership"))).toBe( @@ -359,32 +365,32 @@ describe("discoverOpenClawPlugins", () => { const pluginPath = path.join(globalExt, "cached.ts"); fs.writeFileSync(pluginPath, "export default function () {}", "utf-8"); - const first = await withEnvAsync( - { + const first = discoverOpenClawPlugins({ + env: { + ...buildDiscoveryEnv(stateDir), OPENCLAW_PLUGIN_DISCOVERY_CACHE_MS: "5000", }, - async () => withStateDir(stateDir, async () => discoverOpenClawPlugins({})), - ); + }); expect(first.candidates.some((candidate) => candidate.idHint === "cached")).toBe(true); fs.rmSync(pluginPath, { force: true }); - const second = await withEnvAsync( - { + const second = discoverOpenClawPlugins({ + env: { + ...buildDiscoveryEnv(stateDir), OPENCLAW_PLUGIN_DISCOVERY_CACHE_MS: "5000", }, - async () => withStateDir(stateDir, async () => discoverOpenClawPlugins({})), - ); + }); expect(second.candidates.some((candidate) => candidate.idHint === "cached")).toBe(true); clearPluginDiscoveryCache(); - const third = await withEnvAsync( - { + const third = discoverOpenClawPlugins({ + env: { + ...buildDiscoveryEnv(stateDir), OPENCLAW_PLUGIN_DISCOVERY_CACHE_MS: "5000", }, - async () => withStateDir(stateDir, async () => discoverOpenClawPlugins({})), - ); + }); expect(third.candidates.some((candidate) => candidate.idHint === "cached")).toBe(false); }); }); diff --git a/src/plugins/discovery.ts b/src/plugins/discovery.ts index c03b0fe01bf..686c1f7fd86 100644 --- a/src/plugins/discovery.ts +++ b/src/plugins/discovery.ts @@ -69,10 +69,11 @@ function 
buildDiscoveryCacheKey(params: { workspaceDir?: string; extraPaths?: string[]; ownershipUid?: number | null; + env: NodeJS.ProcessEnv; }): string { const workspaceKey = params.workspaceDir ? resolveUserPath(params.workspaceDir) : ""; - const configExtensionsRoot = path.join(resolveConfigDir(), "extensions"); - const bundledRoot = resolveBundledPluginsDir() ?? ""; + const configExtensionsRoot = path.join(resolveConfigDir(params.env), "extensions"); + const bundledRoot = resolveBundledPluginsDir(params.env) ?? ""; const normalizedExtraPaths = (params.extraPaths ?? []) .filter((entry): entry is string => typeof entry === "string") .map((entry) => entry.trim()) @@ -153,7 +154,7 @@ function checkPathStatAndPermissions(params: { continue; } seen.add(normalized); - const stat = safeStatSync(targetPath); + let stat = safeStatSync(targetPath); if (!stat) { return { reason: "path_stat_failed", @@ -162,7 +163,28 @@ function checkPathStatAndPermissions(params: { targetPath, }; } - const modeBits = stat.mode & 0o777; + let modeBits = stat.mode & 0o777; + if ((modeBits & 0o002) !== 0 && params.origin === "bundled") { + // npm/global installs can create package-managed extension dirs without + // directory entries in the tarball, which may widen them to 0777. + // Tighten bundled dirs in place before applying the normal safety gate. + try { + fs.chmodSync(targetPath, modeBits & ~0o022); + const repairedStat = safeStatSync(targetPath); + if (!repairedStat) { + return { + reason: "path_stat_failed", + sourcePath: params.source, + rootPath: params.rootDir, + targetPath, + }; + } + stat = repairedStat; + modeBits = repairedStat.mode & 0o777; + } catch { + // Fall through to the normal block path below when repair is not possible. 
+ } + } if ((modeBits & 0o002) !== 0) { return { reason: "path_world_writable", @@ -628,6 +650,7 @@ export function discoverOpenClawPlugins(params: { workspaceDir: params.workspaceDir, extraPaths: params.extraPaths, ownershipUid: params.ownershipUid, + env, }); if (cacheEnabled) { const cached = discoveryCache.get(cacheKey); @@ -676,7 +699,7 @@ export function discoverOpenClawPlugins(params: { } } - const bundledDir = resolveBundledPluginsDir(); + const bundledDir = resolveBundledPluginsDir(env); if (bundledDir) { discoverInDirectory({ dir: bundledDir, @@ -690,7 +713,7 @@ export function discoverOpenClawPlugins(params: { // Keep auto-discovered global extensions behind bundled plugins. // Users can still intentionally override via plugins.load.paths (origin=config). - const globalDir = path.join(resolveConfigDir(), "extensions"); + const globalDir = path.join(resolveConfigDir(env), "extensions"); discoverInDirectory({ dir: globalDir, origin: "global", diff --git a/src/plugins/manifest-registry.ts b/src/plugins/manifest-registry.ts index d392144f925..eb6702d54b1 100644 --- a/src/plugins/manifest-registry.ts +++ b/src/plugins/manifest-registry.ts @@ -1,6 +1,8 @@ import fs from "node:fs"; +import path from "node:path"; import type { OpenClawConfig } from "../config/config.js"; -import { resolveUserPath } from "../utils.js"; +import { resolveConfigDir, resolveUserPath } from "../utils.js"; +import { resolveBundledPluginsDir } from "./bundled-dir.js"; import { normalizePluginsConfig, type NormalizedPluginsConfig } from "./config-state.js"; import { discoverOpenClawPlugins, type PluginCandidate } from "./discovery.js"; import { loadPluginManifest, type PluginManifest } from "./manifest.js"; @@ -79,8 +81,11 @@ function shouldUseManifestCache(env: NodeJS.ProcessEnv): boolean { function buildCacheKey(params: { workspaceDir?: string; plugins: NormalizedPluginsConfig; + env: NodeJS.ProcessEnv; }): string { const workspaceKey = params.workspaceDir ? 
resolveUserPath(params.workspaceDir) : ""; + const configExtensionsRoot = path.join(resolveConfigDir(params.env), "extensions"); + const bundledRoot = resolveBundledPluginsDir(params.env) ?? ""; // The manifest registry only depends on where plugins are discovered from (workspace + load paths). // It does not depend on allow/deny/entries enable-state, so exclude those for higher cache hit rates. const loadPaths = params.plugins.loadPaths @@ -88,7 +93,7 @@ function buildCacheKey(params: { .map((p) => p.trim()) .filter(Boolean) .toSorted(); - return `${workspaceKey}::${JSON.stringify(loadPaths)}`; + return `${workspaceKey}::${configExtensionsRoot}::${bundledRoot}::${JSON.stringify(loadPaths)}`; } function safeStatMtimeMs(filePath: string): number | null { @@ -142,8 +147,8 @@ export function loadPluginManifestRegistry(params: { }): PluginManifestRegistry { const config = params.config ?? {}; const normalized = normalizePluginsConfig(config.plugins); - const cacheKey = buildCacheKey({ workspaceDir: params.workspaceDir, plugins: normalized }); const env = params.env ?? 
process.env; + const cacheKey = buildCacheKey({ workspaceDir: params.workspaceDir, plugins: normalized, env }); const cacheEnabled = params.cache !== false && shouldUseManifestCache(env); if (cacheEnabled) { const cached = registryCache.get(cacheKey); @@ -160,6 +165,7 @@ export function loadPluginManifestRegistry(params: { : discoverOpenClawPlugins({ workspaceDir: params.workspaceDir, extraPaths: normalized.loadPaths, + env, }); const diagnostics: PluginDiagnostic[] = [...discovery.diagnostics]; const candidates: PluginCandidate[] = discovery.candidates; diff --git a/src/plugins/runtime/gateway-request-scope.ts b/src/plugins/runtime/gateway-request-scope.ts index 11ed9cb4980..72a6f5af402 100644 --- a/src/plugins/runtime/gateway-request-scope.ts +++ b/src/plugins/runtime/gateway-request-scope.ts @@ -5,7 +5,8 @@ import type { } from "../../gateway/server-methods/types.js"; export type PluginRuntimeGatewayRequestScope = { - context: GatewayRequestContext; + context?: GatewayRequestContext; + client?: GatewayRequestOptions["client"]; isWebchatConnect: GatewayRequestOptions["isWebchatConnect"]; }; diff --git a/src/secrets/provider-env-vars.ts b/src/secrets/provider-env-vars.ts index 866fa6c33f7..88900893376 100644 --- a/src/secrets/provider-env-vars.ts +++ b/src/secrets/provider-env-vars.ts @@ -15,6 +15,7 @@ export const PROVIDER_ENV_VARS: Record = { litellm: ["LITELLM_API_KEY"], "vercel-ai-gateway": ["AI_GATEWAY_API_KEY"], opencode: ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"], + "opencode-go": ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"], together: ["TOGETHER_API_KEY"], huggingface: ["HUGGINGFACE_HUB_TOKEN", "HF_TOKEN"], qianfan: ["QIANFAN_API_KEY"], diff --git a/src/secrets/runtime-web-tools.test.ts b/src/secrets/runtime-web-tools.test.ts index b8c1e679ba6..b4484095188 100644 --- a/src/secrets/runtime-web-tools.test.ts +++ b/src/secrets/runtime-web-tools.test.ts @@ -184,7 +184,7 @@ describe("runtime web tools resolution", () => { }, }), env: { - BRAVE_API_KEY_REF: 
"brave-runtime-key", + BRAVE_API_KEY_REF: "brave-runtime-key", // pragma: allowlist secret }, }); @@ -225,7 +225,7 @@ describe("runtime web tools resolution", () => { }, }), env: { - GEMINI_API_KEY_REF: "gemini-runtime-key", + GEMINI_API_KEY_REF: "gemini-runtime-key", // pragma: allowlist secret }, }); @@ -260,7 +260,7 @@ describe("runtime web tools resolution", () => { }, }), env: { - GEMINI_API_KEY_REF: "gemini-runtime-key", + GEMINI_API_KEY_REF: "gemini-runtime-key", // pragma: allowlist secret }, }); @@ -397,7 +397,7 @@ describe("runtime web tools resolution", () => { }, }), env: { - FIRECRAWL_API_KEY: "firecrawl-fallback-key", + FIRECRAWL_API_KEY: "firecrawl-fallback-key", // pragma: allowlist secret }, }); diff --git a/src/secrets/runtime-web-tools.ts b/src/secrets/runtime-web-tools.ts index 004af2bdfe2..d888b36e8ab 100644 --- a/src/secrets/runtime-web-tools.ts +++ b/src/secrets/runtime-web-tools.ts @@ -18,7 +18,7 @@ const OPENROUTER_KEY_PREFIXES = ["sk-or-"]; type WebSearchProvider = (typeof WEB_SEARCH_PROVIDERS)[number]; -type SecretResolutionSource = "config" | "secretRef" | "env" | "missing"; +type SecretResolutionSource = "config" | "secretRef" | "env" | "missing"; // pragma: allowlist secret type RuntimeWebProviderSource = "configured" | "auto-detect" | "none"; export type RuntimeWebDiagnosticCode = diff --git a/src/telegram/bot-message-dispatch.test.ts b/src/telegram/bot-message-dispatch.test.ts index 4f5e2484d50..17ec8ac21a9 100644 --- a/src/telegram/bot-message-dispatch.test.ts +++ b/src/telegram/bot-message-dispatch.test.ts @@ -1031,6 +1031,81 @@ describe("dispatchTelegramMessage draft streaming", () => { expect(deliverReplies).not.toHaveBeenCalled(); }); + it("clears the active preview when a later final falls back after archived retain", async () => { + let answerMessageId: number | undefined; + let answerDraftParams: + | { + onSupersededPreview?: (preview: { messageId: number; textSnapshot: string }) => void; + } + | undefined; + const 
answerDraftStream = { + update: vi.fn().mockImplementation((text: string) => { + if (text.includes("Message B")) { + answerMessageId = 1002; + } + }), + flush: vi.fn().mockResolvedValue(undefined), + messageId: vi.fn().mockImplementation(() => answerMessageId), + clear: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockResolvedValue(undefined), + forceNewMessage: vi.fn().mockImplementation(() => { + answerMessageId = undefined; + }), + }; + const reasoningDraftStream = createDraftStream(); + createTelegramDraftStream + .mockImplementationOnce((params) => { + answerDraftParams = params as typeof answerDraftParams; + return answerDraftStream; + }) + .mockImplementationOnce(() => reasoningDraftStream); + dispatchReplyWithBufferedBlockDispatcher.mockImplementation( + async ({ dispatcherOptions, replyOptions }) => { + await replyOptions?.onPartialReply?.({ text: "Message A partial" }); + await replyOptions?.onAssistantMessageStart?.(); + await replyOptions?.onPartialReply?.({ text: "Message B partial" }); + answerDraftParams?.onSupersededPreview?.({ + messageId: 1001, + textSnapshot: "Message A partial", + }); + + await dispatcherOptions.deliver({ text: "Message A final" }, { kind: "final" }); + await dispatcherOptions.deliver({ text: "Message B final" }, { kind: "final" }); + return { queuedFinal: true }; + }, + ); + deliverReplies.mockResolvedValue({ delivered: true }); + const preConnectErr = new Error("connect ECONNREFUSED 149.154.167.220:443"); + (preConnectErr as NodeJS.ErrnoException).code = "ECONNREFUSED"; + editMessageTelegram + .mockRejectedValueOnce(new Error("400: Bad Request: message to edit not found")) + .mockRejectedValueOnce(preConnectErr); + + await dispatchWithContext({ context: createContext(), streamMode: "partial" }); + + expect(editMessageTelegram).toHaveBeenNthCalledWith( + 1, + 123, + 1001, + "Message A final", + expect.any(Object), + ); + expect(editMessageTelegram).toHaveBeenNthCalledWith( + 2, + 123, + 1002, + "Message B final", + 
expect.any(Object), + ); + const finalTextSentViaDeliverReplies = deliverReplies.mock.calls.some((call: unknown[]) => + (call[0] as { replies?: Array<{ text?: string }> })?.replies?.some( + (r: { text?: string }) => r.text === "Message B final", + ), + ); + expect(finalTextSentViaDeliverReplies).toBe(true); + expect(answerDraftStream.clear).toHaveBeenCalledTimes(1); + }); + it.each(["partial", "block"] as const)( "keeps finalized text preview when the next assistant message is media-only (%s mode)", async (streamMode) => { diff --git a/src/telegram/bot.fetch-abort.test.ts b/src/telegram/bot.fetch-abort.test.ts index 471654686f7..0d9bd53643b 100644 --- a/src/telegram/bot.fetch-abort.test.ts +++ b/src/telegram/bot.fetch-abort.test.ts @@ -1,10 +1,10 @@ import { describe, expect, it, vi } from "vitest"; import { botCtorSpy } from "./bot.create-telegram-bot.test-harness.js"; import { createTelegramBot } from "./bot.js"; +import { getTelegramNetworkErrorOrigin } from "./network-errors.js"; describe("createTelegramBot fetch abort", () => { it("aborts wrapped client fetch when fetchAbortSignal aborts", async () => { - const originalFetch = globalThis.fetch; const shutdown = new AbortController(); const fetchSpy = vi.fn( (_input: RequestInfo | URL, init?: RequestInit) => @@ -13,22 +13,78 @@ describe("createTelegramBot fetch abort", () => { signal.addEventListener("abort", () => resolve(signal), { once: true }); }), ); - globalThis.fetch = fetchSpy as unknown as typeof fetch; - try { - botCtorSpy.mockClear(); - createTelegramBot({ token: "tok", fetchAbortSignal: shutdown.signal }); - const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) - ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; - expect(clientFetch).toBeTypeOf("function"); + botCtorSpy.mockClear(); + createTelegramBot({ + token: "tok", + fetchAbortSignal: shutdown.signal, + proxyFetch: fetchSpy as unknown as typeof fetch, + }); + const clientFetch = 
(botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) + ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; + expect(clientFetch).toBeTypeOf("function"); - const observedSignalPromise = clientFetch("https://example.test"); - shutdown.abort(new Error("shutdown")); - const observedSignal = (await observedSignalPromise) as AbortSignal; + const observedSignalPromise = clientFetch("https://example.test"); + shutdown.abort(new Error("shutdown")); + const observedSignal = (await observedSignalPromise) as AbortSignal; - expect(observedSignal).toBeInstanceOf(AbortSignal); - expect(observedSignal.aborted).toBe(true); - } finally { - globalThis.fetch = originalFetch; - } + expect(observedSignal).toBeInstanceOf(AbortSignal); + expect(observedSignal.aborted).toBe(true); + }); + + it("tags wrapped Telegram fetch failures with the Bot API method", async () => { + const shutdown = new AbortController(); + const fetchError = Object.assign(new TypeError("fetch failed"), { + cause: Object.assign(new Error("connect timeout"), { + code: "UND_ERR_CONNECT_TIMEOUT", + }), + }); + const fetchSpy = vi.fn(async () => { + throw fetchError; + }); + botCtorSpy.mockClear(); + createTelegramBot({ + token: "tok", + fetchAbortSignal: shutdown.signal, + proxyFetch: fetchSpy as unknown as typeof fetch, + }); + const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) + ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; + expect(clientFetch).toBeTypeOf("function"); + + await expect(clientFetch("https://api.telegram.org/bot123456:ABC/getUpdates")).rejects.toBe( + fetchError, + ); + expect(getTelegramNetworkErrorOrigin(fetchError)).toEqual({ + method: "getupdates", + url: "https://api.telegram.org/bot123456:ABC/getUpdates", + }); + }); + + it("preserves the original fetch error when tagging cannot attach metadata", async () => { + const shutdown = new AbortController(); + const frozenError = 
Object.freeze( + Object.assign(new TypeError("fetch failed"), { + cause: Object.assign(new Error("connect timeout"), { + code: "UND_ERR_CONNECT_TIMEOUT", + }), + }), + ); + const fetchSpy = vi.fn(async () => { + throw frozenError; + }); + botCtorSpy.mockClear(); + createTelegramBot({ + token: "tok", + fetchAbortSignal: shutdown.signal, + proxyFetch: fetchSpy as unknown as typeof fetch, + }); + const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) + ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; + expect(clientFetch).toBeTypeOf("function"); + + await expect(clientFetch("https://api.telegram.org/bot123456:ABC/getUpdates")).rejects.toBe( + frozenError, + ); + expect(getTelegramNetworkErrorOrigin(frozenError)).toBeNull(); }); }); diff --git a/src/telegram/bot.ts b/src/telegram/bot.ts index 48d0c745b42..b0c288efcea 100644 --- a/src/telegram/bot.ts +++ b/src/telegram/bot.ts @@ -39,6 +39,7 @@ import { } from "./bot-updates.js"; import { buildTelegramGroupPeerId, resolveTelegramStreamMode } from "./bot/helpers.js"; import { resolveTelegramFetch } from "./fetch.js"; +import { tagTelegramNetworkError } from "./network-errors.js"; import { createTelegramSendChatActionHandler } from "./sendchataction-401-backoff.js"; import { getTelegramSequentialKey } from "./sequential-key.js"; import { createTelegramThreadBindingManager } from "./thread-bindings.js"; @@ -68,6 +69,34 @@ export type TelegramBotOptions = { export { getTelegramSequentialKey }; +function readRequestUrl(input: RequestInfo | URL): string | null { + if (typeof input === "string") { + return input; + } + if (input instanceof URL) { + return input.toString(); + } + if (typeof input === "object" && input !== null && "url" in input) { + const url = (input as { url?: unknown }).url; + return typeof url === "string" ? 
url : null; + } + return null; +} + +function extractTelegramApiMethod(input: RequestInfo | URL): string | null { + const url = readRequestUrl(input); + if (!url) { + return null; + } + try { + const pathname = new URL(url).pathname; + const segments = pathname.split("/").filter(Boolean); + return segments.length > 0 ? (segments.at(-1) ?? null) : null; + } catch { + return null; + } +} + export function createTelegramBot(opts: TelegramBotOptions) { const runtime: RuntimeEnv = opts.runtime ?? createNonExitingRuntime(); const cfg = opts.config ?? loadConfig(); @@ -147,6 +176,23 @@ export function createTelegramBot(opts: TelegramBotOptions) { }); }) as unknown as NonNullable; } + if (finalFetch) { + const baseFetch = finalFetch; + finalFetch = ((input: RequestInfo | URL, init?: RequestInit) => { + return Promise.resolve(baseFetch(input, init)).catch((err: unknown) => { + try { + tagTelegramNetworkError(err, { + method: extractTelegramApiMethod(input), + url: readRequestUrl(input), + }); + } catch { + // Tagging is best-effort; preserve the original fetch failure if the + // error object cannot accept extra metadata. 
+ } + throw err; + }); + }) as unknown as NonNullable; + } const timeoutSeconds = typeof telegramCfg?.timeoutSeconds === "number" && Number.isFinite(telegramCfg.timeoutSeconds) diff --git a/src/telegram/draft-stream.test-helpers.ts b/src/telegram/draft-stream.test-helpers.ts index 0a6073309c7..b68a3c226b1 100644 --- a/src/telegram/draft-stream.test-helpers.ts +++ b/src/telegram/draft-stream.test-helpers.ts @@ -13,6 +13,7 @@ export type TestDraftStream = { stop: ReturnType Promise>>; materialize: ReturnType Promise>>; forceNewMessage: ReturnType void>>; + sendMayHaveLanded: ReturnType boolean>>; setMessageId: (value: number | undefined) => void; }; @@ -47,6 +48,7 @@ export function createTestDraftStream(params?: { messageId = undefined; } }), + sendMayHaveLanded: vi.fn().mockReturnValue(false), setMessageId: (value: number | undefined) => { messageId = value; }, @@ -77,6 +79,7 @@ export function createSequencedTestDraftStream(startMessageId = 1001): TestDraft forceNewMessage: vi.fn().mockImplementation(() => { activeMessageId = undefined; }), + sendMayHaveLanded: vi.fn().mockReturnValue(false), setMessageId: (value: number | undefined) => { activeMessageId = value; }, diff --git a/src/telegram/draft-stream.test.ts b/src/telegram/draft-stream.test.ts index fc65dd6b82a..58990c41abf 100644 --- a/src/telegram/draft-stream.test.ts +++ b/src/telegram/draft-stream.test.ts @@ -435,6 +435,46 @@ describe("createTelegramDraftStream", () => { expect(api.editMessageText).not.toHaveBeenCalledWith(123, 17, "Message B partial"); }); + it("marks sendMayHaveLanded after an ambiguous first preview send failure", async () => { + const api = createMockDraftApi(); + api.sendMessage.mockRejectedValueOnce(new Error("timeout after Telegram accepted send")); + const stream = createDraftStream(api); + + stream.update("Hello"); + await stream.flush(); + + expect(api.sendMessage).toHaveBeenCalledTimes(1); + expect(stream.sendMayHaveLanded?.()).toBe(true); + }); + + it("clears sendMayHaveLanded 
on pre-connect first preview send failures", async () => { + const api = createMockDraftApi(); + api.sendMessage.mockRejectedValueOnce( + Object.assign(new Error("connect ECONNREFUSED"), { code: "ECONNREFUSED" }), + ); + const stream = createDraftStream(api); + + stream.update("Hello"); + await stream.flush(); + + expect(api.sendMessage).toHaveBeenCalledTimes(1); + expect(stream.sendMayHaveLanded?.()).toBe(false); + }); + + it("clears sendMayHaveLanded on Telegram 4xx client rejections", async () => { + const api = createMockDraftApi(); + api.sendMessage.mockRejectedValueOnce( + Object.assign(new Error("403: Forbidden"), { error_code: 403 }), + ); + const stream = createDraftStream(api); + + stream.update("Hello"); + await stream.flush(); + + expect(api.sendMessage).toHaveBeenCalledTimes(1); + expect(stream.sendMayHaveLanded?.()).toBe(false); + }); + it("supports rendered previews with parse_mode", async () => { const api = createMockDraftApi(); const stream = createTelegramDraftStream({ diff --git a/src/telegram/draft-stream.ts b/src/telegram/draft-stream.ts index f7f03db48f6..ddb0595312b 100644 --- a/src/telegram/draft-stream.ts +++ b/src/telegram/draft-stream.ts @@ -1,6 +1,7 @@ import type { Bot } from "grammy"; import { createFinalizableDraftLifecycle } from "../channels/draft-stream-controls.js"; import { buildTelegramThreadParams, type TelegramThreadSpec } from "./bot/helpers.js"; +import { isSafeToRetrySendError, isTelegramClientRejection } from "./network-errors.js"; const TELEGRAM_STREAM_MAX_CHARS = 4096; const DEFAULT_THROTTLE_MS = 1000; @@ -66,6 +67,8 @@ export type TelegramDraftStream = { materialize?: () => Promise; /** Reset internal state so the next update creates a new message instead of editing. */ forceNewMessage: () => void; + /** True when a preview sendMessage was attempted but the response was lost. 
*/ + sendMayHaveLanded?: () => boolean; }; type TelegramDraftPreview = { @@ -126,6 +129,7 @@ export function createTelegramDraftStream(params: { } const streamState = { stopped: false, final: false }; + let messageSendAttempted = false; let streamMessageId: number | undefined; let streamDraftId = usesDraftTransport ? allocateTelegramDraftId() : undefined; let previewTransport: "message" | "draft" = usesDraftTransport ? "draft" : "message"; @@ -192,12 +196,24 @@ export function createTelegramDraftStream(params: { } return true; } - const { sent } = await sendRenderedMessageWithThreadFallback({ - renderedText, - renderedParseMode, - fallbackWarnMessage: - "telegram stream preview send failed with message_thread_id, retrying without thread", - }); + messageSendAttempted = true; + let sent: Awaited>["sent"]; + try { + ({ sent } = await sendRenderedMessageWithThreadFallback({ + renderedText, + renderedParseMode, + fallbackWarnMessage: + "telegram stream preview send failed with message_thread_id, retrying without thread", + })); + } catch (err) { + // Pre-connect failures (DNS, refused) and explicit Telegram rejections (4xx) + // guarantee the message was never delivered — clear the flag so + // sendMayHaveLanded() doesn't suppress fallback. + if (isSafeToRetrySendError(err) || isTelegramClientRejection(err)) { + messageSendAttempted = false; + } + throw err; + } const sentMessageId = sent?.message_id; if (typeof sentMessageId !== "number" || !Number.isFinite(sentMessageId)) { streamState.stopped = true; @@ -345,6 +361,7 @@ export function createTelegramDraftStream(params: { // Re-open the stream lifecycle for the next assistant segment. 
streamState.final = false; generation += 1; + messageSendAttempted = false; streamMessageId = undefined; if (previewTransport === "draft") { streamDraftId = allocateTelegramDraftId(); @@ -421,5 +438,6 @@ export function createTelegramDraftStream(params: { stop, materialize, forceNewMessage, + sendMayHaveLanded: () => messageSendAttempted && typeof streamMessageId !== "number", }; } diff --git a/src/telegram/fetch.ts b/src/telegram/fetch.ts index 3934c10c391..a6b2cec4810 100644 --- a/src/telegram/fetch.ts +++ b/src/telegram/fetch.ts @@ -2,6 +2,7 @@ import * as dns from "node:dns"; import { Agent, EnvHttpProxyAgent, ProxyAgent, fetch as undiciFetch } from "undici"; import type { TelegramNetworkConfig } from "../config/types.telegram.js"; import { resolveFetch } from "../infra/fetch.js"; +import { hasEnvHttpProxyConfigured } from "../infra/net/proxy-env.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; import { resolveTelegramAutoSelectFamilyDecision, @@ -177,13 +178,7 @@ function shouldBypassEnvProxyForTelegramApi(env: NodeJS.ProcessEnv = process.env } function hasEnvHttpProxyForTelegramApi(env: NodeJS.ProcessEnv = process.env): boolean { - // Match EnvHttpProxyAgent behavior (undici) for HTTPS requests: - // - lower-case env vars take precedence over upper-case - // - HTTPS requests use https_proxy/HTTPS_PROXY first, then fall back to http_proxy/HTTP_PROXY - // - ALL_PROXY is ignored by EnvHttpProxyAgent - const httpProxy = env.http_proxy ?? env.HTTP_PROXY; - const httpsProxy = env.https_proxy ?? 
env.HTTPS_PROXY; - return Boolean(httpProxy) || Boolean(httpsProxy); + return hasEnvHttpProxyConfigured("https", env); } function createTelegramDispatcher(params: { diff --git a/src/telegram/lane-delivery-text-deliverer.ts b/src/telegram/lane-delivery-text-deliverer.ts index c8eb10a9bb1..000087cc692 100644 --- a/src/telegram/lane-delivery-text-deliverer.ts +++ b/src/telegram/lane-delivery-text-deliverer.ts @@ -1,7 +1,11 @@ import type { ReplyPayload } from "../auto-reply/types.js"; import type { TelegramInlineButtons } from "./button-types.js"; import type { TelegramDraftStream } from "./draft-stream.js"; -import { isRecoverableTelegramNetworkError, isSafeToRetrySendError } from "./network-errors.js"; +import { + isRecoverableTelegramNetworkError, + isSafeToRetrySendError, + isTelegramClientRejection, +} from "./network-errors.js"; const MESSAGE_NOT_MODIFIED_RE = /400:\s*Bad Request:\s*message is not modified|MESSAGE_NOT_MODIFIED/i; @@ -270,10 +274,18 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) { params.markDelivered(); return "retained"; } + if (isTelegramClientRejection(err)) { + params.log( + `telegram: ${args.laneName} preview final edit rejected by Telegram (client error); falling back to standard send (${String(err)})`, + ); + return "fallback"; + } + // Default: ambiguous error — prefer incomplete over duplicate params.log( - `telegram: ${args.laneName} preview final edit rejected by Telegram; falling back to standard send (${String(err)})`, + `telegram: ${args.laneName} preview final edit failed with ambiguous error; keeping existing preview to avoid duplicate (${String(err)})`, ); - return "fallback"; + params.markDelivered(); + return "retained"; } params.log( `telegram: ${args.laneName} preview ${args.context} edit failed; falling back to standard send (${String(err)})`, @@ -367,6 +379,17 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) { context, }); if (typeof 
previewTargetAfterStop.previewMessageId !== "number") { + // Only retain for final delivery when a prior preview is already visible + // to the user — otherwise falling back is safer than silence. For updates, + // always fall back so the caller can attempt sendPayload without stale + // markDelivered() state. + if (context === "final" && lane.hasStreamedMessage && lane.stream?.sendMayHaveLanded?.()) { + params.log( + `telegram: ${laneName} preview send may have landed despite missing message id; keeping to avoid duplicate`, + ); + params.markDelivered(); + return "retained"; + } return "fallback"; } const activePreviewMessageId = lane.stream?.messageId(); @@ -441,6 +464,12 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) { !hasMedia && text.length > 0 && text.length <= params.draftMaxChars && !payload.isError; if (infoKind === "final") { + // Transient previews must decide cleanup retention per final attempt. + // Completed previews intentionally stay retained so later extra payloads + // do not clear the already-finalized message. 
+ if (params.activePreviewLifecycleByLane[laneName] === "transient") { + params.retainPreviewOnCleanupByLane[laneName] = false; + } if (laneName === "answer") { const archivedResult = await consumeArchivedAnswerPreviewForFinal({ lane, diff --git a/src/telegram/lane-delivery.test.ts b/src/telegram/lane-delivery.test.ts index a2dae1f05b9..3a165147d84 100644 --- a/src/telegram/lane-delivery.test.ts +++ b/src/telegram/lane-delivery.test.ts @@ -174,8 +174,9 @@ describe("createLaneTextDeliverer", () => { ); }); - it("falls back to sendPayload when an existing preview final edit is rejected", async () => { + it("retains preview when an existing preview final edit fails with ambiguous error", async () => { const harness = createHarness({ answerMessageId: 999 }); + // Plain Error with no error_code → ambiguous, prefer incomplete over duplicate harness.editPreview.mockRejectedValue(new Error("500: preview edit failed")); const result = await harness.deliverLaneText({ @@ -185,13 +186,11 @@ describe("createLaneTextDeliverer", () => { infoKind: "final", }); - expect(result).toBe("sent"); + expect(result).toBe("preview-retained"); expect(harness.editPreview).toHaveBeenCalledTimes(1); - expect(harness.sendPayload).toHaveBeenCalledWith( - expect.objectContaining({ text: "Hello final" }), - ); + expect(harness.sendPayload).not.toHaveBeenCalled(); expect(harness.log).toHaveBeenCalledWith( - expect.stringContaining("edit rejected by Telegram; falling back"), + expect.stringContaining("ambiguous error; keeping existing preview to avoid duplicate"), ); }); @@ -433,8 +432,9 @@ describe("createLaneTextDeliverer", () => { // During final delivery, only ambiguous post-connect failures keep the // preview. Definite non-delivery falls back to a real send. 
- it("falls back on API error during final", async () => { + it("retains preview on ambiguous API error during final", async () => { const harness = createHarness({ answerMessageId: 999 }); + // Plain Error with no error_code → ambiguous, prefer incomplete over duplicate harness.editPreview.mockRejectedValue(new Error("500: Internal Server Error")); const result = await harness.deliverLaneText({ @@ -444,9 +444,9 @@ describe("createLaneTextDeliverer", () => { infoKind: "final", }); - expect(result).toBe("sent"); + expect(result).toBe("preview-retained"); expect(harness.editPreview).toHaveBeenCalledTimes(1); - expect(harness.sendPayload).toHaveBeenCalledTimes(1); + expect(harness.sendPayload).not.toHaveBeenCalled(); }); it("falls back when an archived preview edit target is missing and no alternate preview exists", async () => { @@ -497,6 +497,93 @@ describe("createLaneTextDeliverer", () => { ); }); + it("falls back on 4xx client rejection with error_code during final", async () => { + const harness = createHarness({ answerMessageId: 999 }); + const err = Object.assign(new Error("403: Forbidden"), { error_code: 403 }); + harness.editPreview.mockRejectedValue(err); + + const result = await harness.deliverLaneText({ + laneName: "answer", + text: "Hello final", + payload: { text: "Hello final" }, + infoKind: "final", + }); + + expect(result).toBe("sent"); + expect(harness.editPreview).toHaveBeenCalledTimes(1); + expect(harness.sendPayload).toHaveBeenCalledWith( + expect.objectContaining({ text: "Hello final" }), + ); + expect(harness.log).toHaveBeenCalledWith( + expect.stringContaining("rejected by Telegram (client error); falling back"), + ); + }); + + it("retains preview on 502 with error_code during final (ambiguous server error)", async () => { + const harness = createHarness({ answerMessageId: 999 }); + const err = Object.assign(new Error("502: Bad Gateway"), { error_code: 502 }); + harness.editPreview.mockRejectedValue(err); + + const result = await 
harness.deliverLaneText({ + laneName: "answer", + text: "Hello final", + payload: { text: "Hello final" }, + infoKind: "final", + }); + + expect(result).toBe("preview-retained"); + expect(harness.sendPayload).not.toHaveBeenCalled(); + expect(harness.log).toHaveBeenCalledWith( + expect.stringContaining("ambiguous error; keeping existing preview to avoid duplicate"), + ); + }); + + it("falls back when the first preview send may have landed without a message id", async () => { + const stream = createTestDraftStream(); + stream.sendMayHaveLanded.mockReturnValue(true); + const harness = createHarness({ answerStream: stream }); + + const result = await harness.deliverLaneText({ + laneName: "answer", + text: "Hello final", + payload: { text: "Hello final" }, + infoKind: "final", + }); + + expect(result).toBe("sent"); + expect(harness.sendPayload).toHaveBeenCalledWith( + expect.objectContaining({ text: "Hello final" }), + ); + }); + + it("retains when sendMayHaveLanded is true and a prior preview was visible", async () => { + // Stream has a messageId (visible preview) but loses it after stop + const stream = createTestDraftStream({ messageId: 999 }); + stream.sendMayHaveLanded.mockReturnValue(true); + const harness = createHarness({ + answerStream: stream, + answerHasStreamedMessage: true, + }); + // Simulate messageId lost after stop (e.g. 
forceNewMessage or timeout) + harness.stopDraftLane.mockImplementation(async (lane: DraftLaneState) => { + stream.setMessageId(undefined); + await lane.stream?.stop(); + }); + + const result = await harness.deliverLaneText({ + laneName: "answer", + text: "Hello final", + payload: { text: "Hello final" }, + infoKind: "final", + }); + + expect(result).toBe("preview-retained"); + expect(harness.sendPayload).not.toHaveBeenCalled(); + expect(harness.log).toHaveBeenCalledWith( + expect.stringContaining("preview send may have landed despite missing message id"), + ); + }); + it("deletes consumed boundary previews after fallback final send", async () => { const harness = createHarness(); harness.archivedAnswerPreviews.push({ diff --git a/src/telegram/monitor.test.ts b/src/telegram/monitor.test.ts index bd9a35fc97c..d7ebef73373 100644 --- a/src/telegram/monitor.test.ts +++ b/src/telegram/monitor.test.ts @@ -1,5 +1,6 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { monitorTelegramProvider } from "./monitor.js"; +import { tagTelegramNetworkError } from "./network-errors.js"; type MockCtx = { message: { @@ -102,6 +103,15 @@ function makeRecoverableFetchError() { }); } +function makeTaggedPollingFetchError() { + const err = makeRecoverableFetchError(); + tagTelegramNetworkError(err, { + method: "getUpdates", + url: "https://api.telegram.org/bot123456:ABC/getUpdates", + }); + return err; +} + const createAbortTask = ( abort: AbortController, beforeAbort?: () => void, @@ -398,6 +408,20 @@ describe("monitorTelegramProvider (grammY)", () => { expect(createdBotStops[0]).toHaveBeenCalledTimes(1); }); + it("clears bounded cleanup timers after a clean stop", async () => { + vi.useFakeTimers(); + try { + const abort = new AbortController(); + mockRunOnceAndAbort(abort); + + await monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); + + expect(vi.getTimerCount()).toBe(0); + } finally { + vi.useRealTimers(); + } + }); + it("surfaces 
non-recoverable errors", async () => { runSpy.mockImplementationOnce(() => makeRunnerStub({ @@ -439,7 +463,7 @@ describe("monitorTelegramProvider (grammY)", () => { const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); await vi.waitFor(() => expect(runSpy).toHaveBeenCalledTimes(1)); - expect(emitUnhandledRejection(new TypeError("fetch failed"))).toBe(true); + expect(emitUnhandledRejection(makeTaggedPollingFetchError())).toBe(true); await monitor; expect(stop.mock.calls.length).toBeGreaterThanOrEqual(1); @@ -482,13 +506,54 @@ describe("monitorTelegramProvider (grammY)", () => { expect(firstSignal).toBeInstanceOf(AbortSignal); expect((firstSignal as AbortSignal).aborted).toBe(false); - expect(emitUnhandledRejection(new TypeError("fetch failed"))).toBe(true); + expect(emitUnhandledRejection(makeTaggedPollingFetchError())).toBe(true); await monitor; expect((firstSignal as AbortSignal).aborted).toBe(true); expect(stop).toHaveBeenCalled(); }); + it("ignores unrelated process-level network errors while telegram polling is active", async () => { + const abort = new AbortController(); + let running = true; + let releaseTask: (() => void) | undefined; + const stop = vi.fn(async () => { + running = false; + releaseTask?.(); + }); + + runSpy.mockImplementationOnce(() => + makeRunnerStub({ + task: () => + new Promise((resolve) => { + releaseTask = resolve; + }), + stop, + isRunning: () => running, + }), + ); + + const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); + await vi.waitFor(() => expect(runSpy).toHaveBeenCalledTimes(1)); + + const slackDnsError = Object.assign( + new Error("A request error occurred: getaddrinfo ENOTFOUND slack.com"), + { + code: "ENOTFOUND", + hostname: "slack.com", + }, + ); + expect(emitUnhandledRejection(slackDnsError)).toBe(false); + + abort.abort(); + await monitor; + + expect(stop).toHaveBeenCalledTimes(1); + expect(computeBackoff).not.toHaveBeenCalled(); + 
expect(sleepWithAbort).not.toHaveBeenCalled(); + expect(runSpy).toHaveBeenCalledTimes(1); + }); + it("passes configured webhookHost to webhook listener", async () => { await monitorTelegramProvider({ token: "tok", diff --git a/src/telegram/monitor.ts b/src/telegram/monitor.ts index 7131876e6f1..f7704f62dea 100644 --- a/src/telegram/monitor.ts +++ b/src/telegram/monitor.ts @@ -9,7 +9,10 @@ import type { RuntimeEnv } from "../runtime.js"; import { resolveTelegramAccount } from "./accounts.js"; import { resolveTelegramAllowedUpdates } from "./allowed-updates.js"; import { TelegramExecApprovalHandler } from "./exec-approvals-handler.js"; -import { isRecoverableTelegramNetworkError } from "./network-errors.js"; +import { + isRecoverableTelegramNetworkError, + isTelegramPollingNetworkError, +} from "./network-errors.js"; import { TelegramPollingSession } from "./polling-session.js"; import { makeProxyFetch } from "./proxy.js"; import { readTelegramUpdateOffset, writeTelegramUpdateOffset } from "./update-offset-store.js"; @@ -78,13 +81,14 @@ export async function monitorTelegramProvider(opts: MonitorTelegramOpts = {}) { const unregisterHandler = registerUnhandledRejectionHandler((err) => { const isNetworkError = isRecoverableTelegramNetworkError(err, { context: "polling" }); - if (isGrammyHttpError(err) && isNetworkError) { + const isTelegramPollingError = isTelegramPollingNetworkError(err); + if (isGrammyHttpError(err) && isNetworkError && isTelegramPollingError) { log(`[telegram] Suppressed network error: ${formatErrorMessage(err)}`); return true; } const activeRunner = pollingSession?.activeRunner; - if (isNetworkError && activeRunner && activeRunner.isRunning()) { + if (isNetworkError && isTelegramPollingError && activeRunner && activeRunner.isRunning()) { pollingSession?.markForceRestarted(); pollingSession?.abortActiveFetch(); void activeRunner.stop().catch(() => {}); diff --git a/src/telegram/network-errors.test.ts b/src/telegram/network-errors.test.ts index 
d4572eda9c8..56106a292b8 100644 --- a/src/telegram/network-errors.test.ts +++ b/src/telegram/network-errors.test.ts @@ -1,7 +1,37 @@ import { describe, expect, it } from "vitest"; -import { isRecoverableTelegramNetworkError, isSafeToRetrySendError } from "./network-errors.js"; +import { + getTelegramNetworkErrorOrigin, + isRecoverableTelegramNetworkError, + isSafeToRetrySendError, + isTelegramClientRejection, + isTelegramPollingNetworkError, + isTelegramServerError, + tagTelegramNetworkError, +} from "./network-errors.js"; describe("isRecoverableTelegramNetworkError", () => { + it("tracks Telegram polling origin separately from generic network matching", () => { + const slackDnsError = Object.assign( + new Error("A request error occurred: getaddrinfo ENOTFOUND slack.com"), + { + code: "ENOTFOUND", + hostname: "slack.com", + }, + ); + expect(isRecoverableTelegramNetworkError(slackDnsError)).toBe(true); + expect(isTelegramPollingNetworkError(slackDnsError)).toBe(false); + + tagTelegramNetworkError(slackDnsError, { + method: "getUpdates", + url: "https://api.telegram.org/bot123456:ABC/getUpdates", + }); + expect(getTelegramNetworkErrorOrigin(slackDnsError)).toEqual({ + method: "getupdates", + url: "https://api.telegram.org/bot123456:ABC/getUpdates", + }); + expect(isTelegramPollingNetworkError(slackDnsError)).toBe(true); + }); + it("detects recoverable error codes", () => { const err = Object.assign(new Error("timeout"), { code: "ETIMEDOUT" }); expect(isRecoverableTelegramNetworkError(err)).toBe(true); @@ -164,3 +194,51 @@ describe("isSafeToRetrySendError", () => { expect(isSafeToRetrySendError(wrapped)).toBe(true); }); }); + +describe("isTelegramServerError", () => { + it("returns true for error_code 500", () => { + const err = Object.assign(new Error("Internal Server Error"), { error_code: 500 }); + expect(isTelegramServerError(err)).toBe(true); + }); + + it("returns true for error_code 502", () => { + const err = Object.assign(new Error("Bad Gateway"), { 
error_code: 502 }); + expect(isTelegramServerError(err)).toBe(true); + }); + + it("returns false for error_code 403", () => { + const err = Object.assign(new Error("Forbidden"), { error_code: 403 }); + expect(isTelegramServerError(err)).toBe(false); + }); + + it("returns false for plain Error", () => { + expect(isTelegramServerError(new Error("500: Internal Server Error"))).toBe(false); + }); +}); + +describe("isTelegramClientRejection", () => { + it("returns true for error_code 400", () => { + const err = Object.assign(new Error("Bad Request"), { error_code: 400 }); + expect(isTelegramClientRejection(err)).toBe(true); + }); + + it("returns true for error_code 403", () => { + const err = Object.assign(new Error("Forbidden"), { error_code: 403 }); + expect(isTelegramClientRejection(err)).toBe(true); + }); + + it("returns false for error_code 502", () => { + const err = Object.assign(new Error("Bad Gateway"), { error_code: 502 }); + expect(isTelegramClientRejection(err)).toBe(false); + }); + + it("returns false for plain Error", () => { + expect(isTelegramClientRejection(new Error("400: Bad Request"))).toBe(false); + }); + + it("detects error_code in nested cause", () => { + const inner = Object.assign(new Error("Forbidden"), { error_code: 403 }); + const outer = Object.assign(new Error("wrapped"), { cause: inner }); + expect(isTelegramClientRejection(outer)).toBe(true); + }); +}); diff --git a/src/telegram/network-errors.ts b/src/telegram/network-errors.ts index bf5aa9cbcbe..08e5d2dc2c0 100644 --- a/src/telegram/network-errors.ts +++ b/src/telegram/network-errors.ts @@ -5,6 +5,8 @@ import { readErrorName, } from "../infra/errors.js"; +const TELEGRAM_NETWORK_ORIGIN = Symbol("openclaw.telegram.network-origin"); + const RECOVERABLE_ERROR_CODES = new Set([ "ECONNRESET", "ECONNREFUSED", @@ -101,6 +103,51 @@ function getErrorCode(err: unknown): string | undefined { } export type TelegramNetworkErrorContext = "polling" | "send" | "webhook" | "unknown"; +export type 
TelegramNetworkErrorOrigin = { + method?: string | null; + url?: string | null; +}; + +function normalizeTelegramNetworkMethod(method?: string | null): string | null { + const trimmed = method?.trim(); + if (!trimmed) { + return null; + } + return trimmed.toLowerCase(); +} + +export function tagTelegramNetworkError(err: unknown, origin: TelegramNetworkErrorOrigin): void { + if (!err || typeof err !== "object") { + return; + } + Object.defineProperty(err, TELEGRAM_NETWORK_ORIGIN, { + value: { + method: normalizeTelegramNetworkMethod(origin.method), + url: typeof origin.url === "string" && origin.url.trim() ? origin.url : null, + } satisfies TelegramNetworkErrorOrigin, + configurable: true, + }); +} + +export function getTelegramNetworkErrorOrigin(err: unknown): TelegramNetworkErrorOrigin | null { + for (const candidate of collectTelegramErrorCandidates(err)) { + if (!candidate || typeof candidate !== "object") { + continue; + } + const origin = (candidate as Record)[TELEGRAM_NETWORK_ORIGIN]; + if (!origin || typeof origin !== "object") { + continue; + } + const method = "method" in origin && typeof origin.method === "string" ? origin.method : null; + const url = "url" in origin && typeof origin.url === "string" ? 
origin.url : null; + return { method, url }; + } + return null; +} + +export function isTelegramPollingNetworkError(err: unknown): boolean { + return getTelegramNetworkErrorOrigin(err)?.method === "getupdates"; +} /** * Returns true if the error is safe to retry for a non-idempotent Telegram send operation @@ -123,6 +170,29 @@ export function isSafeToRetrySendError(err: unknown): boolean { return false; } +function hasTelegramErrorCode(err: unknown, matches: (code: number) => boolean): boolean { + for (const candidate of collectTelegramErrorCandidates(err)) { + if (!candidate || typeof candidate !== "object" || !("error_code" in candidate)) { + continue; + } + const code = (candidate as { error_code: unknown }).error_code; + if (typeof code === "number" && matches(code)) { + return true; + } + } + return false; +} + +/** Returns true for HTTP 5xx server errors (error may have been processed). */ +export function isTelegramServerError(err: unknown): boolean { + return hasTelegramErrorCode(err, (code) => code >= 500); +} + +/** Returns true for HTTP 4xx client errors (Telegram explicitly rejected, not applied). 
*/ +export function isTelegramClientRejection(err: unknown): boolean { + return hasTelegramErrorCode(err, (code) => code >= 400 && code < 500); +} + export function isRecoverableTelegramNetworkError( err: unknown, options: { context?: TelegramNetworkErrorContext; allowMessageMatch?: boolean } = {}, diff --git a/src/telegram/polling-session.ts b/src/telegram/polling-session.ts index 784c8b2d759..3a78747e41f 100644 --- a/src/telegram/polling-session.ts +++ b/src/telegram/polling-session.ts @@ -15,6 +15,24 @@ const TELEGRAM_POLL_RESTART_POLICY = { const POLL_STALL_THRESHOLD_MS = 90_000; const POLL_WATCHDOG_INTERVAL_MS = 30_000; +const POLL_STOP_GRACE_MS = 15_000; + +const waitForGracefulStop = async (stop: () => Promise) => { + let timer: ReturnType | undefined; + try { + await Promise.race([ + stop(), + new Promise((resolve) => { + timer = setTimeout(resolve, POLL_STOP_GRACE_MS); + timer.unref?.(); + }), + ]); + } finally { + if (timer) { + clearTimeout(timer); + } + } +}; type TelegramBot = ReturnType; @@ -176,6 +194,11 @@ export class TelegramPollingSession { const fetchAbortController = this.#activeFetchAbort; let stopPromise: Promise | undefined; let stalledRestart = false; + let forceCycleTimer: ReturnType | undefined; + let forceCycleResolve: (() => void) | undefined; + const forceCyclePromise = new Promise((resolve) => { + forceCycleResolve = resolve; + }); const stopRunner = () => { fetchAbortController?.abort(); stopPromise ??= Promise.resolve(runner.stop()) @@ -209,12 +232,24 @@ export class TelegramPollingSession { `[telegram] Polling stall detected (no getUpdates for ${formatDurationPrecise(elapsed)}); forcing restart.`, ); void stopRunner(); + void stopBot(); + if (!forceCycleTimer) { + forceCycleTimer = setTimeout(() => { + if (this.opts.abortSignal?.aborted) { + return; + } + this.opts.log( + `[telegram] Polling runner stop timed out after ${formatDurationPrecise(POLL_STOP_GRACE_MS)}; forcing restart cycle.`, + ); + forceCycleResolve?.(); + }, 
POLL_STOP_GRACE_MS); + } } }, POLL_WATCHDOG_INTERVAL_MS); this.opts.abortSignal?.addEventListener("abort", stopOnAbort, { once: true }); try { - await runner.task(); + await Promise.race([runner.task(), forceCyclePromise]); if (this.opts.abortSignal?.aborted) { return "exit"; } @@ -249,9 +284,12 @@ export class TelegramPollingSession { return shouldRestart ? "continue" : "exit"; } finally { clearInterval(watchdog); + if (forceCycleTimer) { + clearTimeout(forceCycleTimer); + } this.opts.abortSignal?.removeEventListener("abort", stopOnAbort); - await stopRunner(); - await stopBot(); + await waitForGracefulStop(stopRunner); + await waitForGracefulStop(stopBot); this.#activeRunner = undefined; if (this.#activeFetchAbort === fetchAbortController) { this.#activeFetchAbort = undefined; diff --git a/src/telegram/send.test.ts b/src/telegram/send.test.ts index a00d1b2e89e..2bd6556ee42 100644 --- a/src/telegram/send.test.ts +++ b/src/telegram/send.test.ts @@ -1734,6 +1734,22 @@ describe("editMessageTelegram", () => { expect(botApi.editMessageText).toHaveBeenCalledTimes(1); }); + it("retries editMessageTelegram on Telegram 5xx errors", async () => { + botApi.editMessageText + .mockRejectedValueOnce(Object.assign(new Error("502: Bad Gateway"), { error_code: 502 })) + .mockResolvedValueOnce({ message_id: 1, chat: { id: "123" } }); + + await expect( + editMessageTelegram("123", 1, "hi", { + token: "tok", + cfg: {}, + retry: { attempts: 2, minDelayMs: 0, maxDelayMs: 0, jitter: 0 }, + }), + ).resolves.toEqual({ ok: true, messageId: "1", chatId: "123" }); + + expect(botApi.editMessageText).toHaveBeenCalledTimes(2); + }); + it("disables link previews when linkPreview is false", async () => { botApi.editMessageText.mockResolvedValue({ message_id: 1, chat: { id: "123" } }); diff --git a/src/telegram/send.ts b/src/telegram/send.ts index 44e18ee2340..5261887779f 100644 --- a/src/telegram/send.ts +++ b/src/telegram/send.ts @@ -27,7 +27,11 @@ import type { TelegramInlineButtons } from 
"./button-types.js"; import { splitTelegramCaption } from "./caption.js"; import { resolveTelegramFetch } from "./fetch.js"; import { renderTelegramHtmlText, splitTelegramHtmlChunks } from "./format.js"; -import { isRecoverableTelegramNetworkError, isSafeToRetrySendError } from "./network-errors.js"; +import { + isRecoverableTelegramNetworkError, + isSafeToRetrySendError, + isTelegramServerError, +} from "./network-errors.js"; import { makeProxyFetch } from "./proxy.js"; import { recordSentMessage } from "./sent-message-cache.js"; import { maybePersistResolvedTelegramTarget } from "./target-writeback.js"; @@ -1150,6 +1154,9 @@ export async function editMessageTelegram( account, retry: opts.retry, verbose: opts.verbose, + shouldRetry: (err) => + isRecoverableTelegramNetworkError(err, { allowMessageMatch: true }) || + isTelegramServerError(err), }); const requestWithEditShouldLog = ( fn: () => Promise, diff --git a/src/terminal/ansi.test.ts b/src/terminal/ansi.test.ts index 30ae4c82eb3..3970868d3f8 100644 --- a/src/terminal/ansi.test.ts +++ b/src/terminal/ansi.test.ts @@ -1,5 +1,5 @@ import { describe, expect, it } from "vitest"; -import { sanitizeForLog, stripAnsi } from "./ansi.js"; +import { sanitizeForLog, splitGraphemes, stripAnsi, visibleWidth } from "./ansi.js"; describe("terminal ansi helpers", () => { it("strips ANSI and OSC8 sequences", () => { @@ -11,4 +11,16 @@ describe("terminal ansi helpers", () => { const input = "\u001B[31mwarn\u001B[0m\r\nnext\u0000line\u007f"; expect(sanitizeForLog(input)).toBe("warnnextline"); }); + + it("measures wide graphemes by terminal cell width", () => { + expect(visibleWidth("abc")).toBe(3); + expect(visibleWidth("📸 skill")).toBe(8); + expect(visibleWidth("表")).toBe(2); + expect(visibleWidth("\u001B[31m📸\u001B[0m")).toBe(2); + }); + + it("keeps emoji zwj sequences as single graphemes", () => { + expect(splitGraphemes("👨‍👩‍👧‍👦")).toEqual(["👨‍👩‍👧‍👦"]); + expect(visibleWidth("👨‍👩‍👧‍👦")).toBe(2); + }); }); diff --git 
a/src/terminal/ansi.ts b/src/terminal/ansi.ts index d9adaa38633..471611fcc2e 100644 --- a/src/terminal/ansi.ts +++ b/src/terminal/ansi.ts @@ -4,11 +4,29 @@ const OSC8_PATTERN = "\\x1b\\]8;;.*?\\x1b\\\\|\\x1b\\]8;;\\x1b\\\\"; const ANSI_REGEX = new RegExp(ANSI_SGR_PATTERN, "g"); const OSC8_REGEX = new RegExp(OSC8_PATTERN, "g"); +const graphemeSegmenter = + typeof Intl !== "undefined" && "Segmenter" in Intl + ? new Intl.Segmenter(undefined, { granularity: "grapheme" }) + : null; export function stripAnsi(input: string): string { return input.replace(OSC8_REGEX, "").replace(ANSI_REGEX, ""); } +export function splitGraphemes(input: string): string[] { + if (!input) { + return []; + } + if (!graphemeSegmenter) { + return Array.from(input); + } + try { + return Array.from(graphemeSegmenter.segment(input), (segment) => segment.segment); + } catch { + return Array.from(input); + } +} + /** * Sanitize a value for safe interpolation into log messages. * Strips ANSI escape sequences, C0 control characters (U+0000–U+001F), @@ -22,6 +40,75 @@ export function sanitizeForLog(v: string): string { return out.replaceAll(String.fromCharCode(0x7f), ""); } -export function visibleWidth(input: string): number { - return Array.from(stripAnsi(input)).length; +function isZeroWidthCodePoint(codePoint: number): boolean { + return ( + (codePoint >= 0x0300 && codePoint <= 0x036f) || + (codePoint >= 0x1ab0 && codePoint <= 0x1aff) || + (codePoint >= 0x1dc0 && codePoint <= 0x1dff) || + (codePoint >= 0x20d0 && codePoint <= 0x20ff) || + (codePoint >= 0xfe20 && codePoint <= 0xfe2f) || + (codePoint >= 0xfe00 && codePoint <= 0xfe0f) || + codePoint === 0x200d + ); +} + +function isFullWidthCodePoint(codePoint: number): boolean { + if (codePoint < 0x1100) { + return false; + } + return ( + codePoint <= 0x115f || + codePoint === 0x2329 || + codePoint === 0x232a || + (codePoint >= 0x2e80 && codePoint <= 0x3247 && codePoint !== 0x303f) || + (codePoint >= 0x3250 && codePoint <= 0x4dbf) || + (codePoint >= 
0x4e00 && codePoint <= 0xa4c6) || + (codePoint >= 0xa960 && codePoint <= 0xa97c) || + (codePoint >= 0xac00 && codePoint <= 0xd7a3) || + (codePoint >= 0xf900 && codePoint <= 0xfaff) || + (codePoint >= 0xfe10 && codePoint <= 0xfe19) || + (codePoint >= 0xfe30 && codePoint <= 0xfe6b) || + (codePoint >= 0xff01 && codePoint <= 0xff60) || + (codePoint >= 0xffe0 && codePoint <= 0xffe6) || + (codePoint >= 0x1aff0 && codePoint <= 0x1aff3) || + (codePoint >= 0x1aff5 && codePoint <= 0x1affb) || + (codePoint >= 0x1affd && codePoint <= 0x1affe) || + (codePoint >= 0x1b000 && codePoint <= 0x1b2ff) || + (codePoint >= 0x1f200 && codePoint <= 0x1f251) || + (codePoint >= 0x20000 && codePoint <= 0x3fffd) + ); +} + +const emojiLikePattern = /[\p{Extended_Pictographic}\p{Regional_Indicator}\u20e3]/u; + +function graphemeWidth(grapheme: string): number { + if (!grapheme) { + return 0; + } + if (emojiLikePattern.test(grapheme)) { + return 2; + } + + let sawPrintable = false; + for (const char of grapheme) { + const codePoint = char.codePointAt(0); + if (codePoint == null) { + continue; + } + if (isZeroWidthCodePoint(codePoint)) { + continue; + } + if (isFullWidthCodePoint(codePoint)) { + return 2; + } + sawPrintable = true; + } + return sawPrintable ? 
1 : 0; +} + +export function visibleWidth(input: string): number { + return splitGraphemes(stripAnsi(input)).reduce( + (sum, grapheme) => sum + graphemeWidth(grapheme), + 0, + ); } diff --git a/src/terminal/table.test.ts b/src/terminal/table.test.ts index bb6f2082fe3..bad2fe48cf2 100644 --- a/src/terminal/table.test.ts +++ b/src/terminal/table.test.ts @@ -1,9 +1,18 @@ -import { describe, expect, it } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { visibleWidth } from "./ansi.js"; import { wrapNoteMessage } from "./note.js"; import { renderTable } from "./table.js"; describe("renderTable", () => { + const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + + afterEach(() => { + vi.unstubAllEnvs(); + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", originalPlatformDescriptor); + } + }); + it("prefers shrinking flex columns to avoid wrapping non-flex labels", () => { const out = renderTable({ width: 40, @@ -83,6 +92,38 @@ describe("renderTable", () => { } }); + it("trims leading spaces on wrapped ANSI-colored continuation lines", () => { + const out = renderTable({ + width: 113, + columns: [ + { key: "Status", header: "Status", minWidth: 10 }, + { key: "Skill", header: "Skill", minWidth: 18, flex: true }, + { key: "Description", header: "Description", minWidth: 24, flex: true }, + { key: "Source", header: "Source", minWidth: 10 }, + ], + rows: [ + { + Status: "✓ ready", + Skill: "🌤️ weather", + Description: + `\x1b[2mGet current weather and forecasts via wttr.in or Open-Meteo. 
` + + `Use when: user asks about weather, temperature, or forecasts for any location.` + + `\x1b[0m`, + Source: "openclaw-bundled", + }, + ], + }); + + const lines = out + .trimEnd() + .split("\n") + .filter((line) => line.includes("Use when")); + expect(lines).toHaveLength(1); + expect(lines[0]).toContain("\u001b[2mUse when"); + expect(lines[0]).not.toContain("│ Use when"); + expect(lines[0]).not.toContain("│ \x1b[2m Use when"); + }); + it("respects explicit newlines in cell values", () => { const out = renderTable({ width: 48, @@ -99,6 +140,81 @@ describe("renderTable", () => { expect(line1Index).toBeGreaterThan(-1); expect(line2Index).toBe(line1Index + 1); }); + + it("keeps table borders aligned when cells contain wide emoji graphemes", () => { + const width = 72; + const out = renderTable({ + width, + columns: [ + { key: "Status", header: "Status", minWidth: 10 }, + { key: "Skill", header: "Skill", minWidth: 18 }, + { key: "Description", header: "Description", minWidth: 18, flex: true }, + { key: "Source", header: "Source", minWidth: 10 }, + ], + rows: [ + { + Status: "✗ missing", + Skill: "📸 peekaboo", + Description: "Capture screenshots from macOS windows and keep table wrapping stable.", + Source: "openclaw-bundled", + }, + ], + }); + + for (const line of out.trimEnd().split("\n")) { + expect(visibleWidth(line)).toBe(width); + } + }); + + it("consumes unsupported escape sequences without hanging", () => { + const out = renderTable({ + width: 48, + columns: [ + { key: "K", header: "K", minWidth: 6 }, + { key: "V", header: "V", minWidth: 12, flex: true }, + ], + rows: [{ K: "row", V: "before \x1b[2J after" }], + }); + + expect(out).toContain("before"); + expect(out).toContain("after"); + }); + + it("falls back to ASCII borders on legacy Windows consoles", () => { + Object.defineProperty(process, "platform", { value: "win32", configurable: true }); + vi.stubEnv("WT_SESSION", ""); + vi.stubEnv("TERM_PROGRAM", ""); + vi.stubEnv("TERM", "vt100"); + + const out = 
renderTable({ + columns: [ + { key: "A", header: "A", minWidth: 6 }, + { key: "B", header: "B", minWidth: 10, flex: true }, + ], + rows: [{ A: "row", B: "value" }], + }); + + expect(out).toContain("+"); + expect(out).not.toContain("┌"); + }); + + it("keeps unicode borders on modern Windows terminals", () => { + Object.defineProperty(process, "platform", { value: "win32", configurable: true }); + vi.stubEnv("WT_SESSION", "1"); + vi.stubEnv("TERM", ""); + vi.stubEnv("TERM_PROGRAM", ""); + + const out = renderTable({ + columns: [ + { key: "A", header: "A", minWidth: 6 }, + { key: "B", header: "B", minWidth: 10, flex: true }, + ], + rows: [{ A: "row", B: "value" }], + }); + + expect(out).toContain("┌"); + expect(out).not.toContain("+"); + }); }); describe("wrapNoteMessage", () => { diff --git a/src/terminal/table.ts b/src/terminal/table.ts index 34d7b15dd05..7c55ba7f2dd 100644 --- a/src/terminal/table.ts +++ b/src/terminal/table.ts @@ -1,5 +1,5 @@ import { displayString } from "../utils.js"; -import { visibleWidth } from "./ansi.js"; +import { splitGraphemes, visibleWidth } from "./ansi.js"; type Align = "left" | "right" | "center"; @@ -20,6 +20,26 @@ export type RenderTableOptions = { border?: "unicode" | "ascii" | "none"; }; +function resolveDefaultBorder( + platform: NodeJS.Platform, + env: NodeJS.ProcessEnv, +): "unicode" | "ascii" { + if (platform !== "win32") { + return "unicode"; + } + + const term = env.TERM ?? ""; + const termProgram = env.TERM_PROGRAM ?? ""; + const isModernTerminal = + Boolean(env.WT_SESSION) || + term.includes("xterm") || + term.includes("cygwin") || + term.includes("msys") || + termProgram === "vscode"; + + return isModernTerminal ? 
"unicode" : "ascii"; +} + function repeat(ch: string, n: number): string { if (n <= 0) { return ""; @@ -94,13 +114,22 @@ function wrapLine(text: string, width: number): string[] { } } - const cp = text.codePointAt(i); - if (!cp) { - break; + let nextEsc = text.indexOf(ESC, i); + if (nextEsc < 0) { + nextEsc = text.length; } - const ch = String.fromCodePoint(cp); - tokens.push({ kind: "char", value: ch }); - i += ch.length; + if (nextEsc === i) { + // Consume unsupported escape bytes as plain characters so wrapping + // cannot stall on unknown ANSI/control sequences. + tokens.push({ kind: "char", value: ESC }); + i += ESC.length; + continue; + } + const plainChunk = text.slice(i, nextEsc); + for (const grapheme of splitGraphemes(plainChunk)) { + tokens.push({ kind: "char", value: grapheme }); + } + i = nextEsc; } const firstCharIndex = tokens.findIndex((t) => t.kind === "char"); @@ -139,7 +168,7 @@ function wrapLine(text: string, width: number): string[] { const bufToString = (slice?: Token[]) => (slice ?? buf).map((t) => t.value).join(""); const bufVisibleWidth = (slice: Token[]) => - slice.reduce((acc, t) => acc + (t.kind === "char" ? 1 : 0), 0); + slice.reduce((acc, t) => acc + (t.kind === "char" ? 
visibleWidth(t.value) : 0), 0); const pushLine = (value: string) => { const cleaned = value.replace(/\s+$/, ""); @@ -149,6 +178,20 @@ function wrapLine(text: string, width: number): string[] { lines.push(cleaned); }; + const trimLeadingSpaces = (tokens: Token[]) => { + while (true) { + const firstCharIndex = tokens.findIndex((token) => token.kind === "char"); + if (firstCharIndex < 0) { + return; + } + const firstChar = tokens[firstCharIndex]; + if (!firstChar || !isSpaceChar(firstChar.value)) { + return; + } + tokens.splice(firstCharIndex, 1); + } + }; + const flushAt = (breakAt: number | null) => { if (buf.length === 0) { return; @@ -164,10 +207,7 @@ function wrapLine(text: string, width: number): string[] { const left = buf.slice(0, breakAt); const rest = buf.slice(breakAt); pushLine(bufToString(left)); - - while (rest.length > 0 && rest[0]?.kind === "char" && isSpaceChar(rest[0].value)) { - rest.shift(); - } + trimLeadingSpaces(rest); buf.length = 0; buf.push(...rest); @@ -195,12 +235,16 @@ function wrapLine(text: string, width: number): string[] { } continue; } - if (bufVisible + 1 > width && bufVisible > 0) { + const charWidth = visibleWidth(ch); + if (bufVisible + charWidth > width && bufVisible > 0) { flushAt(lastBreakIndex); } + if (bufVisible === 0 && isSpaceChar(ch)) { + continue; + } buf.push(token); - bufVisible += 1; + bufVisible += charWidth; if (isBreakChar(ch)) { lastBreakIndex = buf.length; } @@ -231,6 +275,10 @@ function normalizeWidth(n: number | undefined): number | undefined { return Math.floor(n); } +export function getTerminalTableWidth(minWidth = 60, fallbackWidth = 120): number { + return Math.max(minWidth, process.stdout.columns ?? fallbackWidth); +} + export function renderTable(opts: RenderTableOptions): string { const rows = opts.rows.map((row) => { const next: Record = {}; @@ -239,7 +287,7 @@ export function renderTable(opts: RenderTableOptions): string { } return next; }); - const border = opts.border ?? 
"unicode"; + const border = opts.border ?? resolveDefaultBorder(process.platform, process.env); if (border === "none") { const columns = opts.columns; const header = columns.map((c) => c.header).join(" | "); diff --git a/src/tts/tts-core.ts b/src/tts/tts-core.ts index 08f80c3d60c..279fc3cc1ed 100644 --- a/src/tts/tts-core.ts +++ b/src/tts/tts-core.ts @@ -43,6 +43,11 @@ function normalizeOpenAITtsBaseUrl(baseUrl?: string): string { return trimmed.replace(/\/+$/, ""); } +function trimToUndefined(value?: string): string | undefined { + const trimmed = value?.trim(); + return trimmed ? trimmed : undefined; +} + function requireInRange(value: number, min: number, max: number, label: string): void { if (!Number.isFinite(value) || value < min || value > max) { throw new Error(`${label} must be between ${min} and ${max}`); @@ -383,6 +388,14 @@ export function isValidOpenAIModel(model: string, baseUrl?: string): boolean { return OPENAI_TTS_MODELS.includes(model as (typeof OPENAI_TTS_MODELS)[number]); } +export function resolveOpenAITtsInstructions( + model: string, + instructions?: string, +): string | undefined { + const next = trimToUndefined(instructions); + return next && model.includes("gpt-4o-mini-tts") ? 
next : undefined; +} + export function isValidOpenAIVoice(voice: string, baseUrl?: string): voice is OpenAiTtsVoice { // Allow any voice when using custom endpoint (e.g., Kokoro Chinese voices) if (isCustomOpenAIEndpoint(baseUrl)) { @@ -619,10 +632,14 @@ export async function openaiTTS(params: { baseUrl: string; model: string; voice: string; + speed?: number; + instructions?: string; responseFormat: "mp3" | "opus" | "pcm"; timeoutMs: number; }): Promise { - const { text, apiKey, baseUrl, model, voice, responseFormat, timeoutMs } = params; + const { text, apiKey, baseUrl, model, voice, speed, instructions, responseFormat, timeoutMs } = + params; + const effectiveInstructions = resolveOpenAITtsInstructions(model, instructions); if (!isValidOpenAIModel(model, baseUrl)) { throw new Error(`Invalid model: ${model}`); @@ -646,6 +663,8 @@ export async function openaiTTS(params: { input: text, voice, response_format: responseFormat, + ...(speed != null && { speed }), + ...(effectiveInstructions != null && { instructions: effectiveInstructions }), }), signal: controller.signal, }); diff --git a/src/tts/tts.test.ts b/src/tts/tts.test.ts index f3b5d8ce0ee..d11190a21d4 100644 --- a/src/tts/tts.test.ts +++ b/src/tts/tts.test.ts @@ -7,15 +7,16 @@ import type { OpenClawConfig } from "../config/config.js"; import { withEnv } from "../test-utils/env.js"; import * as tts from "./tts.js"; -vi.mock("@mariozechner/pi-ai", () => ({ - completeSimple: vi.fn(), -})); - -vi.mock("@mariozechner/pi-ai/oauth", () => ({ - // Some auth helpers import oauth provider metadata at module load time. - getOAuthProviders: () => [], - getOAuthApiKey: vi.fn(async () => null), -})); +vi.mock("@mariozechner/pi-ai", async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + completeSimple: vi.fn(), + // Some auth helpers import oauth provider metadata at module load time. 
+ getOAuthProviders: () => [], + getOAuthApiKey: vi.fn(async () => null), + }; +}); vi.mock("../agents/pi-embedded-runner/model.js", () => ({ resolveModel: vi.fn((provider: string, modelId: string) => ({ @@ -57,6 +58,7 @@ const { OPENAI_TTS_MODELS, OPENAI_TTS_VOICES, parseTtsDirectives, + resolveOpenAITtsInstructions, resolveModelOverridePolicy, summarizeText, resolveOutputFormat, @@ -169,6 +171,20 @@ describe("tts", () => { }); }); + describe("resolveOpenAITtsInstructions", () => { + it("keeps instructions only for gpt-4o-mini-tts variants", () => { + expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts", " Speak warmly ")).toBe( + "Speak warmly", + ); + expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts-2025-12-15", "Speak warmly")).toBe( + "Speak warmly", + ); + expect(resolveOpenAITtsInstructions("tts-1", "Speak warmly")).toBeUndefined(); + expect(resolveOpenAITtsInstructions("tts-1-hd", "Speak warmly")).toBeUndefined(); + expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts", " ")).toBeUndefined(); + }); + }); + describe("resolveOutputFormat", () => { it("selects opus for voice-bubble channels (telegram/feishu/whatsapp) and mp3 for others", () => { const cases = [ @@ -557,6 +573,84 @@ describe("tts", () => { }); }); + describe("textToSpeechTelephony – openai instructions", () => { + const withMockedTelephonyFetch = async ( + run: (fetchMock: ReturnType) => Promise, + ) => { + const originalFetch = globalThis.fetch; + const fetchMock = vi.fn(async () => ({ + ok: true, + arrayBuffer: async () => new ArrayBuffer(2), + })); + globalThis.fetch = fetchMock as unknown as typeof fetch; + try { + await run(fetchMock); + } finally { + globalThis.fetch = originalFetch; + } + }; + + it("omits instructions for unsupported speech models", async () => { + const cfg: OpenClawConfig = { + messages: { + tts: { + provider: "openai", + openai: { + apiKey: "test-key", + model: "tts-1", + voice: "alloy", + instructions: "Speak warmly", + }, + }, + }, + }; + + await 
withMockedTelephonyFetch(async (fetchMock) => { + const result = await tts.textToSpeechTelephony({ + text: "Hello there, friendly caller.", + cfg, + }); + + expect(result.success).toBe(true); + expect(fetchMock).toHaveBeenCalledTimes(1); + const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]; + expect(typeof init.body).toBe("string"); + const body = JSON.parse(init.body as string) as Record; + expect(body.instructions).toBeUndefined(); + }); + }); + + it("includes instructions for gpt-4o-mini-tts", async () => { + const cfg: OpenClawConfig = { + messages: { + tts: { + provider: "openai", + openai: { + apiKey: "test-key", + model: "gpt-4o-mini-tts", + voice: "alloy", + instructions: "Speak warmly", + }, + }, + }, + }; + + await withMockedTelephonyFetch(async (fetchMock) => { + const result = await tts.textToSpeechTelephony({ + text: "Hello there, friendly caller.", + cfg, + }); + + expect(result.success).toBe(true); + expect(fetchMock).toHaveBeenCalledTimes(1); + const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]; + expect(typeof init.body).toBe("string"); + const body = JSON.parse(init.body as string) as Record; + expect(body.instructions).toBe("Speak warmly"); + }); + }); + }); + describe("maybeApplyTtsToPayload", () => { const baseCfg: OpenClawConfig = { agents: { defaults: { model: { primary: "openai/gpt-4o-mini" } } }, diff --git a/src/tts/tts.ts b/src/tts/tts.ts index f76000029f6..5cd306f13a9 100644 --- a/src/tts/tts.ts +++ b/src/tts/tts.ts @@ -37,6 +37,7 @@ import { isValidVoiceId, OPENAI_TTS_MODELS, OPENAI_TTS_VOICES, + resolveOpenAITtsInstructions, openaiTTS, parseTtsDirectives, scheduleCleanup, @@ -117,6 +118,8 @@ export type ResolvedTtsConfig = { baseUrl: string; model: string; voice: string; + speed?: number; + instructions?: string; }; edge: { enabled: boolean; @@ -304,6 +307,8 @@ export function resolveTtsConfig(cfg: OpenClawConfig): ResolvedTtsConfig { ).replace(/\/+$/, ""), model: raw.openai?.model ?? 
DEFAULT_OPENAI_MODEL, voice: raw.openai?.voice ?? DEFAULT_OPENAI_VOICE, + speed: raw.openai?.speed, + instructions: raw.openai?.instructions?.trim() || undefined, }, edge: { enabled: raw.edge?.enabled ?? true, @@ -692,6 +697,8 @@ export async function textToSpeech(params: { baseUrl: config.openai.baseUrl, model: openaiModelOverride ?? config.openai.model, voice: openaiVoiceOverride ?? config.openai.voice, + speed: config.openai.speed, + instructions: config.openai.instructions, responseFormat: output.openai, timeoutMs: config.timeoutMs, }); @@ -789,6 +796,8 @@ export async function textToSpeechTelephony(params: { baseUrl: config.openai.baseUrl, model: config.openai.model, voice: config.openai.voice, + speed: config.openai.speed, + instructions: config.openai.instructions, responseFormat: output.format, timeoutMs: config.timeoutMs, }); @@ -961,6 +970,7 @@ export const _test = { isValidOpenAIModel, OPENAI_TTS_MODELS, OPENAI_TTS_VOICES, + resolveOpenAITtsInstructions, parseTtsDirectives, resolveModelOverridePolicy, summarizeText, diff --git a/src/tui/components/chat-log.test.ts b/src/tui/components/chat-log.test.ts index 02607568b1d..b81740a2e8c 100644 --- a/src/tui/components/chat-log.test.ts +++ b/src/tui/components/chat-log.test.ts @@ -29,6 +29,17 @@ describe("ChatLog", () => { expect(rendered).toContain("recreated"); }); + it("does not append duplicate assistant components when a run is started twice", () => { + const chatLog = new ChatLog(40); + chatLog.startAssistant("first", "run-dup"); + chatLog.startAssistant("second", "run-dup"); + + const rendered = chatLog.render(120).join("\n"); + expect(rendered).toContain("second"); + expect(rendered).not.toContain("first"); + expect(chatLog.children.length).toBe(1); + }); + it("drops stale tool references when old components are pruned", () => { const chatLog = new ChatLog(20); chatLog.startTool("tool-1", "read_file", { path: "a.txt" }); diff --git a/src/tui/components/chat-log.ts b/src/tui/components/chat-log.ts index 
4ddf1d5b1de..76ac7d93654 100644 --- a/src/tui/components/chat-log.ts +++ b/src/tui/components/chat-log.ts @@ -65,8 +65,14 @@ export class ChatLog extends Container { } startAssistant(text: string, runId?: string) { + const effectiveRunId = this.resolveRunId(runId); + const existing = this.streamingRuns.get(effectiveRunId); + if (existing) { + existing.setText(text); + return existing; + } const component = new AssistantMessageComponent(text); - this.streamingRuns.set(this.resolveRunId(runId), component); + this.streamingRuns.set(effectiveRunId, component); this.append(component); return component; } diff --git a/src/tui/tui-status-summary.ts b/src/tui/tui-status-summary.ts index 64fc00adad6..dcbcd00329d 100644 --- a/src/tui/tui-status-summary.ts +++ b/src/tui/tui-status-summary.ts @@ -6,6 +6,9 @@ import type { GatewayStatusSummary } from "./tui-types.js"; export function formatStatusSummary(summary: GatewayStatusSummary) { const lines: string[] = []; lines.push("Gateway status"); + if (summary.runtimeVersion) { + lines.push(`Version: ${summary.runtimeVersion}`); + } if (!summary.linkChannel) { lines.push("Link channel: unknown"); diff --git a/src/tui/tui-types.ts b/src/tui/tui-types.ts index 087d7958950..e0af351d462 100644 --- a/src/tui/tui-types.ts +++ b/src/tui/tui-types.ts @@ -49,6 +49,7 @@ export type AgentSummary = { }; export type GatewayStatusSummary = { + runtimeVersion?: string | null; linkChannel?: { id?: string; label?: string; diff --git a/src/web/outbound.test.ts b/src/web/outbound.test.ts index e494392d750..506d7816630 100644 --- a/src/web/outbound.test.ts +++ b/src/web/outbound.test.ts @@ -48,6 +48,34 @@ describe("web outbound", () => { expect(sendMessage).toHaveBeenCalledWith("+1555", "hi", undefined, undefined); }); + it("trims leading whitespace before sending text and captions", async () => { + await sendMessageWhatsApp("+1555", "\n \thello", { verbose: false }); + expect(sendMessage).toHaveBeenLastCalledWith("+1555", "hello", undefined, 
undefined); + + const buf = Buffer.from("img"); + loadWebMediaMock.mockResolvedValueOnce({ + buffer: buf, + contentType: "image/jpeg", + kind: "image", + }); + await sendMessageWhatsApp("+1555", "\n \tcaption", { + verbose: false, + mediaUrl: "/tmp/pic.jpg", + }); + expect(sendMessage).toHaveBeenLastCalledWith("+1555", "caption", buf, "image/jpeg"); + }); + + it("skips whitespace-only text sends without media", async () => { + const result = await sendMessageWhatsApp("+1555", "\n \t", { verbose: false }); + + expect(result).toEqual({ + messageId: "", + toJid: "1555@s.whatsapp.net", + }); + expect(sendComposingTo).not.toHaveBeenCalled(); + expect(sendMessage).not.toHaveBeenCalled(); + }); + it("throws a helpful error when no active listener exists", async () => { setActiveWebListener(null); await expect( diff --git a/src/web/outbound.ts b/src/web/outbound.ts index 43136c6f779..1fcaa807c37 100644 --- a/src/web/outbound.ts +++ b/src/web/outbound.ts @@ -26,7 +26,11 @@ export async function sendMessageWhatsApp( accountId?: string; }, ): Promise<{ messageId: string; toJid: string }> { - let text = body; + let text = body.trimStart(); + const jid = toWhatsappJid(to); + if (!text && !options.mediaUrl) { + return { messageId: "", toJid: jid }; + } const correlationId = generateSecureUuid(); const startedAt = Date.now(); const { listener: active, accountId: resolvedAccountId } = requireActiveWebListener( @@ -51,7 +55,6 @@ export async function sendMessageWhatsApp( to: redactedTo, }); try { - const jid = toWhatsappJid(to); const redactedJid = redactIdentifier(jid); let mediaBuffer: Buffer | undefined; let mediaType: string | undefined; diff --git a/src/wizard/onboarding.finalize.test.ts b/src/wizard/onboarding.finalize.test.ts index 314d22d8ca3..0fa67d16a8f 100644 --- a/src/wizard/onboarding.finalize.test.ts +++ b/src/wizard/onboarding.finalize.test.ts @@ -13,6 +13,13 @@ const buildGatewayInstallPlan = vi.hoisted(() => })), ); const gatewayServiceInstall = vi.hoisted(() => 
vi.fn(async () => {})); +const gatewayServiceRestart = vi.hoisted(() => + vi.fn<() => Promise<{ outcome: "completed" } | { outcome: "scheduled" }>>(async () => ({ + outcome: "completed", + })), +); +const gatewayServiceUninstall = vi.hoisted(() => vi.fn(async () => {})); +const gatewayServiceIsLoaded = vi.hoisted(() => vi.fn(async () => false)); const resolveGatewayInstallToken = vi.hoisted(() => vi.fn(async () => ({ token: undefined, @@ -56,14 +63,18 @@ vi.mock("../commands/health.js", () => ({ healthCommand: vi.fn(async () => {}), })); -vi.mock("../daemon/service.js", () => ({ - resolveGatewayService: vi.fn(() => ({ - isLoaded: vi.fn(async () => false), - restart: vi.fn(async () => {}), - uninstall: vi.fn(async () => {}), - install: gatewayServiceInstall, - })), -})); +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: vi.fn(() => ({ + isLoaded: gatewayServiceIsLoaded, + restart: gatewayServiceRestart, + uninstall: gatewayServiceUninstall, + install: gatewayServiceInstall, + })), + }; +}); vi.mock("../daemon/systemd.js", async (importOriginal) => { const actual = await importOriginal(); @@ -113,6 +124,11 @@ describe("finalizeOnboardingWizard", () => { setupOnboardingShellCompletion.mockClear(); buildGatewayInstallPlan.mockClear(); gatewayServiceInstall.mockClear(); + gatewayServiceIsLoaded.mockReset(); + gatewayServiceIsLoaded.mockResolvedValue(false); + gatewayServiceRestart.mockReset(); + gatewayServiceRestart.mockResolvedValue({ outcome: "completed" }); + gatewayServiceUninstall.mockReset(); resolveGatewayInstallToken.mockClear(); isSystemdUserServiceAvailable.mockReset(); isSystemdUserServiceAvailable.mockResolvedValue(true); @@ -244,4 +260,51 @@ describe("finalizeOnboardingWizard", () => { expectFirstOnboardingInstallPlanCallOmitsToken(); expect(gatewayServiceInstall).toHaveBeenCalledTimes(1); }); + + it("stops after a scheduled restart instead of reinstalling 
the service", async () => { + const progressUpdate = vi.fn(); + const progressStop = vi.fn(); + gatewayServiceIsLoaded.mockResolvedValue(true); + gatewayServiceRestart.mockResolvedValueOnce({ outcome: "scheduled" }); + const prompter = buildWizardPrompter({ + select: vi.fn(async (params: { message: string }) => { + if (params.message === "Gateway service already installed") { + return "restart"; + } + return "later"; + }) as never, + confirm: vi.fn(async () => false), + progress: vi.fn(() => ({ update: progressUpdate, stop: progressStop })), + }); + + await finalizeOnboardingWizard({ + flow: "advanced", + opts: { + acceptRisk: true, + authChoice: "skip", + installDaemon: true, + skipHealth: true, + skipUi: true, + }, + baseConfig: {}, + nextConfig: {}, + workspaceDir: "/tmp", + settings: { + port: 18789, + bind: "loopback", + authMode: "token", + gatewayToken: undefined, + tailscaleMode: "off", + tailscaleResetOnExit: false, + }, + prompter, + runtime: createRuntime(), + }); + + expect(gatewayServiceRestart).toHaveBeenCalledTimes(1); + expect(gatewayServiceInstall).not.toHaveBeenCalled(); + expect(gatewayServiceUninstall).not.toHaveBeenCalled(); + expect(progressUpdate).toHaveBeenCalledWith("Restarting Gateway service…"); + expect(progressStop).toHaveBeenCalledWith("Gateway service restart scheduled."); + }); }); diff --git a/src/wizard/onboarding.finalize.ts b/src/wizard/onboarding.finalize.ts index fdb1143933c..b218e160ed5 100644 --- a/src/wizard/onboarding.finalize.ts +++ b/src/wizard/onboarding.finalize.ts @@ -23,7 +23,7 @@ import { } from "../commands/onboard-helpers.js"; import type { OnboardOptions } from "../commands/onboard-types.js"; import type { OpenClawConfig } from "../config/config.js"; -import { resolveGatewayService } from "../daemon/service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "../daemon/service.js"; import { isSystemdUserServiceAvailable } from "../daemon/systemd.js"; import { ensureControlUiAssetsBuilt } 
from "../infra/control-ui-assets.js"; import type { RuntimeEnv } from "../runtime.js"; @@ -53,14 +53,16 @@ export async function finalizeOnboardingWizard( const withWizardProgress = async ( label: string, - options: { doneMessage?: string }, + options: { doneMessage?: string | (() => string | undefined) }, work: (progress: { update: (message: string) => void }) => Promise, ): Promise => { const progress = prompter.progress(label); try { return await work(progress); } finally { - progress.stop(options.doneMessage); + progress.stop( + typeof options.doneMessage === "function" ? options.doneMessage() : options.doneMessage, + ); } }; @@ -128,6 +130,7 @@ export async function finalizeOnboardingWizard( } const service = resolveGatewayService(); const loaded = await service.isLoaded({ env: process.env }); + let restartWasScheduled = false; if (loaded) { const action = await prompter.select({ message: "Gateway service already installed", @@ -138,15 +141,19 @@ export async function finalizeOnboardingWizard( ], }); if (action === "restart") { + let restartDoneMessage = "Gateway service restarted."; await withWizardProgress( "Gateway service", - { doneMessage: "Gateway service restarted." 
}, + { doneMessage: () => restartDoneMessage }, async (progress) => { progress.update("Restarting Gateway service…"); - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); + const restartStatus = describeGatewayServiceRestart("Gateway", restartResult); + restartDoneMessage = restartStatus.progressMessage; + restartWasScheduled = restartStatus.scheduled; }, ); } else if (action === "reinstall") { @@ -161,7 +168,10 @@ export async function finalizeOnboardingWizard( } } - if (!loaded || (loaded && !(await service.isLoaded({ env: process.env })))) { + if ( + !loaded || + (!restartWasScheduled && loaded && !(await service.isLoaded({ env: process.env }))) + ) { const progress = prompter.progress("Gateway service"); let installError: string | null = null; try { diff --git a/src/wizard/onboarding.ts b/src/wizard/onboarding.ts index 47825eeae52..554c8046b60 100644 --- a/src/wizard/onboarding.ts +++ b/src/wizard/onboarding.ts @@ -442,13 +442,17 @@ export async function runOnboardingWizard( config: nextConfig, prompter, runtime, - setDefaultModel: true, + setDefaultModel: !(authChoiceFromPrompt && authChoice === "ollama"), opts: { tokenProvider: opts.tokenProvider, token: opts.authChoice === "apiKey" && opts.token ? 
opts.token : undefined, }, }); nextConfig = authResult.config; + + if (authResult.agentModelOverride) { + nextConfig = applyPrimaryModel(nextConfig, authResult.agentModelOverride); + } } if (authChoiceFromPrompt && authChoice !== "custom-api-key") { @@ -468,6 +472,11 @@ export async function runOnboardingWizard( } } + if (authChoice === "ollama") { + const { ensureOllamaModelPulled } = await import("../commands/ollama-setup.js"); + await ensureOllamaModelPulled({ config: nextConfig, prompter }); + } + await warnIfModelConfigLooksOff(nextConfig, prompter); const { configureGatewayForOnboarding } = await import("./onboarding.gateway-config.js"); diff --git a/test/openclaw-npm-release-check.test.ts b/test/openclaw-npm-release-check.test.ts index 7bd1c98d92d..50f4cb7a5ab 100644 --- a/test/openclaw-npm-release-check.test.ts +++ b/test/openclaw-npm-release-check.test.ts @@ -8,30 +8,30 @@ import { describe("parseReleaseVersion", () => { it("parses stable CalVer releases", () => { - expect(parseReleaseVersion("2026.3.9")).toMatchObject({ - version: "2026.3.9", + expect(parseReleaseVersion("2026.3.10")).toMatchObject({ + version: "2026.3.10", channel: "stable", year: 2026, month: 3, - day: 9, + day: 10, }); }); it("parses beta CalVer releases", () => { - expect(parseReleaseVersion("2026.3.9-beta.2")).toMatchObject({ - version: "2026.3.9-beta.2", + expect(parseReleaseVersion("2026.3.10-beta.2")).toMatchObject({ + version: "2026.3.10-beta.2", channel: "beta", year: 2026, month: 3, - day: 9, + day: 10, betaNumber: 2, }); }); it("rejects legacy and malformed release formats", () => { - expect(parseReleaseVersion("2026.3.9-1")).toBeNull(); + expect(parseReleaseVersion("2026.3.10-1")).toBeNull(); expect(parseReleaseVersion("2026.03.09")).toBeNull(); - expect(parseReleaseVersion("v2026.3.9")).toBeNull(); + expect(parseReleaseVersion("v2026.3.10")).toBeNull(); expect(parseReleaseVersion("2026.2.30")).toBeNull(); expect(parseReleaseVersion("2.0.0-beta2")).toBeNull(); }); @@ -49,8 
+49,8 @@ describe("collectReleaseTagErrors", () => { it("accepts versions within the two-day CalVer window", () => { expect( collectReleaseTagErrors({ - packageVersion: "2026.3.9", - releaseTag: "v2026.3.9", + packageVersion: "2026.3.10", + releaseTag: "v2026.3.10", now: new Date("2026-03-11T12:00:00Z"), }), ).toEqual([]); @@ -59,9 +59,9 @@ describe("collectReleaseTagErrors", () => { it("rejects versions outside the two-day CalVer window", () => { expect( collectReleaseTagErrors({ - packageVersion: "2026.3.9", - releaseTag: "v2026.3.9", - now: new Date("2026-03-12T00:00:00Z"), + packageVersion: "2026.3.10", + releaseTag: "v2026.3.10", + now: new Date("2026-03-13T00:00:00Z"), }), ).toContainEqual(expect.stringContaining("must be within 2 days")); }); @@ -69,9 +69,9 @@ describe("collectReleaseTagErrors", () => { it("rejects tags that do not match the current release format", () => { expect( collectReleaseTagErrors({ - packageVersion: "2026.3.9", - releaseTag: "v2026.3.9-1", - now: new Date("2026-03-09T00:00:00Z"), + packageVersion: "2026.3.10", + releaseTag: "v2026.3.10-1", + now: new Date("2026-03-10T00:00:00Z"), }), ).toContainEqual(expect.stringContaining("must match vYYYY.M.D or vYYYY.M.D-beta.N")); }); diff --git a/test/setup.ts b/test/setup.ts index f232e5fc2d0..a6f902cb90f 100644 --- a/test/setup.ts +++ b/test/setup.ts @@ -1,5 +1,15 @@ import { afterAll, afterEach, beforeAll, vi } from "vitest"; +vi.mock("@mariozechner/pi-ai", async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + getOAuthApiKey: () => undefined, + getOAuthProviders: () => [], + loginOpenAICodex: vi.fn(), + }; +}); + vi.mock("@mariozechner/pi-ai/oauth", () => ({ getOAuthApiKey: () => undefined, getOAuthProviders: () => [], diff --git a/ui/package.json b/ui/package.json index b1f548f2869..1944c788cae 100644 --- a/ui/package.json +++ b/ui/package.json @@ -12,7 +12,7 @@ "@lit-labs/signals": "^0.2.0", "@lit/context": "^1.1.6", "@noble/ed25519": "3.0.0", 
- "dompurify": "^3.3.2", + "dompurify": "^3.3.3", "lit": "^3.3.2", "marked": "^17.0.4", "signal-polyfill": "^0.2.2", diff --git a/ui/src/ui/chat-export.ts b/ui/src/ui/chat-export.ts new file mode 100644 index 00000000000..ed5bbf931f8 --- /dev/null +++ b/ui/src/ui/chat-export.ts @@ -0,0 +1 @@ +export { exportChatMarkdown } from "./chat/export.ts"; diff --git a/ui/src/ui/chat/deleted-messages.ts b/ui/src/ui/chat/deleted-messages.ts new file mode 100644 index 00000000000..fd3916d78c7 --- /dev/null +++ b/ui/src/ui/chat/deleted-messages.ts @@ -0,0 +1,49 @@ +const PREFIX = "openclaw:deleted:"; + +export class DeletedMessages { + private key: string; + private _keys = new Set(); + + constructor(sessionKey: string) { + this.key = PREFIX + sessionKey; + this.load(); + } + + has(key: string): boolean { + return this._keys.has(key); + } + + delete(key: string): void { + this._keys.add(key); + this.save(); + } + + restore(key: string): void { + this._keys.delete(key); + this.save(); + } + + clear(): void { + this._keys.clear(); + this.save(); + } + + private load(): void { + try { + const raw = localStorage.getItem(this.key); + if (!raw) { + return; + } + const arr = JSON.parse(raw); + if (Array.isArray(arr)) { + this._keys = new Set(arr.filter((s) => typeof s === "string")); + } + } catch { + // ignore + } + } + + private save(): void { + localStorage.setItem(this.key, JSON.stringify([...this._keys])); + } +} diff --git a/ui/src/ui/chat/export.node.test.ts b/ui/src/ui/chat/export.node.test.ts new file mode 100644 index 00000000000..fa4bb428b3b --- /dev/null +++ b/ui/src/ui/chat/export.node.test.ts @@ -0,0 +1,38 @@ +import { describe, expect, it } from "vitest"; +import { buildChatExportFilename, buildChatMarkdown, sanitizeFilenameComponent } from "./export.ts"; + +describe("chat export hardening", () => { + it("escapes raw HTML in exported markdown content and labels", () => { + const markdown = buildChatMarkdown( + [ + { + role: "assistant", + content: "", + timestamp: 
Date.UTC(2026, 2, 11, 12, 0, 0), + }, + ], + "Bot ", + ); + + expect(markdown).toContain( + "# Chat with Bot </script><script>alert(3)</script>", + ); + expect(markdown).toContain( + "## Bot </script><script>alert(3)</script> (2026-03-11T12:00:00.000Z)", + ); + expect(markdown).toContain( + "<img src=x onerror=alert(1)><script>alert(2)</script>", + ); + expect(markdown).not.toContain("")).toBe( + "NUL scriptalert1-script", + ); + expect(buildChatExportFilename("../NUL\t", 123)).toBe( + "chat-NUL scriptalert1-script-123.md", + ); + }); +}); diff --git a/ui/src/ui/chat/export.ts b/ui/src/ui/chat/export.ts new file mode 100644 index 00000000000..b42796f6a0a --- /dev/null +++ b/ui/src/ui/chat/export.ts @@ -0,0 +1,68 @@ +/** + * Export chat history as markdown file. + */ +export function escapeHtmlInMarkdown(text: string): string { + return text.replaceAll("&", "&").replaceAll("<", "<").replaceAll(">", ">"); +} + +export function normalizeSingleLineLabel(label: string, fallback = "Assistant"): string { + const normalized = label.replace(/[\r\n\t]+/g, " ").trim(); + return normalized || fallback; +} + +export function sanitizeFilenameComponent(input: string): string { + const normalized = normalizeSingleLineLabel(input, "assistant").normalize("NFKC"); + const sanitized = normalized + .replace(/[\\/]/g, "-") + .replace(/[^a-zA-Z0-9 _.-]/g, "") + .replace(/\s+/g, " ") + .replace(/-+/g, "-") + .trim() + .replace(/^[.-]+/, "") + .slice(0, 50); + return sanitized || "assistant"; +} + +export function buildChatMarkdown(messages: unknown[], assistantNameRaw: string): string | null { + const assistantName = escapeHtmlInMarkdown(normalizeSingleLineLabel(assistantNameRaw)); + const history = Array.isArray(messages) ? messages : []; + if (history.length === 0) { + return null; + } + const lines: string[] = [`# Chat with ${assistantName}`, ""]; + for (const msg of history) { + const m = msg as Record; + const role = m.role === "user" ? "You" : m.role === "assistant" ? 
assistantName : "Tool"; + const content = escapeHtmlInMarkdown( + typeof m.content === "string" + ? m.content + : Array.isArray(m.content) + ? (m.content as Array<{ type?: string; text?: string }>) + .filter((b) => b?.type === "text" && typeof b.text === "string") + .map((b) => b.text) + .join("") + : "", + ); + const ts = typeof m.timestamp === "number" ? new Date(m.timestamp).toISOString() : ""; + lines.push(`## ${role}${ts ? ` (${ts})` : ""}`, "", content, ""); + } + return lines.join("\n"); +} + +export function buildChatExportFilename(assistantNameRaw: string, now = Date.now()): string { + return `chat-${sanitizeFilenameComponent(assistantNameRaw)}-${now}.md`; +} + +export function exportChatMarkdown(messages: unknown[], assistantName: string): void { + const markdown = buildChatMarkdown(messages, assistantName); + if (!markdown) { + return; + } + const blob = new Blob([markdown], { type: "text/markdown" }); + const url = URL.createObjectURL(blob); + const link = document.createElement("a"); + link.href = url; + link.download = buildChatExportFilename(assistantName); + link.click(); + URL.revokeObjectURL(url); +} diff --git a/ui/src/ui/chat/input-history.ts b/ui/src/ui/chat/input-history.ts new file mode 100644 index 00000000000..34d8806d072 --- /dev/null +++ b/ui/src/ui/chat/input-history.ts @@ -0,0 +1,49 @@ +const MAX = 50; + +export class InputHistory { + private items: string[] = []; + private cursor = -1; + + push(text: string): void { + const trimmed = text.trim(); + if (!trimmed) { + return; + } + if (this.items[this.items.length - 1] === trimmed) { + return; + } + this.items.push(trimmed); + if (this.items.length > MAX) { + this.items.shift(); + } + this.cursor = -1; + } + + up(): string | null { + if (this.items.length === 0) { + return null; + } + if (this.cursor < 0) { + this.cursor = this.items.length - 1; + } else if (this.cursor > 0) { + this.cursor--; + } + return this.items[this.cursor] ?? 
null; + } + + down(): string | null { + if (this.cursor < 0) { + return null; + } + this.cursor++; + if (this.cursor >= this.items.length) { + this.cursor = -1; + return null; + } + return this.items[this.cursor] ?? null; + } + + reset(): void { + this.cursor = -1; + } +} diff --git a/ui/src/ui/chat/pinned-messages.ts b/ui/src/ui/chat/pinned-messages.ts new file mode 100644 index 00000000000..4914b0db32a --- /dev/null +++ b/ui/src/ui/chat/pinned-messages.ts @@ -0,0 +1,61 @@ +const PREFIX = "openclaw:pinned:"; + +export class PinnedMessages { + private key: string; + private _indices = new Set(); + + constructor(sessionKey: string) { + this.key = PREFIX + sessionKey; + this.load(); + } + + get indices(): Set { + return this._indices; + } + + has(index: number): boolean { + return this._indices.has(index); + } + + pin(index: number): void { + this._indices.add(index); + this.save(); + } + + unpin(index: number): void { + this._indices.delete(index); + this.save(); + } + + toggle(index: number): void { + if (this._indices.has(index)) { + this.unpin(index); + } else { + this.pin(index); + } + } + + clear(): void { + this._indices.clear(); + this.save(); + } + + private load(): void { + try { + const raw = localStorage.getItem(this.key); + if (!raw) { + return; + } + const arr = JSON.parse(raw); + if (Array.isArray(arr)) { + this._indices = new Set(arr.filter((n) => typeof n === "number")); + } + } catch { + // ignore + } + } + + private save(): void { + localStorage.setItem(this.key, JSON.stringify([...this._indices])); + } +} diff --git a/ui/src/ui/chat/slash-command-executor.node.test.ts b/ui/src/ui/chat/slash-command-executor.node.test.ts new file mode 100644 index 00000000000..ca30fdc54d5 --- /dev/null +++ b/ui/src/ui/chat/slash-command-executor.node.test.ts @@ -0,0 +1,381 @@ +import { describe, expect, it, vi } from "vitest"; +import type { GatewayBrowserClient } from "../gateway.ts"; +import type { GatewaySessionRow } from "../types.ts"; +import { 
executeSlashCommand } from "./slash-command-executor.ts"; + +function row(key: string, overrides?: Partial): GatewaySessionRow { + return { + key, + spawnedBy: overrides?.spawnedBy, + kind: "direct", + updatedAt: null, + ...overrides, + }; +} + +describe("executeSlashCommand /kill", () => { + it("aborts every sub-agent session for /kill all", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + sessions: [ + row("main"), + row("agent:main:subagent:one", { spawnedBy: "main" }), + row("agent:main:subagent:parent", { spawnedBy: "main" }), + row("agent:main:subagent:parent:subagent:child", { + spawnedBy: "agent:main:subagent:parent", + }), + row("agent:other:main"), + ], + }; + } + if (method === "chat.abort") { + return { ok: true, aborted: true }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:main", + "kill", + "all", + ); + + expect(result.content).toBe("Aborted 3 sub-agent sessions."); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + expect(request).toHaveBeenNthCalledWith(2, "chat.abort", { + sessionKey: "agent:main:subagent:one", + }); + expect(request).toHaveBeenNthCalledWith(3, "chat.abort", { + sessionKey: "agent:main:subagent:parent", + }); + expect(request).toHaveBeenNthCalledWith(4, "chat.abort", { + sessionKey: "agent:main:subagent:parent:subagent:child", + }); + }); + + it("aborts matching sub-agent sessions for /kill ", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + sessions: [ + row("agent:main:subagent:one", { spawnedBy: "agent:main:main" }), + row("agent:main:subagent:two", { spawnedBy: "agent:main:main" }), + row("agent:other:subagent:three", { spawnedBy: "agent:other:main" }), + ], + }; + } + if (method === "chat.abort") { + return { ok: 
true, aborted: true }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:main", + "kill", + "main", + ); + + expect(result.content).toBe("Aborted 2 matching sub-agent sessions for `main`."); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + expect(request).toHaveBeenNthCalledWith(2, "chat.abort", { + sessionKey: "agent:main:subagent:one", + }); + expect(request).toHaveBeenNthCalledWith(3, "chat.abort", { + sessionKey: "agent:main:subagent:two", + }); + }); + + it("does not exact-match a session key outside the current subagent subtree", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + sessions: [ + row("agent:main:subagent:parent", { spawnedBy: "agent:main:main" }), + row("agent:main:subagent:parent:subagent:child", { + spawnedBy: "agent:main:subagent:parent", + }), + row("agent:main:subagent:sibling", { spawnedBy: "agent:main:main" }), + ], + }; + } + if (method === "chat.abort") { + return { ok: true, aborted: true }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:subagent:parent", + "kill", + "agent:main:subagent:sibling", + ); + + expect(result.content).toBe( + "No matching sub-agent sessions found for `agent:main:subagent:sibling`.", + ); + expect(request).toHaveBeenCalledTimes(1); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + }); + + it("returns a no-op summary when matching sessions have no active runs", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + sessions: [ + row("agent:main:subagent:one", { spawnedBy: "agent:main:main" }), + row("agent:main:subagent:two", { spawnedBy: "agent:main:main" }), + ], + }; 
+ } + if (method === "chat.abort") { + return { ok: true, aborted: false }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:main", + "kill", + "all", + ); + + expect(result.content).toBe("No active sub-agent runs to abort."); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + expect(request).toHaveBeenNthCalledWith(2, "chat.abort", { + sessionKey: "agent:main:subagent:one", + }); + expect(request).toHaveBeenNthCalledWith(3, "chat.abort", { + sessionKey: "agent:main:subagent:two", + }); + }); + + it("treats the legacy main session key as the default agent scope", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + sessions: [ + row("main"), + row("agent:main:subagent:one", { spawnedBy: "agent:main:main" }), + row("agent:main:subagent:two", { spawnedBy: "agent:main:main" }), + row("agent:other:subagent:three", { spawnedBy: "agent:other:main" }), + ], + }; + } + if (method === "chat.abort") { + return { ok: true, aborted: true }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "main", + "kill", + "all", + ); + + expect(result.content).toBe("Aborted 2 sub-agent sessions."); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + expect(request).toHaveBeenNthCalledWith(2, "chat.abort", { + sessionKey: "agent:main:subagent:one", + }); + expect(request).toHaveBeenNthCalledWith(3, "chat.abort", { + sessionKey: "agent:main:subagent:two", + }); + }); + + it("does not abort unrelated same-agent subagents from another root session", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + sessions: [ + row("agent:main:main"), + row("agent:main:subagent:mine", 
{ spawnedBy: "agent:main:main" }), + row("agent:main:subagent:mine:subagent:child", { + spawnedBy: "agent:main:subagent:mine", + }), + row("agent:main:subagent:other-root", { + spawnedBy: "agent:main:discord:dm:alice", + }), + ], + }; + } + if (method === "chat.abort") { + return { ok: true, aborted: true }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:main", + "kill", + "all", + ); + + expect(result.content).toBe("Aborted 2 sub-agent sessions."); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + expect(request).toHaveBeenNthCalledWith(2, "chat.abort", { + sessionKey: "agent:main:subagent:mine", + }); + expect(request).toHaveBeenNthCalledWith(3, "chat.abort", { + sessionKey: "agent:main:subagent:mine:subagent:child", + }); + }); +}); + +describe("executeSlashCommand directives", () => { + it("resolves the legacy main alias for bare /model", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + defaults: { model: "default-model" }, + sessions: [ + row("agent:main:main", { + model: "gpt-4.1-mini", + }), + ], + }; + } + if (method === "models.list") { + return { + models: [{ id: "gpt-4.1-mini" }, { id: "gpt-4.1" }], + }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "main", + "model", + "", + ); + + expect(result.content).toBe( + "**Current model:** `gpt-4.1-mini`\n**Available:** `gpt-4.1-mini`, `gpt-4.1`", + ); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + expect(request).toHaveBeenNthCalledWith(2, "models.list", {}); + }); + + it("resolves the legacy main alias for /usage", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + 
sessions: [ + row("agent:main:main", { + model: "gpt-4.1-mini", + inputTokens: 1200, + outputTokens: 300, + totalTokens: 1500, + contextTokens: 4000, + }), + ], + }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "main", + "usage", + "", + ); + + expect(result.content).toBe( + "**Session Usage**\nInput: **1.2k** tokens\nOutput: **300** tokens\nTotal: **1.5k** tokens\nContext: **30%** of 4k\nModel: `gpt-4.1-mini`", + ); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + }); + + it("reports the current thinking level for bare /think", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + sessions: [ + row("agent:main:main", { + modelProvider: "openai", + model: "gpt-4.1-mini", + }), + ], + }; + } + if (method === "models.list") { + return { + models: [{ id: "gpt-4.1-mini", provider: "openai", reasoning: true }], + }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:main", + "think", + "", + ); + + expect(result.content).toBe( + "Current thinking level: low.\nOptions: off, minimal, low, medium, high, adaptive.", + ); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + expect(request).toHaveBeenNthCalledWith(2, "models.list", {}); + }); + + it("accepts minimal and xhigh thinking levels", async () => { + const request = vi.fn().mockResolvedValueOnce({ ok: true }).mockResolvedValueOnce({ ok: true }); + + const minimal = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:main", + "think", + "minimal", + ); + const xhigh = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:main", + "think", + "xhigh", + ); + + expect(minimal.content).toBe("Thinking 
level set to **minimal**."); + expect(xhigh.content).toBe("Thinking level set to **xhigh**."); + expect(request).toHaveBeenNthCalledWith(1, "sessions.patch", { + key: "agent:main:main", + thinkingLevel: "minimal", + }); + expect(request).toHaveBeenNthCalledWith(2, "sessions.patch", { + key: "agent:main:main", + thinkingLevel: "xhigh", + }); + }); + + it("reports the current verbose level for bare /verbose", async () => { + const request = vi.fn(async (method: string, _payload?: unknown) => { + if (method === "sessions.list") { + return { + sessions: [row("agent:main:main", { verboseLevel: "full" })], + }; + } + throw new Error(`unexpected method: ${method}`); + }); + + const result = await executeSlashCommand( + { request } as unknown as GatewayBrowserClient, + "agent:main:main", + "verbose", + "", + ); + + expect(result.content).toBe("Current verbose level: full.\nOptions: on, full, off."); + expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {}); + }); +}); diff --git a/ui/src/ui/chat/slash-command-executor.ts b/ui/src/ui/chat/slash-command-executor.ts new file mode 100644 index 00000000000..999a21487b9 --- /dev/null +++ b/ui/src/ui/chat/slash-command-executor.ts @@ -0,0 +1,545 @@ +/** + * Client-side execution engine for slash commands. + * Calls gateway RPC methods and returns formatted results. 
+ */ + +import type { ModelCatalogEntry } from "../../../../src/agents/model-catalog.js"; +import { resolveThinkingDefault } from "../../../../src/agents/model-selection.js"; +import { + formatThinkingLevels, + normalizeThinkLevel, + normalizeVerboseLevel, +} from "../../../../src/auto-reply/thinking.js"; +import type { HealthSummary } from "../../../../src/commands/health.js"; +import type { OpenClawConfig } from "../../../../src/config/config.js"; +import { + DEFAULT_AGENT_ID, + DEFAULT_MAIN_KEY, + isSubagentSessionKey, + parseAgentSessionKey, +} from "../../../../src/routing/session-key.js"; +import type { GatewayBrowserClient } from "../gateway.ts"; +import type { AgentsListResult, GatewaySessionRow, SessionsListResult } from "../types.ts"; +import { SLASH_COMMANDS } from "./slash-commands.ts"; + +export type SlashCommandResult = { + /** Markdown-formatted result to display in chat. */ + content: string; + /** Side-effect action the caller should perform after displaying the result. 
*/ + action?: + | "refresh" + | "export" + | "new-session" + | "reset" + | "stop" + | "clear" + | "toggle-focus" + | "navigate-usage"; +}; + +export async function executeSlashCommand( + client: GatewayBrowserClient, + sessionKey: string, + commandName: string, + args: string, +): Promise { + switch (commandName) { + case "help": + return executeHelp(); + case "status": + return await executeStatus(client); + case "new": + return { content: "Starting new session...", action: "new-session" }; + case "reset": + return { content: "Resetting session...", action: "reset" }; + case "stop": + return { content: "Stopping current run...", action: "stop" }; + case "clear": + return { content: "Chat history cleared.", action: "clear" }; + case "focus": + return { content: "Toggled focus mode.", action: "toggle-focus" }; + case "compact": + return await executeCompact(client, sessionKey); + case "model": + return await executeModel(client, sessionKey, args); + case "think": + return await executeThink(client, sessionKey, args); + case "verbose": + return await executeVerbose(client, sessionKey, args); + case "export": + return { content: "Exporting session...", action: "export" }; + case "usage": + return await executeUsage(client, sessionKey); + case "agents": + return await executeAgents(client); + case "kill": + return await executeKill(client, sessionKey, args); + default: + return { content: `Unknown command: \`/${commandName}\`` }; + } +} + +// ── Command Implementations ── + +function executeHelp(): SlashCommandResult { + const lines = ["**Available Commands**\n"]; + let currentCategory = ""; + + for (const cmd of SLASH_COMMANDS) { + const cat = cmd.category ?? "session"; + if (cat !== currentCategory) { + currentCategory = cat; + lines.push(`**${cat.charAt(0).toUpperCase() + cat.slice(1)}**`); + } + const argStr = cmd.args ? ` ${cmd.args}` : ""; + const local = cmd.executeLocal ? 
"" : " *(agent)*"; + lines.push(`\`/${cmd.name}${argStr}\` — ${cmd.description}${local}`); + } + + lines.push("\nType `/` to open the command menu."); + return { content: lines.join("\n") }; +} + +async function executeStatus(client: GatewayBrowserClient): Promise { + try { + const health = await client.request("health", {}); + const status = health.ok ? "Healthy" : "Degraded"; + const agentCount = health.agents?.length ?? 0; + const sessionCount = health.sessions?.count ?? 0; + const lines = [ + `**System Status:** ${status}`, + `**Agents:** ${agentCount}`, + `**Sessions:** ${sessionCount}`, + `**Default Agent:** ${health.defaultAgentId || "none"}`, + ]; + if (health.durationMs) { + lines.push(`**Response:** ${health.durationMs}ms`); + } + return { content: lines.join("\n") }; + } catch (err) { + return { content: `Failed to fetch status: ${String(err)}` }; + } +} + +async function executeCompact( + client: GatewayBrowserClient, + sessionKey: string, +): Promise { + try { + await client.request("sessions.compact", { key: sessionKey }); + return { content: "Context compacted successfully.", action: "refresh" }; + } catch (err) { + return { content: `Compaction failed: ${String(err)}` }; + } +} + +async function executeModel( + client: GatewayBrowserClient, + sessionKey: string, + args: string, +): Promise { + if (!args) { + try { + const [sessions, models] = await Promise.all([ + client.request("sessions.list", {}), + client.request<{ models: ModelCatalogEntry[] }>("models.list", {}), + ]); + const session = resolveCurrentSession(sessions, sessionKey); + const model = session?.model || sessions?.defaults?.model || "default"; + const available = models?.models?.map((m: ModelCatalogEntry) => m.id) ?? []; + const lines = [`**Current model:** \`${model}\``]; + if (available.length > 0) { + lines.push( + `**Available:** ${available + .slice(0, 10) + .map((m: string) => `\`${m}\``) + .join(", ")}${available.length > 10 ? 
` +${available.length - 10} more` : ""}`, + ); + } + return { content: lines.join("\n") }; + } catch (err) { + return { content: `Failed to get model info: ${String(err)}` }; + } + } + + try { + await client.request("sessions.patch", { key: sessionKey, model: args.trim() }); + return { content: `Model set to \`${args.trim()}\`.`, action: "refresh" }; + } catch (err) { + return { content: `Failed to set model: ${String(err)}` }; + } +} + +async function executeThink( + client: GatewayBrowserClient, + sessionKey: string, + args: string, +): Promise { + const rawLevel = args.trim(); + if (!rawLevel) { + try { + const { session, models } = await loadThinkingCommandState(client, sessionKey); + return { + content: formatDirectiveOptions( + `Current thinking level: ${resolveCurrentThinkingLevel(session, models)}.`, + formatThinkingLevels(session?.modelProvider, session?.model), + ), + }; + } catch (err) { + return { content: `Failed to get thinking level: ${String(err)}` }; + } + } + + const level = normalizeThinkLevel(rawLevel); + if (!level) { + try { + const session = await loadCurrentSession(client, sessionKey); + return { + content: `Unrecognized thinking level "${rawLevel}". 
Valid levels: ${formatThinkingLevels(session?.modelProvider, session?.model)}.`, + }; + } catch (err) { + return { content: `Failed to validate thinking level: ${String(err)}` }; + } + } + + try { + await client.request("sessions.patch", { key: sessionKey, thinkingLevel: level }); + return { + content: `Thinking level set to **${level}**.`, + action: "refresh", + }; + } catch (err) { + return { content: `Failed to set thinking level: ${String(err)}` }; + } +} + +async function executeVerbose( + client: GatewayBrowserClient, + sessionKey: string, + args: string, +): Promise { + const rawLevel = args.trim(); + if (!rawLevel) { + try { + const session = await loadCurrentSession(client, sessionKey); + return { + content: formatDirectiveOptions( + `Current verbose level: ${normalizeVerboseLevel(session?.verboseLevel) ?? "off"}.`, + "on, full, off", + ), + }; + } catch (err) { + return { content: `Failed to get verbose level: ${String(err)}` }; + } + } + + const level = normalizeVerboseLevel(rawLevel); + if (!level) { + return { + content: `Unrecognized verbose level "${rawLevel}". Valid levels: off, on, full.`, + }; + } + + try { + await client.request("sessions.patch", { key: sessionKey, verboseLevel: level }); + return { + content: `Verbose mode set to **${level}**.`, + action: "refresh", + }; + } catch (err) { + return { content: `Failed to set verbose mode: ${String(err)}` }; + } +} + +async function executeUsage( + client: GatewayBrowserClient, + sessionKey: string, +): Promise { + try { + const sessions = await client.request("sessions.list", {}); + const session = resolveCurrentSession(sessions, sessionKey); + if (!session) { + return { content: "No active session." }; + } + const input = session.inputTokens ?? 0; + const output = session.outputTokens ?? 0; + const total = session.totalTokens ?? input + output; + const ctx = session.contextTokens ?? 0; + const pct = ctx > 0 ? 
Math.round((input / ctx) * 100) : null; + + const lines = [ + "**Session Usage**", + `Input: **${fmtTokens(input)}** tokens`, + `Output: **${fmtTokens(output)}** tokens`, + `Total: **${fmtTokens(total)}** tokens`, + ]; + if (pct !== null) { + lines.push(`Context: **${pct}%** of ${fmtTokens(ctx)}`); + } + if (session.model) { + lines.push(`Model: \`${session.model}\``); + } + return { content: lines.join("\n") }; + } catch (err) { + return { content: `Failed to get usage: ${String(err)}` }; + } +} + +async function executeAgents(client: GatewayBrowserClient): Promise { + try { + const result = await client.request("agents.list", {}); + const agents = result?.agents ?? []; + if (agents.length === 0) { + return { content: "No agents configured." }; + } + const lines = [`**Agents** (${agents.length})\n`]; + for (const agent of agents) { + const isDefault = agent.id === result?.defaultId; + const name = agent.identity?.name || agent.name || agent.id; + const marker = isDefault ? " *(default)*" : ""; + lines.push(`- \`${agent.id}\` — ${name}${marker}`); + } + return { content: lines.join("\n") }; + } catch (err) { + return { content: `Failed to list agents: ${String(err)}` }; + } +} + +async function executeKill( + client: GatewayBrowserClient, + sessionKey: string, + args: string, +): Promise { + const target = args.trim(); + if (!target) { + return { content: "Usage: `/kill `" }; + } + try { + const sessions = await client.request("sessions.list", {}); + const matched = resolveKillTargets(sessions?.sessions ?? [], sessionKey, target); + if (matched.length === 0) { + return { + content: + target.toLowerCase() === "all" + ? "No active sub-agent sessions found." 
+ : `No matching sub-agent sessions found for \`${target}\`.`, + }; + } + + const results = await Promise.allSettled( + matched.map((key) => + client.request<{ aborted?: boolean }>("chat.abort", { sessionKey: key }), + ), + ); + const rejected = results.filter((entry) => entry.status === "rejected"); + const successCount = results.filter( + (entry) => + entry.status === "fulfilled" && (entry.value as { aborted?: boolean })?.aborted !== false, + ).length; + if (successCount === 0) { + if (rejected.length === 0) { + return { + content: + target.toLowerCase() === "all" + ? "No active sub-agent runs to abort." + : `No active runs matched \`${target}\`.`, + }; + } + throw rejected[0]?.reason ?? new Error("abort failed"); + } + + if (target.toLowerCase() === "all") { + return { + content: + successCount === matched.length + ? `Aborted ${successCount} sub-agent session${successCount === 1 ? "" : "s"}.` + : `Aborted ${successCount} of ${matched.length} sub-agent sessions.`, + }; + } + + return { + content: + successCount === matched.length + ? `Aborted ${successCount} matching sub-agent session${successCount === 1 ? "" : "s"} for \`${target}\`.` + : `Aborted ${successCount} of ${matched.length} matching sub-agent sessions for \`${target}\`.`, + }; + } catch (err) { + return { content: `Failed to abort: ${String(err)}` }; + } +} + +function resolveKillTargets( + sessions: GatewaySessionRow[], + currentSessionKey: string, + target: string, +): string[] { + const normalizedTarget = target.trim().toLowerCase(); + if (!normalizedTarget) { + return []; + } + + const keys = new Set(); + const normalizedCurrentSessionKey = currentSessionKey.trim().toLowerCase(); + const currentParsed = parseAgentSessionKey(normalizedCurrentSessionKey); + const currentAgentId = + currentParsed?.agentId ?? + (normalizedCurrentSessionKey === DEFAULT_MAIN_KEY ? 
DEFAULT_AGENT_ID : undefined); + const sessionIndex = buildSessionIndex(sessions); + for (const session of sessions) { + const key = session?.key?.trim(); + if (!key || !isSubagentSessionKey(key)) { + continue; + } + const normalizedKey = key.toLowerCase(); + const parsed = parseAgentSessionKey(normalizedKey); + const belongsToCurrentSession = isWithinCurrentSessionSubtree( + normalizedKey, + normalizedCurrentSessionKey, + sessionIndex, + currentAgentId, + parsed?.agentId, + ); + const isMatch = + (normalizedTarget === "all" && belongsToCurrentSession) || + (belongsToCurrentSession && normalizedKey === normalizedTarget) || + (belongsToCurrentSession && + ((parsed?.agentId ?? "") === normalizedTarget || + normalizedKey.endsWith(`:subagent:${normalizedTarget}`) || + normalizedKey === `subagent:${normalizedTarget}`)); + if (isMatch) { + keys.add(key); + } + } + return [...keys]; +} + +function isWithinCurrentSessionSubtree( + candidateSessionKey: string, + currentSessionKey: string, + sessionIndex: Map, + currentAgentId: string | undefined, + candidateAgentId: string | undefined, +): boolean { + if (!currentAgentId || candidateAgentId !== currentAgentId) { + return false; + } + + const currentAliases = resolveEquivalentSessionKeys(currentSessionKey, currentAgentId); + const seen = new Set(); + let parentSessionKey = normalizeSessionKey(sessionIndex.get(candidateSessionKey)?.spawnedBy); + while (parentSessionKey && !seen.has(parentSessionKey)) { + if (currentAliases.has(parentSessionKey)) { + return true; + } + seen.add(parentSessionKey); + parentSessionKey = normalizeSessionKey(sessionIndex.get(parentSessionKey)?.spawnedBy); + } + + // Older gateways may not include spawnedBy on session rows yet; keep prefix + // matching for nested subagent sessions as a compatibility fallback. + return isSubagentSessionKey(currentSessionKey) + ? 
candidateSessionKey.startsWith(`${currentSessionKey}:subagent:`) + : false; +} + +function buildSessionIndex(sessions: GatewaySessionRow[]): Map { + const index = new Map(); + for (const session of sessions) { + const normalizedKey = normalizeSessionKey(session?.key); + if (!normalizedKey) { + continue; + } + index.set(normalizedKey, session); + } + return index; +} + +function normalizeSessionKey(key?: string | null): string | undefined { + const normalized = key?.trim().toLowerCase(); + return normalized || undefined; +} + +function resolveEquivalentSessionKeys( + currentSessionKey: string, + currentAgentId: string | undefined, +): Set { + const keys = new Set([currentSessionKey]); + if (currentAgentId === DEFAULT_AGENT_ID) { + const canonicalDefaultMain = `agent:${DEFAULT_AGENT_ID}:main`; + if (currentSessionKey === DEFAULT_MAIN_KEY) { + keys.add(canonicalDefaultMain); + } else if (currentSessionKey === canonicalDefaultMain) { + keys.add(DEFAULT_MAIN_KEY); + } + } + return keys; +} + +function formatDirectiveOptions(text: string, options: string): string { + return `${text}\nOptions: ${options}.`; +} + +async function loadCurrentSession( + client: GatewayBrowserClient, + sessionKey: string, +): Promise { + const sessions = await client.request("sessions.list", {}); + return resolveCurrentSession(sessions, sessionKey); +} + +function resolveCurrentSession( + sessions: SessionsListResult | undefined, + sessionKey: string, +): GatewaySessionRow | undefined { + const normalizedSessionKey = normalizeSessionKey(sessionKey); + const currentAgentId = + parseAgentSessionKey(normalizedSessionKey ?? "")?.agentId ?? + (normalizedSessionKey === DEFAULT_MAIN_KEY ? DEFAULT_AGENT_ID : undefined); + const aliases = normalizedSessionKey + ? resolveEquivalentSessionKeys(normalizedSessionKey, currentAgentId) + : new Set(); + return sessions?.sessions?.find((session: GatewaySessionRow) => { + const key = normalizeSessionKey(session.key); + return key ? 
aliases.has(key) : false; + }); +} + +async function loadThinkingCommandState(client: GatewayBrowserClient, sessionKey: string) { + const [sessions, models] = await Promise.all([ + client.request("sessions.list", {}), + client.request<{ models: ModelCatalogEntry[] }>("models.list", {}), + ]); + return { + session: resolveCurrentSession(sessions, sessionKey), + models: models?.models ?? [], + }; +} + +function resolveCurrentThinkingLevel( + session: GatewaySessionRow | undefined, + models: ModelCatalogEntry[], +): string { + const persisted = normalizeThinkLevel(session?.thinkingLevel); + if (persisted) { + return persisted; + } + if (!session?.modelProvider || !session.model) { + return "off"; + } + return resolveThinkingDefault({ + cfg: {} as OpenClawConfig, + provider: session.modelProvider, + model: session.model, + catalog: models, + }); +} + +function fmtTokens(n: number): string { + if (n >= 1_000_000) { + return `${(n / 1_000_000).toFixed(1).replace(/\.0$/, "")}M`; + } + if (n >= 1_000) { + return `${(n / 1_000).toFixed(1).replace(/\.0$/, "")}k`; + } + return String(n); +} diff --git a/ui/src/ui/chat/slash-commands.node.test.ts b/ui/src/ui/chat/slash-commands.node.test.ts new file mode 100644 index 00000000000..cb07109df9f --- /dev/null +++ b/ui/src/ui/chat/slash-commands.node.test.ts @@ -0,0 +1,26 @@ +import { describe, expect, it } from "vitest"; +import { parseSlashCommand } from "./slash-commands.ts"; + +describe("parseSlashCommand", () => { + it("parses commands with an optional colon separator", () => { + expect(parseSlashCommand("/think: high")).toMatchObject({ + command: { name: "think" }, + args: "high", + }); + expect(parseSlashCommand("/think:high")).toMatchObject({ + command: { name: "think" }, + args: "high", + }); + expect(parseSlashCommand("/help:")).toMatchObject({ + command: { name: "help" }, + args: "", + }); + }); + + it("still parses space-delimited commands", () => { + expect(parseSlashCommand("/verbose full")).toMatchObject({ + command: 
{ name: "verbose" }, + args: "full", + }); + }); +}); diff --git a/ui/src/ui/chat/slash-commands.ts b/ui/src/ui/chat/slash-commands.ts new file mode 100644 index 00000000000..27acd90025e --- /dev/null +++ b/ui/src/ui/chat/slash-commands.ts @@ -0,0 +1,222 @@ +import type { IconName } from "../icons.ts"; + +export type SlashCommandCategory = "session" | "model" | "agents" | "tools"; + +export type SlashCommandDef = { + name: string; + description: string; + args?: string; + icon?: IconName; + category?: SlashCommandCategory; + /** When true, the command is executed client-side via RPC instead of sent to the agent. */ + executeLocal?: boolean; + /** Fixed argument choices for inline hints. */ + argOptions?: string[]; + /** Keyboard shortcut hint shown in the menu (display only). */ + shortcut?: string; +}; + +export const SLASH_COMMANDS: SlashCommandDef[] = [ + // ── Session ── + { + name: "new", + description: "Start a new session", + icon: "circle", + category: "session", + executeLocal: true, + }, + { + name: "reset", + description: "Reset current session", + icon: "loader", + category: "session", + executeLocal: true, + }, + { + name: "compact", + description: "Compact session context", + icon: "loader", + category: "session", + executeLocal: true, + }, + { + name: "stop", + description: "Stop current run", + icon: "x", + category: "session", + executeLocal: true, + }, + { + name: "clear", + description: "Clear chat history", + icon: "x", + category: "session", + executeLocal: true, + }, + { + name: "focus", + description: "Toggle focus mode", + icon: "search", + category: "session", + executeLocal: true, + }, + + // ── Model ── + { + name: "model", + description: "Show or set model", + args: "", + icon: "brain", + category: "model", + executeLocal: true, + }, + { + name: "think", + description: "Set thinking level", + args: "", + icon: "brain", + category: "model", + executeLocal: true, + argOptions: ["off", "minimal", "low", "medium", "high", "xhigh", 
"adaptive"], + }, + { + name: "verbose", + description: "Toggle verbose mode", + args: "", + icon: "fileCode", + category: "model", + executeLocal: true, + argOptions: ["on", "off", "full"], + }, + + // ── Tools ── + { + name: "help", + description: "Show available commands", + icon: "book", + category: "tools", + executeLocal: true, + }, + { + name: "status", + description: "Show system status", + icon: "barChart", + category: "tools", + executeLocal: true, + }, + { + name: "export", + description: "Export session to Markdown", + icon: "arrowDown", + category: "tools", + executeLocal: true, + }, + { + name: "usage", + description: "Show token usage", + icon: "barChart", + category: "tools", + executeLocal: true, + }, + + // ── Agents ── + { + name: "agents", + description: "List agents", + icon: "monitor", + category: "agents", + executeLocal: true, + }, + { + name: "kill", + description: "Abort sub-agents", + args: "", + icon: "x", + category: "agents", + executeLocal: true, + }, + { + name: "skill", + description: "Run a skill", + args: "", + icon: "zap", + category: "tools", + }, + { + name: "steer", + description: "Steer a sub-agent", + args: " ", + icon: "zap", + category: "agents", + }, +]; + +const CATEGORY_ORDER: SlashCommandCategory[] = ["session", "model", "tools", "agents"]; + +export const CATEGORY_LABELS: Record = { + session: "Session", + model: "Model", + agents: "Agents", + tools: "Tools", +}; + +export function getSlashCommandCompletions(filter: string): SlashCommandDef[] { + const lower = filter.toLowerCase(); + const commands = lower + ? SLASH_COMMANDS.filter( + (cmd) => cmd.name.startsWith(lower) || cmd.description.toLowerCase().includes(lower), + ) + : SLASH_COMMANDS; + return commands.toSorted((a, b) => { + const ai = CATEGORY_ORDER.indexOf(a.category ?? "session"); + const bi = CATEGORY_ORDER.indexOf(b.category ?? 
"session"); + if (ai !== bi) { + return ai - bi; + } + // Exact prefix matches first + if (lower) { + const aExact = a.name.startsWith(lower) ? 0 : 1; + const bExact = b.name.startsWith(lower) ? 0 : 1; + if (aExact !== bExact) { + return aExact - bExact; + } + } + return 0; + }); +} + +export type ParsedSlashCommand = { + command: SlashCommandDef; + args: string; +}; + +/** + * Parse a message as a slash command. Returns null if it doesn't match. + * Supports `/command`, `/command args...`, and `/command: args...`. + */ +export function parseSlashCommand(text: string): ParsedSlashCommand | null { + const trimmed = text.trim(); + if (!trimmed.startsWith("/")) { + return null; + } + + const body = trimmed.slice(1); + const firstSeparator = body.search(/[\s:]/u); + const name = firstSeparator === -1 ? body : body.slice(0, firstSeparator); + let remainder = firstSeparator === -1 ? "" : body.slice(firstSeparator).trimStart(); + if (remainder.startsWith(":")) { + remainder = remainder.slice(1).trimStart(); + } + const args = remainder.trim(); + + if (!name) { + return null; + } + + const command = SLASH_COMMANDS.find((cmd) => cmd.name === name.toLowerCase()); + if (!command) { + return null; + } + + return { command, args }; +} diff --git a/ui/src/ui/chat/speech.ts b/ui/src/ui/chat/speech.ts new file mode 100644 index 00000000000..4db4e6944a1 --- /dev/null +++ b/ui/src/ui/chat/speech.ts @@ -0,0 +1,225 @@ +/** + * Browser-native speech services: STT via SpeechRecognition, TTS via SpeechSynthesis. + * Falls back gracefully when APIs are unavailable. 
+ */ + +// ─── STT (Speech-to-Text) ─── + +type SpeechRecognitionEvent = Event & { + results: SpeechRecognitionResultList; + resultIndex: number; +}; + +type SpeechRecognitionErrorEvent = Event & { + error: string; + message?: string; +}; + +interface SpeechRecognitionInstance extends EventTarget { + continuous: boolean; + interimResults: boolean; + lang: string; + start(): void; + stop(): void; + abort(): void; + onresult: ((event: SpeechRecognitionEvent) => void) | null; + onerror: ((event: SpeechRecognitionErrorEvent) => void) | null; + onend: (() => void) | null; + onstart: (() => void) | null; +} + +type SpeechRecognitionCtor = new () => SpeechRecognitionInstance; + +function getSpeechRecognitionCtor(): SpeechRecognitionCtor | null { + const w = globalThis as Record; + return (w.SpeechRecognition ?? w.webkitSpeechRecognition ?? null) as SpeechRecognitionCtor | null; +} + +export function isSttSupported(): boolean { + return getSpeechRecognitionCtor() !== null; +} + +export type SttCallbacks = { + onTranscript: (text: string, isFinal: boolean) => void; + onStart?: () => void; + onEnd?: () => void; + onError?: (error: string) => void; +}; + +let activeRecognition: SpeechRecognitionInstance | null = null; + +export function startStt(callbacks: SttCallbacks): boolean { + const Ctor = getSpeechRecognitionCtor(); + if (!Ctor) { + callbacks.onError?.("Speech recognition is not supported in this browser"); + return false; + } + + stopStt(); + + const recognition = new Ctor(); + recognition.continuous = true; + recognition.interimResults = true; + recognition.lang = navigator.language || "en-US"; + + recognition.addEventListener("start", () => callbacks.onStart?.()); + + recognition.addEventListener("result", (event) => { + const speechEvent = event as unknown as SpeechRecognitionEvent; + let interimTranscript = ""; + let finalTranscript = ""; + + for (let i = speechEvent.resultIndex; i < speechEvent.results.length; i++) { + const result = speechEvent.results[i]; + if 
(!result?.[0]) { + continue; + } + const transcript = result[0].transcript; + if (result.isFinal) { + finalTranscript += transcript; + } else { + interimTranscript += transcript; + } + } + + if (finalTranscript) { + callbacks.onTranscript(finalTranscript, true); + } else if (interimTranscript) { + callbacks.onTranscript(interimTranscript, false); + } + }); + + recognition.addEventListener("error", (event) => { + const speechEvent = event as unknown as SpeechRecognitionErrorEvent; + if (speechEvent.error === "aborted" || speechEvent.error === "no-speech") { + return; + } + callbacks.onError?.(speechEvent.error); + }); + + recognition.addEventListener("end", () => { + if (activeRecognition === recognition) { + activeRecognition = null; + } + callbacks.onEnd?.(); + }); + + activeRecognition = recognition; + recognition.start(); + return true; +} + +export function stopStt(): void { + if (activeRecognition) { + const r = activeRecognition; + activeRecognition = null; + try { + r.stop(); + } catch { + // already stopped + } + } +} + +export function isSttActive(): boolean { + return activeRecognition !== null; +} + +// ─── TTS (Text-to-Speech) ─── + +export function isTtsSupported(): boolean { + return "speechSynthesis" in globalThis; +} + +let currentUtterance: SpeechSynthesisUtterance | null = null; + +export function speakText( + text: string, + opts?: { + onStart?: () => void; + onEnd?: () => void; + onError?: (error: string) => void; + }, +): boolean { + if (!isTtsSupported()) { + opts?.onError?.("Speech synthesis is not supported in this browser"); + return false; + } + + stopTts(); + + const cleaned = stripMarkdown(text); + if (!cleaned.trim()) { + return false; + } + + const utterance = new SpeechSynthesisUtterance(cleaned); + utterance.rate = 1.0; + utterance.pitch = 1.0; + + utterance.addEventListener("start", () => opts?.onStart?.()); + utterance.addEventListener("end", () => { + if (currentUtterance === utterance) { + currentUtterance = null; + } + 
opts?.onEnd?.(); + }); + utterance.addEventListener("error", (e) => { + if (currentUtterance === utterance) { + currentUtterance = null; + } + if (e.error === "canceled" || e.error === "interrupted") { + return; + } + opts?.onError?.(e.error); + }); + + currentUtterance = utterance; + speechSynthesis.speak(utterance); + return true; +} + +export function stopTts(): void { + if (currentUtterance) { + currentUtterance = null; + } + if (isTtsSupported()) { + speechSynthesis.cancel(); + } +} + +export function isTtsSpeaking(): boolean { + return isTtsSupported() && speechSynthesis.speaking; +} + +/** Strip common markdown syntax for cleaner speech output. */ +function stripMarkdown(text: string): string { + return ( + text + // code blocks + .replace(/```[\s\S]*?```/g, "") + // inline code + .replace(/`[^`]+`/g, "") + // images + .replace(/!\[.*?\]\(.*?\)/g, "") + // links → keep text + .replace(/\[([^\]]+)\]\(.*?\)/g, "$1") + // headings + .replace(/^#{1,6}\s+/gm, "") + // bold/italic + .replace(/\*{1,3}(.*?)\*{1,3}/g, "$1") + .replace(/_{1,3}(.*?)_{1,3}/g, "$1") + // blockquotes + .replace(/^>\s?/gm, "") + // horizontal rules + .replace(/^[-*_]{3,}\s*$/gm, "") + // list markers + .replace(/^\s*[-*+]\s+/gm, "") + .replace(/^\s*\d+\.\s+/gm, "") + // HTML tags + .replace(/<[^>]+>/g, "") + // collapse whitespace + .replace(/\n{3,}/g, "\n\n") + .trim() + ); +} diff --git a/ui/src/ui/types.ts b/ui/src/ui/types.ts index 10d66068288..a2308100db5 100644 --- a/ui/src/ui/types.ts +++ b/ui/src/ui/types.ts @@ -366,6 +366,7 @@ export type AgentsFilesSetResult = { export type GatewaySessionRow = { key: string; + spawnedBy?: string; kind: "direct" | "group" | "global" | "unknown"; label?: string; displayName?: string;