Merge branch 'main' into fix/acp-topic-binding-media-forwarding

This commit is contained in:
lumenclaw-cloud 2026-03-12 03:27:12 -07:00 committed by GitHub
commit f2358a9f66
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
358 changed files with 16744 additions and 2062 deletions

View File

@ -12991,7 +12991,7 @@
"filename": "ui/src/i18n/locales/en.ts",
"hashed_secret": "de0ff6b974d6910aca8d6b830e1b761f076d8fe6",
"is_verified": false,
"line_number": 61
"line_number": 74
}
],
"ui/src/i18n/locales/pt-BR.ts": [
@ -13000,7 +13000,7 @@
"filename": "ui/src/i18n/locales/pt-BR.ts",
"hashed_secret": "ef7b6f95faca2d7d3a5aa5a6434c89530c6dd243",
"is_verified": false,
"line_number": 61
"line_number": 73
}
],
"vendor/a2ui/README.md": [

View File

@ -4,20 +4,56 @@ Docs: https://docs.openclaw.ai
## Unreleased
### Security
- Security/exec approvals: escape invisible Unicode format characters in approval prompts so zero-width command text renders as visible `\u{...}` escapes instead of spoofing the reviewed command. (`GHSA-pcqg-f7rg-xfvv`)(#43687) Thanks @EkiXu and @vincentkoc.
- Security/device pairing: cap issued and verified device-token scopes to each paired device's approved scope baseline so stale or overbroad tokens cannot exceed approved access. (`GHSA-2pwv-x786-56f8`)(#43686) Thanks @tdjackey and @vincentkoc.
- Security/proxy attachments: restore the shared media-store size cap for persisted browser proxy files so oversized payloads are rejected instead of overriding the intended 5 MB limit. (`GHSA-6rph-mmhp-h7h9`)(#43684) Thanks @tdjackey and @vincentkoc.
- Security/host env: block inherited `GIT_EXEC_PATH` from sanitized host exec environments so Git helper resolution cannot be steered by host environment state. (`GHSA-jf5v-pqgw-gm5m`)(#43685) Thanks @zpbrent and @vincentkoc.
- Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via `session_status`. (`GHSA-wcxr-59v9-rxr8`)(#43754) Thanks @tdjackey and @vincentkoc.
- Models/secrets: enforce source-managed SecretRef markers in generated `models.json` so runtime-resolved provider secrets are not persisted when runtime projection is skipped. (#43759) Thanks @joshavant.
- Security/browser.request: block persistent browser profile create/delete routes from write-scoped `browser.request` so callers can no longer persist admin-only browser profile changes through the browser control surface. (`GHSA-vmhq-cqm9-6p7q`)(#43800) Thanks @tdjackey and @vincentkoc.
- Security/agent: reject public spawned-run lineage fields and keep workspace inheritance on the internal spawned-session path so external `agent` callers can no longer override the gateway workspace boundary. (`GHSA-2rqg-gjgv-84jm`)(#43801) Thanks @tdjackey and @vincentkoc.
- Security/exec allowlist: preserve POSIX case sensitivity and keep `?` within a single path segment so exact-looking allowlist patterns no longer overmatch executables across case or directory boundaries. (`GHSA-f8r2-vg7x-gh8m`)(#43798) Thanks @zpbrent and @vincentkoc.
### Changes
- Gateway/node pending work: add narrow in-memory pending-work queue primitives (`node.pending.enqueue` / `node.pending.drain`) and wake-helper reuse as a foundation for dormant-node work delivery. (#41409) Thanks @mbelinky.
- Git/runtime state: ignore the gateway-generated `.dev-state` file so local runtime state does not show up as untracked repo noise. (#41848) Thanks @smysle.
- ACP/sessions_spawn: add optional `resumeSessionId` for `runtime: "acp"` so spawned ACP sessions can resume an existing ACPX/Codex conversation instead of always starting fresh. (#41847) Thanks @pejmanjohn.
- Exec/child commands: mark child command environments with `OPENCLAW_CLI` so subprocesses can detect when they were launched from the OpenClaw CLI. (#41411) Thanks @vincentkoc.
### Fixes
- Cron/proactive delivery: keep isolated direct cron sends out of the write-ahead resend queue so transient-send retries do not replay duplicate proactive messages after restart. (#40646) Thanks @openperf and @vincentkoc.
- TUI/chat log: reuse the active assistant message component for the same streaming run so `openclaw tui` no longer renders duplicate assistant replies. (#35364) Thanks @lisitan.
- macOS/Reminders: add the missing `NSRemindersUsageDescription` to the bundled app so `apple-reminders` can trigger the system permission prompt from OpenClaw.app. (#8559) Thanks @dinakars777.
- iMessage/self-chat echo dedupe: drop reflected duplicate copies only when a matching `is_from_me` event was just seen for the same chat, text, and `created_at`, preventing self-chat loops without broad text-only suppression. Related to #32166. (#38440) Thanks @vincentkoc.
- Mattermost/block streaming: fix duplicate message delivery (one threaded, one top-level) when block streaming is active by excluding `replyToId` from the block reply dedup key and adding an explicit `threading` dock to the Mattermost plugin. (#41362) Thanks @mathiasnagler and @vincentkoc.
- BlueBubbles/self-chat echo dedupe: drop reflected duplicate webhook copies only when a matching `fromMe` event was just seen for the same chat, body, and timestamp, preventing self-chat loops without broad webhook suppression. Related to #32166. (#38442) Thanks @vincentkoc.
- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz.
- Sandbox/write: preserve pinned mutation-helper payload stdin so sandboxed `write` no longer reports success while creating empty files. (#43876) Thanks @glitch418x.
- Gateway/main-session routing: keep TUI and other `mode:UI` main-session sends on the internal surface when `deliver` is enabled, so replies no longer inherit the session's persisted Telegram/WhatsApp route. (#43918) Thanks @obviyus.
## 2026.3.11
### Security
- Gateway/WebSocket: enforce browser origin validation for all browser-originated connections regardless of whether proxy headers are present, closing a cross-site WebSocket hijacking path in `trusted-proxy` mode that could grant untrusted origins `operator.admin` access. (GHSA-5wcw-8jjv-m286)
### Changes
- OpenRouter/models: add temporary Hunter Alpha and Healer Alpha entries to the built-in catalog so OpenRouter users can try the new free stealth models during their roughly one-week availability window. (#43642) Thanks @ping-Toven.
- iOS/Home canvas: add a bundled welcome screen with a live agent overview that refreshes on connect, reconnect, and foreground return, and move the compact connection pill off the top-left canvas overlay. (#42456) Thanks @ngutman.
- iOS/Home canvas: replace floating controls with a docked toolbar, make the bundled home scaffold adapt to smaller phones, and open chat in the resolved main session instead of a synthetic `ios` session. (#42456) Thanks @ngutman.
- Discord/auto threads: add `autoArchiveDuration` channel config for auto-created threads so Discord thread archiving can stay at 1 hour, 1 day, 3 days, or 1 week instead of always using the 1-hour default. (#35065) Thanks @davidguttman.
- OpenCode/onboarding: add new OpenCode Go provider, treat Zen and Go as one OpenCode setup in the wizard/docs while keeping the runtime providers split, store one shared OpenCode key for both profiles, and stop overriding the built-in `opencode-go` catalog routing. (#42313) Thanks @ImLukeF and @vincentkoc.
- macOS/chat UI: add a chat model picker, persist explicit thinking-level selections across relaunch, and harden provider-aware session model sync for the shared chat composer. (#42314) Thanks @ImLukeF.
- iOS/TestFlight: add a local beta release flow with Fastlane prepare/archive/upload support, canonical beta bundle IDs, and watch-app archive fixes. (#42991) Thanks @ngutman.
- macOS/onboarding: detect when remote gateways need a shared auth token, explain where to find it on the gateway host, and clarify when a successful check used paired-device auth instead. (#43100) Thanks @ngutman.
- Onboarding/Ollama: add first-class Ollama setup with Local or Cloud + Local modes, browser-based cloud sign-in, curated model suggestions, and cloud-model handling that skips unnecessary local pulls. (#41529) Thanks @BruceMacD.
- OpenCode/onboarding: add new OpenCode Go provider, treat Zen and Go as one OpenCode setup in the wizard/docs while keeping the runtime providers split, store one shared OpenCode key for both profiles, and stop overriding the built-in `opencode-go` catalog routing. (#42313) Thanks @ImLukeF and @vincentkoc.
- Memory: add opt-in multimodal image and audio indexing for `memorySearch.extraPaths` with Gemini `gemini-embedding-2-preview`, strict fallback gating, and scope-based reindexing. (#43460) Thanks @gumadeiras.
- Memory/Gemini: add `gemini-embedding-2-preview` memory-search support with configurable output dimensions and automatic reindexing when the configured dimensions change. (#42501) Thanks @BillChirico and @gumadeiras.
- macOS/onboarding: detect when remote gateways need a shared auth token, explain where to find it on the gateway host, and clarify when a successful check used paired-device auth instead. (#43100) Thanks @ngutman.
- Discord/auto threads: add `autoArchiveDuration` channel config for auto-created threads so Discord thread archiving can stay at 1 hour, 1 day, 3 days, or 1 week instead of always using the 1-hour default. (#35065) Thanks @davidguttman.
- iOS/TestFlight: add a local beta release flow with Fastlane prepare/archive/upload support, canonical beta bundle IDs, and watch-app archive fixes. (#42991) Thanks @ngutman.
- ACP/sessions_spawn: add optional `resumeSessionId` for `runtime: "acp"` so spawned ACP sessions can resume an existing ACPX/Codex conversation instead of always starting fresh. (#41847) Thanks @pejmanjohn.
- Gateway/node pending work: add narrow in-memory pending-work queue primitives (`node.pending.enqueue` / `node.pending.drain`) and wake-helper reuse as a foundation for dormant-node work delivery. (#41409) Thanks @mbelinky.
- Git/runtime state: ignore the gateway-generated `.dev-state` file so local runtime state does not show up as untracked repo noise. (#41848) Thanks @smysle.
- Exec/child commands: mark child command environments with `OPENCLAW_CLI` so subprocesses can detect when they were launched from the OpenClaw CLI. (#41411) Thanks @vincentkoc.
- LLM Task/Lobster: add an optional `thinking` override so workflow calls can explicitly set embedded reasoning level with shared validation for invalid values and unsupported `xhigh` modes. (#15606) Thanks @xadenryan and @ImLukeF.
### Breaking
@ -26,81 +62,97 @@ Docs: https://docs.openclaw.ai
### Fixes
- Agents/text sanitization: strip leaked model control tokens (`<|...|>` and full-width `<...>` variants) from user-facing assistant text, preventing GLM-5 and DeepSeek internal delimiters from reaching end users. (#42173) Thanks @imwyvern.
- Resolve web tool SecretRefs atomically at runtime. (#41599) Thanks @joshavant.
- Feishu/local image auto-convert: pass `mediaLocalRoots` through the `sendText` local-image shim so allowed local image paths upload as Feishu images again instead of falling back to raw path text. (#40623) Thanks @ayanesakura.
- ACP/ACPX plugin: bump the bundled `acpx` pin to `0.1.16` so plugin-local installs and strict version checks match the latest published CLI. (#41975) Thanks @dutifulbob.
- macOS/LaunchAgent install: tighten LaunchAgent directory and plist permissions during install so launchd bootstrap does not fail when the target home path or generated plist inherited group/world-writable modes.
- iOS/gateway foreground recovery: reconnect immediately on foreground return after stale background sockets are torn down, so the app no longer stays disconnected until a later wake path happens. (#41384) Thanks @mbelinky.
- Gateway/Control UI: keep dashboard auth tokens in session-scoped browser storage so same-tab refreshes preserve remote token auth without restoring long-lived localStorage token persistence, while scoping tokens to the selected gateway URL and fragment-only bootstrap flow. (#40892) Thanks @velvet-shark.
- Gateway/macOS launchd restarts: keep the LaunchAgent registered during explicit restarts, hand off self-restarts through a detached launchd helper, and recover config/hot reload restart paths without unloading the service. Fixes #43311, #43406, #43035, and #43049.
- macOS/LaunchAgent install: tighten LaunchAgent directory and plist permissions during install so launchd bootstrap does not fail when the target home path or generated plist inherited group/world-writable modes.
- Discord/reply chunking: resolve the effective `maxLinesPerMessage` config across live reply paths and preserve `chunkMode` in the fast send path so long Discord replies no longer split unexpectedly at the default 17-line limit. (#40133) Thanks @rbutera.
- Feishu/local image auto-convert: pass `mediaLocalRoots` through the `sendText` local-image shim so allowed local image paths upload as Feishu images again instead of falling back to raw path text. (#40623) Thanks @ayanesakura.
- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz.
- Telegram/outbound HTML sends: chunk long HTML-mode messages, preserve plain-text fallback and silent-delivery params across retries, and cut over to plain text when HTML chunk planning cannot safely preserve the full message. (#42240) Thanks @obviyus.
- Telegram/final preview delivery: split active preview lifecycle from cleanup retention so missing archived preview edits avoid duplicate fallback sends without clearing the live preview or blocking later in-place finalization. (#41662) Thanks @hougangdev.
- Telegram/final preview delivery followup: keep ambiguous missing-`message_id` finals only when a preview was already visible, while first-preview/no-id cases still fall back so Telegram users do not lose the final reply. (#41932) Thanks @hougangdev.
- Telegram/final preview cleanup follow-up: clear stale cleanup-retain state only for transient preview finals so archived-preview retains no longer leave a stale partial bubble beside a later fallback-sent final. (#41763) Thanks @obviyus.
- Telegram/poll restarts: scope process-level polling restarts to real Telegram `getUpdates` failures so unrelated network errors, such as Slack DNS misses, no longer bounce Telegram polling. (#43799) Thanks @obviyus.
- Gateway/auth: allow one trusted device-token retry on shared-token mismatch with recovery hints to prevent reconnect churn during token drift. (#42507) Thanks @joshavant.
- Gateway/config errors: surface up to three validation issues in top-level `config.set`, `config.patch`, and `config.apply` error messages while preserving structured issue details. (#42664) Thanks @huntharo.
- Agents/Azure OpenAI Responses: include the `azure-openai` provider in the Responses API store override so Azure OpenAI multi-turn cron jobs and embedded agent runs no longer fail with HTTP 400 "store is set to false". (#42934, fixes #42800) Thanks @ademczuk.
- Agents/error rendering: ignore stale assistant `errorMessage` fields on successful turns so background/tool-side failures no longer prepend synthetic billing errors over valid replies. (#40616) Thanks @ingyukoh.
- Agents/billing recovery: probe single-provider billing cooldowns on the existing throttle so topping up credits can recover without a manual gateway restart. (#41422) Thanks @altaywtf.
- Agents/fallback: treat HTTP 499 responses as transient in both raw-text and structured failover paths so Anthropic-style client-closed overload responses trigger model fallback reliably. (#41468) Thanks @zeroasterisk.
- Agents/fallback: recognize Venice `402 Insufficient USD or Diem balance` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#43205) Thanks @Squabble9.
- Agents/fallback: recognize Poe `402 You've used up your points!` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#42278) Thanks @CryUshio.
- Agents/failover: treat Gemini `MALFORMED_RESPONSE` stop reasons as retryable timeouts so preview-model enum drift falls back cleanly instead of crashing the run, without also reclassifying malformed function-call errors. (#42292) Thanks @jnMetaCode.
- Agents/cooldowns: default cooldown windows with no recorded failure history to `unknown` instead of `rate_limit`, avoiding false API rate-limit warnings while preserving cooldown recovery probes. (#42911) Thanks @VibhorGautam.
- Auth/cooldowns: reset expired auth-profile cooldown error counters before computing the next backoff so stale on-disk counters do not re-escalate into long cooldown loops after expiry. (#41028) Thanks @zerone0x.
- Agents/memory flush: forward `memoryFlushWritePath` through `runEmbeddedPiAgent` so memory-triggered flush turns keep the append-only write guard without aborting before tool setup. Follows up on #38574. (#41761) Thanks @frankekn.
- Agents/context pruning: prune image-only tool results during soft-trim, align context-pruning coverage with the new tool-result contract, and extend historical image cleanup to the same screenshot-heavy session path. (#43045) Thanks @MoerAI.
- Sessions/reset model recompute: clear stale runtime model, context-token, and system-prompt metadata before session resets recompute the replacement session, so resets pick up current defaults and explicit overrides instead of reusing old runtime model state. (#41173) Thanks @PonyX-lab.
- Channels/allowlists: remove stale matcher caching so same-array allowlist edits and wildcard replacements take effect immediately, with regression coverage for in-place mutation cases.
- Discord/Telegram outbound runtime config: thread runtime-resolved config through Discord and Telegram send paths so SecretRef-based credentials stay resolved during message delivery. (#42352) Thanks @joshavant.
- Tools/web search: treat Brave `llm-context` grounding snippets as plain strings so `web_search` no longer returns empty snippet arrays in LLM Context mode. (#41387) Thanks @zheliu2.
- Tools/web search: recover OpenRouter Perplexity citation extraction from `message.annotations` when chat-completions responses omit top-level citations. (#40881) Thanks @laurieluo.
- CLI/skills JSON: strip ANSI and C1 control bytes from `skills list --json`, `skills info --json`, and `skills check --json` so machine-readable output stays valid for terminals and skill metadata with embedded control characters. Fixes #27530. Related #27557. Thanks @Jimmy-xuzimo and @vincentkoc.
- CLI/tables: default shared tables to ASCII borders on legacy Windows consoles while keeping Unicode borders on modern Windows terminals, so commands like `openclaw skills` stop rendering mojibake under GBK/936 consoles. Fixes #40853. Related #41015. Thanks @ApacheBin and @vincentkoc.
- CLI/memory teardown: close cached memory search/index managers in the one-shot CLI shutdown path so watcher-backed memory caches no longer keep completed CLI runs alive after output finishes. (#40389) Thanks @Julbarth.
- Control UI/Sessions: restore single-column session table collapse on narrow viewport or container widths by moving the responsive table override next to the base grid rule and enabling inline-size container queries. (#12175) Thanks @benjipeng.
- Telegram/network env-proxy: apply configured transport policy to proxied HTTPS dispatchers as well as direct `NO_PROXY` bypasses, so resolver-scoped IPv4 fallback and network settings work consistently for env-proxied Telegram traffic. (#40740) Thanks @sircrumpet.
- Mattermost/Markdown formatting: preserve first-line indentation when stripping bot mentions so nested list items and indented code blocks keep their structure, and render Mattermost tables natively by default instead of fenced-code fallback. (#18655) Thanks @echo931.
- Mattermost/plugin send actions: normalize direct `replyTo` fallback handling so threaded plugin sends trim blank IDs and reuse the correct reply target again. (#41176) Thanks @hnykda.
- MS Teams/allowlist resolution: use the General channel conversation ID as the resolved team key (with Graph GUID fallback) so Bot Framework runtime `channelData.team.id` matching works for team and team/channel allowlist entries. (#41838) Thanks @BradGroux.
- Signal/config schema: accept `channels.signal.accountUuid` in strict config validation so loop-protection configs no longer fail with an unrecognized-key error. (#35578) Thanks @ingyukoh.
- Telegram/config schema: accept `channels.telegram.actions.editMessage` and `createForumTopic` in strict config validation so existing Telegram action toggles no longer fail as unrecognized keys. (#35498) Thanks @ingyukoh.
- Telegram/docs: clarify that `channels.telegram.groups` allowlists chats while `groupAllowFrom` allowlists users inside those chats, and point invalid negative chat IDs at the right config key. (#42451) Thanks @altaywtf.
- Discord/config typing: expose channel-level `autoThread` on the canonical guild-channel config type so strict config loading matches the existing Discord schema and runtime behavior. (#35608) Thanks @ingyukoh.
- Models/capability checks: guard optional `model.input` capability checks. (#42096) Thanks @andyliu.
- Models/Alibaba Cloud Model Studio: wire `MODELSTUDIO_API_KEY` through shared env auth, implicit provider discovery, and shell-env fallback so onboarding works outside the wizard too. (#40634) Thanks @pomelo-nwu.
- Resolve web tool SecretRefs atomically at runtime. (#41599) Thanks @joshavant.
- Secret files: harden CLI and channel credential file reads against path-swap races by requiring direct regular files for `*File` secret inputs and rejecting symlink-backed secret files.
- Archive extraction: harden TAR and external `tar.bz2` installs against destination symlink and pre-existing child-symlink escapes by extracting into staging first and merging into the canonical destination with safe file opens.
- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz.
- Context engine/tests: add bundled-registry regression coverage for cross-chunk resolution, plugin-sdk re-exports, and concurrent chunk registration. (#40460) Thanks @dsantoreis.
- Agents/embedded runner: bound compaction retry waiting and drain embedded runs during SIGUSR1 restart so session lanes recover instead of staying blocked behind compaction. (#40324) Thanks @cgdusek.
- Secrets/SecretRef: reject exec SecretRef traversal ids across schema, runtime, and gateway. (#42370) Thanks @joshavant.
- Sandbox/fs bridge: pin staged writes to verified parent directories so temporary write files cannot materialize outside the allowed mount before atomic replace. Thanks @tdjackey.
- Gateway/auth: fail closed when local `gateway.auth.*` SecretRefs are configured but unavailable, instead of silently falling back to `gateway.remote.*` credentials in local mode. (#42672) Thanks @joshavant.
- Commands/config writes: enforce `configWrites` against both the originating account and the targeted account scope for `/config` and config-backed `/allowlist` edits, blocking sibling-account mutations while preserving gateway `operator.admin` flows. Thanks @tdjackey for reporting.
- Security/system.run: fail closed for approval-backed interpreter/runtime commands when OpenClaw cannot bind exactly one concrete local file operand, while extending best-effort direct-file binding to additional runtime forms. Thanks @tdjackey for reporting.
- Gateway/session reset auth: split conversation `/new` and `/reset` handling away from the admin-only `sessions.reset` control-plane RPC so write-scoped gateway callers can no longer reach the privileged reset path through `agent`. Thanks @tdjackey for reporting.
- Security/plugin runtime: stop unauthenticated plugin HTTP routes from inheriting synthetic admin gateway scopes when they call `runtime.subagent.*`, so admin-only methods like `sessions.delete` stay blocked without gateway auth.
- Security/nodes: treat the `nodes` agent tool as owner-only fallback policy so non-owner senders cannot reach paired-node approval or invoke paths through the shared tool set.
- Security/external content: treat whitespace-delimited `EXTERNAL UNTRUSTED CONTENT` boundary markers like underscore-delimited variants so prompt wrappers cannot bypass marker sanitization. (#35983) Thanks @urianpaul94.
- Telegram/exec approvals: reject `/approve` commands aimed at other bots, keep deterministic approval prompts visible when tool-result delivery fails, and stop resolved exact IDs from matching other pending approvals by prefix. (#37233) Thanks @huntharo.
- Subagents/authority: persist leaf vs orchestrator control scope at spawn time and route tool plus slash-command control through shared ownership checks, so leaf sessions cannot regain orchestration privileges after restore or flat-key lookups. Thanks @tdjackey.
- ACP/ACPX plugin: bump the bundled `acpx` pin to `0.1.16` so plugin-local installs and strict version checks match the latest published CLI. (#41975) Thanks @dutifulbob.
- ACP/sessions.patch: allow `spawnedBy` and `spawnDepth` lineage fields on ACP session keys so `sessions_spawn` with `runtime: "acp"` no longer fails during child-session setup. Fixes #40971. (#40995) Thanks @xaeon2026.
- ACP/stop reason mapping: resolve gateway chat `state: "error"` completions as ACP `end_turn` instead of `refusal` so transient backend failures are not surfaced as deliberate refusals. (#41187) Thanks @pejmanjohn.
- ACP/setSessionMode: propagate gateway `sessions.patch` failures back to ACP clients so rejected mode changes no longer return silent success. (#41185) Thanks @pejmanjohn.
- Agents/embedded logs: add structured, sanitized lifecycle and failover observation events so overload and provider failures are easier to tail and filter. (#41336) Thanks @altaywtf.
- iOS/gateway foreground recovery: reconnect immediately on foreground return after stale background sockets are torn down, so the app no longer stays disconnected until a later wake path happens. (#41384) Thanks @mbelinky.
- Cron/subagent followup: do not misclassify empty or `NO_REPLY` cron responses as interim acknowledgements that need a rerun, so deliberately silent cron jobs are no longer retried. (#41383) Thanks @jackal092927.
- Auth/cooldowns: reset expired auth-profile cooldown error counters before computing the next backoff so stale on-disk counters do not re-escalate into long cooldown loops after expiry. (#41028) Thanks @zerone0x.
- Gateway/node pending drain followup: keep `hasMore` true when the deferred baseline status item still needs delivery, and avoid allocating empty pending-work state for drain-only nodes with no queued work. (#41429) Thanks @mbelinky.
- ACP/bridge mode: reject unsupported per-session MCP server setup and propagate rejected session-mode changes so IDE clients see explicit bridge limitations instead of silent success. (#41424) Thanks @mbelinky.
- ACP/session UX: replay stored user and assistant text on `loadSession`, expose Gateway-backed session controls and metadata, and emit approximate session usage updates so IDE clients restore context more faithfully. (#41425) Thanks @mbelinky.
- ACP/tool streaming: enrich `tool_call` and `tool_call_update` events with best-effort text content and file-location hints so IDE clients can follow bridge tool activity more naturally. (#41442) Thanks @mbelinky.
- ACP/runtime attachments: forward normalized inbound image attachments into ACP runtime turns so ACPX sessions can preserve image prompt content on the runtime path. (#41427) Thanks @mbelinky.
- ACP/regressions: add gateway RPC coverage for ACP lineage patching, ACPX runtime coverage for image prompt serialization, and an operator smoke-test procedure for live ACP spawn verification. (#41456) Thanks @mbelinky.
- Agents/billing recovery: probe single-provider billing cooldowns on the existing throttle so topping up credits can recover without a manual gateway restart. (#41422) Thanks @altaywtf.
- ACP/follow-up hardening: make session restore and prompt completion degrade gracefully on transcript/update failures, enforce bounded tool-location traversal, and skip non-image ACPX turns the runtime cannot serialize. (#41464) Thanks @mbelinky.
- Agents/fallback observability: add structured, sanitized model-fallback decision and auth-profile failure-state events with correlated run IDs so cooldown probes and failover paths are easier to trace in logs. (#41337) Thanks @altaywtf.
- Protocol/Swift model sync: regenerate pending node work Swift bindings after the landed `node.pending.*` schema additions so generated protocol artifacts are consistent again. (#41477) Thanks @mbelinky.
- Discord/reply chunking: resolve the effective `maxLinesPerMessage` config across live reply paths and preserve `chunkMode` in the fast send path so long Discord replies no longer split unexpectedly at the default 17-line limit. (#40133) Thanks @rbutera.
- Logging/probe observations: suppress structured embedded and model-fallback probe warnings on the console without hiding error or fatal output. (#41338) Thanks @altaywtf.
- Agents/fallback: treat HTTP 499 responses as transient in both raw-text and structured failover paths so Anthropic-style client-closed overload responses trigger model fallback reliably. (#41468) Thanks @zeroasterisk.
- ACP/sessions_spawn: implicitly stream `mode="run"` ACP spawns to parent only for eligible subagent orchestrator sessions (heartbeat `target: "last"` with a usable session-local route), restoring parent progress relays without thread binding. (#42404) Thanks @davidguttman.
- ACP/main session aliases: canonicalize `main` before ACP session lookup so restarted ACP main sessions rehydrate instead of failing closed with `Session is not ACP-enabled: main`. (#43285, fixes #25692)
- Plugins/context-engine model auth: expose `runtime.modelAuth` and plugin-sdk auth helpers so plugins can resolve provider/model API keys through the normal auth pipeline. (#41090) Thanks @xinhuagu.
- CLI/memory teardown: close cached memory search/index managers in the one-shot CLI shutdown path so watcher-backed memory caches no longer keep completed CLI runs alive after output finishes. (#40389) Thanks @Julbarth.
- Tools/web search: treat Brave `llm-context` grounding snippets as plain strings so `web_search` no longer returns empty snippet arrays in LLM Context mode. (#41387) Thanks @zheliu2.
- Telegram/exec approvals: reject `/approve` commands aimed at other bots, keep deterministic approval prompts visible when tool-result delivery fails, and stop resolved exact IDs from matching other pending approvals by prefix. (#37233) Thanks @huntharo.
- Control UI/Sessions: restore single-column session table collapse on narrow viewport or container widths by moving the responsive table override next to the base grid rule and enabling inline-size container queries. (#12175) Thanks @benjipeng.
- Telegram/final preview delivery: split active preview lifecycle from cleanup retention so missing archived preview edits avoid duplicate fallback sends without clearing the live preview or blocking later in-place finalization. (#41662) Thanks @hougangdev.
- Hooks/plugin context parity followup: pass `trigger` and `channelId` through embedded `llm_input`, `agent_end`, and `llm_output` hook contexts so plugins receive the same agent metadata across hook phases. (#42362) Thanks @zhoulf1006.
- Plugins/global hook runner: harden singleton state handling so shared global hook runner reuse does not leak or corrupt runner state across executions. (#40184) Thanks @vincentkoc.
- Context engine/tests: add bundled-registry regression coverage for cross-chunk resolution, plugin-sdk re-exports, and concurrent chunk registration. (#40460) thanks @dsantoreis.
- Agents/embedded runner: bound compaction retry waiting and drain embedded runs during SIGUSR1 restart so session lanes recover instead of staying blocked behind compaction. (#40324) thanks @cgdusek.
- Agents/embedded logs: add structured, sanitized lifecycle and failover observation events so overload and provider failures are easier to tail and filter. (#41336) thanks @altaywtf.
- Agents/embedded overload logs: include the failing model and provider in error-path console output, with lifecycle regression coverage for the rendered and sanitized `consoleMessage`. (#41236) thanks @jiarung.
- Agents/fallback observability: add structured, sanitized model-fallback decision and auth-profile failure-state events with correlated run IDs so cooldown probes and failover paths are easier to trace in logs. (#41337) thanks @altaywtf.
- Logging/probe observations: suppress structured embedded and model-fallback probe warnings on the console without hiding error or fatal output. (#41338) thanks @altaywtf.
- Agents/context-engine compaction: guard thrown engine-owned overflow compaction attempts and fire compaction hooks for `ownsCompaction` engines so overflow recovery no longer crashes and plugin subscribers still observe compact runs. (#41361) thanks @davidrudduck.
- Gateway/node pending drain followup: keep `hasMore` true when the deferred baseline status item still needs delivery, and avoid allocating empty pending-work state for drain-only nodes with no queued work. (#41429) Thanks @mbelinky.
- Protocol/Swift model sync: regenerate pending node work Swift bindings after the landed `node.pending.*` schema additions so generated protocol artifacts are consistent again. (#41477) Thanks @mbelinky.
- Cron/subagent followup: do not misclassify empty or `NO_REPLY` cron responses as interim acknowledgements that need a rerun, so deliberately silent cron jobs are no longer retried. (#41383) thanks @jackal092927.
- Cron/state errors: record `lastErrorReason` in cron job state and keep the gateway schema aligned with the full failover-reason set, including regression coverage for protocol conformance. (#14382) thanks @futuremind2026.
- Tools/web search: recover OpenRouter Perplexity citation extraction from `message.annotations` when chat-completions responses omit top-level citations. (#40881) Thanks @laurieluo.
- Security/external content: treat whitespace-delimited `EXTERNAL UNTRUSTED CONTENT` boundary markers like underscore-delimited variants so prompt wrappers cannot bypass marker sanitization. (#35983) Thanks @urianpaul94.
- Telegram/network env-proxy: apply configured transport policy to proxied HTTPS dispatchers as well as direct `NO_PROXY` bypasses, so resolver-scoped IPv4 fallback and network settings work consistently for env-proxied Telegram traffic. (#40740) Thanks @sircrumpet.
- Agents/memory flush: forward `memoryFlushWritePath` through `runEmbeddedPiAgent` so memory-triggered flush turns keep the append-only write guard without aborting before tool setup. Follows up on #38574. (#41761) Thanks @frankekn.
- Browser/Browserbase 429 handling: surface stable no-retry rate-limit guidance without buffering discarded HTTP 429 response bodies from remote browser services. (#40491) thanks @mvanhorn.
- CI/CodeQL Swift toolchain: select Xcode 26.1 before installing Swift build tools so the CodeQL Swift job uses Swift tools 6.2 on `macos-latest`. (#41787) thanks @BunsDev.
- Sandbox/subagents: pass the real configured workspace through `sessions_spawn` inheritance when a parent agent runs in a copied-workspace sandbox, so child `/agent` mounts point at the configured workspace instead of the parent sandbox copy. (#40757) Thanks @dsantoreis.
- Mattermost/plugin send actions: normalize direct `replyTo` fallback handling so threaded plugin sends trim blank IDs and reuse the correct reply target again. (#41176) Thanks @hnykda.
- MS Teams/allowlist resolution: use the General channel conversation ID as the resolved team key (with Graph GUID fallback) so Bot Framework runtime `channelData.team.id` matching works for team and team/channel allowlist entries. (#41838) Thanks @BradGroux.
- Mattermost/Markdown formatting: preserve first-line indentation when stripping bot mentions so nested list items and indented code blocks keep their structure, and render Mattermost tables natively by default instead of fenced-code fallback. (#18655) thanks @echo931.
- Agents/fallback cooldown probing: cap cooldown-bypass probing to one attempt per provider per fallback run so multi-model same-provider cooldown chains can continue to cross-provider fallbacks instead of repeatedly stalling on duplicate cooldown probes. (#41711) Thanks @cgdusek.
- Telegram/direct delivery: bridge direct delivery sends to internal `message:sent` hooks so internal hook listeners observe successful Telegram deliveries. (#40185) Thanks @vincentkoc.
- Agents/fallback: recognize Poe `402 You've used up your points!` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#42278) Thanks @CryUshio.
- Telegram/outbound HTML sends: chunk long HTML-mode messages, preserve plain-text fallback and silent-delivery params across retries, and cut over to plain text when HTML chunk planning cannot safely preserve the full message. (#42240) thanks @obviyus.
- Agents/failover: treat Gemini `MALFORMED_RESPONSE` stop reasons as retryable timeouts so preview-model enum drift falls back cleanly instead of crashing the run, without also reclassifying malformed function-call errors. (#42292) Thanks @jnMetaCode.
- Discord/Telegram outbound runtime config: thread runtime-resolved config through Discord and Telegram send paths so SecretRef-based credentials stay resolved during message delivery. (#42352) Thanks @joshavant.
- Secrets/SecretRef: reject exec SecretRef traversal ids across schema, runtime, and gateway. (#42370) Thanks @joshavant.
- Telegram/docs: clarify that `channels.telegram.groups` allowlists chats while `groupAllowFrom` allowlists users inside those chats, and point invalid negative chat IDs at the right config key. (#42451) Thanks @altaywtf.
- Models/Alibaba Cloud Model Studio: wire `MODELSTUDIO_API_KEY` through shared env auth, implicit provider discovery, and shell-env fallback so onboarding works outside the wizard too. (#40634) Thanks @pomelo-nwu.
- Subagents/authority: persist leaf vs orchestrator control scope at spawn time and route tool plus slash-command control through shared ownership checks, so leaf sessions cannot regain orchestration privileges after restore or flat-key lookups. Thanks @tdjackey.
- Sessions/reset model recompute: clear stale runtime model, context-token, and system-prompt metadata before session resets recompute the replacement session, so resets pick up current defaults and explicit overrides instead of reusing old runtime model state. (#41173) thanks @PonyX-lab.
- Gateway/auth: allow one trusted device-token retry on shared-token mismatch with recovery hints to prevent reconnect churn during token drift. (#42507) Thanks @joshavant.
- Channels/allowlists: remove stale matcher caching so same-array allowlist edits and wildcard replacements take effect immediately, with regression coverage for in-place mutation cases.
- Gateway/auth: fail closed when local `gateway.auth.*` SecretRefs are configured but unavailable, instead of silently falling back to `gateway.remote.*` credentials in local mode. (#42672) Thanks @joshavant.
- Sandbox/fs bridge: pin staged writes to verified parent directories so temporary write files cannot materialize outside the allowed mount before atomic replace. Thanks @tdjackey.
- Commands/config writes: enforce `configWrites` against both the originating account and the targeted account scope for `/config` and config-backed `/allowlist` edits, blocking sibling-account mutations while preserving gateway `operator.admin` flows. Thanks @tdjackey for reporting.
- Security/system.run: fail closed for approval-backed interpreter/runtime commands when OpenClaw cannot bind exactly one concrete local file operand, while extending best-effort direct-file binding to additional runtime forms. Thanks @tdjackey for reporting.
- Gateway/session reset auth: split conversation `/new` and `/reset` handling away from the admin-only `sessions.reset` control-plane RPC so write-scoped gateway callers can no longer reach the privileged reset path through `agent`. Thanks @tdjackey for reporting.
- Telegram/final preview delivery followup: keep ambiguous missing-`message_id` finals only when a preview was already visible, while first-preview/no-id cases still fall back so Telegram users do not lose the final reply. (#41932) thanks @hougangdev.
- Agents/Azure OpenAI Responses: include the `azure-openai` provider in the Responses API store override so Azure OpenAI multi-turn cron jobs and embedded agent runs no longer fail with HTTP 400 "store is set to false". (#42934, fixes #42800) Thanks @ademczuk.
- Agents/context pruning: prune image-only tool results during soft-trim, align context-pruning coverage with the new tool-result contract, and extend historical image cleanup to the same screenshot-heavy session path. (#43045) Thanks @MoerAI.
- Models/capabilities: guard optional `model.input` capability checks. (#42096) thanks @andyliu.
- Security/plugin runtime: stop unauthenticated plugin HTTP routes from inheriting synthetic admin gateway scopes when they call `runtime.subagent.*`, so admin-only methods like `sessions.delete` stay blocked without gateway auth.
- Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via `session_status`.
- Security/nodes: treat the `nodes` agent tool as owner-only fallback policy so non-owner senders cannot reach paired-node approval or invoke paths through the shared tool set.
- Dependencies: refresh workspace dependencies except the pinned Carbon package, and harden ACP session-config writes against non-string SDK values so newer ACP clients fail fast instead of tripping type/runtime mismatches.
- Telegram/polling restarts: clear bounded cleanup timeout handles after `runner.stop()` and `bot.stop()` settle so stall recovery no longer leaves stray 15-second timers behind on clean shutdown. (#43188) thanks @kyohwang.
## 2026.3.8
@ -174,6 +226,8 @@ Docs: https://docs.openclaw.ai
- SecretRef/models: harden custom/provider secret persistence and reuse across models.json snapshots, merge behavior, runtime headers, and secret audits. (#42554) Thanks @joshavant.
- macOS/browser proxy: serialize non-GET browser proxy request bodies through `AnyCodable.foundationValue` so nested JSON bodies no longer crash the macOS app with `Invalid type in JSON write (__SwiftValue)`. (#43069) Thanks @Effet.
- CLI/skills tables: keep terminal table borders aligned for wide graphemes, use full reported terminal width, and switch a few ambiguous skill icons to Terminal-safe emoji so `openclaw skills` renders more consistently in Terminal.app and iTerm. Thanks @vincentkoc.
- Memory/Gemini: normalize returned Gemini embeddings across direct query, direct batch, and async batch paths so memory search uses consistent vector handling for Gemini too. (#43409) Thanks @gumadeiras.
- Agents/failover: recognize additional serialized network errno strings plus `EHOSTDOWN` and `EPIPE` structured codes so transient transport failures trigger timeout failover more reliably. (#42830) Thanks @jnMetaCode.
## 2026.3.7
@ -534,6 +588,7 @@ Docs: https://docs.openclaw.ai
- Browser/config schema: accept `browser.profiles.*.driver: "openclaw"` while preserving legacy `"clawd"` compatibility in validated config. (#39374; based on #35621) Thanks @gambletan and @ingyukoh.
- Memory flush/bootstrap file protection: restrict memory-flush runs to append-only `read`/`write` tools and route host-side memory appends through root-enforced safe file handles so flush turns cannot overwrite bootstrap files via `exec` or unsafe raw rewrites. (#38574) Thanks @frankekn.
- Mattermost/DM media uploads: resolve bare 26-character Mattermost IDs user-first for direct messages so media sends no longer fail with `403 Forbidden` when targets are configured as unprefixed user IDs. (#29925) Thanks @teconomix.
- Voice-call/OpenAI TTS config parity: add missing `speed`, `instructions`, and `baseUrl` fields to the OpenAI TTS config schema and gate `instructions` to supported models so voice-call overrides validate and route cleanly through core TTS. (#39226) Thanks @ademczuk.
## 2026.3.2
@ -1041,6 +1096,7 @@ Docs: https://docs.openclaw.ai
- Browser/Navigate: resolve the correct `targetId` in navigate responses after renderer swaps. (#25326) Thanks @stone-jin and @vincentkoc.
- FS/Sandbox workspace boundaries: add a dedicated `outside-workspace` safe-open error code for root-escape checks, and propagate specific outside-workspace messages across edit/browser/media consumers instead of generic not-found/invalid-path fallbacks. (#29715) Thanks @YuzuruS.
- Diagnostics/Stuck session signal: add configurable stuck-session warning threshold via `diagnostics.stuckSessionWarnMs` (default 120000ms) to reduce false-positive warnings on long multi-tool turns. (#31032)
- Agents/error classification: check billing errors before context overflow heuristics in the agent runner catch block so spend-limit and quota errors show the billing-specific message instead of being misclassified as "Context overflow: prompt too large". (#40409) Thanks @ademczuk.
## 2026.2.26
@ -4013,6 +4069,7 @@ Thanks @AlexMikhalev, @CoreyH, @John-Rood, @KrauseFx, @MaudeBot, @Nachx639, @Nic
- Gateway/Daemon/Doctor: atomic config writes; repair gateway service entrypoint + install switches; non-interactive legacy migrations; systemd unit alignment + KillMode=process; node bridge keepalive/pings; Launch at Login persistence; bundle MoltbotKit resources + Swift 6.2 compat dylib; relay version check + remove smoke test; regen Swift GatewayModels + keep agent provider string; cron jobId alias + channel alias migration + main session key normalization; heartbeat Telegram accountId resolution; avoid WhatsApp fallback for internal runs; gateway listener error wording; serveBaseUrl param; honor gateway --dev; fix wide-area discovery updates; align agents.defaults schema; provider account metadata in daemon status; refresh Carbon patch for gateway fixes; restore doctor prompter initialValue handling.
- Control UI/TUI: persist per-session verbose off + hide tool cards; logs tab opens at bottom; relative asset paths + landing cleanup; session labels lookup/persistence; stop pinning main session in recents; start logs at bottom; TUI status bar refresh + timeout handling + hide reasoning label when off.
- Onboarding/Configure: QuickStart single-select provider picker; avoid Codex CLI false-expiry warnings; clarify WhatsApp owner prompt; fix Minimax hosted onboarding (agents.defaults + msteams heartbeat target); remove configure Control UI prompt; honor gateway --dev flag.
- Agent loop: guard overflow compaction throws and restore compaction hooks for engine-owned context engines. (#41361) — thanks @davidrudduck
### Maintenance

View File

@ -63,8 +63,8 @@ android {
applicationId = "ai.openclaw.app"
minSdk = 31
targetSdk = 36
versionCode = 202603090
versionName = "2026.3.9"
versionCode = 202603110
versionName = "2026.3.11"
ndk {
// Support all major ABIs — native libs are tiny (~47 KB per ABI)
abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64")

View File

@ -64,9 +64,9 @@ Release behavior:
- Beta release uses canonical `ai.openclaw.client*` bundle IDs through a temporary generated xcconfig in `apps/ios/build/BetaRelease.xcconfig`.
- The beta flow does not modify `apps/ios/.local-signing.xcconfig` or `apps/ios/LocalSigning.xcconfig`.
- Root `package.json.version` is the only version source for iOS.
- A root version like `2026.3.9-beta.1` becomes:
- `CFBundleShortVersionString = 2026.3.9`
- `CFBundleVersion = next TestFlight build number for 2026.3.9`
- A root version like `2026.3.11-beta.1` becomes:
- `CFBundleShortVersionString = 2026.3.11`
- `CFBundleVersion = next TestFlight build number for 2026.3.11`
Archive without upload:

View File

@ -99,7 +99,7 @@ def normalize_release_version(raw_value)
version = raw_value.to_s.strip.sub(/\Av/, "")
UI.user_error!("Missing root package.json version.") unless env_present?(version)
unless version.match?(/\A\d+\.\d+\.\d+(?:[.-]?beta[.-]\d+)?\z/i)
UI.user_error!("Invalid package.json version '#{raw_value}'. Expected 2026.3.9 or 2026.3.9-beta.1.")
UI.user_error!("Invalid package.json version '#{raw_value}'. Expected 2026.3.11 or 2026.3.11-beta.1.")
end
version

View File

@ -17,6 +17,7 @@ enum HostEnvSecurityPolicy {
"BASH_ENV",
"ENV",
"GIT_EXTERNAL_DIFF",
"GIT_EXEC_PATH",
"SHELL",
"SHELLOPTS",
"PS4",

View File

@ -15,9 +15,9 @@
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>2026.3.9</string>
<string>2026.3.11</string>
<key>CFBundleVersion</key>
<string>202603080</string>
<string>202603110</string>
<key>CFBundleIconFile</key>
<string>OpenClaw</string>
<key>CFBundleURLTypes</key>
@ -59,6 +59,8 @@
<string>OpenClaw uses speech recognition to detect your Voice Wake trigger phrase.</string>
<key>NSAppleEventsUsageDescription</key>
<string>OpenClaw needs Automation (AppleScript) permission to drive Terminal and other apps for agent actions.</string>
<key>NSRemindersUsageDescription</key>
<string>OpenClaw can access Reminders when requested by the agent for the apple-reminders skill.</string>
<key>NSAppTransportSecurity</key>
<dict>

View File

@ -538,8 +538,6 @@ public struct AgentParams: Codable, Sendable {
public let inputprovenance: [String: AnyCodable]?
public let idempotencykey: String
public let label: String?
public let spawnedby: String?
public let workspacedir: String?
public init(
message: String,
@ -566,9 +564,7 @@ public struct AgentParams: Codable, Sendable {
internalevents: [[String: AnyCodable]]?,
inputprovenance: [String: AnyCodable]?,
idempotencykey: String,
label: String?,
spawnedby: String?,
workspacedir: String?)
label: String?)
{
self.message = message
self.agentid = agentid
@ -595,8 +591,6 @@ public struct AgentParams: Codable, Sendable {
self.inputprovenance = inputprovenance
self.idempotencykey = idempotencykey
self.label = label
self.spawnedby = spawnedby
self.workspacedir = workspacedir
}
private enum CodingKeys: String, CodingKey {
@ -625,8 +619,6 @@ public struct AgentParams: Codable, Sendable {
case inputprovenance = "inputProvenance"
case idempotencykey = "idempotencyKey"
case label
case spawnedby = "spawnedBy"
case workspacedir = "workspaceDir"
}
}
@ -1336,6 +1328,7 @@ public struct SessionsPatchParams: Codable, Sendable {
public let execnode: AnyCodable?
public let model: AnyCodable?
public let spawnedby: AnyCodable?
public let spawnedworkspacedir: AnyCodable?
public let spawndepth: AnyCodable?
public let subagentrole: AnyCodable?
public let subagentcontrolscope: AnyCodable?
@ -1356,6 +1349,7 @@ public struct SessionsPatchParams: Codable, Sendable {
execnode: AnyCodable?,
model: AnyCodable?,
spawnedby: AnyCodable?,
spawnedworkspacedir: AnyCodable?,
spawndepth: AnyCodable?,
subagentrole: AnyCodable?,
subagentcontrolscope: AnyCodable?,
@ -1375,6 +1369,7 @@ public struct SessionsPatchParams: Codable, Sendable {
self.execnode = execnode
self.model = model
self.spawnedby = spawnedby
self.spawnedworkspacedir = spawnedworkspacedir
self.spawndepth = spawndepth
self.subagentrole = subagentrole
self.subagentcontrolscope = subagentcontrolscope
@ -1396,6 +1391,7 @@ public struct SessionsPatchParams: Codable, Sendable {
case execnode = "execNode"
case model
case spawnedby = "spawnedBy"
case spawnedworkspacedir = "spawnedWorkspaceDir"
case spawndepth = "spawnDepth"
case subagentrole = "subagentRole"
case subagentcontrolscope = "subagentControlScope"

View File

@ -538,8 +538,6 @@ public struct AgentParams: Codable, Sendable {
public let inputprovenance: [String: AnyCodable]?
public let idempotencykey: String
public let label: String?
public let spawnedby: String?
public let workspacedir: String?
public init(
message: String,
@ -566,9 +564,7 @@ public struct AgentParams: Codable, Sendable {
internalevents: [[String: AnyCodable]]?,
inputprovenance: [String: AnyCodable]?,
idempotencykey: String,
label: String?,
spawnedby: String?,
workspacedir: String?)
label: String?)
{
self.message = message
self.agentid = agentid
@ -595,8 +591,6 @@ public struct AgentParams: Codable, Sendable {
self.inputprovenance = inputprovenance
self.idempotencykey = idempotencykey
self.label = label
self.spawnedby = spawnedby
self.workspacedir = workspacedir
}
private enum CodingKeys: String, CodingKey {
@ -625,8 +619,6 @@ public struct AgentParams: Codable, Sendable {
case inputprovenance = "inputProvenance"
case idempotencykey = "idempotencyKey"
case label
case spawnedby = "spawnedBy"
case workspacedir = "workspaceDir"
}
}
@ -1336,6 +1328,7 @@ public struct SessionsPatchParams: Codable, Sendable {
public let execnode: AnyCodable?
public let model: AnyCodable?
public let spawnedby: AnyCodable?
public let spawnedworkspacedir: AnyCodable?
public let spawndepth: AnyCodable?
public let subagentrole: AnyCodable?
public let subagentcontrolscope: AnyCodable?
@ -1356,6 +1349,7 @@ public struct SessionsPatchParams: Codable, Sendable {
execnode: AnyCodable?,
model: AnyCodable?,
spawnedby: AnyCodable?,
spawnedworkspacedir: AnyCodable?,
spawndepth: AnyCodable?,
subagentrole: AnyCodable?,
subagentcontrolscope: AnyCodable?,
@ -1375,6 +1369,7 @@ public struct SessionsPatchParams: Codable, Sendable {
self.execnode = execnode
self.model = model
self.spawnedby = spawnedby
self.spawnedworkspacedir = spawnedworkspacedir
self.spawndepth = spawndepth
self.subagentrole = subagentrole
self.subagentcontrolscope = subagentcontrolscope
@ -1396,6 +1391,7 @@ public struct SessionsPatchParams: Codable, Sendable {
case execnode = "execNode"
case model
case spawnedby = "spawnedBy"
case spawnedworkspacedir = "spawnedWorkspaceDir"
case spawndepth = "spawnDepth"
case subagentrole = "subagentRole"
case subagentcontrolscope = "subagentControlScope"

View File

@ -25,4 +25,5 @@ openclaw agent --agent ops --message "Generate report" --deliver --reply-channel
## Notes
- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names or `secretref-managed`), not resolved secret plaintext.
- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names, `secretref-env:ENV_VAR_NAME`, or `secretref-managed`), not resolved secret plaintext.
- Marker writes are source-authoritative: OpenClaw persists markers from the active source config snapshot, not from resolved runtime secret values.

View File

@ -284,9 +284,46 @@ Notes:
- Paths can be absolute or workspace-relative.
- Directories are scanned recursively for `.md` files.
- Only Markdown files are indexed.
- By default, only Markdown files are indexed.
- If `memorySearch.multimodal.enabled = true`, OpenClaw also indexes supported image/audio files under `extraPaths` only. Default memory roots (`MEMORY.md`, `memory.md`, `memory/**/*.md`) stay Markdown-only.
- Symlinks are ignored (files or directories).
### Multimodal memory files (Gemini image + audio)
OpenClaw can index image and audio files from `memorySearch.extraPaths` when using Gemini Embedding 2 (preview):
```json5
agents: {
defaults: {
memorySearch: {
provider: "gemini",
model: "gemini-embedding-2-preview",
extraPaths: ["assets/reference", "voice-notes"],
multimodal: {
enabled: true,
modalities: ["image", "audio"], // or ["all"]
maxFileBytes: 10000000
},
remote: {
apiKey: "YOUR_GEMINI_API_KEY"
}
}
}
}
```
Notes:
- Multimodal memory is currently supported only for `gemini-embedding-2-preview`.
- Multimodal indexing applies only to files discovered through `memorySearch.extraPaths`.
- Supported modalities in this phase: image and audio.
- `memorySearch.fallback` must stay `"none"` while multimodal memory is enabled.
- Matching image/audio file bytes are uploaded to the configured Gemini embedding endpoint during indexing.
- Supported image extensions: `.jpg`, `.jpeg`, `.png`, `.webp`, `.gif`, `.heic`, `.heif`.
- Supported audio extensions: `.mp3`, `.wav`, `.ogg`, `.opus`, `.m4a`, `.aac`, `.flac`.
- Search queries remain text, but Gemini can compare those text queries against indexed image/audio embeddings.
- `memory_get` still reads Markdown only; binary files are searchable but not returned as raw file contents.
### Gemini embeddings (native)
Set the provider to `gemini` to use the Gemini embeddings API directly:
@ -310,6 +347,29 @@ Notes:
- `remote.baseUrl` is optional (defaults to the Gemini API base URL).
- `remote.headers` lets you add extra headers if needed.
- Default model: `gemini-embedding-001`.
- `gemini-embedding-2-preview` is also supported: 8192 token limit and configurable dimensions (768 / 1536 / 3072, default 3072).
#### Gemini Embedding 2 (preview)
```json5
agents: {
defaults: {
memorySearch: {
provider: "gemini",
model: "gemini-embedding-2-preview",
outputDimensionality: 3072, // optional: 768, 1536, or 3072 (default)
remote: {
apiKey: "YOUR_GEMINI_API_KEY"
}
}
}
}
```
> **⚠️ Re-index required:** Switching from `gemini-embedding-001` (768 dimensions)
> to `gemini-embedding-2-preview` (3072 dimensions) changes the vector size. The same is true if you
> change `outputDimensionality` between 768, 1536, and 3072.
> OpenClaw will automatically reindex when it detects a model or dimension change.
If you want to use a **custom OpenAI-compatible endpoint** (OpenRouter, vLLM, or a proxy),
you can use the `remote` configuration with the OpenAI provider:

View File

@ -357,7 +357,7 @@ Ollama is a local LLM runtime that provides an OpenAI-compatible API:
- Provider: `ollama`
- Auth: None required (local server)
- Example model: `ollama/llama3.3`
- Installation: [https://ollama.ai](https://ollama.ai)
- Installation: [https://ollama.com/download](https://ollama.com/download)
```bash
# Install Ollama, then pull a model:
@ -372,7 +372,7 @@ ollama pull llama3.3
}
```
Ollama is automatically detected when running locally at `http://127.0.0.1:11434/v1`. See [/providers/ollama](/providers/ollama) for model recommendations and custom configuration.
Ollama is detected locally at `http://127.0.0.1:11434` when you opt in with `OLLAMA_API_KEY`, and `openclaw onboard` can configure it directly as a first-class provider. See [/providers/ollama](/providers/ollama) for onboarding, cloud/local mode, and custom configuration.
### vLLM

View File

@ -207,7 +207,7 @@ mode, pass `--yes` to accept defaults.
## Models registry (`models.json`)
Custom providers in `models.providers` are written into `models.json` under the
agent directory (default `~/.openclaw/agents/<agentId>/models.json`). This file
agent directory (default `~/.openclaw/agents/<agentId>/agent/models.json`). This file
is merged by default unless `models.mode` is set to `replace`.
Merge mode precedence for matching provider IDs:
@ -215,7 +215,9 @@ Merge mode precedence for matching provider IDs:
- Non-empty `baseUrl` already present in the agent `models.json` wins.
- Non-empty `apiKey` in the agent `models.json` wins only when that provider is not SecretRef-managed in current config/auth-profile context.
- SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets.
- SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs).
- Empty or missing agent `apiKey`/`baseUrl` fall back to config `models.providers`.
- Other provider fields are refreshed from config and normalized catalog data.
This marker-based persistence applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`.
Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values.
This applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`.

View File

@ -2014,9 +2014,11 @@ OpenClaw uses the pi-coding-agent model catalog. Add custom providers via `model
- Non-empty agent `models.json` `baseUrl` values win.
- Non-empty agent `apiKey` values win only when that provider is not SecretRef-managed in current config/auth-profile context.
- SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets.
- SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs).
- Empty or missing agent `apiKey`/`baseUrl` fall back to `models.providers` in config.
- Matching model `contextWindow`/`maxTokens` use the higher value between explicit config and implicit catalog values.
- Use `models.mode: "replace"` when you want config to fully rewrite `models.json`.
- Marker persistence is source-authoritative: markers are written from the active source config snapshot (pre-resolution), not from resolved runtime secret values.
### Provider field details

View File

@ -11,6 +11,8 @@ title: "Local Models"
Local is doable, but OpenClaw expects large context + strong defenses against prompt injection. Small cards truncate context and leak safety. Aim high: **≥2 maxed-out Mac Studios or equivalent GPU rig (~$30k+)**. A single **24 GB** GPU works only for lighter prompts with higher latency. Use the **largest / full-size model variant you can run**; aggressively quantized or “small” checkpoints raise prompt-injection risk (see [Security](/gateway/security)).
If you want the lowest-friction local setup, start with [Ollama](/providers/ollama) and `openclaw onboard`. This page is the opinionated guide for higher-end local stacks and custom OpenAI-compatible local servers.
## Recommended: LM Studio + MiniMax M2.5 (Responses API, full-size)
Best current local stack. Load MiniMax M2.5 in LM Studio, enable the local server (default `http://127.0.0.1:1234`), and use Responses API to keep reasoning separate from final text.

View File

@ -2084,8 +2084,21 @@ More context: [Models](/concepts/models).
### Can I use selfhosted models llamacpp vLLM Ollama
Yes. If your local server exposes an OpenAI-compatible API, you can point a
custom provider at it. Ollama is supported directly and is the easiest path.
Yes. Ollama is the easiest path for local models.
Quickest setup:
1. Install Ollama from `https://ollama.com/download`
2. Pull a local model such as `ollama pull glm-4.7-flash`
3. If you want Ollama Cloud too, run `ollama signin`
4. Run `openclaw onboard` and choose `Ollama`
5. Pick `Local` or `Cloud + Local`
Notes:
- `Cloud + Local` gives you Ollama Cloud models plus your local Ollama models
- cloud models such as `kimi-k2.5:cloud` do not need a local pull
- for manual switching, use `openclaw models list` and `openclaw models set ollama/<model>`
Security note: smaller or heavily quantized models are more vulnerable to prompt
injection. We strongly recommend **large models** for any bot that can use tools.

View File

@ -39,7 +39,7 @@ Notes:
# Default is auto-derived from APP_VERSION when omitted.
SKIP_NOTARIZE=1 \
BUNDLE_ID=ai.openclaw.mac \
APP_VERSION=2026.3.9 \
APP_VERSION=2026.3.11 \
BUILD_CONFIG=release \
SIGN_IDENTITY="Developer ID Application: <Developer Name> (<TEAMID>)" \
scripts/package-mac-dist.sh
@ -47,10 +47,10 @@ scripts/package-mac-dist.sh
# `package-mac-dist.sh` already creates the zip + DMG.
# If you used `package-mac-app.sh` directly instead, create them manually:
# If you want notarization/stapling in this step, use the NOTARIZE command below.
ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.3.9.zip
ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.3.11.zip
# Optional: build a styled DMG for humans (drag to /Applications)
scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.9.dmg
scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.11.dmg
# Recommended: build + notarize/staple zip + DMG
# First, create a keychain profile once:
@ -58,13 +58,13 @@ scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.9.dmg
# --apple-id "<apple-id>" --team-id "<team-id>" --password "<app-specific-password>"
NOTARIZE=1 NOTARYTOOL_PROFILE=openclaw-notary \
BUNDLE_ID=ai.openclaw.mac \
APP_VERSION=2026.3.9 \
APP_VERSION=2026.3.11 \
BUILD_CONFIG=release \
SIGN_IDENTITY="Developer ID Application: <Developer Name> (<TEAMID>)" \
scripts/package-mac-dist.sh
# Optional: ship dSYM alongside the release
ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.3.9.dSYM.zip
ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.3.11.dSYM.zip
```
## Appcast entry
@ -72,7 +72,7 @@ ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenCl
Use the release note generator so Sparkle renders formatted HTML notes:
```bash
SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.3.9.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml
SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.3.11.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml
```
Generates HTML release notes from `CHANGELOG.md` (via [`scripts/changelog-to-html.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/changelog-to-html.sh)) and embeds them in the appcast entry.
@ -80,7 +80,7 @@ Commit the updated `appcast.xml` alongside the release assets (zip + dSYM) when
## Publish & verify
- Upload `OpenClaw-2026.3.9.zip` (and `OpenClaw-2026.3.9.dSYM.zip`) to the GitHub release for tag `v2026.3.9`.
- Upload `OpenClaw-2026.3.11.zip` (and `OpenClaw-2026.3.11.dSYM.zip`) to the GitHub release for tag `v2026.3.11`.
- Ensure the raw appcast URL matches the baked feed: `https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml`.
- Sanity checks:
- `curl -I https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml` returns 200.

View File

@ -153,30 +153,33 @@ sudo systemctl status openclaw
journalctl -u openclaw -f
```
## 9) Access the Dashboard
## 9) Access the OpenClaw Dashboard
Since the Pi is headless, use an SSH tunnel:
Replace `user@gateway-host` with your Pi username and hostname or IP address.
On your computer, ask the Pi to print a fresh dashboard URL:
```bash
# From your laptop/desktop
ssh -L 18789:localhost:18789 user@gateway-host
# Then open in browser
open http://localhost:18789
ssh user@gateway-host 'openclaw dashboard --no-open'
```
Or use Tailscale for always-on access:
The command prints `Dashboard URL:`. Depending on how `gateway.auth.token`
is configured, the URL may be a plain `http://127.0.0.1:18789/` link or one
that includes `#token=...`.
In another terminal on your computer, create the SSH tunnel:
```bash
# On the Pi
curl -fsSL https://tailscale.com/install.sh | sh
sudo tailscale up
# Update config
openclaw config set gateway.bind tailnet
sudo systemctl restart openclaw
ssh -N -L 18789:127.0.0.1:18789 user@gateway-host
```
Then open the printed Dashboard URL in your local browser.
If the UI asks for auth, paste the token from `gateway.auth.token`
(or `OPENCLAW_GATEWAY_TOKEN`) into Control UI settings.
For always-on remote access, see [Tailscale](/gateway/tailscale).
---
## Performance Optimizations

View File

@ -8,7 +8,7 @@ title: "Ollama"
# Ollama
Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. OpenClaw integrates with Ollama's native API (`/api/chat`), supporting streaming and tool calling, and can **auto-discover tool-capable models** when you opt in with `OLLAMA_API_KEY` (or an auth profile) and do not define an explicit `models.providers.ollama` entry.
Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. OpenClaw integrates with Ollama's native API (`/api/chat`), supports streaming and tool calling, and can auto-discover local Ollama models when you opt in with `OLLAMA_API_KEY` (or an auth profile) and do not define an explicit `models.providers.ollama` entry.
<Warning>
**Remote Ollama users**: Do not use the `/v1` OpenAI-compatible URL (`http://host:11434/v1`) with OpenClaw. This breaks tool calling and models may output raw tool JSON as plain text. Use the native Ollama API URL instead: `baseUrl: "http://host:11434"` (no `/v1`).
@ -16,21 +16,40 @@ Ollama is a local LLM runtime that makes it easy to run open-source models on yo
## Quick start
1. Install Ollama: [https://ollama.ai](https://ollama.ai)
1. Install Ollama: [https://ollama.com/download](https://ollama.com/download)
2. Pull a model:
2. Pull a local model if you want local inference:
```bash
ollama pull glm-4.7-flash
# or
ollama pull gpt-oss:20b
# or
ollama pull llama3.3
# or
ollama pull qwen2.5-coder:32b
# or
ollama pull deepseek-r1:32b
```
3. Enable Ollama for OpenClaw (any value works; Ollama doesn't require a real key):
3. If you want Ollama Cloud models too, sign in:
```bash
ollama signin
```
4. Run onboarding and choose `Ollama`:
```bash
openclaw onboard
```
- `Local`: local models only
- `Cloud + Local`: local models plus Ollama Cloud models
- Cloud models such as `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, and `glm-5:cloud` do **not** require a local `ollama pull`
OpenClaw currently suggests:
- local default: `glm-4.7-flash`
- cloud defaults: `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, `glm-5:cloud`
5. If you prefer manual setup, enable Ollama for OpenClaw directly (any value works; Ollama doesn't require a real key):
```bash
# Set environment variable
@ -40,13 +59,20 @@ export OLLAMA_API_KEY="ollama-local"
openclaw config set models.providers.ollama.apiKey "ollama-local"
```
4. Use Ollama models:
6. Inspect or switch models:
```bash
openclaw models list
openclaw models set ollama/glm-4.7-flash
```
7. Or set the default in config:
```json5
{
agents: {
defaults: {
model: { primary: "ollama/gpt-oss:20b" },
model: { primary: "ollama/glm-4.7-flash" },
},
},
}
@ -56,14 +82,13 @@ openclaw config set models.providers.ollama.apiKey "ollama-local"
When you set `OLLAMA_API_KEY` (or an auth profile) and **do not** define `models.providers.ollama`, OpenClaw discovers models from the local Ollama instance at `http://127.0.0.1:11434`:
- Queries `/api/tags` and `/api/show`
- Keeps only models that report `tools` capability
- Marks `reasoning` when the model reports `thinking`
- Reads `contextWindow` from `model_info["<arch>.context_length"]` when available
- Sets `maxTokens` to 10× the context window
- Queries `/api/tags`
- Uses best-effort `/api/show` lookups to read `contextWindow` when available
- Marks `reasoning` with a model-name heuristic (`r1`, `reasoning`, `think`)
- Sets `maxTokens` to the default Ollama max-token cap used by OpenClaw
- Sets all costs to `0`
This avoids manual model entries while keeping the catalog aligned with Ollama's capabilities.
This avoids manual model entries while keeping the catalog aligned with the local Ollama instance.
To see what models are available:
@ -98,7 +123,7 @@ Use explicit config when:
- Ollama runs on another host/port.
- You want to force specific context windows or model lists.
- You want to include models that do not report tool support.
- You want fully manual model definitions.
```json5
{
@ -170,7 +195,7 @@ Once configured, all your Ollama models are available:
### Reasoning models
OpenClaw marks models as reasoning-capable when Ollama reports `thinking` in `/api/show`:
OpenClaw treats models with names such as `deepseek-r1`, `reasoning`, or `think` as reasoning-capable by default:
```bash
ollama pull deepseek-r1:32b
@ -230,7 +255,7 @@ When `api: "openai-completions"` is used with Ollama, OpenClaw injects `options.
### Context windows
For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, otherwise it defaults to `8192`. You can override `contextWindow` and `maxTokens` in explicit provider config.
For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, otherwise it falls back to the default Ollama context window used by OpenClaw. You can override `contextWindow` and `maxTokens` in explicit provider config.
## Troubleshooting
@ -250,16 +275,17 @@ curl http://localhost:11434/api/tags
### No models available
OpenClaw only auto-discovers models that report tool support. If your model isn't listed, either:
If your model is not listed, either:
- Pull a tool-capable model, or
- Pull the model locally, or
- Define the model explicitly in `models.providers.ollama`.
To add models:
```bash
ollama list # See what's installed
ollama pull gpt-oss:20b # Pull a tool-capable model
ollama pull glm-4.7-flash
ollama pull gpt-oss:20b
ollama pull llama3.3 # Or another model
```

View File

@ -101,6 +101,7 @@ Notes:
- Plan entries target `profiles.*.key` / `profiles.*.token` and write sibling refs (`keyRef` / `tokenRef`).
- Auth-profile refs are included in runtime resolution and audit coverage.
- For SecretRef-managed model providers, generated `agents/*/agent/models.json` entries persist non-secret markers (not resolved secret values) for `apiKey`/header surfaces.
- Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values.
- For web search:
- In explicit provider mode (`tools.web.search.provider` set), only the selected provider key is active.
- In auto mode (`tools.web.search.provider` unset), only the first provider key that resolves by precedence is active.

View File

@ -75,11 +75,14 @@ outside the list is rejected.
- `schema` (object, optional JSON Schema)
- `provider` (string, optional)
- `model` (string, optional)
- `thinking` (string, optional)
- `authProfileId` (string, optional)
- `temperature` (number, optional)
- `maxTokens` (number, optional)
- `timeoutMs` (number, optional)
`thinking` accepts the standard OpenClaw reasoning presets, such as `low` or `medium`.
## Output
Returns `details.json` containing the parsed JSON (and validates against
@ -90,6 +93,7 @@ Returns `details.json` containing the parsed JSON (and validates against
```lobster
openclaw.invoke --tool llm-task --action json --args-json '{
"prompt": "Given the input email, return intent and draft.",
"thinking": "low",
"input": {
"subject": "Hello",
"body": "Can you help?"

View File

@ -106,6 +106,7 @@ Use it in a pipeline:
```lobster
openclaw.invoke --tool llm-task --action json --args-json '{
"prompt": "Given the input email, return intent and draft.",
"thinking": "low",
"input": { "subject": "Hello", "body": "Can you help?" },
"schema": {
"type": "object",

View File

@ -1,10 +1,10 @@
{
"name": "@openclaw/acpx",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw ACP runtime backend via acpx",
"type": "module",
"dependencies": {
"acpx": "0.1.16"
"acpx": "0.2.0"
},
"openclaw": {
"extensions": [

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/bluebubbles",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw BlueBubbles channel plugin",
"type": "module",
"dependencies": {

View File

@ -17,9 +17,28 @@ describe("normalizeWebhookMessage", () => {
expect(result).not.toBeNull();
expect(result?.senderId).toBe("+15551234567");
expect(result?.senderIdExplicit).toBe(false);
expect(result?.chatGuid).toBe("iMessage;-;+15551234567");
});
it("marks explicit sender handles as explicit identity", () => {
const result = normalizeWebhookMessage({
type: "new-message",
data: {
guid: "msg-explicit-1",
text: "hello",
isGroup: false,
isFromMe: true,
handle: { address: "+15551234567" },
chatGuid: "iMessage;-;+15551234567",
},
});
expect(result).not.toBeNull();
expect(result?.senderId).toBe("+15551234567");
expect(result?.senderIdExplicit).toBe(true);
});
it("does not infer sender from group chatGuid when sender handle is missing", () => {
const result = normalizeWebhookMessage({
type: "new-message",
@ -72,6 +91,7 @@ describe("normalizeWebhookReaction", () => {
expect(result).not.toBeNull();
expect(result?.senderId).toBe("+15551234567");
expect(result?.senderIdExplicit).toBe(false);
expect(result?.messageId).toBe("p:0/msg-1");
expect(result?.action).toBe("added");
});

View File

@ -191,12 +191,13 @@ function readFirstChatRecord(message: Record<string, unknown>): Record<string, u
function extractSenderInfo(message: Record<string, unknown>): {
senderId: string;
senderIdExplicit: boolean;
senderName?: string;
} {
const handleValue = message.handle ?? message.sender;
const handle =
asRecord(handleValue) ?? (typeof handleValue === "string" ? { address: handleValue } : null);
const senderId =
const senderIdRaw =
readString(handle, "address") ??
readString(handle, "handle") ??
readString(handle, "id") ??
@ -204,13 +205,18 @@ function extractSenderInfo(message: Record<string, unknown>): {
readString(message, "sender") ??
readString(message, "from") ??
"";
const senderId = senderIdRaw.trim();
const senderName =
readString(handle, "displayName") ??
readString(handle, "name") ??
readString(message, "senderName") ??
undefined;
return { senderId, senderName };
return {
senderId,
senderIdExplicit: Boolean(senderId),
senderName,
};
}
function extractChatContext(message: Record<string, unknown>): {
@ -441,6 +447,7 @@ export type BlueBubblesParticipant = {
export type NormalizedWebhookMessage = {
text: string;
senderId: string;
senderIdExplicit: boolean;
senderName?: string;
messageId?: string;
timestamp?: number;
@ -466,6 +473,7 @@ export type NormalizedWebhookReaction = {
action: "added" | "removed";
emoji: string;
senderId: string;
senderIdExplicit: boolean;
senderName?: string;
messageId: string;
timestamp?: number;
@ -672,7 +680,7 @@ export function normalizeWebhookMessage(
readString(message, "subject") ??
"";
const { senderId, senderName } = extractSenderInfo(message);
const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message);
const { chatGuid, chatIdentifier, chatId, chatName, isGroup, participants } =
extractChatContext(message);
const normalizedParticipants = normalizeParticipantList(participants);
@ -717,7 +725,7 @@ export function normalizeWebhookMessage(
// BlueBubbles may omit `handle` in webhook payloads; for DM chat GUIDs we can still infer sender.
const senderFallbackFromChatGuid =
!senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null;
!senderIdExplicit && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null;
const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || "");
if (!normalizedSender) {
return null;
@ -727,6 +735,7 @@ export function normalizeWebhookMessage(
return {
text,
senderId: normalizedSender,
senderIdExplicit,
senderName,
messageId,
timestamp,
@ -777,7 +786,7 @@ export function normalizeWebhookReaction(
const emoji = (associatedEmoji?.trim() || mapping?.emoji) ?? `reaction:${associatedType}`;
const action = mapping?.action ?? resolveTapbackActionHint(associatedType) ?? "added";
const { senderId, senderName } = extractSenderInfo(message);
const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message);
const { chatGuid, chatIdentifier, chatId, chatName, isGroup } = extractChatContext(message);
const fromMe = readBoolean(message, "isFromMe") ?? readBoolean(message, "is_from_me");
@ -793,7 +802,7 @@ export function normalizeWebhookReaction(
: undefined;
const senderFallbackFromChatGuid =
!senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null;
!senderIdExplicit && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null;
const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || "");
if (!normalizedSender) {
return null;
@ -803,6 +812,7 @@ export function normalizeWebhookReaction(
action,
emoji,
senderId: normalizedSender,
senderIdExplicit,
senderName,
messageId: associatedGuid,
timestamp,

View File

@ -38,6 +38,10 @@ import {
resolveBlueBubblesMessageId,
resolveReplyContextFromCache,
} from "./monitor-reply-cache.js";
import {
hasBlueBubblesSelfChatCopy,
rememberBlueBubblesSelfChatCopy,
} from "./monitor-self-chat-cache.js";
import type {
BlueBubblesCoreRuntime,
BlueBubblesRuntimeEnv,
@ -47,7 +51,12 @@ import { isBlueBubblesPrivateApiEnabled } from "./probe.js";
import { normalizeBlueBubblesReactionInput, sendBlueBubblesReaction } from "./reactions.js";
import { normalizeSecretInputString } from "./secret-input.js";
import { resolveChatGuidForTarget, sendMessageBlueBubbles } from "./send.js";
import { formatBlueBubblesChatTarget, isAllowedBlueBubblesSender } from "./targets.js";
import {
extractHandleFromChatGuid,
formatBlueBubblesChatTarget,
isAllowedBlueBubblesSender,
normalizeBlueBubblesHandle,
} from "./targets.js";
const DEFAULT_TEXT_LIMIT = 4000;
const invalidAckReactions = new Set<string>();
@ -80,6 +89,19 @@ function normalizeSnippet(value: string): string {
return stripMarkdown(value).replace(/\s+/g, " ").trim().toLowerCase();
}
function isBlueBubblesSelfChatMessage(
message: NormalizedWebhookMessage,
isGroup: boolean,
): boolean {
if (isGroup || !message.senderIdExplicit) {
return false;
}
const chatHandle =
(message.chatGuid ? extractHandleFromChatGuid(message.chatGuid) : null) ??
normalizeBlueBubblesHandle(message.chatIdentifier ?? "");
return Boolean(chatHandle) && chatHandle === message.senderId;
}
function prunePendingOutboundMessageIds(now = Date.now()): void {
const cutoff = now - PENDING_OUTBOUND_MESSAGE_ID_TTL_MS;
for (let i = pendingOutboundMessageIds.length - 1; i >= 0; i--) {
@ -453,8 +475,27 @@ export async function processMessage(
? `removed ${tapbackParsed.emoji} reaction`
: `reacted with ${tapbackParsed.emoji}`
: text || placeholder;
const isSelfChatMessage = isBlueBubblesSelfChatMessage(message, isGroup);
const selfChatLookup = {
accountId: account.accountId,
chatGuid: message.chatGuid,
chatIdentifier: message.chatIdentifier,
chatId: message.chatId,
senderId: message.senderId,
body: rawBody,
timestamp: message.timestamp,
};
const cacheMessageId = message.messageId?.trim();
const confirmedOutboundCacheEntry = cacheMessageId
? resolveReplyContextFromCache({
accountId: account.accountId,
replyToId: cacheMessageId,
chatGuid: message.chatGuid,
chatIdentifier: message.chatIdentifier,
chatId: message.chatId,
})
: null;
let messageShortId: string | undefined;
const cacheInboundMessage = () => {
if (!cacheMessageId) {
@ -476,6 +517,12 @@ export async function processMessage(
if (message.fromMe) {
// Cache from-me messages so reply context can resolve sender/body.
cacheInboundMessage();
const confirmedAssistantOutbound =
confirmedOutboundCacheEntry?.senderLabel === "me" &&
normalizeSnippet(confirmedOutboundCacheEntry.body ?? "") === normalizeSnippet(rawBody);
if (isSelfChatMessage && confirmedAssistantOutbound) {
rememberBlueBubblesSelfChatCopy(selfChatLookup);
}
if (cacheMessageId) {
const pending = consumePendingOutboundMessageId({
accountId: account.accountId,
@ -499,6 +546,11 @@ export async function processMessage(
return;
}
if (isSelfChatMessage && hasBlueBubblesSelfChatCopy(selfChatLookup)) {
logVerbose(core, runtime, `drop: reflected self-chat duplicate sender=${message.senderId}`);
return;
}
if (!rawBody) {
logVerbose(core, runtime, `drop: empty text sender=${message.senderId}`);
return;

View File

@ -0,0 +1,190 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import {
hasBlueBubblesSelfChatCopy,
rememberBlueBubblesSelfChatCopy,
resetBlueBubblesSelfChatCache,
} from "./monitor-self-chat-cache.js";
describe("BlueBubbles self-chat cache", () => {
const directLookup = {
accountId: "default",
chatGuid: "iMessage;-;+15551234567",
senderId: "+15551234567",
} as const;
afterEach(() => {
resetBlueBubblesSelfChatCache();
vi.useRealTimers();
});
it("matches repeated lookups for the same scope, timestamp, and text", () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
rememberBlueBubblesSelfChatCopy({
...directLookup,
body: " hello\r\nworld ",
timestamp: 123,
});
expect(
hasBlueBubblesSelfChatCopy({
...directLookup,
body: "hello\nworld",
timestamp: 123,
}),
).toBe(true);
});
it("canonicalizes DM scope across chatIdentifier and chatGuid", () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
rememberBlueBubblesSelfChatCopy({
accountId: "default",
chatIdentifier: "+15551234567",
senderId: "+15551234567",
body: "hello",
timestamp: 123,
});
expect(
hasBlueBubblesSelfChatCopy({
accountId: "default",
chatGuid: "iMessage;-;+15551234567",
senderId: "+15551234567",
body: "hello",
timestamp: 123,
}),
).toBe(true);
resetBlueBubblesSelfChatCache();
rememberBlueBubblesSelfChatCopy({
accountId: "default",
chatGuid: "iMessage;-;+15551234567",
senderId: "+15551234567",
body: "hello",
timestamp: 123,
});
expect(
hasBlueBubblesSelfChatCopy({
accountId: "default",
chatIdentifier: "+15551234567",
senderId: "+15551234567",
body: "hello",
timestamp: 123,
}),
).toBe(true);
});
it("expires entries after the ttl window", () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
rememberBlueBubblesSelfChatCopy({
...directLookup,
body: "hello",
timestamp: 123,
});
vi.advanceTimersByTime(11_001);
expect(
hasBlueBubblesSelfChatCopy({
...directLookup,
body: "hello",
timestamp: 123,
}),
).toBe(false);
});
it("evicts older entries when the cache exceeds its cap", () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
for (let i = 0; i < 513; i += 1) {
rememberBlueBubblesSelfChatCopy({
...directLookup,
body: `message-${i}`,
timestamp: i,
});
vi.advanceTimersByTime(1_001);
}
expect(
hasBlueBubblesSelfChatCopy({
...directLookup,
body: "message-0",
timestamp: 0,
}),
).toBe(false);
expect(
hasBlueBubblesSelfChatCopy({
...directLookup,
body: "message-512",
timestamp: 512,
}),
).toBe(true);
});
it("enforces the cache cap even when cleanup is throttled", () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
for (let i = 0; i < 513; i += 1) {
rememberBlueBubblesSelfChatCopy({
...directLookup,
body: `burst-${i}`,
timestamp: i,
});
}
expect(
hasBlueBubblesSelfChatCopy({
...directLookup,
body: "burst-0",
timestamp: 0,
}),
).toBe(false);
expect(
hasBlueBubblesSelfChatCopy({
...directLookup,
body: "burst-512",
timestamp: 512,
}),
).toBe(true);
});
it("does not collide long texts that differ only in the middle", () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
const prefix = "a".repeat(256);
const suffix = "b".repeat(256);
const longBodyA = `${prefix}${"x".repeat(300)}${suffix}`;
const longBodyB = `${prefix}${"y".repeat(300)}${suffix}`;
rememberBlueBubblesSelfChatCopy({
...directLookup,
body: longBodyA,
timestamp: 123,
});
expect(
hasBlueBubblesSelfChatCopy({
...directLookup,
body: longBodyA,
timestamp: 123,
}),
).toBe(true);
expect(
hasBlueBubblesSelfChatCopy({
...directLookup,
body: longBodyB,
timestamp: 123,
}),
).toBe(false);
});
});

View File

@ -0,0 +1,127 @@
import { createHash } from "node:crypto";
import { extractHandleFromChatGuid, normalizeBlueBubblesHandle } from "./targets.js";
type SelfChatCacheKeyParts = {
accountId: string;
chatGuid?: string;
chatIdentifier?: string;
chatId?: number;
senderId: string;
};
type SelfChatLookup = SelfChatCacheKeyParts & {
body?: string;
timestamp?: number;
};
const SELF_CHAT_TTL_MS = 10_000;
const MAX_SELF_CHAT_CACHE_ENTRIES = 512;
const CLEANUP_MIN_INTERVAL_MS = 1_000;
const MAX_SELF_CHAT_BODY_CHARS = 32_768;
const cache = new Map<string, number>();
let lastCleanupAt = 0;
function normalizeBody(body: string | undefined): string | null {
if (!body) {
return null;
}
const bounded =
body.length > MAX_SELF_CHAT_BODY_CHARS ? body.slice(0, MAX_SELF_CHAT_BODY_CHARS) : body;
const normalized = bounded.replace(/\r\n?/g, "\n").trim();
return normalized ? normalized : null;
}
function isUsableTimestamp(timestamp: number | undefined): timestamp is number {
return typeof timestamp === "number" && Number.isFinite(timestamp);
}
function digestText(text: string): string {
return createHash("sha256").update(text).digest("base64url");
}
function trimOrUndefined(value?: string | null): string | undefined {
const trimmed = value?.trim();
return trimmed ? trimmed : undefined;
}
function resolveCanonicalChatTarget(parts: SelfChatCacheKeyParts): string | null {
const handleFromGuid = parts.chatGuid ? extractHandleFromChatGuid(parts.chatGuid) : null;
if (handleFromGuid) {
return handleFromGuid;
}
const normalizedIdentifier = normalizeBlueBubblesHandle(parts.chatIdentifier ?? "");
if (normalizedIdentifier) {
return normalizedIdentifier;
}
return (
trimOrUndefined(parts.chatGuid) ??
trimOrUndefined(parts.chatIdentifier) ??
(typeof parts.chatId === "number" ? String(parts.chatId) : null)
);
}
function buildScope(parts: SelfChatCacheKeyParts): string {
const target = resolveCanonicalChatTarget(parts) ?? parts.senderId;
return `${parts.accountId}:${target}`;
}
function cleanupExpired(now = Date.now()): void {
if (
lastCleanupAt !== 0 &&
now >= lastCleanupAt &&
now - lastCleanupAt < CLEANUP_MIN_INTERVAL_MS
) {
return;
}
lastCleanupAt = now;
for (const [key, seenAt] of cache.entries()) {
if (now - seenAt > SELF_CHAT_TTL_MS) {
cache.delete(key);
}
}
}
function enforceSizeCap(): void {
while (cache.size > MAX_SELF_CHAT_CACHE_ENTRIES) {
const oldestKey = cache.keys().next().value;
if (typeof oldestKey !== "string") {
break;
}
cache.delete(oldestKey);
}
}
function buildKey(lookup: SelfChatLookup): string | null {
const body = normalizeBody(lookup.body);
if (!body || !isUsableTimestamp(lookup.timestamp)) {
return null;
}
return `${buildScope(lookup)}:${lookup.timestamp}:${digestText(body)}`;
}
export function rememberBlueBubblesSelfChatCopy(lookup: SelfChatLookup): void {
cleanupExpired();
const key = buildKey(lookup);
if (!key) {
return;
}
cache.set(key, Date.now());
enforceSizeCap();
}
export function hasBlueBubblesSelfChatCopy(lookup: SelfChatLookup): boolean {
cleanupExpired();
const key = buildKey(lookup);
if (!key) {
return false;
}
const seenAt = cache.get(key);
return typeof seenAt === "number" && Date.now() - seenAt <= SELF_CHAT_TTL_MS;
}
export function resetBlueBubblesSelfChatCache(): void {
cache.clear();
lastCleanupAt = 0;
}

View File

@ -5,6 +5,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createPluginRuntimeMock } from "../../test-utils/plugin-runtime-mock.js";
import type { ResolvedBlueBubblesAccount } from "./accounts.js";
import { fetchBlueBubblesHistory } from "./history.js";
import { resetBlueBubblesSelfChatCache } from "./monitor-self-chat-cache.js";
import {
handleBlueBubblesWebhookRequest,
registerBlueBubblesWebhookTarget,
@ -246,6 +247,7 @@ describe("BlueBubbles webhook monitor", () => {
vi.clearAllMocks();
// Reset short ID state between tests for predictable behavior
_resetBlueBubblesShortIdState();
resetBlueBubblesSelfChatCache();
mockFetchBlueBubblesHistory.mockResolvedValue({ entries: [], resolved: true });
mockReadAllowFromStore.mockResolvedValue([]);
mockUpsertPairingRequest.mockResolvedValue({ code: "TESTCODE", created: true });
@ -259,6 +261,7 @@ describe("BlueBubbles webhook monitor", () => {
afterEach(() => {
unregister?.();
vi.useRealTimers();
});
describe("DM pairing behavior vs allowFrom", () => {
@ -2676,5 +2679,449 @@ describe("BlueBubbles webhook monitor", () => {
expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
});
it("drops reflected self-chat duplicates after a confirmed assistant outbound", async () => {
const account = createMockAccount({ dmPolicy: "open" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
const { sendMessageBlueBubbles } = await import("./send.js");
vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "msg-self-1" });
mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => {
await params.dispatcherOptions.deliver({ text: "replying now" }, { kind: "final" });
return EMPTY_DISPATCH_RESULT;
});
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const timestamp = Date.now();
const inboundPayload = {
type: "new-message",
data: {
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-self-0",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
createMockResponse(),
);
await flushAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
const fromMePayload = {
type: "new-message",
data: {
text: "replying now",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: true,
guid: "msg-self-1",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
createMockResponse(),
);
await flushAsync();
const reflectedPayload = {
type: "new-message",
data: {
text: "replying now",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-self-2",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload),
createMockResponse(),
);
await flushAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
});
it("does not drop inbound messages when no fromMe self-chat copy was seen", async () => {
const account = createMockAccount({ dmPolicy: "open" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const inboundPayload = {
type: "new-message",
data: {
text: "genuinely new message",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-inbound-1",
chatGuid: "iMessage;-;+15551234567",
date: Date.now(),
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
createMockResponse(),
);
await flushAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
});
it("does not drop reflected copies after the self-chat cache TTL expires", async () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
const account = createMockAccount({ dmPolicy: "open" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const timestamp = Date.now();
const fromMePayload = {
type: "new-message",
data: {
text: "ttl me",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: true,
guid: "msg-self-ttl-1",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
createMockResponse(),
);
await vi.runAllTimersAsync();
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
vi.advanceTimersByTime(10_001);
const reflectedPayload = {
type: "new-message",
data: {
text: "ttl me",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-self-ttl-2",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload),
createMockResponse(),
);
await vi.runAllTimersAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
});
it("does not cache regular fromMe DMs as self-chat reflections", async () => {
const account = createMockAccount({ dmPolicy: "open" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const timestamp = Date.now();
const fromMePayload = {
type: "new-message",
data: {
text: "shared text",
handle: { address: "+15557654321" },
isGroup: false,
isFromMe: true,
guid: "msg-normal-fromme",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
createMockResponse(),
);
await flushAsync();
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
const inboundPayload = {
type: "new-message",
data: {
text: "shared text",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-normal-inbound",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
createMockResponse(),
);
await flushAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
});
it("does not drop user-authored self-chat prompts without a confirmed assistant outbound", async () => {
const account = createMockAccount({ dmPolicy: "open" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const timestamp = Date.now();
const fromMePayload = {
type: "new-message",
data: {
text: "user-authored self prompt",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: true,
guid: "msg-self-user-1",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
createMockResponse(),
);
await flushAsync();
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
const reflectedPayload = {
type: "new-message",
data: {
text: "user-authored self prompt",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-self-user-2",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload),
createMockResponse(),
);
await flushAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
});
it("does not treat a pending text-only match as confirmed assistant outbound", async () => {
const account = createMockAccount({ dmPolicy: "open" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
const { sendMessageBlueBubbles } = await import("./send.js");
vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "ok" });
mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => {
await params.dispatcherOptions.deliver({ text: "same text" }, { kind: "final" });
return EMPTY_DISPATCH_RESULT;
});
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const timestamp = Date.now();
const inboundPayload = {
type: "new-message",
data: {
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-self-race-0",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
createMockResponse(),
);
await flushAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
const fromMePayload = {
type: "new-message",
data: {
text: "same text",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: true,
guid: "msg-self-race-1",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
createMockResponse(),
);
await flushAsync();
const reflectedPayload = {
type: "new-message",
data: {
text: "same text",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-self-race-2",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload),
createMockResponse(),
);
await flushAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
});
it("does not treat chatGuid-inferred sender ids as self-chat evidence", async () => {
const account = createMockAccount({ dmPolicy: "open" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const timestamp = Date.now();
const fromMePayload = {
type: "new-message",
data: {
text: "shared inferred text",
handle: null,
isGroup: false,
isFromMe: true,
guid: "msg-inferred-fromme",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
createMockResponse(),
);
await flushAsync();
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
const inboundPayload = {
type: "new-message",
data: {
text: "shared inferred text",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-inferred-inbound",
chatGuid: "iMessage;-;+15551234567",
date: timestamp,
},
};
await handleBlueBubblesWebhookRequest(
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
createMockResponse(),
);
await flushAsync();
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
});
});
});

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/copilot-proxy",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw Copilot Proxy provider plugin",
"type": "module",

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/diagnostics-otel",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw diagnostics OpenTelemetry exporter",
"type": "module",
"dependencies": {

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/diffs",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw diff viewer plugin",
"type": "module",

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/discord",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Discord channel plugin",
"type": "module",
"openclaw": {

View File

@ -1,12 +1,12 @@
{
"name": "@openclaw/feishu",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Feishu/Lark channel plugin (community maintained by @m1heng)",
"type": "module",
"dependencies": {
"@larksuiteoapi/node-sdk": "^1.59.0",
"@sinclair/typebox": "0.34.48",
"https-proxy-agent": "^7.0.6",
"https-proxy-agent": "^8.0.0",
"zod": "^4.3.6"
},
"openclaw": {

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/google-gemini-cli-auth",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw Gemini CLI OAuth provider plugin",
"type": "module",

View File

@ -1,15 +1,12 @@
{
"name": "@openclaw/googlechat",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw Google Chat channel plugin",
"type": "module",
"dependencies": {
"google-auth-library": "^10.6.1"
},
"devDependencies": {
"openclaw": "workspace:*"
},
"peerDependencies": {
"openclaw": ">=2026.3.7"
},

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/imessage",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw iMessage channel plugin",
"type": "module",

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/irc",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw IRC channel plugin",
"type": "module",
"dependencies": {

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/line",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw LINE channel plugin",
"type": "module",

View File

@ -69,6 +69,7 @@ outside the list is rejected.
- `schema` (object, optional JSON Schema)
- `provider` (string, optional)
- `model` (string, optional)
- `thinking` (string, optional)
- `authProfileId` (string, optional)
- `temperature` (number, optional)
- `maxTokens` (number, optional)

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/llm-task",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw JSON-only LLM task plugin",
"type": "module",

View File

@ -109,6 +109,59 @@ describe("llm-task tool (json-only)", () => {
expect(call.model).toBe("claude-4-sonnet");
});
it("passes thinking override to embedded runner", async () => {
// oxlint-disable-next-line typescript/no-explicit-any
(runEmbeddedPiAgent as any).mockResolvedValueOnce({
meta: {},
payloads: [{ text: JSON.stringify({ ok: true }) }],
});
const tool = createLlmTaskTool(fakeApi());
await tool.execute("id", { prompt: "x", thinking: "high" });
// oxlint-disable-next-line typescript/no-explicit-any
const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0];
expect(call.thinkLevel).toBe("high");
});
it("normalizes thinking aliases", async () => {
// oxlint-disable-next-line typescript/no-explicit-any
(runEmbeddedPiAgent as any).mockResolvedValueOnce({
meta: {},
payloads: [{ text: JSON.stringify({ ok: true }) }],
});
const tool = createLlmTaskTool(fakeApi());
await tool.execute("id", { prompt: "x", thinking: "on" });
// oxlint-disable-next-line typescript/no-explicit-any
const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0];
expect(call.thinkLevel).toBe("low");
});
it("throws on invalid thinking level", async () => {
const tool = createLlmTaskTool(fakeApi());
await expect(tool.execute("id", { prompt: "x", thinking: "banana" })).rejects.toThrow(
/invalid thinking level/i,
);
});
it("throws on unsupported xhigh thinking level", async () => {
const tool = createLlmTaskTool(fakeApi());
await expect(tool.execute("id", { prompt: "x", thinking: "xhigh" })).rejects.toThrow(
/only supported/i,
);
});
it("does not pass thinkLevel when thinking is omitted", async () => {
// oxlint-disable-next-line typescript/no-explicit-any
(runEmbeddedPiAgent as any).mockResolvedValueOnce({
meta: {},
payloads: [{ text: JSON.stringify({ ok: true }) }],
});
const tool = createLlmTaskTool(fakeApi());
await tool.execute("id", { prompt: "x" });
// oxlint-disable-next-line typescript/no-explicit-any
const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0];
expect(call.thinkLevel).toBeUndefined();
});
it("enforces allowedModels", async () => {
// oxlint-disable-next-line typescript/no-explicit-any
(runEmbeddedPiAgent as any).mockResolvedValueOnce({

View File

@ -2,7 +2,13 @@ import fs from "node:fs/promises";
import path from "node:path";
import { Type } from "@sinclair/typebox";
import Ajv from "ajv";
import { resolvePreferredOpenClawTmpDir } from "openclaw/plugin-sdk/llm-task";
import {
formatThinkingLevels,
formatXHighModelHint,
normalizeThinkLevel,
resolvePreferredOpenClawTmpDir,
supportsXHighThinking,
} from "openclaw/plugin-sdk/llm-task";
// NOTE: This extension is intended to be bundled with OpenClaw.
// When running from source (tests/dev), OpenClaw internals live under src/.
// When running from a built install, internals live under dist/ (no src/ tree).
@ -86,6 +92,7 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
Type.String({ description: "Provider override (e.g. openai-codex, anthropic)." }),
),
model: Type.Optional(Type.String({ description: "Model id override." })),
thinking: Type.Optional(Type.String({ description: "Thinking level override." })),
authProfileId: Type.Optional(Type.String({ description: "Auth profile override." })),
temperature: Type.Optional(Type.Number({ description: "Best-effort temperature override." })),
maxTokens: Type.Optional(Type.Number({ description: "Best-effort maxTokens override." })),
@ -144,6 +151,18 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
);
}
const thinkingRaw =
typeof params.thinking === "string" && params.thinking.trim() ? params.thinking : undefined;
const thinkLevel = thinkingRaw ? normalizeThinkLevel(thinkingRaw) : undefined;
if (thinkingRaw && !thinkLevel) {
throw new Error(
`Invalid thinking level "${thinkingRaw}". Use one of: ${formatThinkingLevels(provider, model)}.`,
);
}
if (thinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) {
throw new Error(`Thinking level "xhigh" is only supported for ${formatXHighModelHint()}.`);
}
const timeoutMs =
(typeof params.timeoutMs === "number" && params.timeoutMs > 0
? params.timeoutMs
@ -204,6 +223,7 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
model,
authProfileId,
authProfileIdSource: authProfileId ? "user" : "auto",
thinkLevel,
streamParams,
disableTools: true,
});

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/lobster",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "Lobster workflow tool plugin (typed pipelines + resumable approvals)",
"type": "module",
"dependencies": {

View File

@ -1,5 +1,17 @@
# Changelog
## 2026.3.11
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.10
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.9
### Changes

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/matrix",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Matrix channel plugin",
"type": "module",
"dependencies": {

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/mattermost",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Mattermost channel plugin",
"type": "module",
"dependencies": {

View File

@ -270,6 +270,16 @@ export const mattermostPlugin: ChannelPlugin<ResolvedMattermostAccount> = {
streaming: {
blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 },
},
threading: {
resolveReplyToMode: ({ cfg, accountId }) => {
const account = resolveMattermostAccount({ cfg, accountId: accountId ?? "default" });
const mode = account.config.replyToMode;
if (mode === "off" || mode === "first") {
return mode;
}
return "all";
},
},
reload: { configPrefixes: ["channels.mattermost"] },
configSchema: buildChannelConfigSchema(MattermostConfigSchema),
config: {

View File

@ -43,6 +43,7 @@ const MattermostAccountSchemaBase = z
chunkMode: z.enum(["length", "newline"]).optional(),
blockStreaming: z.boolean().optional(),
blockStreamingCoalesce: BlockStreamingCoalesceSchema.optional(),
replyToMode: z.enum(["off", "first", "all"]).optional(),
responsePrefix: z.string().optional(),
actions: z
.object({

View File

@ -109,6 +109,29 @@ describe("mattermost mention gating", () => {
});
});
describe("resolveMattermostReplyRootId with block streaming payloads", () => {
it("uses threadRootId for block-streamed payloads with replyToId", () => {
// When block streaming sends a payload with replyToId from the threading
// mode, the deliver callback should still use the existing threadRootId.
expect(
resolveMattermostReplyRootId({
threadRootId: "thread-root-1",
replyToId: "streamed-reply-id",
}),
).toBe("thread-root-1");
});
it("falls back to payload replyToId when no threadRootId in block streaming", () => {
// Top-level channel message: no threadRootId, payload carries the
// inbound post id as replyToId from the "all" threading mode.
expect(
resolveMattermostReplyRootId({
replyToId: "inbound-post-for-threading",
}),
).toBe("inbound-post-for-threading");
});
});
describe("resolveMattermostReplyRootId", () => {
it("uses replyToId for top-level replies", () => {
expect(

View File

@ -52,6 +52,8 @@ export type MattermostAccountConfig = {
blockStreaming?: boolean;
/** Merge streamed block replies before sending. */
blockStreamingCoalesce?: BlockStreamingCoalesceConfig;
/** Control reply threading (off|first|all). Default: "all". */
replyToMode?: "off" | "first" | "all";
/** Outbound response prefix override for this channel/account. */
responsePrefix?: string;
/** Action toggles for this account. */

View File

@ -1,12 +1,9 @@
{
"name": "@openclaw/memory-core",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw core memory search plugin",
"type": "module",
"devDependencies": {
"openclaw": "workspace:*"
},
"peerDependencies": {
"openclaw": ">=2026.3.7"
},

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/memory-lancedb",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw LanceDB-backed long-term memory plugin with auto-recall/capture",
"type": "module",

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/minimax-portal-auth",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw MiniMax Portal OAuth provider plugin",
"type": "module",

View File

@ -1,5 +1,17 @@
# Changelog
## 2026.3.11
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.10
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.9
### Changes

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/msteams",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Microsoft Teams channel plugin",
"type": "module",
"dependencies": {

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/nextcloud-talk",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Nextcloud Talk channel plugin",
"type": "module",
"dependencies": {

View File

@ -1,5 +1,17 @@
# Changelog
## 2026.3.11
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.10
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.9
### Changes

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/nostr",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Nostr channel plugin for NIP-04 encrypted DMs",
"type": "module",
"dependencies": {

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/open-prose",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenProse VM skill pack plugin (slash command + telemetry).",
"type": "module",

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/signal",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw Signal channel plugin",
"type": "module",

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/slack",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw Slack channel plugin",
"type": "module",

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/synology-chat",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "Synology Chat channel plugin for OpenClaw",
"type": "module",
"dependencies": {

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/telegram",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw Telegram channel plugin",
"type": "module",

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/tlon",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Tlon/Urbit channel plugin",
"type": "module",
"dependencies": {

View File

@ -1,5 +1,17 @@
# Changelog
## 2026.3.11
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.10
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.9
### Changes

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/twitch",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Twitch channel plugin",
"type": "module",
"dependencies": {

View File

@ -1,5 +1,17 @@
# Changelog
## 2026.3.11
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.10
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.9
### Changes

View File

@ -522,11 +522,22 @@
"apiKey": {
"type": "string"
},
"baseUrl": {
"type": "string"
},
"model": {
"type": "string"
},
"voice": {
"type": "string"
},
"speed": {
"type": "number",
"minimum": 0.25,
"maximum": 4.0
},
"instructions": {
"type": "string"
}
}
},

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/voice-call",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw voice-call plugin",
"type": "module",
"dependencies": {

View File

@ -1,3 +1,4 @@
import { resolveOpenAITtsInstructions } from "openclaw/plugin-sdk/voice-call";
import { pcmToMulaw } from "../telephony-audio.js";
/**
@ -110,9 +111,11 @@ export class OpenAITTSProvider {
speed: this.speed,
};
// Add instructions if using gpt-4o-mini-tts model
const effectiveInstructions = trimToUndefined(instructions) ?? this.instructions;
if (effectiveInstructions && this.model.includes("gpt-4o-mini-tts")) {
const effectiveInstructions = resolveOpenAITtsInstructions(
this.model,
trimToUndefined(instructions) ?? this.instructions,
);
if (effectiveInstructions) {
body.instructions = effectiveInstructions;
}

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/whatsapp",
"version": "2026.3.9",
"version": "2026.3.11",
"private": true,
"description": "OpenClaw WhatsApp channel plugin",
"type": "module",

View File

@ -1,5 +1,17 @@
# Changelog
## 2026.3.11
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.10
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.9
### Changes

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/zalo",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Zalo channel plugin",
"type": "module",
"dependencies": {

View File

@ -1,5 +1,17 @@
# Changelog
## 2026.3.11
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.10
### Changes
- Version alignment with core OpenClaw release numbers.
## 2026.3.9
### Changes

View File

@ -1,6 +1,6 @@
{
"name": "@openclaw/zalouser",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "OpenClaw Zalo Personal Account plugin via native zca-js integration",
"type": "module",
"dependencies": {

View File

@ -5,6 +5,7 @@ import {
primeSendMock,
} from "../../../src/test-utils/send-payload-contract.js";
import { zalouserPlugin } from "./channel.js";
import { setZalouserRuntime } from "./runtime.js";
vi.mock("./send.js", () => ({
sendMessageZalouser: vi.fn().mockResolvedValue({ ok: true, messageId: "zlu-1" }),
@ -38,6 +39,14 @@ describe("zalouserPlugin outbound sendPayload", () => {
let mockedSend: ReturnType<typeof vi.mocked<(typeof import("./send.js"))["sendMessageZalouser"]>>;
beforeEach(async () => {
setZalouserRuntime({
channel: {
text: {
resolveChunkMode: vi.fn(() => "length"),
resolveTextChunkLimit: vi.fn(() => 1200),
},
},
} as never);
const mod = await import("./send.js");
mockedSend = vi.mocked(mod.sendMessageZalouser);
mockedSend.mockClear();
@ -55,7 +64,7 @@ describe("zalouserPlugin outbound sendPayload", () => {
expect(mockedSend).toHaveBeenCalledWith(
"1471383327500481391",
"hello group",
expect.objectContaining({ isGroup: true }),
expect.objectContaining({ isGroup: true, textMode: "markdown" }),
);
expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-g1" });
});
@ -71,7 +80,7 @@ describe("zalouserPlugin outbound sendPayload", () => {
expect(mockedSend).toHaveBeenCalledWith(
"987654321",
"hello",
expect.objectContaining({ isGroup: false }),
expect.objectContaining({ isGroup: false, textMode: "markdown" }),
);
expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-d1" });
});
@ -87,14 +96,37 @@ describe("zalouserPlugin outbound sendPayload", () => {
expect(mockedSend).toHaveBeenCalledWith(
"g-1471383327500481391",
"hello native group",
expect.objectContaining({ isGroup: true }),
expect.objectContaining({ isGroup: true, textMode: "markdown" }),
);
expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-g-native" });
});
it("passes long markdown through once so formatting happens before chunking", async () => {
const text = `**${"a".repeat(2501)}**`;
mockedSend.mockResolvedValue({ ok: true, messageId: "zlu-code" });
const result = await zalouserPlugin.outbound!.sendPayload!({
...baseCtx({ text }),
to: "987654321",
});
expect(mockedSend).toHaveBeenCalledTimes(1);
expect(mockedSend).toHaveBeenCalledWith(
"987654321",
text,
expect.objectContaining({
isGroup: false,
textMode: "markdown",
textChunkMode: "length",
textChunkLimit: 1200,
}),
);
expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-code" });
});
installSendPayloadContractSuite({
channel: "zalouser",
chunking: { mode: "split", longTextLength: 3000, maxChunkLength: 2000 },
chunking: { mode: "passthrough", longTextLength: 3000 },
createHarness: ({ payload, sendResults }) => {
primeSendMock(mockedSend, { ok: true, messageId: "zlu-1" }, sendResults);
return {

View File

@ -1,5 +1,7 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import { chunkMarkdownText } from "../../../src/auto-reply/chunk.js";
import { zalouserPlugin } from "./channel.js";
import { setZalouserRuntime } from "./runtime.js";
import { sendReactionZalouser } from "./send.js";
vi.mock("./send.js", async (importOriginal) => {
@ -13,6 +15,16 @@ vi.mock("./send.js", async (importOriginal) => {
const mockSendReaction = vi.mocked(sendReactionZalouser);
describe("zalouser outbound chunker", () => {
beforeEach(() => {
setZalouserRuntime({
channel: {
text: {
chunkMarkdownText,
},
},
} as never);
});
it("chunks without empty strings and respects limit", () => {
const chunker = zalouserPlugin.outbound?.chunker;
expect(chunker).toBeTypeOf("function");

View File

@ -20,7 +20,6 @@ import {
buildBaseAccountStatusSnapshot,
buildChannelConfigSchema,
DEFAULT_ACCOUNT_ID,
chunkTextForOutbound,
deleteAccountFromConfigSection,
formatAllowFromLowercase,
isNumericTargetId,
@ -43,6 +42,7 @@ import { resolveZalouserReactionMessageIds } from "./message-sid.js";
import { zalouserOnboardingAdapter } from "./onboarding.js";
import { probeZalouser } from "./probe.js";
import { writeQrDataUrlToTempFile } from "./qr-temp-file.js";
import { getZalouserRuntime } from "./runtime.js";
import { sendMessageZalouser, sendReactionZalouser } from "./send.js";
import { collectZalouserStatusIssues } from "./status-issues.js";
import {
@ -166,6 +166,16 @@ function resolveZalouserQrProfile(accountId?: string | null): string {
return normalized;
}
function resolveZalouserOutboundChunkMode(cfg: OpenClawConfig, accountId?: string) {
return getZalouserRuntime().channel.text.resolveChunkMode(cfg, "zalouser", accountId);
}
function resolveZalouserOutboundTextChunkLimit(cfg: OpenClawConfig, accountId?: string) {
return getZalouserRuntime().channel.text.resolveTextChunkLimit(cfg, "zalouser", accountId, {
fallbackLimit: zalouserDock.outbound?.textChunkLimit ?? 2000,
});
}
function mapUser(params: {
id: string;
name?: string | null;
@ -595,14 +605,11 @@ export const zalouserPlugin: ChannelPlugin<ResolvedZalouserAccount> = {
},
outbound: {
deliveryMode: "direct",
chunker: chunkTextForOutbound,
chunkerMode: "text",
textChunkLimit: 2000,
chunker: (text, limit) => getZalouserRuntime().channel.text.chunkMarkdownText(text, limit),
chunkerMode: "markdown",
sendPayload: async (ctx) =>
await sendPayloadWithChunkedTextAndMedia({
ctx,
textChunkLimit: zalouserPlugin.outbound!.textChunkLimit,
chunker: zalouserPlugin.outbound!.chunker,
sendText: (nextCtx) => zalouserPlugin.outbound!.sendText!(nextCtx),
sendMedia: (nextCtx) => zalouserPlugin.outbound!.sendMedia!(nextCtx),
emptyResult: { channel: "zalouser", messageId: "" },
@ -613,6 +620,9 @@ export const zalouserPlugin: ChannelPlugin<ResolvedZalouserAccount> = {
const result = await sendMessageZalouser(target.threadId, text, {
profile: account.profile,
isGroup: target.isGroup,
textMode: "markdown",
textChunkMode: resolveZalouserOutboundChunkMode(cfg, account.accountId),
textChunkLimit: resolveZalouserOutboundTextChunkLimit(cfg, account.accountId),
});
return buildChannelSendResult("zalouser", result);
},
@ -624,6 +634,9 @@ export const zalouserPlugin: ChannelPlugin<ResolvedZalouserAccount> = {
isGroup: target.isGroup,
mediaUrl,
mediaLocalRoots,
textMode: "markdown",
textChunkMode: resolveZalouserOutboundChunkMode(cfg, account.accountId),
textChunkLimit: resolveZalouserOutboundTextChunkLimit(cfg, account.accountId),
});
return buildChannelSendResult("zalouser", result);
},

View File

@ -51,6 +51,7 @@ function createRuntimeEnv(): RuntimeEnv {
function installRuntime(params: {
commandAuthorized?: boolean;
replyPayload?: { text?: string; mediaUrl?: string; mediaUrls?: string[] };
resolveCommandAuthorizedFromAuthorizers?: (params: {
useAccessGroups: boolean;
authorizers: Array<{ configured: boolean; allowed: boolean }>;
@ -58,6 +59,9 @@ function installRuntime(params: {
}) {
const dispatchReplyWithBufferedBlockDispatcher = vi.fn(async ({ dispatcherOptions, ctx }) => {
await dispatcherOptions.typingCallbacks?.onReplyStart?.();
if (params.replyPayload) {
await dispatcherOptions.deliver(params.replyPayload);
}
return { queuedFinal: false, counts: { tool: 0, block: 0, final: 0 }, ctx };
});
const resolveCommandAuthorizedFromAuthorizers = vi.fn(
@ -166,7 +170,8 @@ function installRuntime(params: {
text: {
resolveMarkdownTableMode: vi.fn(() => "code"),
convertMarkdownTables: vi.fn((text: string) => text),
resolveChunkMode: vi.fn(() => "line"),
resolveChunkMode: vi.fn(() => "length"),
resolveTextChunkLimit: vi.fn(() => 1200),
chunkMarkdownTextWithMode: vi.fn((text: string) => [text]),
},
},
@ -304,6 +309,42 @@ describe("zalouser monitor group mention gating", () => {
expect(callArg?.ctx?.WasMentioned).toBe(true);
});
it("passes long markdown replies through once so formatting happens before chunking", async () => {
const replyText = `**${"a".repeat(2501)}**`;
installRuntime({
commandAuthorized: false,
replyPayload: { text: replyText },
});
await __testing.processMessage({
message: createDmMessage({
content: "hello",
}),
account: {
...createAccount(),
config: {
...createAccount().config,
dmPolicy: "open",
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
});
expect(sendMessageZalouserMock).toHaveBeenCalledTimes(1);
expect(sendMessageZalouserMock).toHaveBeenCalledWith(
"u-1",
replyText,
expect.objectContaining({
isGroup: false,
profile: "default",
textMode: "markdown",
textChunkMode: "length",
textChunkLimit: 1200,
}),
);
});
it("uses commandContent for mention-prefixed control commands", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: true,

View File

@ -703,6 +703,10 @@ async function deliverZalouserReply(params: {
params;
const tableMode = params.tableMode ?? "code";
const text = core.channel.text.convertMarkdownTables(payload.text ?? "", tableMode);
const chunkMode = core.channel.text.resolveChunkMode(config, "zalouser", accountId);
const textChunkLimit = core.channel.text.resolveTextChunkLimit(config, "zalouser", accountId, {
fallbackLimit: ZALOUSER_TEXT_LIMIT,
});
const sentMedia = await sendMediaWithLeadingCaption({
mediaUrls: resolveOutboundMediaUrls(payload),
@ -713,6 +717,9 @@ async function deliverZalouserReply(params: {
profile,
mediaUrl,
isGroup,
textMode: "markdown",
textChunkMode: chunkMode,
textChunkLimit,
});
statusSink?.({ lastOutboundAt: Date.now() });
},
@ -725,20 +732,17 @@ async function deliverZalouserReply(params: {
}
if (text) {
const chunkMode = core.channel.text.resolveChunkMode(config, "zalouser", accountId);
const chunks = core.channel.text.chunkMarkdownTextWithMode(
text,
ZALOUSER_TEXT_LIMIT,
chunkMode,
);
logVerbose(core, runtime, `Sending ${chunks.length} text chunk(s) to ${chatId}`);
for (const chunk of chunks) {
try {
await sendMessageZalouser(chatId, chunk, { profile, isGroup });
statusSink?.({ lastOutboundAt: Date.now() });
} catch (err) {
runtime.error(`Zalouser message send failed: ${String(err)}`);
}
try {
await sendMessageZalouser(chatId, text, {
profile,
isGroup,
textMode: "markdown",
textChunkMode: chunkMode,
textChunkLimit,
});
statusSink?.({ lastOutboundAt: Date.now() });
} catch (err) {
runtime.error(`Zalouser message send failed: ${String(err)}`);
}
}
}

View File

@ -8,6 +8,7 @@ import {
sendSeenZalouser,
sendTypingZalouser,
} from "./send.js";
import { parseZalouserTextStyles } from "./text-styles.js";
import {
sendZaloDeliveredEvent,
sendZaloLink,
@ -16,6 +17,7 @@ import {
sendZaloTextMessage,
sendZaloTypingEvent,
} from "./zalo-js.js";
import { TextStyle } from "./zca-client.js";
vi.mock("./zalo-js.js", () => ({
sendZaloTextMessage: vi.fn(),
@ -43,36 +45,272 @@ describe("zalouser send helpers", () => {
mockSendSeen.mockReset();
});
it("delegates text send to JS transport", async () => {
it("keeps plain text literal by default", async () => {
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-1" });
const result = await sendMessageZalouser("thread-1", "hello", {
const result = await sendMessageZalouser("thread-1", "**hello**", {
profile: "default",
isGroup: true,
});
expect(mockSendText).toHaveBeenCalledWith("thread-1", "hello", {
profile: "default",
isGroup: true,
});
expect(mockSendText).toHaveBeenCalledWith(
"thread-1",
"**hello**",
expect.objectContaining({
profile: "default",
isGroup: true,
}),
);
expect(result).toEqual({ ok: true, messageId: "mid-1" });
});
it("maps image helper to media send", async () => {
it("formats markdown text when markdown mode is enabled", async () => {
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-1b" });
await sendMessageZalouser("thread-1", "**hello**", {
profile: "default",
isGroup: true,
textMode: "markdown",
});
expect(mockSendText).toHaveBeenCalledWith(
"thread-1",
"hello",
expect.objectContaining({
profile: "default",
isGroup: true,
textMode: "markdown",
textStyles: [{ start: 0, len: 5, st: TextStyle.Bold }],
}),
);
});
it("formats image captions in markdown mode", async () => {
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-2" });
await sendImageZalouser("thread-2", "https://example.com/a.png", {
profile: "p2",
caption: "cap",
caption: "_cap_",
isGroup: false,
textMode: "markdown",
});
expect(mockSendText).toHaveBeenCalledWith("thread-2", "cap", {
expect(mockSendText).toHaveBeenCalledWith(
"thread-2",
"cap",
expect.objectContaining({
profile: "p2",
caption: undefined,
isGroup: false,
mediaUrl: "https://example.com/a.png",
textMode: "markdown",
textStyles: [{ start: 0, len: 3, st: TextStyle.Italic }],
}),
);
});
it("does not keep the raw markdown caption as a media fallback after formatting", async () => {
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-2b" });
await sendImageZalouser("thread-2", "https://example.com/a.png", {
profile: "p2",
caption: "cap",
caption: "```\n```",
isGroup: false,
mediaUrl: "https://example.com/a.png",
textMode: "markdown",
});
expect(mockSendText).toHaveBeenCalledWith(
"thread-2",
"",
expect.objectContaining({
profile: "p2",
caption: undefined,
isGroup: false,
mediaUrl: "https://example.com/a.png",
textMode: "markdown",
textStyles: undefined,
}),
);
});
it("rechunks normalized markdown text before sending to avoid transport truncation", async () => {
const text = "\t".repeat(500) + "a".repeat(1500);
const formatted = parseZalouserTextStyles(text);
mockSendText
.mockResolvedValueOnce({ ok: true, messageId: "mid-2c-1" })
.mockResolvedValueOnce({ ok: true, messageId: "mid-2c-2" });
const result = await sendMessageZalouser("thread-2c", text, {
profile: "p2c",
isGroup: false,
textMode: "markdown",
});
expect(formatted.text.length).toBeGreaterThan(2000);
expect(mockSendText).toHaveBeenCalledTimes(2);
expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text);
expect(mockSendText.mock.calls.every((call) => (call[1] as string).length <= 2000)).toBe(true);
expect(result).toEqual({ ok: true, messageId: "mid-2c-2" });
});
it("preserves text styles when splitting long formatted markdown", async () => {
const text = `**${"a".repeat(2501)}**`;
mockSendText
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-1" })
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-2" });
const result = await sendMessageZalouser("thread-2d", text, {
profile: "p2d",
isGroup: false,
textMode: "markdown",
});
expect(mockSendText).toHaveBeenNthCalledWith(
1,
"thread-2d",
"a".repeat(2000),
expect.objectContaining({
profile: "p2d",
isGroup: false,
textMode: "markdown",
textStyles: [{ start: 0, len: 2000, st: TextStyle.Bold }],
}),
);
expect(mockSendText).toHaveBeenNthCalledWith(
2,
"thread-2d",
"a".repeat(501),
expect.objectContaining({
profile: "p2d",
isGroup: false,
textMode: "markdown",
textStyles: [{ start: 0, len: 501, st: TextStyle.Bold }],
}),
);
expect(result).toEqual({ ok: true, messageId: "mid-2d-2" });
});
it("preserves formatted text and styles when newline chunk mode splits after parsing", async () => {
const text = `**${"a".repeat(1995)}**\n\nsecond paragraph`;
const formatted = parseZalouserTextStyles(text);
mockSendText
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-3" })
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-4" });
const result = await sendMessageZalouser("thread-2d-2", text, {
profile: "p2d-2",
isGroup: false,
textMode: "markdown",
textChunkMode: "newline",
});
expect(mockSendText).toHaveBeenCalledTimes(2);
expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text);
expect(mockSendText).toHaveBeenNthCalledWith(
1,
"thread-2d-2",
`${"a".repeat(1995)}\n\n`,
expect.objectContaining({
profile: "p2d-2",
isGroup: false,
textMode: "markdown",
textChunkMode: "newline",
textStyles: [{ start: 0, len: 1995, st: TextStyle.Bold }],
}),
);
expect(mockSendText).toHaveBeenNthCalledWith(
2,
"thread-2d-2",
"second paragraph",
expect.objectContaining({
profile: "p2d-2",
isGroup: false,
textMode: "markdown",
textChunkMode: "newline",
textStyles: undefined,
}),
);
expect(result).toEqual({ ok: true, messageId: "mid-2d-4" });
});
it("respects an explicit text chunk limit when splitting formatted markdown", async () => {
const text = `**${"a".repeat(1501)}**`;
mockSendText
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-5" })
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-6" });
const result = await sendMessageZalouser("thread-2d-3", text, {
profile: "p2d-3",
isGroup: false,
textMode: "markdown",
textChunkLimit: 1200,
} as never);
expect(mockSendText).toHaveBeenCalledTimes(2);
expect(mockSendText).toHaveBeenNthCalledWith(
1,
"thread-2d-3",
"a".repeat(1200),
expect.objectContaining({
profile: "p2d-3",
isGroup: false,
textMode: "markdown",
textChunkLimit: 1200,
textStyles: [{ start: 0, len: 1200, st: TextStyle.Bold }],
}),
);
expect(mockSendText).toHaveBeenNthCalledWith(
2,
"thread-2d-3",
"a".repeat(301),
expect.objectContaining({
profile: "p2d-3",
isGroup: false,
textMode: "markdown",
textChunkLimit: 1200,
textStyles: [{ start: 0, len: 301, st: TextStyle.Bold }],
}),
);
expect(result).toEqual({ ok: true, messageId: "mid-2d-6" });
});
it("sends overflow markdown captions as follow-up text after the media message", async () => {
const caption = "\t".repeat(500) + "a".repeat(1500);
const formatted = parseZalouserTextStyles(caption);
mockSendText
.mockResolvedValueOnce({ ok: true, messageId: "mid-2e-1" })
.mockResolvedValueOnce({ ok: true, messageId: "mid-2e-2" });
const result = await sendImageZalouser("thread-2e", "https://example.com/long.png", {
profile: "p2e",
caption,
isGroup: false,
textMode: "markdown",
});
expect(mockSendText).toHaveBeenCalledTimes(2);
expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text);
expect(mockSendText).toHaveBeenNthCalledWith(
1,
"thread-2e",
expect.any(String),
expect.objectContaining({
profile: "p2e",
caption: undefined,
isGroup: false,
mediaUrl: "https://example.com/long.png",
textMode: "markdown",
}),
);
expect(mockSendText).toHaveBeenNthCalledWith(
2,
"thread-2e",
expect.any(String),
expect.not.objectContaining({
mediaUrl: "https://example.com/long.png",
}),
);
expect(result).toEqual({ ok: true, messageId: "mid-2e-2" });
});
it("delegates link helper to JS transport", async () => {

View File

@ -1,3 +1,4 @@
import { parseZalouserTextStyles } from "./text-styles.js";
import type { ZaloEventMessage, ZaloSendOptions, ZaloSendResult } from "./types.js";
import {
sendZaloDeliveredEvent,
@ -7,16 +8,58 @@ import {
sendZaloTextMessage,
sendZaloTypingEvent,
} from "./zalo-js.js";
import { TextStyle } from "./zca-client.js";
// Re-exported aliases so callers depend on the zalouser layer, not on the
// underlying zalo-js transport types.
export type ZalouserSendOptions = ZaloSendOptions;
export type ZalouserSendResult = ZaloSendResult;
// Per-message character cap applied before handing text to the transport.
// NOTE(review): presumably mirrors Zalo's server-side message limit — confirm.
const ZALO_TEXT_LIMIT = 2000;
// Default chunking strategy when the caller does not specify textChunkMode.
const DEFAULT_TEXT_CHUNK_MODE = "length";
// One transport-sized piece of a message: its text plus the style ranges that
// overlap it (ranges are re-based to the chunk's own offsets).
type StyledTextChunk = {
  text: string;
  styles?: ZaloSendOptions["textStyles"];
};
type TextChunkMode = NonNullable<ZaloSendOptions["textChunkMode"]>;
export async function sendMessageZalouser(
threadId: string,
text: string,
options: ZalouserSendOptions = {},
): Promise<ZalouserSendResult> {
return await sendZaloTextMessage(threadId, text, options);
const prepared =
options.textMode === "markdown"
? parseZalouserTextStyles(text)
: { text, styles: options.textStyles };
const textChunkLimit = options.textChunkLimit ?? ZALO_TEXT_LIMIT;
const chunks = splitStyledText(
prepared.text,
(prepared.styles?.length ?? 0) > 0 ? prepared.styles : undefined,
textChunkLimit,
options.textChunkMode,
);
let lastResult: ZalouserSendResult | null = null;
for (const [index, chunk] of chunks.entries()) {
const chunkOptions =
index === 0
? { ...options, textStyles: chunk.styles }
: {
...options,
caption: undefined,
mediaLocalRoots: undefined,
mediaUrl: undefined,
textStyles: chunk.styles,
};
const result = await sendZaloTextMessage(threadId, chunk.text, chunkOptions);
if (!result.ok) {
return result;
}
lastResult = result;
}
return lastResult ?? { ok: false, error: "No message content provided" };
}
export async function sendImageZalouser(
@ -24,8 +67,9 @@ export async function sendImageZalouser(
imageUrl: string,
options: ZalouserSendOptions = {},
): Promise<ZalouserSendResult> {
return await sendZaloTextMessage(threadId, options.caption ?? "", {
return await sendMessageZalouser(threadId, options.caption ?? "", {
...options,
caption: undefined,
mediaUrl: imageUrl,
});
}
@ -85,3 +129,144 @@ export async function sendSeenZalouser(params: {
}): Promise<void> {
await sendZaloSeenEvent(params);
}
/**
 * Splits formatted text into transport-sized chunks, carrying each chunk's
 * overlapping style ranges with it (re-based to the chunk start).
 */
function splitStyledText(
  text: string,
  styles: ZaloSendOptions["textStyles"],
  limit: number,
  mode: ZaloSendOptions["textChunkMode"],
): StyledTextChunk[] {
  // An empty payload still yields one (style-free) chunk so callers always
  // have something to send.
  if (!text.length) {
    return [{ text, styles: undefined }];
  }
  const effectiveMode = mode ?? DEFAULT_TEXT_CHUNK_MODE;
  return splitTextRanges(text, limit, effectiveMode).map(({ start, end }) => ({
    text: text.slice(start, end),
    styles: sliceTextStyles(styles, start, end),
  }));
}
/**
 * Projects style ranges onto the [start, end) window, shifting the surviving
 * ranges so they are relative to the window's own origin.
 * Returns undefined when nothing overlaps, matching the transport contract.
 */
function sliceTextStyles(
  styles: ZaloSendOptions["textStyles"],
  start: number,
  end: number,
): ZaloSendOptions["textStyles"] {
  if (!styles?.length) {
    return undefined;
  }
  const windowed: NonNullable<ZaloSendOptions["textStyles"]> = [];
  for (const style of styles) {
    const from = Math.max(style.start, start);
    const to = Math.min(style.start + style.len, end);
    if (to <= from) {
      // Style lies entirely outside this chunk.
      continue;
    }
    if (style.st === TextStyle.Indent) {
      // Indent styles carry an extra indentSize payload that must survive.
      windowed.push({
        start: from - start,
        len: to - from,
        st: style.st,
        indentSize: style.indentSize,
      });
    } else {
      windowed.push({ start: from - start, len: to - from, st: style.st });
    }
  }
  return windowed.length > 0 ? windowed : undefined;
}
/**
 * Splits `text` into [start, end) ranges no longer than `limit` characters.
 * "newline" mode prefers paragraph/line/word boundaries; otherwise the text
 * is cut at exact `limit` offsets.
 */
function splitTextRanges(
  text: string,
  limit: number,
  mode: TextChunkMode,
): Array<{ start: number; end: number }> {
  // Clamp the limit so a zero, negative, or fractional value cannot cause an
  // infinite loop (start += limit would never advance) or empty ranges.
  const step = Math.max(1, Math.floor(limit));
  if (mode === "newline") {
    return splitTextRangesByPreferredBreaks(text, step);
  }
  const ranges: Array<{ start: number; end: number }> = [];
  for (let start = 0; start < text.length; start += step) {
    ranges.push({
      start,
      end: Math.min(text.length, start + step),
    });
  }
  return ranges;
}
/**
 * Splits `text` into ranges of at most `limit` characters, preferring to cut
 * at (in order) a paragraph break, a newline, then any whitespace, and only
 * falling back to a hard cut when no break exists in the window.
 */
function splitTextRangesByPreferredBreaks(
  text: string,
  limit: number,
): Array<{ start: number; end: number }> {
  const ranges: Array<{ start: number; end: number }> = [];
  let start = 0;
  while (start < text.length) {
    const maxEnd = Math.min(text.length, start + limit);
    let end = maxEnd;
    if (maxEnd < text.length) {
      // Only search for a soft break when more text follows; the final
      // window keeps its natural end.
      end =
        findParagraphBreak(text, start, maxEnd) ??
        findLastBreak(text, "\n", start, maxEnd) ??
        findLastWhitespaceBreak(text, start, maxEnd) ??
        maxEnd;
    }
    if (end <= start) {
      // Degenerate break position: force forward progress with a hard cut.
      end = maxEnd;
    }
    ranges.push({ start, end });
    start = end;
  }
  return ranges;
}
/**
 * Returns the offset just past the LAST blank-line separator (newline,
 * optional horizontal whitespace, one or more newlines) inside the
 * [start, end) window, or undefined when the window has none.
 */
function findParagraphBreak(text: string, start: number, end: number): number | undefined {
  const window = text.slice(start, end);
  const separator = /\n[\t ]*\n+/g;
  let breakEnd: number | undefined;
  let match = separator.exec(window);
  while (match !== null) {
    // Keep scanning so we end up with the final separator in the window.
    breakEnd = start + match.index + match[0].length;
    match = separator.exec(window);
  }
  return breakEnd;
}
/**
 * Returns the offset just past the last occurrence of `marker` that starts
 * within [start, end), or undefined when there is none.
 */
function findLastBreak(
  text: string,
  marker: string,
  start: number,
  end: number,
): number | undefined {
  // lastIndexOf returns -1 on a miss, which the start guard also rejects.
  const found = text.lastIndexOf(marker, end - 1);
  return found >= start ? found + marker.length : undefined;
}
/**
 * Returns the offset just past the last whitespace character strictly inside
 * (start, end), or undefined when the window contains none.
 */
function findLastWhitespaceBreak(text: string, start: number, end: number): number | undefined {
  const whitespace = /\s/;
  let index = end - 1;
  // Scan backwards; the character at `start` itself is deliberately excluded
  // so a break never produces an empty leading chunk.
  while (index > start) {
    if (whitespace.test(text.charAt(index))) {
      return index + 1;
    }
    index -= 1;
  }
  return undefined;
}

View File

@ -0,0 +1,203 @@
import { describe, expect, it } from "vitest";
import { parseZalouserTextStyles } from "./text-styles.js";
import { TextStyle } from "./zca-client.js";
describe("parseZalouserTextStyles", () => {
it("renders inline markdown emphasis as Zalo style ranges", () => {
expect(parseZalouserTextStyles("**bold** *italic* ~~strike~~")).toEqual({
text: "bold italic strike",
styles: [
{ start: 0, len: 4, st: TextStyle.Bold },
{ start: 5, len: 6, st: TextStyle.Italic },
{ start: 12, len: 6, st: TextStyle.StrikeThrough },
],
});
});
it("keeps inline code and plain math markers literal", () => {
expect(parseZalouserTextStyles("before `inline *code*` after\n2 * 3 * 4")).toEqual({
text: "before `inline *code*` after\n2 * 3 * 4",
styles: [],
});
});
it("preserves backslash escapes inside code spans and fenced code blocks", () => {
expect(parseZalouserTextStyles("before `\\*` after\n```ts\n\\*\\_\\\\\n```")).toEqual({
text: "before `\\*` after\n\\*\\_\\\\",
styles: [],
});
});
it("closes fenced code blocks when the input uses CRLF newlines", () => {
expect(parseZalouserTextStyles("```\r\n*code*\r\n```\r\n**after**")).toEqual({
text: "*code*\nafter",
styles: [{ start: 7, len: 5, st: TextStyle.Bold }],
});
});
it("maps headings, block quotes, and lists into line styles", () => {
expect(parseZalouserTextStyles(["# Title", "> quoted", " - nested"].join("\n"))).toEqual({
text: "Title\nquoted\nnested",
styles: [
{ start: 0, len: 5, st: TextStyle.Bold },
{ start: 0, len: 5, st: TextStyle.Big },
{ start: 6, len: 6, st: TextStyle.Indent, indentSize: 1 },
{ start: 13, len: 6, st: TextStyle.UnorderedList },
],
});
});
it("treats 1-3 leading spaces as markdown padding for headings and lists", () => {
expect(parseZalouserTextStyles(" # Title\n 1. item\n - bullet")).toEqual({
text: "Title\nitem\nbullet",
styles: [
{ start: 0, len: 5, st: TextStyle.Bold },
{ start: 0, len: 5, st: TextStyle.Big },
{ start: 6, len: 4, st: TextStyle.OrderedList },
{ start: 11, len: 6, st: TextStyle.UnorderedList },
],
});
});
it("strips fenced code markers and preserves leading indentation with nbsp", () => {
expect(parseZalouserTextStyles("```ts\n const x = 1\n\treturn x\n```")).toEqual({
text: "\u00A0\u00A0const x = 1\n\u00A0\u00A0\u00A0\u00A0return x",
styles: [],
});
});
it("treats tilde fences as literal code blocks", () => {
expect(parseZalouserTextStyles("~~~bash\n*cmd*\n~~~")).toEqual({
text: "*cmd*",
styles: [],
});
});
it("treats fences indented under list items as literal code blocks", () => {
expect(parseZalouserTextStyles(" ```\n*cmd*\n ```")).toEqual({
text: "*cmd*",
styles: [],
});
});
it("treats quoted backtick fences as literal code blocks", () => {
expect(parseZalouserTextStyles("> ```js\n> *cmd*\n> ```")).toEqual({
text: "*cmd*",
styles: [],
});
});
it("treats quoted tilde fences as literal code blocks", () => {
expect(parseZalouserTextStyles("> ~~~\n> *cmd*\n> ~~~")).toEqual({
text: "*cmd*",
styles: [],
});
});
it("preserves quote-prefixed lines inside normal fenced code blocks", () => {
expect(parseZalouserTextStyles("```\n> prompt\n```")).toEqual({
text: "> prompt",
styles: [],
});
});
it("does not treat quote-prefixed fence text inside code as a closing fence", () => {
expect(parseZalouserTextStyles("```\n> ```\n*still code*\n```")).toEqual({
text: "> ```\n*still code*",
styles: [],
});
});
it("treats indented blockquotes as quoted lines", () => {
expect(parseZalouserTextStyles(" > quoted")).toEqual({
text: "quoted",
styles: [{ start: 0, len: 6, st: TextStyle.Indent, indentSize: 1 }],
});
});
it("treats spaced nested blockquotes as deeper quoted lines", () => {
expect(parseZalouserTextStyles("> > quoted")).toEqual({
text: "quoted",
styles: [{ start: 0, len: 6, st: TextStyle.Indent, indentSize: 2 }],
});
});
it("treats indented quoted fences as literal code blocks", () => {
expect(parseZalouserTextStyles(" > ```\n > *cmd*\n > ```")).toEqual({
text: "*cmd*",
styles: [],
});
});
it("treats spaced nested quoted fences as literal code blocks", () => {
expect(parseZalouserTextStyles("> > ```\n> > code\n> > ```")).toEqual({
text: "code",
styles: [],
});
});
it("preserves inner quote markers inside quoted fenced code blocks", () => {
expect(parseZalouserTextStyles("> ```\n>> prompt\n> ```")).toEqual({
text: "> prompt",
styles: [],
});
});
it("keeps quote indentation on heading lines", () => {
expect(parseZalouserTextStyles("> # Title")).toEqual({
text: "Title",
styles: [
{ start: 0, len: 5, st: TextStyle.Bold },
{ start: 0, len: 5, st: TextStyle.Big },
{ start: 0, len: 5, st: TextStyle.Indent, indentSize: 1 },
],
});
});
it("keeps unmatched fences literal", () => {
expect(parseZalouserTextStyles("```python")).toEqual({
text: "```python",
styles: [],
});
});
it("keeps unclosed fenced blocks literal until eof", () => {
expect(parseZalouserTextStyles("```python\n\\*not italic*\n_next_")).toEqual({
text: "```python\n\\*not italic*\n_next_",
styles: [],
});
});
it("supports nested markdown and tag styles regardless of order", () => {
expect(parseZalouserTextStyles("**{red}x{/red}** {red}**y**{/red}")).toEqual({
text: "x y",
styles: [
{ start: 0, len: 1, st: TextStyle.Bold },
{ start: 0, len: 1, st: TextStyle.Red },
{ start: 2, len: 1, st: TextStyle.Red },
{ start: 2, len: 1, st: TextStyle.Bold },
],
});
});
it("treats small text tags as normal text", () => {
expect(parseZalouserTextStyles("{small}tiny{/small}")).toEqual({
text: "tiny",
styles: [],
});
});
it("keeps escaped markers literal", () => {
expect(parseZalouserTextStyles("\\*literal\\* \\{underline}tag{/underline}")).toEqual({
text: "*literal* {underline}tag{/underline}",
styles: [],
});
});
it("keeps indented code blocks literal", () => {
expect(parseZalouserTextStyles(" *cmd*")).toEqual({
text: "\u00A0\u00A0\u00A0\u00A0*cmd*",
styles: [],
});
});
});

View File

@ -0,0 +1,537 @@
import { TextStyle, type Style } from "./zca-client.js";
// Union of the inline style constants exposed by the zca client.
type InlineStyle = (typeof TextStyle)[keyof typeof TextStyle];
// A style applied to a whole output line (headings, lists, quote indents),
// keyed by the line's index in the processed output.
type LineStyle = {
  lineIndex: number;
  style: InlineStyle;
  indentSize?: number;
};
// A run of text sharing one exact stack of inline styles.
type Segment = {
  text: string;
  styles: InlineStyle[];
};
// One inline-markup rule: how to match it, what visible text it yields, and
// which styles it contributes. `literal` marks spans whose contents must NOT
// be re-parsed (code spans, escapes).
type InlineMarker = {
  pattern: RegExp;
  extractText: (match: RegExpExecArray) => string;
  resolveStyles?: (match: RegExpExecArray) => InlineStyle[];
  literal?: boolean;
};
// A marker match resolved against concrete text; `priority` is the marker's
// index in INLINE_MARKERS and breaks ties between same-position matches.
type ResolvedInlineMatch = {
  match: RegExpExecArray;
  marker: InlineMarker;
  styles: InlineStyle[];
  text: string;
  priority: number;
};
// An opening code-fence line: its fence character, run length, and leading
// space indent (used to strip matching indentation from fenced lines).
type FenceMarker = {
  char: "`" | "~";
  length: number;
  indent: number;
};
// A fence that is currently open, plus the blockquote depth it opened under
// (0 for unquoted fences).
type ActiveFence = FenceMarker & {
  quoteIndent: number;
};

// {tag}...{/tag} color/size tags mapped to Zalo styles. `null` (small) means
// the tag is stripped but contributes no style.
const TAG_STYLE_MAP: Record<string, InlineStyle | null> = {
  red: TextStyle.Red,
  orange: TextStyle.Orange,
  yellow: TextStyle.Yellow,
  green: TextStyle.Green,
  small: null,
  big: TextStyle.Big,
  underline: TextStyle.Underline,
};

// Inline rules in priority order: earlier entries win position ties, so code
// spans and backslash escapes shadow emphasis markers.
const INLINE_MARKERS: InlineMarker[] = [
  {
    // Inline code span: kept verbatim, backticks included.
    pattern: /`([^`\n]+)`/g,
    extractText: (match) => match[0],
    literal: true,
  },
  {
    // Backslash escape: emit the escaped character literally.
    pattern: /\\([*_~#\\{}>+\-`])/g,
    extractText: (match) => match[1],
    literal: true,
  },
  {
    // {tag}text{/tag} style tags built from TAG_STYLE_MAP's keys.
    pattern: new RegExp(`\\{(${Object.keys(TAG_STYLE_MAP).join("|")})\\}(.+?)\\{/\\1\\}`, "g"),
    extractText: (match) => match[2],
    resolveStyles: (match) => {
      const style = TAG_STYLE_MAP[match[1]];
      return style ? [style] : [];
    },
  },
  {
    // ***bold italic*** — guards reject adjacent asterisks and spaces.
    pattern: /(?<!\*)\*\*\*(?=\S)([^\n]*?\S)(?<!\*)\*\*\*(?!\*)/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Bold, TextStyle.Italic],
  },
  {
    // **bold**
    pattern: /(?<!\*)\*\*(?![\s*])([^\n]*?\S)(?<!\*)\*\*(?!\*)/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Bold],
  },
  {
    // __bold__ — word-boundary guards keep snake_case identifiers literal.
    pattern: /(?<![\w_])__(?![\s_])([^\n]*?\S)(?<!_)__(?![\w_])/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Bold],
  },
  {
    // ~~strikethrough~~
    pattern: /(?<!~)~~(?=\S)([^\n]*?\S)(?<!~)~~(?!~)/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.StrikeThrough],
  },
  {
    // *italic*
    pattern: /(?<!\*)\*(?![\s*])([^\n]*?\S)(?<!\*)\*(?!\*)/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Italic],
  },
  {
    // _italic_ — same word-boundary guards as __bold__.
    pattern: /(?<![\w_])_(?![\s_])([^\n]*?\S)(?<!_)_(?![\w_])/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Italic],
  },
];
/**
 * Converts Markdown-ish input into plain text plus Zalo style ranges.
 *
 * Pipeline:
 *  1. Line pass: normalize newlines, track fenced/indented code blocks,
 *     strip block markers (headings, quotes, lists), and record per-line
 *     styles by output-line index. Code-block content is escaped to
 *     \x01<n>\x02 placeholders so the inline pass leaves it untouched.
 *  2. Inline pass: tokenize the joined lines into styled segments and emit
 *     character-range styles.
 *  3. Placeholder restore: substitute escaped literals back and shift every
 *     style range by the accumulated length deltas.
 *  4. Line-style pass: turn the recorded per-line styles into character
 *     ranges over the final text.
 *
 * @param input Raw message text (Markdown plus {tag} styling).
 * @returns The display text and the style ranges to send with it.
 */
export function parseZalouserTextStyles(input: string): { text: string; styles: Style[] } {
  const allStyles: Style[] = [];
  // Literal characters swapped out of code spans/blocks, indexed by placeholder.
  const escapeMap: string[] = [];
  // Normalize CRLF/CR to LF before splitting.
  const lines = input.replace(/\r\n?/g, "\n").split("\n");
  const lineStyles: LineStyle[] = [];
  const processedLines: string[] = [];
  let activeFence: ActiveFence | null = null;
  for (let lineIndex = 0; lineIndex < lines.length; lineIndex += 1) {
    const rawLine = lines[lineIndex];
    const { text: unquotedLine, indent: baseIndent } = stripQuotePrefix(rawLine);
    if (activeFence) {
      // Inside a fenced code block: unwrap the quote prefix only as deep as
      // the fence itself was quoted, so inner `>` characters stay literal.
      const codeLine =
        activeFence.quoteIndent > 0
          ? stripQuotePrefix(rawLine, activeFence.quoteIndent).text
          : rawLine;
      if (isClosingFence(codeLine, activeFence)) {
        activeFence = null;
        continue;
      }
      // Code lines: strip the fence's indent, make leading whitespace
      // non-collapsible, and escape markup characters.
      processedLines.push(
        escapeLiteralText(
          normalizeCodeBlockLeadingWhitespace(stripCodeFenceIndent(codeLine, activeFence.indent)),
          escapeMap,
        ),
      );
      continue;
    }
    let line = unquotedLine;
    const openingFence = resolveOpeningFence(rawLine);
    if (openingFence) {
      const fenceLine = openingFence.quoteIndent > 0 ? unquotedLine : rawLine;
      if (!hasClosingFence(lines, lineIndex + 1, openingFence)) {
        // Unmatched fence: keep the fence line literal but still treat the
        // remainder of the input as code.
        processedLines.push(escapeLiteralText(fenceLine, escapeMap));
        activeFence = openingFence;
        continue;
      }
      // Matched fence: drop the fence marker line itself.
      activeFence = openingFence;
      continue;
    }
    const outputLineIndex = processedLines.length;
    if (isIndentedCodeBlockLine(line)) {
      // 4-space / tab indented code: literal, with quote depth preserved.
      if (baseIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: baseIndent,
        });
      }
      processedLines.push(escapeLiteralText(normalizeCodeBlockLeadingWhitespace(line), escapeMap));
      continue;
    }
    // 1-3 leading spaces count as markdown padding, not indentation.
    const { text: markdownLine, size: markdownPadding } = stripOptionalMarkdownPadding(line);
    const headingMatch = markdownLine.match(/^(#{1,4})\s(.*)$/);
    if (headingMatch) {
      const depth = headingMatch[1].length;
      lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.Bold });
      if (depth === 1) {
        // Only H1 also gets the Big style.
        lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.Big });
      }
      if (baseIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: baseIndent,
        });
      }
      processedLines.push(headingMatch[2]);
      continue;
    }
    const indentMatch = markdownLine.match(/^(\s+)(.*)$/);
    let indentLevel = 0;
    let content = markdownLine;
    if (indentMatch) {
      indentLevel = clampIndent(indentMatch[1].length);
      content = indentMatch[2];
    }
    // Combined quote + whitespace indent, capped at Zalo's max depth of 5.
    const totalIndent = Math.min(5, baseIndent + indentLevel);
    if (/^[-*+]\s\[[ xX]\]\s/.test(content)) {
      // Task-list items keep their checkbox text; only indentation applies.
      if (totalIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: totalIndent,
        });
      }
      processedLines.push(content);
      continue;
    }
    const orderedListMatch = content.match(/^(\d+)\.\s(.*)$/);
    if (orderedListMatch) {
      if (totalIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: totalIndent,
        });
      }
      lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.OrderedList });
      processedLines.push(orderedListMatch[2]);
      continue;
    }
    const unorderedListMatch = content.match(/^[-*+]\s(.*)$/);
    if (unorderedListMatch) {
      if (totalIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: totalIndent,
        });
      }
      lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.UnorderedList });
      processedLines.push(unorderedListMatch[1]);
      continue;
    }
    if (markdownPadding > 0) {
      // Padded but not a block construct: keep the line as-is (padding and
      // all), applying only quote indentation.
      if (baseIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: baseIndent,
        });
      }
      processedLines.push(line);
      continue;
    }
    if (totalIndent > 0) {
      lineStyles.push({
        lineIndex: outputLineIndex,
        style: TextStyle.Indent,
        indentSize: totalIndent,
      });
      processedLines.push(content);
      continue;
    }
    processedLines.push(line);
  }
  // Stage 2: inline markup over the whole processed text at once.
  const segments = parseInlineSegments(processedLines.join("\n"));
  let plainText = "";
  for (const segment of segments) {
    const start = plainText.length;
    plainText += segment.text;
    for (const style of segment.styles) {
      allStyles.push({ start, len: segment.text.length, st: style } as Style);
    }
  }
  // Stage 3: restore escaped literals and shift style offsets, since each
  // \x01<n>\x02 placeholder has a different length than the character it
  // stands for.
  if (escapeMap.length > 0) {
    const escapeRegex = /\x01(\d+)\x02/g;
    const shifts: Array<{ pos: number; delta: number }> = [];
    let cumulativeDelta = 0;
    for (const match of plainText.matchAll(escapeRegex)) {
      const escapeIndex = Number.parseInt(match[1], 10);
      cumulativeDelta += match[0].length - escapeMap[escapeIndex].length;
      shifts.push({ pos: (match.index ?? 0) + match[0].length, delta: cumulativeDelta });
    }
    for (const style of allStyles) {
      let startDelta = 0;
      let endDelta = 0;
      const end = style.start + style.len;
      for (const shift of shifts) {
        if (shift.pos <= style.start) {
          startDelta = shift.delta;
        }
        if (shift.pos <= end) {
          endDelta = shift.delta;
        }
      }
      style.start -= startDelta;
      style.len -= endDelta - startDelta;
    }
    plainText = plainText.replace(
      escapeRegex,
      (_match, index) => escapeMap[Number.parseInt(index, 10)],
    );
  }
  // Stage 4: materialize recorded per-line styles as character ranges over
  // the final text (empty lines get no styles).
  const finalLines = plainText.split("\n");
  let offset = 0;
  for (let lineIndex = 0; lineIndex < finalLines.length; lineIndex += 1) {
    const lineLength = finalLines[lineIndex].length;
    if (lineLength > 0) {
      for (const lineStyle of lineStyles) {
        if (lineStyle.lineIndex !== lineIndex) {
          continue;
        }
        if (lineStyle.style === TextStyle.Indent) {
          allStyles.push({
            start: offset,
            len: lineLength,
            st: TextStyle.Indent,
            indentSize: lineStyle.indentSize,
          });
        } else {
          allStyles.push({ start: offset, len: lineLength, st: lineStyle.style } as Style);
        }
      }
    }
    // +1 accounts for the newline consumed by split().
    offset += lineLength + 1;
  }
  return { text: plainText, styles: allStyles };
}
/**
 * Maps a leading-space count to an indent level: two spaces per level,
 * clamped into the [1, 5] range Zalo supports.
 */
function clampIndent(spaceCount: number): number {
  const level = Math.floor(spaceCount / 2);
  if (level < 1) {
    return 1;
  }
  return level > 5 ? 5 : level;
}
/**
 * Removes 1-3 leading spaces when they directly precede non-whitespace —
 * CommonMark treats that as optional padding, not indentation. Four or more
 * spaces are left intact (indented code).
 */
function stripOptionalMarkdownPadding(line: string): { text: string; size: number } {
  const padding = line.match(/^( {1,3})(?=\S)/)?.[1];
  if (padding === undefined) {
    return { text: line, size: 0 };
  }
  return { text: line.slice(padding.length), size: padding.length };
}
/**
 * Looks ahead from `startIndex` for a line that terminates `fence`. Quoted
 * fences are unwrapped from their quote prefix (to the fence's own depth)
 * before the closing-fence check.
 */
function hasClosingFence(lines: string[], startIndex: number, fence: ActiveFence): boolean {
  return lines.slice(startIndex).some((rawLine) => {
    const candidate =
      fence.quoteIndent > 0 ? stripQuotePrefix(rawLine, fence.quoteIndent).text : rawLine;
    return isClosingFence(candidate, fence);
  });
}
/**
 * Interprets `line` as an opening code fence, either directly or after
 * unwrapping a blockquote prefix. Returns null when the line opens no fence.
 */
function resolveOpeningFence(line: string): ActiveFence | null {
  // Unquoted fence takes precedence.
  const plainFence = parseFenceMarker(line);
  if (plainFence) {
    return { ...plainFence, quoteIndent: 0 };
  }
  const stripped = stripQuotePrefix(line);
  if (stripped.indent === 0) {
    // Not quoted either — no fence here.
    return null;
  }
  const quotedFence = parseFenceMarker(stripped.text);
  return quotedFence ? { ...quotedFence, quoteIndent: stripped.indent } : null;
}
/**
 * Removes up to `maxDepth` blockquote markers (`>`, each optionally followed
 * by one space) after at most three leading padding spaces.
 * Returns the remaining text and the quote depth removed (capped at 5);
 * depth 0 returns the line untouched, padding included.
 */
function stripQuotePrefix(
  line: string,
  maxDepth = Number.POSITIVE_INFINITY,
): { text: string; indent: number } {
  // Skip markdown padding (max three spaces) before the first marker.
  let pos = 0;
  while (pos < 3 && line[pos] === " ") {
    pos += 1;
  }
  let depth = 0;
  while (depth < maxDepth && line[pos] === ">") {
    depth += 1;
    pos += 1;
    // One space after each `>` belongs to the marker.
    if (line[pos] === " ") {
      pos += 1;
    }
  }
  if (depth === 0) {
    return { text: line, indent: 0 };
  }
  return { text: line.slice(pos), indent: Math.min(5, depth) };
}
/**
 * Parses a code-fence opener: up to three leading spaces, then three or more
 * backticks or tildes (an info string may follow). Returns null otherwise.
 */
function parseFenceMarker(line: string): FenceMarker | null {
  const match = /^([ ]{0,3})(`{3,}|~{3,})(.*)$/.exec(line);
  if (!match) {
    return null;
  }
  const fenceChar = match[2][0];
  // Defensive narrowing for the type system; the regex only admits ` or ~.
  if (fenceChar !== "`" && fenceChar !== "~") {
    return null;
  }
  return {
    char: fenceChar,
    length: match[2].length,
    indent: match[1].length,
  };
}
/**
 * True when `line` closes `fence`: same fence character, at least as many
 * marker characters, and nothing but trailing whitespace after them.
 */
function isClosingFence(line: string, fence: FenceMarker): boolean {
  const match = /^([ ]{0,3})(`{3,}|~{3,})[ \t]*$/.exec(line);
  if (match === null) {
    return false;
  }
  const marker = match[2];
  return marker[0] === fence.char && marker.length >= fence.length;
}
/**
 * Replaces markup characters in literal (code) text with \x01<index>\x02
 * placeholders so the inline parser cannot re-interpret them; the caller
 * restores them afterwards via `escapeMap`.
 */
function escapeLiteralText(input: string, escapeMap: string[]): string {
  // push() returns the new length, so the stored index is length - 1.
  return input.replace(/[\\*_~{}`]/g, (ch) => `\x01${escapeMap.push(ch) - 1}\x02`);
}
/**
 * Tokenizes `text` into styled segments by repeatedly taking the earliest
 * inline-marker match and recursing into non-literal matches so nested
 * markup (e.g. bold inside a color tag) accumulates styles.
 */
function parseInlineSegments(text: string, inheritedStyles: InlineStyle[] = []): Segment[] {
  const segments: Segment[] = [];
  let cursor = 0;
  while (cursor < text.length) {
    const nextMatch = findNextInlineMatch(text, cursor);
    if (!nextMatch) {
      // No more markers: the rest is plain text with the inherited styles.
      pushSegment(segments, text.slice(cursor), inheritedStyles);
      break;
    }
    if (nextMatch.match.index > cursor) {
      // Plain text between the cursor and the next marker.
      pushSegment(segments, text.slice(cursor, nextMatch.match.index), inheritedStyles);
    }
    const combinedStyles = [...inheritedStyles, ...nextMatch.styles];
    if (nextMatch.marker.literal) {
      // Literal markers (code spans, escapes) are emitted without re-parsing.
      pushSegment(segments, nextMatch.text, combinedStyles);
    } else {
      segments.push(...parseInlineSegments(nextMatch.text, combinedStyles));
    }
    cursor = nextMatch.match.index + nextMatch.match[0].length;
  }
  return segments;
}
function findNextInlineMatch(text: string, startIndex: number): ResolvedInlineMatch | null {
  // Probe every inline marker from startIndex and keep the earliest match;
  // ties on position are broken by marker order (lower index wins).
  let winner: ResolvedInlineMatch | null = null;
  for (const [priority, marker] of INLINE_MARKERS.entries()) {
    // Clone the pattern so mutating lastIndex never leaks across calls.
    const probe = new RegExp(marker.pattern.source, marker.pattern.flags);
    probe.lastIndex = startIndex;
    const candidate = probe.exec(text);
    if (!candidate) {
      continue;
    }
    const beatsWinner =
      !winner ||
      candidate.index < winner.match.index ||
      (candidate.index === winner.match.index && priority < winner.priority);
    if (beatsWinner) {
      winner = {
        match: candidate,
        marker,
        text: marker.extractText(candidate),
        styles: marker.resolveStyles?.(candidate) ?? [],
        priority,
      };
    }
  }
  return winner;
}
function pushSegment(segments: Segment[], text: string, styles: InlineStyle[]): void {
  // Empty text contributes nothing.
  if (text === "") {
    return;
  }
  const previous = segments[segments.length - 1];
  const stylesMatch =
    previous !== undefined &&
    previous.styles.length === styles.length &&
    previous.styles.every((style, index) => style === styles[index]);
  if (stylesMatch) {
    // Coalesce adjacent runs that share the exact same style list.
    previous.text += text;
  } else {
    // Copy the styles array so later caller mutations cannot leak in.
    segments.push({ text, styles: [...styles] });
  }
}
function sameStyles(left: InlineStyle[], right: InlineStyle[]): boolean {
  // Two style lists match only when they are element-wise identical.
  if (left.length !== right.length) {
    return false;
  }
  for (let i = 0; i < left.length; i += 1) {
    if (left[i] !== right[i]) {
      return false;
    }
  }
  return true;
}
function normalizeCodeBlockLeadingWhitespace(line: string): string {
  // Rewrite the leading indent with non-breaking spaces (tab = four cells)
  // so renderers cannot collapse code-block indentation.
  return line.replace(/^[ \t]+/, (indent) => {
    let normalized = "";
    for (const ch of indent) {
      normalized += ch === "\t" ? "\u00A0\u00A0\u00A0\u00A0" : "\u00A0";
    }
    return normalized;
  });
}
function isIndentedCodeBlockLine(line: string): boolean {
  // An indented code-block line starts with a tab or at least four spaces.
  return line.startsWith("\t") || /^ {4}/.test(line);
}
function stripCodeFenceIndent(line: string, indent: number): string {
  // Remove at most `indent` leading spaces (the opening fence's own
  // indentation); stop early at any non-space character, including tabs.
  let removed = 0;
  while (removed < indent && removed < line.length && line[removed] === " ") {
    removed += 1;
  }
  return line.slice(removed);
}

View File

@ -1,3 +1,5 @@
import type { Style } from "./zca-client.js";
export type ZcaFriend = {
userId: string;
displayName: string;
@ -59,6 +61,10 @@ export type ZaloSendOptions = {
caption?: string;
isGroup?: boolean;
mediaLocalRoots?: readonly string[];
textMode?: "markdown" | "plain";
textChunkMode?: "length" | "newline";
textChunkLimit?: number;
textStyles?: Style[];
};
export type ZaloSendResult = {

View File

@ -20,6 +20,7 @@ import type {
} from "./types.js";
import {
LoginQRCallbackEventType,
TextStyle,
ThreadType,
Zalo,
type API,
@ -136,6 +137,39 @@ function toErrorMessage(error: unknown): string {
return String(error);
}
function clampTextStyles(
text: string,
styles?: ZaloSendOptions["textStyles"],
): ZaloSendOptions["textStyles"] {
if (!styles || styles.length === 0) {
return undefined;
}
const maxLength = text.length;
const clamped = styles
.map((style) => {
const start = Math.max(0, Math.min(style.start, maxLength));
const end = Math.min(style.start + style.len, maxLength);
if (end <= start) {
return null;
}
if (style.st === TextStyle.Indent) {
return {
start,
len: end - start,
st: style.st,
indentSize: style.indentSize,
};
}
return {
start,
len: end - start,
st: style.st,
};
})
.filter((style): style is NonNullable<typeof style> => style !== null);
return clamped.length > 0 ? clamped : undefined;
}
function toNumberId(value: unknown): string {
if (typeof value === "number" && Number.isFinite(value)) {
return String(Math.trunc(value));
@ -1018,11 +1052,16 @@ export async function sendZaloTextMessage(
kind: media.kind,
});
const payloadText = (text || options.caption || "").slice(0, 2000);
const textStyles = clampTextStyles(payloadText, options.textStyles);
if (media.kind === "audio") {
let textMessageId: string | undefined;
if (payloadText) {
const textResponse = await api.sendMessage(payloadText, trimmedThreadId, type);
const textResponse = await api.sendMessage(
textStyles ? { msg: payloadText, styles: textStyles } : payloadText,
trimmedThreadId,
type,
);
textMessageId = extractSendMessageId(textResponse);
}
@ -1055,6 +1094,7 @@ export async function sendZaloTextMessage(
const response = await api.sendMessage(
{
msg: payloadText,
...(textStyles ? { styles: textStyles } : {}),
attachments: [
{
data: media.buffer,
@ -1071,7 +1111,13 @@ export async function sendZaloTextMessage(
return { ok: true, messageId: extractSendMessageId(response) };
}
const response = await api.sendMessage(text.slice(0, 2000), trimmedThreadId, type);
const payloadText = text.slice(0, 2000);
const textStyles = clampTextStyles(payloadText, options.textStyles);
const response = await api.sendMessage(
textStyles ? { msg: payloadText, styles: textStyles } : payloadText,
trimmedThreadId,
type,
);
return { ok: true, messageId: extractSendMessageId(response) };
} catch (error) {
return { ok: false, error: toErrorMessage(error) };

View File

@ -28,6 +28,39 @@ export const Reactions = ReactionsRuntime as Record<string, string> & {
NONE: string;
};
// Mirror zca-js sendMessage style constants locally because the package root
// typing surface does not consistently expose TextStyle/Style to tsgo.
export const TextStyle = {
Bold: "b",
Italic: "i",
Underline: "u",
StrikeThrough: "s",
Red: "c_db342e",
Orange: "c_f27806",
Yellow: "c_f7b503",
Green: "c_15a85f",
Small: "f_13",
Big: "f_18",
UnorderedList: "lst_1",
OrderedList: "lst_2",
Indent: "ind_$",
} as const;
type TextStyleValue = (typeof TextStyle)[keyof typeof TextStyle];
export type Style =
| {
start: number;
len: number;
st: Exclude<TextStyleValue, typeof TextStyle.Indent>;
}
| {
start: number;
len: number;
st: typeof TextStyle.Indent;
indentSize?: number;
};
export type Credentials = {
imei: string;
cookie: unknown;

View File

@ -1,6 +1,6 @@
{
"name": "openclaw",
"version": "2026.3.9",
"version": "2026.3.11",
"description": "Multi-channel AI gateway with extensible messaging integrations",
"keywords": [],
"homepage": "https://github.com/openclaw/openclaw#readme",
@ -338,11 +338,11 @@
"ui:install": "node scripts/ui.js install"
},
"dependencies": {
"@agentclientprotocol/sdk": "0.15.0",
"@aws-sdk/client-bedrock": "^3.1004.0",
"@agentclientprotocol/sdk": "0.16.1",
"@aws-sdk/client-bedrock": "^3.1007.0",
"@buape/carbon": "0.0.0-beta-20260216184201",
"@clack/prompts": "^1.1.0",
"@discordjs/voice": "^0.19.0",
"@discordjs/voice": "^0.19.1",
"@grammyjs/runner": "^2.0.3",
"@grammyjs/transformer-throttler": "^1.2.1",
"@homebridge/ciao": "^1.3.5",
@ -364,13 +364,13 @@
"cli-highlight": "^2.1.11",
"commander": "^14.0.3",
"croner": "^10.0.1",
"discord-api-types": "^0.38.41",
"discord-api-types": "^0.38.42",
"dotenv": "^17.3.1",
"express": "^5.2.1",
"file-type": "^21.3.1",
"grammy": "^1.41.1",
"hono": "4.12.7",
"https-proxy-agent": "^7.0.6",
"https-proxy-agent": "^8.0.0",
"ipaddr.js": "^2.3.0",
"jiti": "^2.6.1",
"json5": "^2.2.3",
@ -399,18 +399,18 @@
"@lit/context": "^1.1.6",
"@types/express": "^5.0.6",
"@types/markdown-it": "^14.1.2",
"@types/node": "^25.3.5",
"@types/node": "^25.4.0",
"@types/qrcode-terminal": "^0.12.2",
"@types/ws": "^8.18.1",
"@typescript/native-preview": "7.0.0-dev.20260308.1",
"@typescript/native-preview": "7.0.0-dev.20260311.1",
"@vitest/coverage-v8": "^4.0.18",
"jscpd": "4.0.8",
"lit": "^3.3.2",
"oxfmt": "0.36.0",
"oxlint": "^1.51.0",
"oxfmt": "0.38.0",
"oxlint": "^1.53.0",
"oxlint-tsgolint": "^0.16.0",
"signal-utils": "0.21.1",
"tsdown": "0.21.0",
"tsdown": "0.21.2",
"tsx": "^4.21.0",
"typescript": "^5.9.3",
"vitest": "^4.0.18"

1374
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@ -73,7 +73,7 @@ fi
if [[ "${PACKAGE_VERSION}" =~ ^([0-9]{4}\.[0-9]{1,2}\.[0-9]{1,2})([.-]?beta[.-][0-9]+)?$ ]]; then
MARKETING_VERSION="${BASH_REMATCH[1]}"
else
echo "Unsupported package.json.version '${PACKAGE_VERSION}'. Expected 2026.3.9 or 2026.3.9-beta.1." >&2
echo "Unsupported package.json.version '${PACKAGE_VERSION}'. Expected 2026.3.11 or 2026.3.11-beta.1." >&2
exit 1
fi

View File

@ -44,11 +44,11 @@ import {
type TurnLatencyStats,
} from "./manager.types.js";
import {
canonicalizeAcpSessionKey,
createUnsupportedControlError,
hasLegacyAcpIdentityProjection,
normalizeAcpErrorCode,
normalizeActorKey,
normalizeSessionKey,
requireReadySessionMeta,
resolveAcpAgentFromSessionKey,
resolveAcpSessionResolutionError,
@ -87,7 +87,7 @@ export class AcpSessionManager {
constructor(private readonly deps: AcpSessionManagerDeps = DEFAULT_DEPS) {}
resolveSession(params: { cfg: OpenClawConfig; sessionKey: string }): AcpSessionResolution {
const sessionKey = normalizeSessionKey(params.sessionKey);
const sessionKey = canonicalizeAcpSessionKey(params);
if (!sessionKey) {
return {
kind: "none",
@ -213,7 +213,10 @@ export class AcpSessionManager {
handle: AcpRuntimeHandle;
meta: SessionAcpMeta;
}> {
const sessionKey = normalizeSessionKey(input.sessionKey);
const sessionKey = canonicalizeAcpSessionKey({
cfg: input.cfg,
sessionKey: input.sessionKey,
});
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
}
@ -321,7 +324,7 @@ export class AcpSessionManager {
sessionKey: string;
signal?: AbortSignal;
}): Promise<AcpSessionStatus> {
const sessionKey = normalizeSessionKey(params.sessionKey);
const sessionKey = canonicalizeAcpSessionKey(params);
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
}
@ -397,7 +400,7 @@ export class AcpSessionManager {
sessionKey: string;
runtimeMode: string;
}): Promise<AcpSessionRuntimeOptions> {
const sessionKey = normalizeSessionKey(params.sessionKey);
const sessionKey = canonicalizeAcpSessionKey(params);
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
}
@ -452,7 +455,7 @@ export class AcpSessionManager {
key: string;
value: string;
}): Promise<AcpSessionRuntimeOptions> {
const sessionKey = normalizeSessionKey(params.sessionKey);
const sessionKey = canonicalizeAcpSessionKey(params);
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
}
@ -525,7 +528,7 @@ export class AcpSessionManager {
sessionKey: string;
patch: Partial<AcpSessionRuntimeOptions>;
}): Promise<AcpSessionRuntimeOptions> {
const sessionKey = normalizeSessionKey(params.sessionKey);
const sessionKey = canonicalizeAcpSessionKey(params);
const validatedPatch = validateRuntimeOptionPatch(params.patch);
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
@ -555,7 +558,7 @@ export class AcpSessionManager {
cfg: OpenClawConfig;
sessionKey: string;
}): Promise<AcpSessionRuntimeOptions> {
const sessionKey = normalizeSessionKey(params.sessionKey);
const sessionKey = canonicalizeAcpSessionKey(params);
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
}
@ -591,7 +594,10 @@ export class AcpSessionManager {
}
async runTurn(input: AcpRunTurnInput): Promise<void> {
const sessionKey = normalizeSessionKey(input.sessionKey);
const sessionKey = canonicalizeAcpSessionKey({
cfg: input.cfg,
sessionKey: input.sessionKey,
});
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
}
@ -738,7 +744,7 @@ export class AcpSessionManager {
sessionKey: string;
reason?: string;
}): Promise<void> {
const sessionKey = normalizeSessionKey(params.sessionKey);
const sessionKey = canonicalizeAcpSessionKey(params);
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
}
@ -806,7 +812,10 @@ export class AcpSessionManager {
}
async closeSession(input: AcpCloseSessionInput): Promise<AcpCloseSessionResult> {
const sessionKey = normalizeSessionKey(input.sessionKey);
const sessionKey = canonicalizeAcpSessionKey({
cfg: input.cfg,
sessionKey: input.sessionKey,
});
if (!sessionKey) {
throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required.");
}

View File

@ -170,6 +170,57 @@ describe("AcpSessionManager", () => {
expect(resolved.error.message).toContain("ACP metadata is missing");
});
it("canonicalizes the main alias before ACP rehydrate after restart", async () => {
const runtimeState = createRuntime();
hoisted.requireAcpRuntimeBackendMock.mockReturnValue({
id: "acpx",
runtime: runtimeState.runtime,
});
hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => {
const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey;
if (sessionKey !== "agent:main:main") {
return null;
}
return {
sessionKey,
storeSessionKey: sessionKey,
acp: {
...readySessionMeta(),
agent: "main",
runtimeSessionName: sessionKey,
},
};
});
const manager = new AcpSessionManager();
const cfg = {
...baseCfg,
session: { mainKey: "main" },
agents: { list: [{ id: "main", default: true }] },
} as OpenClawConfig;
await manager.runTurn({
cfg,
sessionKey: "main",
text: "after restart",
mode: "prompt",
requestId: "r-main",
});
expect(hoisted.readAcpSessionEntryMock).toHaveBeenCalledWith(
expect.objectContaining({
cfg,
sessionKey: "agent:main:main",
}),
);
expect(runtimeState.ensureSession).toHaveBeenCalledWith(
expect.objectContaining({
agent: "main",
sessionKey: "agent:main:main",
}),
);
});
it("serializes concurrent turns for the same ACP session", async () => {
const runtimeState = createRuntime();
hoisted.requireAcpRuntimeBackendMock.mockReturnValue({

View File

@ -1,6 +1,14 @@
import type { OpenClawConfig } from "../../config/config.js";
import {
canonicalizeMainSessionAlias,
resolveMainSessionKey,
} from "../../config/sessions/main-session.js";
import type { SessionAcpMeta } from "../../config/sessions/types.js";
import { normalizeAgentId, parseAgentSessionKey } from "../../routing/session-key.js";
import {
normalizeAgentId,
normalizeMainKey,
parseAgentSessionKey,
} from "../../routing/session-key.js";
import { ACP_ERROR_CODES, AcpRuntimeError } from "../runtime/errors.js";
import type { AcpSessionResolution } from "./manager.types.js";
@ -42,6 +50,33 @@ export function normalizeSessionKey(sessionKey: string): string {
return sessionKey.trim();
}
export function canonicalizeAcpSessionKey(params: {
cfg: OpenClawConfig;
sessionKey: string;
}): string {
const normalized = normalizeSessionKey(params.sessionKey);
if (!normalized) {
return "";
}
const lowered = normalized.toLowerCase();
if (lowered === "global" || lowered === "unknown") {
return lowered;
}
const parsed = parseAgentSessionKey(lowered);
if (parsed) {
return canonicalizeMainSessionAlias({
cfg: params.cfg,
agentId: parsed.agentId,
sessionKey: lowered,
});
}
const mainKey = normalizeMainKey(params.cfg.session?.mainKey);
if (lowered === "main" || lowered === mainKey) {
return resolveMainSessionKey(params.cfg);
}
return lowered;
}
export function normalizeActorKey(sessionKey: string): string {
return sessionKey.trim().toLowerCase();
}

View File

@ -52,7 +52,7 @@ function createSetSessionModeRequest(sessionId: string, modeId: string): SetSess
function createSetSessionConfigOptionRequest(
sessionId: string,
configId: string,
value: string,
value: string | boolean,
): SetSessionConfigOptionRequest {
return {
sessionId,
@ -644,6 +644,55 @@ describe("acp setSessionConfigOption bridge behavior", () => {
sessionStore.clearAllSessionsForTest();
});
it("rejects non-string ACP config option values", async () => {
const sessionStore = createInMemorySessionStore();
const connection = createAcpConnection();
const request = vi.fn(async (method: string) => {
if (method === "sessions.list") {
return {
ts: Date.now(),
path: "/tmp/sessions.json",
count: 1,
defaults: {
modelProvider: null,
model: null,
contextTokens: null,
},
sessions: [
{
key: "bool-config-session",
kind: "direct",
updatedAt: Date.now(),
thinkingLevel: "minimal",
modelProvider: "openai",
model: "gpt-5.4",
},
],
};
}
return { ok: true };
}) as GatewayClient["request"];
const agent = new AcpGatewayAgent(connection, createAcpGateway(request), {
sessionStore,
});
await agent.loadSession(createLoadSessionRequest("bool-config-session"));
await expect(
agent.setSessionConfigOption(
createSetSessionConfigOptionRequest("bool-config-session", "thought_level", false),
),
).rejects.toThrow(
'ACP bridge does not support non-string session config option values for "thought_level".',
);
expect(request).not.toHaveBeenCalledWith(
"sessions.patch",
expect.objectContaining({ key: "bool-config-session" }),
);
sessionStore.clearAllSessionsForTest();
});
});
describe("acp tool streaming bridge behavior", () => {

View File

@ -937,11 +937,16 @@ export class AcpGatewayAgent implements Agent {
private resolveSessionConfigPatch(
configId: string,
value: string,
value: string | boolean,
): {
overrides: Partial<GatewaySessionPresentationRow>;
patch: Record<string, string>;
} {
if (typeof value !== "string") {
throw new Error(
`ACP bridge does not support non-string session config option values for "${configId}".`,
);
}
switch (configId) {
case ACP_THOUGHT_LEVEL_CONFIG_ID:
return {

View File

@ -207,7 +207,7 @@ describe("resolveProfilesUnavailableReason", () => {
).toBe("overloaded");
});
it("falls back to rate_limit when active cooldown has no reason history", () => {
it("falls back to unknown when active cooldown has no reason history", () => {
const now = Date.now();
const store = makeStore({
"anthropic:default": {
@ -221,7 +221,7 @@ describe("resolveProfilesUnavailableReason", () => {
profileIds: ["anthropic:default"],
now,
}),
).toBe("rate_limit");
).toBe("unknown");
});
it("ignores expired windows and returns null when no profile is actively unavailable", () => {

View File

@ -110,7 +110,11 @@ export function resolveProfilesUnavailableReason(params: {
recordedReason = true;
}
if (!recordedReason) {
addScore("rate_limit", 1);
// No failure counts recorded for this cooldown window. Previously this
// defaulted to "rate_limit", which caused false "rate limit reached"
// warnings when the actual reason was unknown (e.g. transient network
// blip or server error without a classified failure count).
addScore("unknown", 1);
}
}

View File

@ -274,6 +274,8 @@ describe("failover-error", () => {
it("infers timeout from common node error codes", () => {
expect(resolveFailoverReasonFromError({ code: "ETIMEDOUT" })).toBe("timeout");
expect(resolveFailoverReasonFromError({ code: "ECONNRESET" })).toBe("timeout");
expect(resolveFailoverReasonFromError({ code: "EHOSTDOWN" })).toBe("timeout");
expect(resolveFailoverReasonFromError({ code: "EPIPE" })).toBe("timeout");
});
it("infers timeout from abort/error stop-reason messages", () => {

Some files were not shown because too many files have changed in this diff Show More