Merge branch 'main' into feature/before-dispatch-hook
This commit is contained in:
commit
9e76a214d8
@ -12991,7 +12991,7 @@
|
||||
"filename": "ui/src/i18n/locales/en.ts",
|
||||
"hashed_secret": "de0ff6b974d6910aca8d6b830e1b761f076d8fe6",
|
||||
"is_verified": false,
|
||||
"line_number": 61
|
||||
"line_number": 74
|
||||
}
|
||||
],
|
||||
"ui/src/i18n/locales/pt-BR.ts": [
|
||||
@ -13000,7 +13000,7 @@
|
||||
"filename": "ui/src/i18n/locales/pt-BR.ts",
|
||||
"hashed_secret": "ef7b6f95faca2d7d3a5aa5a6434c89530c6dd243",
|
||||
"is_verified": false,
|
||||
"line_number": 61
|
||||
"line_number": 73
|
||||
}
|
||||
],
|
||||
"vendor/a2ui/README.md": [
|
||||
|
||||
31
CHANGELOG.md
31
CHANGELOG.md
@ -6,6 +6,33 @@ Docs: https://docs.openclaw.ai
|
||||
|
||||
### Security
|
||||
|
||||
- Security/exec approvals: escape invisible Unicode format characters in approval prompts so zero-width command text renders as visible `\u{...}` escapes instead of spoofing the reviewed command. (`GHSA-pcqg-f7rg-xfvv`)(#43687) Thanks @EkiXu and @vincentkoc.
|
||||
- Security/device pairing: cap issued and verified device-token scopes to each paired device's approved scope baseline so stale or overbroad tokens cannot exceed approved access. (`GHSA-2pwv-x786-56f8`)(#43686) Thanks @tdjackey and @vincentkoc.
|
||||
- Security/proxy attachments: restore the shared media-store size cap for persisted browser proxy files so oversized payloads are rejected instead of overriding the intended 5 MB limit. (`GHSA-6rph-mmhp-h7h9`)(#43684) Thanks @tdjackey and @vincentkoc.
|
||||
- Security/host env: block inherited `GIT_EXEC_PATH` from sanitized host exec environments so Git helper resolution cannot be steered by host environment state. (`GHSA-jf5v-pqgw-gm5m`)(#43685) Thanks @zpbrent and @vincentkoc.
|
||||
- Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via `session_status`. (`GHSA-wcxr-59v9-rxr8`)(#43754) Thanks @tdjackey and @vincentkoc.
|
||||
- Models/secrets: enforce source-managed SecretRef markers in generated `models.json` so runtime-resolved provider secrets are not persisted when runtime projection is skipped. (#43759) Thanks @joshavant.
|
||||
- Security/browser.request: block persistent browser profile create/delete routes from write-scoped `browser.request` so callers can no longer persist admin-only browser profile changes through the browser control surface. (`GHSA-vmhq-cqm9-6p7q`)(#43800) Thanks @tdjackey and @vincentkoc.
|
||||
- Security/agent: reject public spawned-run lineage fields and keep workspace inheritance on the internal spawned-session path so external `agent` callers can no longer override the gateway workspace boundary. (`GHSA-2rqg-gjgv-84jm`)(#43801) Thanks @tdjackey and @vincentkoc.
|
||||
- Security/exec allowlist: preserve POSIX case sensitivity and keep `?` within a single path segment so exact-looking allowlist patterns no longer overmatch executables across case or directory boundaries. (`GHSA-f8r2-vg7x-gh8m`)(#43798) Thanks @zpbrent and @vincentkoc.
|
||||
|
||||
### Changes
|
||||
|
||||
### Fixes
|
||||
|
||||
- Cron/proactive delivery: keep isolated direct cron sends out of the write-ahead resend queue so transient-send retries do not replay duplicate proactive messages after restart. (#40646) Thanks @openperf and @vincentkoc.
|
||||
- TUI/chat log: reuse the active assistant message component for the same streaming run so `openclaw tui` no longer renders duplicate assistant replies. (#35364) Thanks @lisitan.
|
||||
- macOS/Reminders: add the missing `NSRemindersUsageDescription` to the bundled app so `apple-reminders` can trigger the system permission prompt from OpenClaw.app. (#8559) Thanks @dinakars777.
|
||||
- iMessage/self-chat echo dedupe: drop reflected duplicate copies only when a matching `is_from_me` event was just seen for the same chat, text, and `created_at`, preventing self-chat loops without broad text-only suppression. Related to #32166. (#38440) Thanks @vincentkoc.
|
||||
- Mattermost/block streaming: fix duplicate message delivery (one threaded, one top-level) when block streaming is active by excluding `replyToId` from the block reply dedup key and adding an explicit `threading` dock to the Mattermost plugin. (#41362) Thanks @mathiasnagler and @vincentkoc.
|
||||
- BlueBubbles/self-chat echo dedupe: drop reflected duplicate webhook copies only when a matching `fromMe` event was just seen for the same chat, body, and timestamp, preventing self-chat loops without broad webhook suppression. Related to #32166. (#38442) Thanks @vincentkoc.
|
||||
- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz.
|
||||
- Sandbox/write: preserve pinned mutation-helper payload stdin so sandboxed `write` no longer reports success while creating empty files. (#43876) Thanks @glitch418x.
|
||||
|
||||
## 2026.3.11
|
||||
|
||||
### Security
|
||||
|
||||
- Gateway/WebSocket: enforce browser origin validation for all browser-originated connections regardless of whether proxy headers are present, closing a cross-site WebSocket hijacking path in `trusted-proxy` mode that could grant untrusted origins `operator.admin` access. (GHSA-5wcw-8jjv-m286)
|
||||
|
||||
### Changes
|
||||
@ -25,6 +52,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Gateway/node pending work: add narrow in-memory pending-work queue primitives (`node.pending.enqueue` / `node.pending.drain`) and wake-helper reuse as a foundation for dormant-node work delivery. (#41409) Thanks @mbelinky.
|
||||
- Git/runtime state: ignore the gateway-generated `.dev-state` file so local runtime state does not show up as untracked repo noise. (#41848) Thanks @smysle.
|
||||
- Exec/child commands: mark child command environments with `OPENCLAW_CLI` so subprocesses can detect when they were launched from the OpenClaw CLI. (#41411) Thanks @vincentkoc.
|
||||
- LLM Task/Lobster: add an optional `thinking` override so workflow calls can explicitly set embedded reasoning level with shared validation for invalid values and unsupported `xhigh` modes. (#15606) Thanks @xadenryan and @ImLukeF.
|
||||
|
||||
### Breaking
|
||||
|
||||
@ -44,6 +72,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Telegram/final preview delivery: split active preview lifecycle from cleanup retention so missing archived preview edits avoid duplicate fallback sends without clearing the live preview or blocking later in-place finalization. (#41662) thanks @hougangdev.
|
||||
- Telegram/final preview delivery followup: keep ambiguous missing-`message_id` finals only when a preview was already visible, while first-preview/no-id cases still fall back so Telegram users do not lose the final reply. (#41932) thanks @hougangdev.
|
||||
- Telegram/final preview cleanup follow-up: clear stale cleanup-retain state only for transient preview finals so archived-preview retains no longer leave a stale partial bubble beside a later fallback-sent final. (#41763) Thanks @obviyus.
|
||||
- Telegram/poll restarts: scope process-level polling restarts to real Telegram `getUpdates` failures so unrelated network errors, such as Slack DNS misses, no longer bounce Telegram polling. (#43799) Thanks @obviyus.
|
||||
- Gateway/auth: allow one trusted device-token retry on shared-token mismatch with recovery hints to prevent reconnect churn during token drift. (#42507) Thanks @joshavant.
|
||||
- Gateway/config errors: surface up to three validation issues in top-level `config.set`, `config.patch`, and `config.apply` error messages while preserving structured issue details. (#42664) Thanks @huntharo.
|
||||
- Agents/Azure OpenAI Responses: include the `azure-openai` provider in the Responses API store override so Azure OpenAI multi-turn cron jobs and embedded agent runs no longer fail with HTTP 400 "store is set to false". (#42934, fixes #42800) Thanks @ademczuk.
|
||||
@ -86,7 +115,6 @@ Docs: https://docs.openclaw.ai
|
||||
- Security/system.run: fail closed for approval-backed interpreter/runtime commands when OpenClaw cannot bind exactly one concrete local file operand, while extending best-effort direct-file binding to additional runtime forms. Thanks @tdjackey for reporting.
|
||||
- Gateway/session reset auth: split conversation `/new` and `/reset` handling away from the admin-only `sessions.reset` control-plane RPC so write-scoped gateway callers can no longer reach the privileged reset path through `agent`. Thanks @tdjackey for reporting.
|
||||
- Security/plugin runtime: stop unauthenticated plugin HTTP routes from inheriting synthetic admin gateway scopes when they call `runtime.subagent.*`, so admin-only methods like `sessions.delete` stay blocked without gateway auth.
|
||||
- Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via `session_status`.
|
||||
- Security/nodes: treat the `nodes` agent tool as owner-only fallback policy so non-owner senders cannot reach paired-node approval or invoke paths through the shared tool set.
|
||||
- Security/external content: treat whitespace-delimited `EXTERNAL UNTRUSTED CONTENT` boundary markers like underscore-delimited variants so prompt wrappers cannot bypass marker sanitization. (#35983) Thanks @urianpaul94.
|
||||
- Telegram/exec approvals: reject `/approve` commands aimed at other bots, keep deterministic approval prompts visible when tool-result delivery fails, and stop resolved exact IDs from matching other pending approvals by prefix. (#37233) Thanks @huntharo.
|
||||
@ -198,6 +226,7 @@ Docs: https://docs.openclaw.ai
|
||||
- macOS/browser proxy: serialize non-GET browser proxy request bodies through `AnyCodable.foundationValue` so nested JSON bodies no longer crash the macOS app with `Invalid type in JSON write (__SwiftValue)`. (#43069) Thanks @Effet.
|
||||
- CLI/skills tables: keep terminal table borders aligned for wide graphemes, use full reported terminal width, and switch a few ambiguous skill icons to Terminal-safe emoji so `openclaw skills` renders more consistently in Terminal.app and iTerm. Thanks @vincentkoc.
|
||||
- Memory/Gemini: normalize returned Gemini embeddings across direct query, direct batch, and async batch paths so memory search uses consistent vector handling for Gemini too. (#43409) Thanks @gumadeiras.
|
||||
- Agents/failover: recognize additional serialized network errno strings plus `EHOSTDOWN` and `EPIPE` structured codes so transient transport failures trigger timeout failover more reliably. (#42830) Thanks @jnMetaCode.
|
||||
|
||||
## 2026.3.7
|
||||
|
||||
|
||||
@ -64,7 +64,7 @@ android {
|
||||
minSdk = 31
|
||||
targetSdk = 36
|
||||
versionCode = 202603110
|
||||
versionName = "2026.3.11-beta.1"
|
||||
versionName = "2026.3.11"
|
||||
ndk {
|
||||
// Support all major ABIs — native libs are tiny (~47 KB per ABI)
|
||||
abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64")
|
||||
|
||||
@ -17,6 +17,7 @@ enum HostEnvSecurityPolicy {
|
||||
"BASH_ENV",
|
||||
"ENV",
|
||||
"GIT_EXTERNAL_DIFF",
|
||||
"GIT_EXEC_PATH",
|
||||
"SHELL",
|
||||
"SHELLOPTS",
|
||||
"PS4",
|
||||
|
||||
@ -15,7 +15,7 @@
|
||||
<key>CFBundlePackageType</key>
|
||||
<string>APPL</string>
|
||||
<key>CFBundleShortVersionString</key>
|
||||
<string>2026.3.11-beta.1</string>
|
||||
<string>2026.3.11</string>
|
||||
<key>CFBundleVersion</key>
|
||||
<string>202603110</string>
|
||||
<key>CFBundleIconFile</key>
|
||||
@ -59,6 +59,8 @@
|
||||
<string>OpenClaw uses speech recognition to detect your Voice Wake trigger phrase.</string>
|
||||
<key>NSAppleEventsUsageDescription</key>
|
||||
<string>OpenClaw needs Automation (AppleScript) permission to drive Terminal and other apps for agent actions.</string>
|
||||
<key>NSRemindersUsageDescription</key>
|
||||
<string>OpenClaw can access Reminders when requested by the agent for the apple-reminders skill.</string>
|
||||
|
||||
<key>NSAppTransportSecurity</key>
|
||||
<dict>
|
||||
|
||||
@ -538,8 +538,6 @@ public struct AgentParams: Codable, Sendable {
|
||||
public let inputprovenance: [String: AnyCodable]?
|
||||
public let idempotencykey: String
|
||||
public let label: String?
|
||||
public let spawnedby: String?
|
||||
public let workspacedir: String?
|
||||
|
||||
public init(
|
||||
message: String,
|
||||
@ -566,9 +564,7 @@ public struct AgentParams: Codable, Sendable {
|
||||
internalevents: [[String: AnyCodable]]?,
|
||||
inputprovenance: [String: AnyCodable]?,
|
||||
idempotencykey: String,
|
||||
label: String?,
|
||||
spawnedby: String?,
|
||||
workspacedir: String?)
|
||||
label: String?)
|
||||
{
|
||||
self.message = message
|
||||
self.agentid = agentid
|
||||
@ -595,8 +591,6 @@ public struct AgentParams: Codable, Sendable {
|
||||
self.inputprovenance = inputprovenance
|
||||
self.idempotencykey = idempotencykey
|
||||
self.label = label
|
||||
self.spawnedby = spawnedby
|
||||
self.workspacedir = workspacedir
|
||||
}
|
||||
|
||||
private enum CodingKeys: String, CodingKey {
|
||||
@ -625,8 +619,6 @@ public struct AgentParams: Codable, Sendable {
|
||||
case inputprovenance = "inputProvenance"
|
||||
case idempotencykey = "idempotencyKey"
|
||||
case label
|
||||
case spawnedby = "spawnedBy"
|
||||
case workspacedir = "workspaceDir"
|
||||
}
|
||||
}
|
||||
|
||||
@ -1336,6 +1328,7 @@ public struct SessionsPatchParams: Codable, Sendable {
|
||||
public let execnode: AnyCodable?
|
||||
public let model: AnyCodable?
|
||||
public let spawnedby: AnyCodable?
|
||||
public let spawnedworkspacedir: AnyCodable?
|
||||
public let spawndepth: AnyCodable?
|
||||
public let subagentrole: AnyCodable?
|
||||
public let subagentcontrolscope: AnyCodable?
|
||||
@ -1356,6 +1349,7 @@ public struct SessionsPatchParams: Codable, Sendable {
|
||||
execnode: AnyCodable?,
|
||||
model: AnyCodable?,
|
||||
spawnedby: AnyCodable?,
|
||||
spawnedworkspacedir: AnyCodable?,
|
||||
spawndepth: AnyCodable?,
|
||||
subagentrole: AnyCodable?,
|
||||
subagentcontrolscope: AnyCodable?,
|
||||
@ -1375,6 +1369,7 @@ public struct SessionsPatchParams: Codable, Sendable {
|
||||
self.execnode = execnode
|
||||
self.model = model
|
||||
self.spawnedby = spawnedby
|
||||
self.spawnedworkspacedir = spawnedworkspacedir
|
||||
self.spawndepth = spawndepth
|
||||
self.subagentrole = subagentrole
|
||||
self.subagentcontrolscope = subagentcontrolscope
|
||||
@ -1396,6 +1391,7 @@ public struct SessionsPatchParams: Codable, Sendable {
|
||||
case execnode = "execNode"
|
||||
case model
|
||||
case spawnedby = "spawnedBy"
|
||||
case spawnedworkspacedir = "spawnedWorkspaceDir"
|
||||
case spawndepth = "spawnDepth"
|
||||
case subagentrole = "subagentRole"
|
||||
case subagentcontrolscope = "subagentControlScope"
|
||||
|
||||
@ -538,8 +538,6 @@ public struct AgentParams: Codable, Sendable {
|
||||
public let inputprovenance: [String: AnyCodable]?
|
||||
public let idempotencykey: String
|
||||
public let label: String?
|
||||
public let spawnedby: String?
|
||||
public let workspacedir: String?
|
||||
|
||||
public init(
|
||||
message: String,
|
||||
@ -566,9 +564,7 @@ public struct AgentParams: Codable, Sendable {
|
||||
internalevents: [[String: AnyCodable]]?,
|
||||
inputprovenance: [String: AnyCodable]?,
|
||||
idempotencykey: String,
|
||||
label: String?,
|
||||
spawnedby: String?,
|
||||
workspacedir: String?)
|
||||
label: String?)
|
||||
{
|
||||
self.message = message
|
||||
self.agentid = agentid
|
||||
@ -595,8 +591,6 @@ public struct AgentParams: Codable, Sendable {
|
||||
self.inputprovenance = inputprovenance
|
||||
self.idempotencykey = idempotencykey
|
||||
self.label = label
|
||||
self.spawnedby = spawnedby
|
||||
self.workspacedir = workspacedir
|
||||
}
|
||||
|
||||
private enum CodingKeys: String, CodingKey {
|
||||
@ -625,8 +619,6 @@ public struct AgentParams: Codable, Sendable {
|
||||
case inputprovenance = "inputProvenance"
|
||||
case idempotencykey = "idempotencyKey"
|
||||
case label
|
||||
case spawnedby = "spawnedBy"
|
||||
case workspacedir = "workspaceDir"
|
||||
}
|
||||
}
|
||||
|
||||
@ -1336,6 +1328,7 @@ public struct SessionsPatchParams: Codable, Sendable {
|
||||
public let execnode: AnyCodable?
|
||||
public let model: AnyCodable?
|
||||
public let spawnedby: AnyCodable?
|
||||
public let spawnedworkspacedir: AnyCodable?
|
||||
public let spawndepth: AnyCodable?
|
||||
public let subagentrole: AnyCodable?
|
||||
public let subagentcontrolscope: AnyCodable?
|
||||
@ -1356,6 +1349,7 @@ public struct SessionsPatchParams: Codable, Sendable {
|
||||
execnode: AnyCodable?,
|
||||
model: AnyCodable?,
|
||||
spawnedby: AnyCodable?,
|
||||
spawnedworkspacedir: AnyCodable?,
|
||||
spawndepth: AnyCodable?,
|
||||
subagentrole: AnyCodable?,
|
||||
subagentcontrolscope: AnyCodable?,
|
||||
@ -1375,6 +1369,7 @@ public struct SessionsPatchParams: Codable, Sendable {
|
||||
self.execnode = execnode
|
||||
self.model = model
|
||||
self.spawnedby = spawnedby
|
||||
self.spawnedworkspacedir = spawnedworkspacedir
|
||||
self.spawndepth = spawndepth
|
||||
self.subagentrole = subagentrole
|
||||
self.subagentcontrolscope = subagentcontrolscope
|
||||
@ -1396,6 +1391,7 @@ public struct SessionsPatchParams: Codable, Sendable {
|
||||
case execnode = "execNode"
|
||||
case model
|
||||
case spawnedby = "spawnedBy"
|
||||
case spawnedworkspacedir = "spawnedWorkspaceDir"
|
||||
case spawndepth = "spawnDepth"
|
||||
case subagentrole = "subagentRole"
|
||||
case subagentcontrolscope = "subagentControlScope"
|
||||
|
||||
@ -25,4 +25,5 @@ openclaw agent --agent ops --message "Generate report" --deliver --reply-channel
|
||||
|
||||
## Notes
|
||||
|
||||
- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names or `secretref-managed`), not resolved secret plaintext.
|
||||
- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names, `secretref-env:ENV_VAR_NAME`, or `secretref-managed`), not resolved secret plaintext.
|
||||
- Marker writes are source-authoritative: OpenClaw persists markers from the active source config snapshot, not from resolved runtime secret values.
|
||||
|
||||
@ -207,7 +207,7 @@ mode, pass `--yes` to accept defaults.
|
||||
## Models registry (`models.json`)
|
||||
|
||||
Custom providers in `models.providers` are written into `models.json` under the
|
||||
agent directory (default `~/.openclaw/agents/<agentId>/models.json`). This file
|
||||
agent directory (default `~/.openclaw/agents/<agentId>/agent/models.json`). This file
|
||||
is merged by default unless `models.mode` is set to `replace`.
|
||||
|
||||
Merge mode precedence for matching provider IDs:
|
||||
@ -215,7 +215,9 @@ Merge mode precedence for matching provider IDs:
|
||||
- Non-empty `baseUrl` already present in the agent `models.json` wins.
|
||||
- Non-empty `apiKey` in the agent `models.json` wins only when that provider is not SecretRef-managed in current config/auth-profile context.
|
||||
- SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets.
|
||||
- SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs).
|
||||
- Empty or missing agent `apiKey`/`baseUrl` fall back to config `models.providers`.
|
||||
- Other provider fields are refreshed from config and normalized catalog data.
|
||||
|
||||
This marker-based persistence applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`.
|
||||
Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values.
|
||||
This applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`.
|
||||
|
||||
@ -2014,9 +2014,11 @@ OpenClaw uses the pi-coding-agent model catalog. Add custom providers via `model
|
||||
- Non-empty agent `models.json` `baseUrl` values win.
|
||||
- Non-empty agent `apiKey` values win only when that provider is not SecretRef-managed in current config/auth-profile context.
|
||||
- SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets.
|
||||
- SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs).
|
||||
- Empty or missing agent `apiKey`/`baseUrl` fall back to `models.providers` in config.
|
||||
- Matching model `contextWindow`/`maxTokens` use the higher value between explicit config and implicit catalog values.
|
||||
- Use `models.mode: "replace"` when you want config to fully rewrite `models.json`.
|
||||
- Marker persistence is source-authoritative: markers are written from the active source config snapshot (pre-resolution), not from resolved runtime secret values.
|
||||
|
||||
### Provider field details
|
||||
|
||||
|
||||
@ -101,6 +101,7 @@ Notes:
|
||||
- Plan entries target `profiles.*.key` / `profiles.*.token` and write sibling refs (`keyRef` / `tokenRef`).
|
||||
- Auth-profile refs are included in runtime resolution and audit coverage.
|
||||
- For SecretRef-managed model providers, generated `agents/*/agent/models.json` entries persist non-secret markers (not resolved secret values) for `apiKey`/header surfaces.
|
||||
- Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values.
|
||||
- For web search:
|
||||
- In explicit provider mode (`tools.web.search.provider` set), only the selected provider key is active.
|
||||
- In auto mode (`tools.web.search.provider` unset), only the first provider key that resolves by precedence is active.
|
||||
|
||||
@ -75,11 +75,14 @@ outside the list is rejected.
|
||||
- `schema` (object, optional JSON Schema)
|
||||
- `provider` (string, optional)
|
||||
- `model` (string, optional)
|
||||
- `thinking` (string, optional)
|
||||
- `authProfileId` (string, optional)
|
||||
- `temperature` (number, optional)
|
||||
- `maxTokens` (number, optional)
|
||||
- `timeoutMs` (number, optional)
|
||||
|
||||
`thinking` accepts the standard OpenClaw reasoning presets, such as `low` or `medium`.
|
||||
|
||||
## Output
|
||||
|
||||
Returns `details.json` containing the parsed JSON (and validates against
|
||||
@ -90,6 +93,7 @@ Returns `details.json` containing the parsed JSON (and validates against
|
||||
```lobster
|
||||
openclaw.invoke --tool llm-task --action json --args-json '{
|
||||
"prompt": "Given the input email, return intent and draft.",
|
||||
"thinking": "low",
|
||||
"input": {
|
||||
"subject": "Hello",
|
||||
"body": "Can you help?"
|
||||
|
||||
@ -106,6 +106,7 @@ Use it in a pipeline:
|
||||
```lobster
|
||||
openclaw.invoke --tool llm-task --action json --args-json '{
|
||||
"prompt": "Given the input email, return intent and draft.",
|
||||
"thinking": "low",
|
||||
"input": { "subject": "Hello", "body": "Can you help?" },
|
||||
"schema": {
|
||||
"type": "object",
|
||||
|
||||
@ -17,9 +17,28 @@ describe("normalizeWebhookMessage", () => {
|
||||
|
||||
expect(result).not.toBeNull();
|
||||
expect(result?.senderId).toBe("+15551234567");
|
||||
expect(result?.senderIdExplicit).toBe(false);
|
||||
expect(result?.chatGuid).toBe("iMessage;-;+15551234567");
|
||||
});
|
||||
|
||||
it("marks explicit sender handles as explicit identity", () => {
|
||||
const result = normalizeWebhookMessage({
|
||||
type: "new-message",
|
||||
data: {
|
||||
guid: "msg-explicit-1",
|
||||
text: "hello",
|
||||
isGroup: false,
|
||||
isFromMe: true,
|
||||
handle: { address: "+15551234567" },
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
},
|
||||
});
|
||||
|
||||
expect(result).not.toBeNull();
|
||||
expect(result?.senderId).toBe("+15551234567");
|
||||
expect(result?.senderIdExplicit).toBe(true);
|
||||
});
|
||||
|
||||
it("does not infer sender from group chatGuid when sender handle is missing", () => {
|
||||
const result = normalizeWebhookMessage({
|
||||
type: "new-message",
|
||||
@ -72,6 +91,7 @@ describe("normalizeWebhookReaction", () => {
|
||||
|
||||
expect(result).not.toBeNull();
|
||||
expect(result?.senderId).toBe("+15551234567");
|
||||
expect(result?.senderIdExplicit).toBe(false);
|
||||
expect(result?.messageId).toBe("p:0/msg-1");
|
||||
expect(result?.action).toBe("added");
|
||||
});
|
||||
|
||||
@ -191,12 +191,13 @@ function readFirstChatRecord(message: Record<string, unknown>): Record<string, u
|
||||
|
||||
function extractSenderInfo(message: Record<string, unknown>): {
|
||||
senderId: string;
|
||||
senderIdExplicit: boolean;
|
||||
senderName?: string;
|
||||
} {
|
||||
const handleValue = message.handle ?? message.sender;
|
||||
const handle =
|
||||
asRecord(handleValue) ?? (typeof handleValue === "string" ? { address: handleValue } : null);
|
||||
const senderId =
|
||||
const senderIdRaw =
|
||||
readString(handle, "address") ??
|
||||
readString(handle, "handle") ??
|
||||
readString(handle, "id") ??
|
||||
@ -204,13 +205,18 @@ function extractSenderInfo(message: Record<string, unknown>): {
|
||||
readString(message, "sender") ??
|
||||
readString(message, "from") ??
|
||||
"";
|
||||
const senderId = senderIdRaw.trim();
|
||||
const senderName =
|
||||
readString(handle, "displayName") ??
|
||||
readString(handle, "name") ??
|
||||
readString(message, "senderName") ??
|
||||
undefined;
|
||||
|
||||
return { senderId, senderName };
|
||||
return {
|
||||
senderId,
|
||||
senderIdExplicit: Boolean(senderId),
|
||||
senderName,
|
||||
};
|
||||
}
|
||||
|
||||
function extractChatContext(message: Record<string, unknown>): {
|
||||
@ -441,6 +447,7 @@ export type BlueBubblesParticipant = {
|
||||
export type NormalizedWebhookMessage = {
|
||||
text: string;
|
||||
senderId: string;
|
||||
senderIdExplicit: boolean;
|
||||
senderName?: string;
|
||||
messageId?: string;
|
||||
timestamp?: number;
|
||||
@ -466,6 +473,7 @@ export type NormalizedWebhookReaction = {
|
||||
action: "added" | "removed";
|
||||
emoji: string;
|
||||
senderId: string;
|
||||
senderIdExplicit: boolean;
|
||||
senderName?: string;
|
||||
messageId: string;
|
||||
timestamp?: number;
|
||||
@ -672,7 +680,7 @@ export function normalizeWebhookMessage(
|
||||
readString(message, "subject") ??
|
||||
"";
|
||||
|
||||
const { senderId, senderName } = extractSenderInfo(message);
|
||||
const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message);
|
||||
const { chatGuid, chatIdentifier, chatId, chatName, isGroup, participants } =
|
||||
extractChatContext(message);
|
||||
const normalizedParticipants = normalizeParticipantList(participants);
|
||||
@ -717,7 +725,7 @@ export function normalizeWebhookMessage(
|
||||
|
||||
// BlueBubbles may omit `handle` in webhook payloads; for DM chat GUIDs we can still infer sender.
|
||||
const senderFallbackFromChatGuid =
|
||||
!senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null;
|
||||
!senderIdExplicit && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null;
|
||||
const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || "");
|
||||
if (!normalizedSender) {
|
||||
return null;
|
||||
@ -727,6 +735,7 @@ export function normalizeWebhookMessage(
|
||||
return {
|
||||
text,
|
||||
senderId: normalizedSender,
|
||||
senderIdExplicit,
|
||||
senderName,
|
||||
messageId,
|
||||
timestamp,
|
||||
@ -777,7 +786,7 @@ export function normalizeWebhookReaction(
|
||||
const emoji = (associatedEmoji?.trim() || mapping?.emoji) ?? `reaction:${associatedType}`;
|
||||
const action = mapping?.action ?? resolveTapbackActionHint(associatedType) ?? "added";
|
||||
|
||||
const { senderId, senderName } = extractSenderInfo(message);
|
||||
const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message);
|
||||
const { chatGuid, chatIdentifier, chatId, chatName, isGroup } = extractChatContext(message);
|
||||
|
||||
const fromMe = readBoolean(message, "isFromMe") ?? readBoolean(message, "is_from_me");
|
||||
@ -793,7 +802,7 @@ export function normalizeWebhookReaction(
|
||||
: undefined;
|
||||
|
||||
const senderFallbackFromChatGuid =
|
||||
!senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null;
|
||||
!senderIdExplicit && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null;
|
||||
const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || "");
|
||||
if (!normalizedSender) {
|
||||
return null;
|
||||
@ -803,6 +812,7 @@ export function normalizeWebhookReaction(
|
||||
action,
|
||||
emoji,
|
||||
senderId: normalizedSender,
|
||||
senderIdExplicit,
|
||||
senderName,
|
||||
messageId: associatedGuid,
|
||||
timestamp,
|
||||
|
||||
@ -38,6 +38,10 @@ import {
|
||||
resolveBlueBubblesMessageId,
|
||||
resolveReplyContextFromCache,
|
||||
} from "./monitor-reply-cache.js";
|
||||
import {
|
||||
hasBlueBubblesSelfChatCopy,
|
||||
rememberBlueBubblesSelfChatCopy,
|
||||
} from "./monitor-self-chat-cache.js";
|
||||
import type {
|
||||
BlueBubblesCoreRuntime,
|
||||
BlueBubblesRuntimeEnv,
|
||||
@ -47,7 +51,12 @@ import { isBlueBubblesPrivateApiEnabled } from "./probe.js";
|
||||
import { normalizeBlueBubblesReactionInput, sendBlueBubblesReaction } from "./reactions.js";
|
||||
import { normalizeSecretInputString } from "./secret-input.js";
|
||||
import { resolveChatGuidForTarget, sendMessageBlueBubbles } from "./send.js";
|
||||
import { formatBlueBubblesChatTarget, isAllowedBlueBubblesSender } from "./targets.js";
|
||||
import {
|
||||
extractHandleFromChatGuid,
|
||||
formatBlueBubblesChatTarget,
|
||||
isAllowedBlueBubblesSender,
|
||||
normalizeBlueBubblesHandle,
|
||||
} from "./targets.js";
|
||||
|
||||
const DEFAULT_TEXT_LIMIT = 4000;
|
||||
const invalidAckReactions = new Set<string>();
|
||||
@ -80,6 +89,19 @@ function normalizeSnippet(value: string): string {
|
||||
return stripMarkdown(value).replace(/\s+/g, " ").trim().toLowerCase();
|
||||
}
|
||||
|
||||
function isBlueBubblesSelfChatMessage(
|
||||
message: NormalizedWebhookMessage,
|
||||
isGroup: boolean,
|
||||
): boolean {
|
||||
if (isGroup || !message.senderIdExplicit) {
|
||||
return false;
|
||||
}
|
||||
const chatHandle =
|
||||
(message.chatGuid ? extractHandleFromChatGuid(message.chatGuid) : null) ??
|
||||
normalizeBlueBubblesHandle(message.chatIdentifier ?? "");
|
||||
return Boolean(chatHandle) && chatHandle === message.senderId;
|
||||
}
|
||||
|
||||
function prunePendingOutboundMessageIds(now = Date.now()): void {
|
||||
const cutoff = now - PENDING_OUTBOUND_MESSAGE_ID_TTL_MS;
|
||||
for (let i = pendingOutboundMessageIds.length - 1; i >= 0; i--) {
|
||||
@ -453,8 +475,27 @@ export async function processMessage(
|
||||
? `removed ${tapbackParsed.emoji} reaction`
|
||||
: `reacted with ${tapbackParsed.emoji}`
|
||||
: text || placeholder;
|
||||
const isSelfChatMessage = isBlueBubblesSelfChatMessage(message, isGroup);
|
||||
const selfChatLookup = {
|
||||
accountId: account.accountId,
|
||||
chatGuid: message.chatGuid,
|
||||
chatIdentifier: message.chatIdentifier,
|
||||
chatId: message.chatId,
|
||||
senderId: message.senderId,
|
||||
body: rawBody,
|
||||
timestamp: message.timestamp,
|
||||
};
|
||||
|
||||
const cacheMessageId = message.messageId?.trim();
|
||||
const confirmedOutboundCacheEntry = cacheMessageId
|
||||
? resolveReplyContextFromCache({
|
||||
accountId: account.accountId,
|
||||
replyToId: cacheMessageId,
|
||||
chatGuid: message.chatGuid,
|
||||
chatIdentifier: message.chatIdentifier,
|
||||
chatId: message.chatId,
|
||||
})
|
||||
: null;
|
||||
let messageShortId: string | undefined;
|
||||
const cacheInboundMessage = () => {
|
||||
if (!cacheMessageId) {
|
||||
@ -476,6 +517,12 @@ export async function processMessage(
|
||||
if (message.fromMe) {
|
||||
// Cache from-me messages so reply context can resolve sender/body.
|
||||
cacheInboundMessage();
|
||||
const confirmedAssistantOutbound =
|
||||
confirmedOutboundCacheEntry?.senderLabel === "me" &&
|
||||
normalizeSnippet(confirmedOutboundCacheEntry.body ?? "") === normalizeSnippet(rawBody);
|
||||
if (isSelfChatMessage && confirmedAssistantOutbound) {
|
||||
rememberBlueBubblesSelfChatCopy(selfChatLookup);
|
||||
}
|
||||
if (cacheMessageId) {
|
||||
const pending = consumePendingOutboundMessageId({
|
||||
accountId: account.accountId,
|
||||
@ -499,6 +546,11 @@ export async function processMessage(
|
||||
return;
|
||||
}
|
||||
|
||||
if (isSelfChatMessage && hasBlueBubblesSelfChatCopy(selfChatLookup)) {
|
||||
logVerbose(core, runtime, `drop: reflected self-chat duplicate sender=${message.senderId}`);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!rawBody) {
|
||||
logVerbose(core, runtime, `drop: empty text sender=${message.senderId}`);
|
||||
return;
|
||||
|
||||
190
extensions/bluebubbles/src/monitor-self-chat-cache.test.ts
Normal file
190
extensions/bluebubbles/src/monitor-self-chat-cache.test.ts
Normal file
@ -0,0 +1,190 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
hasBlueBubblesSelfChatCopy,
|
||||
rememberBlueBubblesSelfChatCopy,
|
||||
resetBlueBubblesSelfChatCache,
|
||||
} from "./monitor-self-chat-cache.js";
|
||||
|
||||
// Unit tests for the BlueBubbles self-chat reflection cache
// (rememberBlueBubblesSelfChatCopy / hasBlueBubblesSelfChatCopy / reset).
describe("BlueBubbles self-chat cache", () => {
  // A DM lookup where the chat handle equals the sender id (i.e. a self-chat).
  const directLookup = {
    accountId: "default",
    chatGuid: "iMessage;-;+15551234567",
    senderId: "+15551234567",
  } as const;

  afterEach(() => {
    // Isolate tests: wipe cache state and restore real timers.
    resetBlueBubblesSelfChatCache();
    vi.useRealTimers();
  });

  it("matches repeated lookups for the same scope, timestamp, and text", () => {
    vi.useFakeTimers();
    vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));

    // Bodies are normalized (CRLF -> LF, trimmed) before keying, so the
    // remembered and looked-up variants below should hash identically.
    rememberBlueBubblesSelfChatCopy({
      ...directLookup,
      body: " hello\r\nworld ",
      timestamp: 123,
    });

    expect(
      hasBlueBubblesSelfChatCopy({
        ...directLookup,
        body: "hello\nworld",
        timestamp: 123,
      }),
    ).toBe(true);
  });

  it("canonicalizes DM scope across chatIdentifier and chatGuid", () => {
    vi.useFakeTimers();
    vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));

    // Remember via chatIdentifier, look up via chatGuid...
    rememberBlueBubblesSelfChatCopy({
      accountId: "default",
      chatIdentifier: "+15551234567",
      senderId: "+15551234567",
      body: "hello",
      timestamp: 123,
    });

    expect(
      hasBlueBubblesSelfChatCopy({
        accountId: "default",
        chatGuid: "iMessage;-;+15551234567",
        senderId: "+15551234567",
        body: "hello",
        timestamp: 123,
      }),
    ).toBe(true);

    resetBlueBubblesSelfChatCache();

    // ...and the reverse: remember via chatGuid, look up via chatIdentifier.
    rememberBlueBubblesSelfChatCopy({
      accountId: "default",
      chatGuid: "iMessage;-;+15551234567",
      senderId: "+15551234567",
      body: "hello",
      timestamp: 123,
    });

    expect(
      hasBlueBubblesSelfChatCopy({
        accountId: "default",
        chatIdentifier: "+15551234567",
        senderId: "+15551234567",
        body: "hello",
        timestamp: 123,
      }),
    ).toBe(true);
  });

  it("expires entries after the ttl window", () => {
    vi.useFakeTimers();
    vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));

    rememberBlueBubblesSelfChatCopy({
      ...directLookup,
      body: "hello",
      timestamp: 123,
    });

    // Advance just past the 10s TTL.
    vi.advanceTimersByTime(11_001);

    expect(
      hasBlueBubblesSelfChatCopy({
        ...directLookup,
        body: "hello",
        timestamp: 123,
      }),
    ).toBe(false);
  });

  it("evicts older entries when the cache exceeds its cap", () => {
    vi.useFakeTimers();
    vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));

    // Insert one more entry than the 512-entry cap, spacing inserts so the
    // throttled cleanup is allowed to run between them.
    for (let i = 0; i < 513; i += 1) {
      rememberBlueBubblesSelfChatCopy({
        ...directLookup,
        body: `message-${i}`,
        timestamp: i,
      });
      vi.advanceTimersByTime(1_001);
    }

    // Oldest entry was evicted; newest is retained.
    expect(
      hasBlueBubblesSelfChatCopy({
        ...directLookup,
        body: "message-0",
        timestamp: 0,
      }),
    ).toBe(false);
    expect(
      hasBlueBubblesSelfChatCopy({
        ...directLookup,
        body: "message-512",
        timestamp: 512,
      }),
    ).toBe(true);
  });

  it("enforces the cache cap even when cleanup is throttled", () => {
    vi.useFakeTimers();
    vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));

    // No timer advance between inserts: expiry cleanup stays throttled, so
    // only the size cap keeps the cache bounded during the burst.
    for (let i = 0; i < 513; i += 1) {
      rememberBlueBubblesSelfChatCopy({
        ...directLookup,
        body: `burst-${i}`,
        timestamp: i,
      });
    }

    expect(
      hasBlueBubblesSelfChatCopy({
        ...directLookup,
        body: "burst-0",
        timestamp: 0,
      }),
    ).toBe(false);
    expect(
      hasBlueBubblesSelfChatCopy({
        ...directLookup,
        body: "burst-512",
        timestamp: 512,
      }),
    ).toBe(true);
  });

  it("does not collide long texts that differ only in the middle", () => {
    vi.useFakeTimers();
    vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));

    // Keys hash the full (bounded) body, so identical prefixes/suffixes with
    // a differing middle must map to distinct cache entries.
    const prefix = "a".repeat(256);
    const suffix = "b".repeat(256);
    const longBodyA = `${prefix}${"x".repeat(300)}${suffix}`;
    const longBodyB = `${prefix}${"y".repeat(300)}${suffix}`;

    rememberBlueBubblesSelfChatCopy({
      ...directLookup,
      body: longBodyA,
      timestamp: 123,
    });

    expect(
      hasBlueBubblesSelfChatCopy({
        ...directLookup,
        body: longBodyA,
        timestamp: 123,
      }),
    ).toBe(true);
    expect(
      hasBlueBubblesSelfChatCopy({
        ...directLookup,
        body: longBodyB,
        timestamp: 123,
      }),
    ).toBe(false);
  });
});
|
||||
127
extensions/bluebubbles/src/monitor-self-chat-cache.ts
Normal file
127
extensions/bluebubbles/src/monitor-self-chat-cache.ts
Normal file
@ -0,0 +1,127 @@
|
||||
import { createHash } from "node:crypto";
|
||||
import { extractHandleFromChatGuid, normalizeBlueBubblesHandle } from "./targets.js";
|
||||
|
||||
// Fields that identify which chat scope a remembered self-chat copy belongs to.
type SelfChatCacheKeyParts = {
  accountId: string;
  chatGuid?: string;
  chatIdentifier?: string;
  chatId?: number;
  senderId: string;
};

// Full cache lookup: scope fields plus the message body/timestamp that
// complete the cache key.
type SelfChatLookup = SelfChatCacheKeyParts & {
  body?: string;
  timestamp?: number;
};

// How long a remembered self-chat copy stays matchable.
const SELF_CHAT_TTL_MS = 10_000;
// Hard cap on cache size; oldest-inserted entries are evicted beyond this.
const MAX_SELF_CHAT_CACHE_ENTRIES = 512;
// Minimum interval between expiry sweeps (throttles cleanupExpired).
const CLEANUP_MIN_INTERVAL_MS = 1_000;
// Bodies longer than this are truncated before normalization/hashing.
const MAX_SELF_CHAT_BODY_CHARS = 32_768;
// key -> last-seen wall-clock time (ms). Map insertion order doubles as the
// eviction order for the size cap.
const cache = new Map<string, number>();
// Wall-clock time of the last expiry sweep; 0 means "never swept".
let lastCleanupAt = 0;
|
||||
|
||||
function normalizeBody(body: string | undefined): string | null {
|
||||
if (!body) {
|
||||
return null;
|
||||
}
|
||||
const bounded =
|
||||
body.length > MAX_SELF_CHAT_BODY_CHARS ? body.slice(0, MAX_SELF_CHAT_BODY_CHARS) : body;
|
||||
const normalized = bounded.replace(/\r\n?/g, "\n").trim();
|
||||
return normalized ? normalized : null;
|
||||
}
|
||||
|
||||
function isUsableTimestamp(timestamp: number | undefined): timestamp is number {
|
||||
return typeof timestamp === "number" && Number.isFinite(timestamp);
|
||||
}
|
||||
|
||||
function digestText(text: string): string {
|
||||
return createHash("sha256").update(text).digest("base64url");
|
||||
}
|
||||
|
||||
function trimOrUndefined(value?: string | null): string | undefined {
|
||||
const trimmed = value?.trim();
|
||||
return trimmed ? trimmed : undefined;
|
||||
}
|
||||
|
||||
function resolveCanonicalChatTarget(parts: SelfChatCacheKeyParts): string | null {
|
||||
const handleFromGuid = parts.chatGuid ? extractHandleFromChatGuid(parts.chatGuid) : null;
|
||||
if (handleFromGuid) {
|
||||
return handleFromGuid;
|
||||
}
|
||||
|
||||
const normalizedIdentifier = normalizeBlueBubblesHandle(parts.chatIdentifier ?? "");
|
||||
if (normalizedIdentifier) {
|
||||
return normalizedIdentifier;
|
||||
}
|
||||
|
||||
return (
|
||||
trimOrUndefined(parts.chatGuid) ??
|
||||
trimOrUndefined(parts.chatIdentifier) ??
|
||||
(typeof parts.chatId === "number" ? String(parts.chatId) : null)
|
||||
);
|
||||
}
|
||||
|
||||
function buildScope(parts: SelfChatCacheKeyParts): string {
|
||||
const target = resolveCanonicalChatTarget(parts) ?? parts.senderId;
|
||||
return `${parts.accountId}:${target}`;
|
||||
}
|
||||
|
||||
function cleanupExpired(now = Date.now()): void {
|
||||
if (
|
||||
lastCleanupAt !== 0 &&
|
||||
now >= lastCleanupAt &&
|
||||
now - lastCleanupAt < CLEANUP_MIN_INTERVAL_MS
|
||||
) {
|
||||
return;
|
||||
}
|
||||
lastCleanupAt = now;
|
||||
for (const [key, seenAt] of cache.entries()) {
|
||||
if (now - seenAt > SELF_CHAT_TTL_MS) {
|
||||
cache.delete(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function enforceSizeCap(): void {
|
||||
while (cache.size > MAX_SELF_CHAT_CACHE_ENTRIES) {
|
||||
const oldestKey = cache.keys().next().value;
|
||||
if (typeof oldestKey !== "string") {
|
||||
break;
|
||||
}
|
||||
cache.delete(oldestKey);
|
||||
}
|
||||
}
|
||||
|
||||
function buildKey(lookup: SelfChatLookup): string | null {
|
||||
const body = normalizeBody(lookup.body);
|
||||
if (!body || !isUsableTimestamp(lookup.timestamp)) {
|
||||
return null;
|
||||
}
|
||||
return `${buildScope(lookup)}:${lookup.timestamp}:${digestText(body)}`;
|
||||
}
|
||||
|
||||
export function rememberBlueBubblesSelfChatCopy(lookup: SelfChatLookup): void {
|
||||
cleanupExpired();
|
||||
const key = buildKey(lookup);
|
||||
if (!key) {
|
||||
return;
|
||||
}
|
||||
cache.set(key, Date.now());
|
||||
enforceSizeCap();
|
||||
}
|
||||
|
||||
export function hasBlueBubblesSelfChatCopy(lookup: SelfChatLookup): boolean {
|
||||
cleanupExpired();
|
||||
const key = buildKey(lookup);
|
||||
if (!key) {
|
||||
return false;
|
||||
}
|
||||
const seenAt = cache.get(key);
|
||||
return typeof seenAt === "number" && Date.now() - seenAt <= SELF_CHAT_TTL_MS;
|
||||
}
|
||||
|
||||
export function resetBlueBubblesSelfChatCache(): void {
|
||||
cache.clear();
|
||||
lastCleanupAt = 0;
|
||||
}
|
||||
@ -5,6 +5,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { createPluginRuntimeMock } from "../../test-utils/plugin-runtime-mock.js";
|
||||
import type { ResolvedBlueBubblesAccount } from "./accounts.js";
|
||||
import { fetchBlueBubblesHistory } from "./history.js";
|
||||
import { resetBlueBubblesSelfChatCache } from "./monitor-self-chat-cache.js";
|
||||
import {
|
||||
handleBlueBubblesWebhookRequest,
|
||||
registerBlueBubblesWebhookTarget,
|
||||
@ -246,6 +247,7 @@ describe("BlueBubbles webhook monitor", () => {
|
||||
vi.clearAllMocks();
|
||||
// Reset short ID state between tests for predictable behavior
|
||||
_resetBlueBubblesShortIdState();
|
||||
resetBlueBubblesSelfChatCache();
|
||||
mockFetchBlueBubblesHistory.mockResolvedValue({ entries: [], resolved: true });
|
||||
mockReadAllowFromStore.mockResolvedValue([]);
|
||||
mockUpsertPairingRequest.mockResolvedValue({ code: "TESTCODE", created: true });
|
||||
@ -259,6 +261,7 @@ describe("BlueBubbles webhook monitor", () => {
|
||||
|
||||
afterEach(() => {
|
||||
unregister?.();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
describe("DM pairing behavior vs allowFrom", () => {
|
||||
@ -2676,5 +2679,449 @@ describe("BlueBubbles webhook monitor", () => {
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("drops reflected self-chat duplicates after a confirmed assistant outbound", async () => {
|
||||
const account = createMockAccount({ dmPolicy: "open" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
const { sendMessageBlueBubbles } = await import("./send.js");
|
||||
vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "msg-self-1" });
|
||||
|
||||
mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => {
|
||||
await params.dispatcherOptions.deliver({ text: "replying now" }, { kind: "final" });
|
||||
return EMPTY_DISPATCH_RESULT;
|
||||
});
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const timestamp = Date.now();
|
||||
const inboundPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "hello",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-self-0",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
|
||||
|
||||
const fromMePayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "replying now",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: true,
|
||||
guid: "msg-self-1",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
const reflectedPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "replying now",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-self-2",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does not drop inbound messages when no fromMe self-chat copy was seen", async () => {
|
||||
const account = createMockAccount({ dmPolicy: "open" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const inboundPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "genuinely new message",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-inbound-1",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: Date.now(),
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does not drop reflected copies after the self-chat cache TTL expires", async () => {
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
|
||||
|
||||
const account = createMockAccount({ dmPolicy: "open" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const timestamp = Date.now();
|
||||
const fromMePayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "ttl me",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: true,
|
||||
guid: "msg-self-ttl-1",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await vi.runAllTimersAsync();
|
||||
|
||||
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
|
||||
vi.advanceTimersByTime(10_001);
|
||||
|
||||
const reflectedPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "ttl me",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-self-ttl-2",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await vi.runAllTimersAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does not cache regular fromMe DMs as self-chat reflections", async () => {
|
||||
const account = createMockAccount({ dmPolicy: "open" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const timestamp = Date.now();
|
||||
const fromMePayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "shared text",
|
||||
handle: { address: "+15557654321" },
|
||||
isGroup: false,
|
||||
isFromMe: true,
|
||||
guid: "msg-normal-fromme",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
|
||||
|
||||
const inboundPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "shared text",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-normal-inbound",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does not drop user-authored self-chat prompts without a confirmed assistant outbound", async () => {
|
||||
const account = createMockAccount({ dmPolicy: "open" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const timestamp = Date.now();
|
||||
const fromMePayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "user-authored self prompt",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: true,
|
||||
guid: "msg-self-user-1",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
|
||||
|
||||
const reflectedPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "user-authored self prompt",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-self-user-2",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does not treat a pending text-only match as confirmed assistant outbound", async () => {
|
||||
const account = createMockAccount({ dmPolicy: "open" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
const { sendMessageBlueBubbles } = await import("./send.js");
|
||||
vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "ok" });
|
||||
|
||||
mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => {
|
||||
await params.dispatcherOptions.deliver({ text: "same text" }, { kind: "final" });
|
||||
return EMPTY_DISPATCH_RESULT;
|
||||
});
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const timestamp = Date.now();
|
||||
const inboundPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "hello",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-self-race-0",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
|
||||
|
||||
const fromMePayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "same text",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: true,
|
||||
guid: "msg-self-race-1",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
const reflectedPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "same text",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-self-race-2",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does not treat chatGuid-inferred sender ids as self-chat evidence", async () => {
|
||||
const account = createMockAccount({ dmPolicy: "open" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const timestamp = Date.now();
|
||||
const fromMePayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "shared inferred text",
|
||||
handle: null,
|
||||
isGroup: false,
|
||||
isFromMe: true,
|
||||
guid: "msg-inferred-fromme",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", fromMePayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
mockDispatchReplyWithBufferedBlockDispatcher.mockClear();
|
||||
|
||||
const inboundPayload = {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "shared inferred text",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-inferred-inbound",
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
date: timestamp,
|
||||
},
|
||||
};
|
||||
|
||||
await handleBlueBubblesWebhookRequest(
|
||||
createMockRequest("POST", "/bluebubbles-webhook", inboundPayload),
|
||||
createMockResponse(),
|
||||
);
|
||||
await flushAsync();
|
||||
|
||||
expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@ -69,6 +69,7 @@ outside the list is rejected.
|
||||
- `schema` (object, optional JSON Schema)
|
||||
- `provider` (string, optional)
|
||||
- `model` (string, optional)
|
||||
- `thinking` (string, optional)
|
||||
- `authProfileId` (string, optional)
|
||||
- `temperature` (number, optional)
|
||||
- `maxTokens` (number, optional)
|
||||
|
||||
@ -109,6 +109,59 @@ describe("llm-task tool (json-only)", () => {
|
||||
expect(call.model).toBe("claude-4-sonnet");
|
||||
});
|
||||
|
||||
it("passes thinking override to embedded runner", async () => {
|
||||
// oxlint-disable-next-line typescript/no-explicit-any
|
||||
(runEmbeddedPiAgent as any).mockResolvedValueOnce({
|
||||
meta: {},
|
||||
payloads: [{ text: JSON.stringify({ ok: true }) }],
|
||||
});
|
||||
const tool = createLlmTaskTool(fakeApi());
|
||||
await tool.execute("id", { prompt: "x", thinking: "high" });
|
||||
// oxlint-disable-next-line typescript/no-explicit-any
|
||||
const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0];
|
||||
expect(call.thinkLevel).toBe("high");
|
||||
});
|
||||
|
||||
it("normalizes thinking aliases", async () => {
|
||||
// oxlint-disable-next-line typescript/no-explicit-any
|
||||
(runEmbeddedPiAgent as any).mockResolvedValueOnce({
|
||||
meta: {},
|
||||
payloads: [{ text: JSON.stringify({ ok: true }) }],
|
||||
});
|
||||
const tool = createLlmTaskTool(fakeApi());
|
||||
await tool.execute("id", { prompt: "x", thinking: "on" });
|
||||
// oxlint-disable-next-line typescript/no-explicit-any
|
||||
const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0];
|
||||
expect(call.thinkLevel).toBe("low");
|
||||
});
|
||||
|
||||
it("throws on invalid thinking level", async () => {
|
||||
const tool = createLlmTaskTool(fakeApi());
|
||||
await expect(tool.execute("id", { prompt: "x", thinking: "banana" })).rejects.toThrow(
|
||||
/invalid thinking level/i,
|
||||
);
|
||||
});
|
||||
|
||||
it("throws on unsupported xhigh thinking level", async () => {
|
||||
const tool = createLlmTaskTool(fakeApi());
|
||||
await expect(tool.execute("id", { prompt: "x", thinking: "xhigh" })).rejects.toThrow(
|
||||
/only supported/i,
|
||||
);
|
||||
});
|
||||
|
||||
it("does not pass thinkLevel when thinking is omitted", async () => {
|
||||
// oxlint-disable-next-line typescript/no-explicit-any
|
||||
(runEmbeddedPiAgent as any).mockResolvedValueOnce({
|
||||
meta: {},
|
||||
payloads: [{ text: JSON.stringify({ ok: true }) }],
|
||||
});
|
||||
const tool = createLlmTaskTool(fakeApi());
|
||||
await tool.execute("id", { prompt: "x" });
|
||||
// oxlint-disable-next-line typescript/no-explicit-any
|
||||
const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0];
|
||||
expect(call.thinkLevel).toBeUndefined();
|
||||
});
|
||||
|
||||
it("enforces allowedModels", async () => {
|
||||
// oxlint-disable-next-line typescript/no-explicit-any
|
||||
(runEmbeddedPiAgent as any).mockResolvedValueOnce({
|
||||
|
||||
@ -2,7 +2,13 @@ import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import Ajv from "ajv";
|
||||
import { resolvePreferredOpenClawTmpDir } from "openclaw/plugin-sdk/llm-task";
|
||||
import {
|
||||
formatThinkingLevels,
|
||||
formatXHighModelHint,
|
||||
normalizeThinkLevel,
|
||||
resolvePreferredOpenClawTmpDir,
|
||||
supportsXHighThinking,
|
||||
} from "openclaw/plugin-sdk/llm-task";
|
||||
// NOTE: This extension is intended to be bundled with OpenClaw.
|
||||
// When running from source (tests/dev), OpenClaw internals live under src/.
|
||||
// When running from a built install, internals live under dist/ (no src/ tree).
|
||||
@ -86,6 +92,7 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
|
||||
Type.String({ description: "Provider override (e.g. openai-codex, anthropic)." }),
|
||||
),
|
||||
model: Type.Optional(Type.String({ description: "Model id override." })),
|
||||
thinking: Type.Optional(Type.String({ description: "Thinking level override." })),
|
||||
authProfileId: Type.Optional(Type.String({ description: "Auth profile override." })),
|
||||
temperature: Type.Optional(Type.Number({ description: "Best-effort temperature override." })),
|
||||
maxTokens: Type.Optional(Type.Number({ description: "Best-effort maxTokens override." })),
|
||||
@ -144,6 +151,18 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
|
||||
);
|
||||
}
|
||||
|
||||
const thinkingRaw =
|
||||
typeof params.thinking === "string" && params.thinking.trim() ? params.thinking : undefined;
|
||||
const thinkLevel = thinkingRaw ? normalizeThinkLevel(thinkingRaw) : undefined;
|
||||
if (thinkingRaw && !thinkLevel) {
|
||||
throw new Error(
|
||||
`Invalid thinking level "${thinkingRaw}". Use one of: ${formatThinkingLevels(provider, model)}.`,
|
||||
);
|
||||
}
|
||||
if (thinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) {
|
||||
throw new Error(`Thinking level "xhigh" is only supported for ${formatXHighModelHint()}.`);
|
||||
}
|
||||
|
||||
const timeoutMs =
|
||||
(typeof params.timeoutMs === "number" && params.timeoutMs > 0
|
||||
? params.timeoutMs
|
||||
@ -204,6 +223,7 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
|
||||
model,
|
||||
authProfileId,
|
||||
authProfileIdSource: authProfileId ? "user" : "auto",
|
||||
thinkLevel,
|
||||
streamParams,
|
||||
disableTools: true,
|
||||
});
|
||||
|
||||
@ -270,6 +270,16 @@ export const mattermostPlugin: ChannelPlugin<ResolvedMattermostAccount> = {
|
||||
streaming: {
|
||||
blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 },
|
||||
},
|
||||
threading: {
|
||||
resolveReplyToMode: ({ cfg, accountId }) => {
|
||||
const account = resolveMattermostAccount({ cfg, accountId: accountId ?? "default" });
|
||||
const mode = account.config.replyToMode;
|
||||
if (mode === "off" || mode === "first") {
|
||||
return mode;
|
||||
}
|
||||
return "all";
|
||||
},
|
||||
},
|
||||
reload: { configPrefixes: ["channels.mattermost"] },
|
||||
configSchema: buildChannelConfigSchema(MattermostConfigSchema),
|
||||
config: {
|
||||
|
||||
@ -43,6 +43,7 @@ const MattermostAccountSchemaBase = z
|
||||
chunkMode: z.enum(["length", "newline"]).optional(),
|
||||
blockStreaming: z.boolean().optional(),
|
||||
blockStreamingCoalesce: BlockStreamingCoalesceSchema.optional(),
|
||||
replyToMode: z.enum(["off", "first", "all"]).optional(),
|
||||
responsePrefix: z.string().optional(),
|
||||
actions: z
|
||||
.object({
|
||||
|
||||
@ -109,6 +109,29 @@ describe("mattermost mention gating", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveMattermostReplyRootId with block streaming payloads", () => {
|
||||
it("uses threadRootId for block-streamed payloads with replyToId", () => {
|
||||
// When block streaming sends a payload with replyToId from the threading
|
||||
// mode, the deliver callback should still use the existing threadRootId.
|
||||
expect(
|
||||
resolveMattermostReplyRootId({
|
||||
threadRootId: "thread-root-1",
|
||||
replyToId: "streamed-reply-id",
|
||||
}),
|
||||
).toBe("thread-root-1");
|
||||
});
|
||||
|
||||
it("falls back to payload replyToId when no threadRootId in block streaming", () => {
|
||||
// Top-level channel message: no threadRootId, payload carries the
|
||||
// inbound post id as replyToId from the "all" threading mode.
|
||||
expect(
|
||||
resolveMattermostReplyRootId({
|
||||
replyToId: "inbound-post-for-threading",
|
||||
}),
|
||||
).toBe("inbound-post-for-threading");
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveMattermostReplyRootId", () => {
|
||||
it("uses replyToId for top-level replies", () => {
|
||||
expect(
|
||||
|
||||
@ -52,6 +52,8 @@ export type MattermostAccountConfig = {
|
||||
blockStreaming?: boolean;
|
||||
/** Merge streamed block replies before sending. */
|
||||
blockStreamingCoalesce?: BlockStreamingCoalesceConfig;
|
||||
/** Control reply threading (off|first|all). Default: "all". */
|
||||
replyToMode?: "off" | "first" | "all";
|
||||
/** Outbound response prefix override for this channel/account. */
|
||||
responsePrefix?: string;
|
||||
/** Action toggles for this account. */
|
||||
|
||||
@ -5,6 +5,7 @@ import {
|
||||
primeSendMock,
|
||||
} from "../../../src/test-utils/send-payload-contract.js";
|
||||
import { zalouserPlugin } from "./channel.js";
|
||||
import { setZalouserRuntime } from "./runtime.js";
|
||||
|
||||
vi.mock("./send.js", () => ({
|
||||
sendMessageZalouser: vi.fn().mockResolvedValue({ ok: true, messageId: "zlu-1" }),
|
||||
@ -38,6 +39,14 @@ describe("zalouserPlugin outbound sendPayload", () => {
|
||||
let mockedSend: ReturnType<typeof vi.mocked<(typeof import("./send.js"))["sendMessageZalouser"]>>;
|
||||
|
||||
beforeEach(async () => {
|
||||
setZalouserRuntime({
|
||||
channel: {
|
||||
text: {
|
||||
resolveChunkMode: vi.fn(() => "length"),
|
||||
resolveTextChunkLimit: vi.fn(() => 1200),
|
||||
},
|
||||
},
|
||||
} as never);
|
||||
const mod = await import("./send.js");
|
||||
mockedSend = vi.mocked(mod.sendMessageZalouser);
|
||||
mockedSend.mockClear();
|
||||
@ -55,7 +64,7 @@ describe("zalouserPlugin outbound sendPayload", () => {
|
||||
expect(mockedSend).toHaveBeenCalledWith(
|
||||
"1471383327500481391",
|
||||
"hello group",
|
||||
expect.objectContaining({ isGroup: true }),
|
||||
expect.objectContaining({ isGroup: true, textMode: "markdown" }),
|
||||
);
|
||||
expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-g1" });
|
||||
});
|
||||
@ -71,7 +80,7 @@ describe("zalouserPlugin outbound sendPayload", () => {
|
||||
expect(mockedSend).toHaveBeenCalledWith(
|
||||
"987654321",
|
||||
"hello",
|
||||
expect.objectContaining({ isGroup: false }),
|
||||
expect.objectContaining({ isGroup: false, textMode: "markdown" }),
|
||||
);
|
||||
expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-d1" });
|
||||
});
|
||||
@ -87,14 +96,37 @@ describe("zalouserPlugin outbound sendPayload", () => {
|
||||
expect(mockedSend).toHaveBeenCalledWith(
|
||||
"g-1471383327500481391",
|
||||
"hello native group",
|
||||
expect.objectContaining({ isGroup: true }),
|
||||
expect.objectContaining({ isGroup: true, textMode: "markdown" }),
|
||||
);
|
||||
expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-g-native" });
|
||||
});
|
||||
|
||||
it("passes long markdown through once so formatting happens before chunking", async () => {
|
||||
const text = `**${"a".repeat(2501)}**`;
|
||||
mockedSend.mockResolvedValue({ ok: true, messageId: "zlu-code" });
|
||||
|
||||
const result = await zalouserPlugin.outbound!.sendPayload!({
|
||||
...baseCtx({ text }),
|
||||
to: "987654321",
|
||||
});
|
||||
|
||||
expect(mockedSend).toHaveBeenCalledTimes(1);
|
||||
expect(mockedSend).toHaveBeenCalledWith(
|
||||
"987654321",
|
||||
text,
|
||||
expect.objectContaining({
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textChunkMode: "length",
|
||||
textChunkLimit: 1200,
|
||||
}),
|
||||
);
|
||||
expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-code" });
|
||||
});
|
||||
|
||||
installSendPayloadContractSuite({
|
||||
channel: "zalouser",
|
||||
chunking: { mode: "split", longTextLength: 3000, maxChunkLength: 2000 },
|
||||
chunking: { mode: "passthrough", longTextLength: 3000 },
|
||||
createHarness: ({ payload, sendResults }) => {
|
||||
primeSendMock(mockedSend, { ok: true, messageId: "zlu-1" }, sendResults);
|
||||
return {
|
||||
|
||||
@ -1,5 +1,7 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { chunkMarkdownText } from "../../../src/auto-reply/chunk.js";
|
||||
import { zalouserPlugin } from "./channel.js";
|
||||
import { setZalouserRuntime } from "./runtime.js";
|
||||
import { sendReactionZalouser } from "./send.js";
|
||||
|
||||
vi.mock("./send.js", async (importOriginal) => {
|
||||
@ -13,6 +15,16 @@ vi.mock("./send.js", async (importOriginal) => {
|
||||
const mockSendReaction = vi.mocked(sendReactionZalouser);
|
||||
|
||||
describe("zalouser outbound chunker", () => {
|
||||
beforeEach(() => {
|
||||
setZalouserRuntime({
|
||||
channel: {
|
||||
text: {
|
||||
chunkMarkdownText,
|
||||
},
|
||||
},
|
||||
} as never);
|
||||
});
|
||||
|
||||
it("chunks without empty strings and respects limit", () => {
|
||||
const chunker = zalouserPlugin.outbound?.chunker;
|
||||
expect(chunker).toBeTypeOf("function");
|
||||
|
||||
@ -20,7 +20,6 @@ import {
|
||||
buildBaseAccountStatusSnapshot,
|
||||
buildChannelConfigSchema,
|
||||
DEFAULT_ACCOUNT_ID,
|
||||
chunkTextForOutbound,
|
||||
deleteAccountFromConfigSection,
|
||||
formatAllowFromLowercase,
|
||||
isNumericTargetId,
|
||||
@ -43,6 +42,7 @@ import { resolveZalouserReactionMessageIds } from "./message-sid.js";
|
||||
import { zalouserOnboardingAdapter } from "./onboarding.js";
|
||||
import { probeZalouser } from "./probe.js";
|
||||
import { writeQrDataUrlToTempFile } from "./qr-temp-file.js";
|
||||
import { getZalouserRuntime } from "./runtime.js";
|
||||
import { sendMessageZalouser, sendReactionZalouser } from "./send.js";
|
||||
import { collectZalouserStatusIssues } from "./status-issues.js";
|
||||
import {
|
||||
@ -166,6 +166,16 @@ function resolveZalouserQrProfile(accountId?: string | null): string {
|
||||
return normalized;
|
||||
}
|
||||
|
||||
function resolveZalouserOutboundChunkMode(cfg: OpenClawConfig, accountId?: string) {
|
||||
return getZalouserRuntime().channel.text.resolveChunkMode(cfg, "zalouser", accountId);
|
||||
}
|
||||
|
||||
function resolveZalouserOutboundTextChunkLimit(cfg: OpenClawConfig, accountId?: string) {
|
||||
return getZalouserRuntime().channel.text.resolveTextChunkLimit(cfg, "zalouser", accountId, {
|
||||
fallbackLimit: zalouserDock.outbound?.textChunkLimit ?? 2000,
|
||||
});
|
||||
}
|
||||
|
||||
function mapUser(params: {
|
||||
id: string;
|
||||
name?: string | null;
|
||||
@ -595,14 +605,9 @@ export const zalouserPlugin: ChannelPlugin<ResolvedZalouserAccount> = {
|
||||
},
|
||||
outbound: {
|
||||
deliveryMode: "direct",
|
||||
chunker: chunkTextForOutbound,
|
||||
chunkerMode: "text",
|
||||
textChunkLimit: 2000,
|
||||
sendPayload: async (ctx) =>
|
||||
await sendPayloadWithChunkedTextAndMedia({
|
||||
ctx,
|
||||
textChunkLimit: zalouserPlugin.outbound!.textChunkLimit,
|
||||
chunker: zalouserPlugin.outbound!.chunker,
|
||||
sendText: (nextCtx) => zalouserPlugin.outbound!.sendText!(nextCtx),
|
||||
sendMedia: (nextCtx) => zalouserPlugin.outbound!.sendMedia!(nextCtx),
|
||||
emptyResult: { channel: "zalouser", messageId: "" },
|
||||
@ -613,6 +618,9 @@ export const zalouserPlugin: ChannelPlugin<ResolvedZalouserAccount> = {
|
||||
const result = await sendMessageZalouser(target.threadId, text, {
|
||||
profile: account.profile,
|
||||
isGroup: target.isGroup,
|
||||
textMode: "markdown",
|
||||
textChunkMode: resolveZalouserOutboundChunkMode(cfg, account.accountId),
|
||||
textChunkLimit: resolveZalouserOutboundTextChunkLimit(cfg, account.accountId),
|
||||
});
|
||||
return buildChannelSendResult("zalouser", result);
|
||||
},
|
||||
@ -624,6 +632,9 @@ export const zalouserPlugin: ChannelPlugin<ResolvedZalouserAccount> = {
|
||||
isGroup: target.isGroup,
|
||||
mediaUrl,
|
||||
mediaLocalRoots,
|
||||
textMode: "markdown",
|
||||
textChunkMode: resolveZalouserOutboundChunkMode(cfg, account.accountId),
|
||||
textChunkLimit: resolveZalouserOutboundTextChunkLimit(cfg, account.accountId),
|
||||
});
|
||||
return buildChannelSendResult("zalouser", result);
|
||||
},
|
||||
|
||||
@ -51,6 +51,7 @@ function createRuntimeEnv(): RuntimeEnv {
|
||||
|
||||
function installRuntime(params: {
|
||||
commandAuthorized?: boolean;
|
||||
replyPayload?: { text?: string; mediaUrl?: string; mediaUrls?: string[] };
|
||||
resolveCommandAuthorizedFromAuthorizers?: (params: {
|
||||
useAccessGroups: boolean;
|
||||
authorizers: Array<{ configured: boolean; allowed: boolean }>;
|
||||
@ -58,6 +59,9 @@ function installRuntime(params: {
|
||||
}) {
|
||||
const dispatchReplyWithBufferedBlockDispatcher = vi.fn(async ({ dispatcherOptions, ctx }) => {
|
||||
await dispatcherOptions.typingCallbacks?.onReplyStart?.();
|
||||
if (params.replyPayload) {
|
||||
await dispatcherOptions.deliver(params.replyPayload);
|
||||
}
|
||||
return { queuedFinal: false, counts: { tool: 0, block: 0, final: 0 }, ctx };
|
||||
});
|
||||
const resolveCommandAuthorizedFromAuthorizers = vi.fn(
|
||||
@ -166,7 +170,8 @@ function installRuntime(params: {
|
||||
text: {
|
||||
resolveMarkdownTableMode: vi.fn(() => "code"),
|
||||
convertMarkdownTables: vi.fn((text: string) => text),
|
||||
resolveChunkMode: vi.fn(() => "line"),
|
||||
resolveChunkMode: vi.fn(() => "length"),
|
||||
resolveTextChunkLimit: vi.fn(() => 1200),
|
||||
chunkMarkdownTextWithMode: vi.fn((text: string) => [text]),
|
||||
},
|
||||
},
|
||||
@ -304,6 +309,42 @@ describe("zalouser monitor group mention gating", () => {
|
||||
expect(callArg?.ctx?.WasMentioned).toBe(true);
|
||||
});
|
||||
|
||||
it("passes long markdown replies through once so formatting happens before chunking", async () => {
|
||||
const replyText = `**${"a".repeat(2501)}**`;
|
||||
installRuntime({
|
||||
commandAuthorized: false,
|
||||
replyPayload: { text: replyText },
|
||||
});
|
||||
|
||||
await __testing.processMessage({
|
||||
message: createDmMessage({
|
||||
content: "hello",
|
||||
}),
|
||||
account: {
|
||||
...createAccount(),
|
||||
config: {
|
||||
...createAccount().config,
|
||||
dmPolicy: "open",
|
||||
},
|
||||
},
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
});
|
||||
|
||||
expect(sendMessageZalouserMock).toHaveBeenCalledTimes(1);
|
||||
expect(sendMessageZalouserMock).toHaveBeenCalledWith(
|
||||
"u-1",
|
||||
replyText,
|
||||
expect.objectContaining({
|
||||
isGroup: false,
|
||||
profile: "default",
|
||||
textMode: "markdown",
|
||||
textChunkMode: "length",
|
||||
textChunkLimit: 1200,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("uses commandContent for mention-prefixed control commands", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: true,
|
||||
|
||||
@ -703,6 +703,10 @@ async function deliverZalouserReply(params: {
|
||||
params;
|
||||
const tableMode = params.tableMode ?? "code";
|
||||
const text = core.channel.text.convertMarkdownTables(payload.text ?? "", tableMode);
|
||||
const chunkMode = core.channel.text.resolveChunkMode(config, "zalouser", accountId);
|
||||
const textChunkLimit = core.channel.text.resolveTextChunkLimit(config, "zalouser", accountId, {
|
||||
fallbackLimit: ZALOUSER_TEXT_LIMIT,
|
||||
});
|
||||
|
||||
const sentMedia = await sendMediaWithLeadingCaption({
|
||||
mediaUrls: resolveOutboundMediaUrls(payload),
|
||||
@ -713,6 +717,9 @@ async function deliverZalouserReply(params: {
|
||||
profile,
|
||||
mediaUrl,
|
||||
isGroup,
|
||||
textMode: "markdown",
|
||||
textChunkMode: chunkMode,
|
||||
textChunkLimit,
|
||||
});
|
||||
statusSink?.({ lastOutboundAt: Date.now() });
|
||||
},
|
||||
@ -725,20 +732,17 @@ async function deliverZalouserReply(params: {
|
||||
}
|
||||
|
||||
if (text) {
|
||||
const chunkMode = core.channel.text.resolveChunkMode(config, "zalouser", accountId);
|
||||
const chunks = core.channel.text.chunkMarkdownTextWithMode(
|
||||
text,
|
||||
ZALOUSER_TEXT_LIMIT,
|
||||
chunkMode,
|
||||
);
|
||||
logVerbose(core, runtime, `Sending ${chunks.length} text chunk(s) to ${chatId}`);
|
||||
for (const chunk of chunks) {
|
||||
try {
|
||||
await sendMessageZalouser(chatId, chunk, { profile, isGroup });
|
||||
statusSink?.({ lastOutboundAt: Date.now() });
|
||||
} catch (err) {
|
||||
runtime.error(`Zalouser message send failed: ${String(err)}`);
|
||||
}
|
||||
try {
|
||||
await sendMessageZalouser(chatId, text, {
|
||||
profile,
|
||||
isGroup,
|
||||
textMode: "markdown",
|
||||
textChunkMode: chunkMode,
|
||||
textChunkLimit,
|
||||
});
|
||||
statusSink?.({ lastOutboundAt: Date.now() });
|
||||
} catch (err) {
|
||||
runtime.error(`Zalouser message send failed: ${String(err)}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -8,6 +8,7 @@ import {
|
||||
sendSeenZalouser,
|
||||
sendTypingZalouser,
|
||||
} from "./send.js";
|
||||
import { parseZalouserTextStyles } from "./text-styles.js";
|
||||
import {
|
||||
sendZaloDeliveredEvent,
|
||||
sendZaloLink,
|
||||
@ -16,6 +17,7 @@ import {
|
||||
sendZaloTextMessage,
|
||||
sendZaloTypingEvent,
|
||||
} from "./zalo-js.js";
|
||||
import { TextStyle } from "./zca-client.js";
|
||||
|
||||
vi.mock("./zalo-js.js", () => ({
|
||||
sendZaloTextMessage: vi.fn(),
|
||||
@ -43,36 +45,272 @@ describe("zalouser send helpers", () => {
|
||||
mockSendSeen.mockReset();
|
||||
});
|
||||
|
||||
it("delegates text send to JS transport", async () => {
|
||||
it("keeps plain text literal by default", async () => {
|
||||
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-1" });
|
||||
|
||||
const result = await sendMessageZalouser("thread-1", "hello", {
|
||||
const result = await sendMessageZalouser("thread-1", "**hello**", {
|
||||
profile: "default",
|
||||
isGroup: true,
|
||||
});
|
||||
|
||||
expect(mockSendText).toHaveBeenCalledWith("thread-1", "hello", {
|
||||
profile: "default",
|
||||
isGroup: true,
|
||||
});
|
||||
expect(mockSendText).toHaveBeenCalledWith(
|
||||
"thread-1",
|
||||
"**hello**",
|
||||
expect.objectContaining({
|
||||
profile: "default",
|
||||
isGroup: true,
|
||||
}),
|
||||
);
|
||||
expect(result).toEqual({ ok: true, messageId: "mid-1" });
|
||||
});
|
||||
|
||||
it("maps image helper to media send", async () => {
|
||||
it("formats markdown text when markdown mode is enabled", async () => {
|
||||
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-1b" });
|
||||
|
||||
await sendMessageZalouser("thread-1", "**hello**", {
|
||||
profile: "default",
|
||||
isGroup: true,
|
||||
textMode: "markdown",
|
||||
});
|
||||
|
||||
expect(mockSendText).toHaveBeenCalledWith(
|
||||
"thread-1",
|
||||
"hello",
|
||||
expect.objectContaining({
|
||||
profile: "default",
|
||||
isGroup: true,
|
||||
textMode: "markdown",
|
||||
textStyles: [{ start: 0, len: 5, st: TextStyle.Bold }],
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("formats image captions in markdown mode", async () => {
|
||||
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-2" });
|
||||
|
||||
await sendImageZalouser("thread-2", "https://example.com/a.png", {
|
||||
profile: "p2",
|
||||
caption: "cap",
|
||||
caption: "_cap_",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
});
|
||||
|
||||
expect(mockSendText).toHaveBeenCalledWith("thread-2", "cap", {
|
||||
expect(mockSendText).toHaveBeenCalledWith(
|
||||
"thread-2",
|
||||
"cap",
|
||||
expect.objectContaining({
|
||||
profile: "p2",
|
||||
caption: undefined,
|
||||
isGroup: false,
|
||||
mediaUrl: "https://example.com/a.png",
|
||||
textMode: "markdown",
|
||||
textStyles: [{ start: 0, len: 3, st: TextStyle.Italic }],
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("does not keep the raw markdown caption as a media fallback after formatting", async () => {
|
||||
mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-2b" });
|
||||
|
||||
await sendImageZalouser("thread-2", "https://example.com/a.png", {
|
||||
profile: "p2",
|
||||
caption: "cap",
|
||||
caption: "```\n```",
|
||||
isGroup: false,
|
||||
mediaUrl: "https://example.com/a.png",
|
||||
textMode: "markdown",
|
||||
});
|
||||
|
||||
expect(mockSendText).toHaveBeenCalledWith(
|
||||
"thread-2",
|
||||
"",
|
||||
expect.objectContaining({
|
||||
profile: "p2",
|
||||
caption: undefined,
|
||||
isGroup: false,
|
||||
mediaUrl: "https://example.com/a.png",
|
||||
textMode: "markdown",
|
||||
textStyles: undefined,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("rechunks normalized markdown text before sending to avoid transport truncation", async () => {
|
||||
const text = "\t".repeat(500) + "a".repeat(1500);
|
||||
const formatted = parseZalouserTextStyles(text);
|
||||
mockSendText
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2c-1" })
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2c-2" });
|
||||
|
||||
const result = await sendMessageZalouser("thread-2c", text, {
|
||||
profile: "p2c",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
});
|
||||
|
||||
expect(formatted.text.length).toBeGreaterThan(2000);
|
||||
expect(mockSendText).toHaveBeenCalledTimes(2);
|
||||
expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text);
|
||||
expect(mockSendText.mock.calls.every((call) => (call[1] as string).length <= 2000)).toBe(true);
|
||||
expect(result).toEqual({ ok: true, messageId: "mid-2c-2" });
|
||||
});
|
||||
|
||||
it("preserves text styles when splitting long formatted markdown", async () => {
|
||||
const text = `**${"a".repeat(2501)}**`;
|
||||
mockSendText
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-1" })
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-2" });
|
||||
|
||||
const result = await sendMessageZalouser("thread-2d", text, {
|
||||
profile: "p2d",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
});
|
||||
|
||||
expect(mockSendText).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"thread-2d",
|
||||
"a".repeat(2000),
|
||||
expect.objectContaining({
|
||||
profile: "p2d",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textStyles: [{ start: 0, len: 2000, st: TextStyle.Bold }],
|
||||
}),
|
||||
);
|
||||
expect(mockSendText).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
"thread-2d",
|
||||
"a".repeat(501),
|
||||
expect.objectContaining({
|
||||
profile: "p2d",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textStyles: [{ start: 0, len: 501, st: TextStyle.Bold }],
|
||||
}),
|
||||
);
|
||||
expect(result).toEqual({ ok: true, messageId: "mid-2d-2" });
|
||||
});
|
||||
|
||||
it("preserves formatted text and styles when newline chunk mode splits after parsing", async () => {
|
||||
const text = `**${"a".repeat(1995)}**\n\nsecond paragraph`;
|
||||
const formatted = parseZalouserTextStyles(text);
|
||||
mockSendText
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-3" })
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-4" });
|
||||
|
||||
const result = await sendMessageZalouser("thread-2d-2", text, {
|
||||
profile: "p2d-2",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textChunkMode: "newline",
|
||||
});
|
||||
|
||||
expect(mockSendText).toHaveBeenCalledTimes(2);
|
||||
expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text);
|
||||
expect(mockSendText).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"thread-2d-2",
|
||||
`${"a".repeat(1995)}\n\n`,
|
||||
expect.objectContaining({
|
||||
profile: "p2d-2",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textChunkMode: "newline",
|
||||
textStyles: [{ start: 0, len: 1995, st: TextStyle.Bold }],
|
||||
}),
|
||||
);
|
||||
expect(mockSendText).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
"thread-2d-2",
|
||||
"second paragraph",
|
||||
expect.objectContaining({
|
||||
profile: "p2d-2",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textChunkMode: "newline",
|
||||
textStyles: undefined,
|
||||
}),
|
||||
);
|
||||
expect(result).toEqual({ ok: true, messageId: "mid-2d-4" });
|
||||
});
|
||||
|
||||
it("respects an explicit text chunk limit when splitting formatted markdown", async () => {
|
||||
const text = `**${"a".repeat(1501)}**`;
|
||||
mockSendText
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-5" })
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2d-6" });
|
||||
|
||||
const result = await sendMessageZalouser("thread-2d-3", text, {
|
||||
profile: "p2d-3",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textChunkLimit: 1200,
|
||||
} as never);
|
||||
|
||||
expect(mockSendText).toHaveBeenCalledTimes(2);
|
||||
expect(mockSendText).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"thread-2d-3",
|
||||
"a".repeat(1200),
|
||||
expect.objectContaining({
|
||||
profile: "p2d-3",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textChunkLimit: 1200,
|
||||
textStyles: [{ start: 0, len: 1200, st: TextStyle.Bold }],
|
||||
}),
|
||||
);
|
||||
expect(mockSendText).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
"thread-2d-3",
|
||||
"a".repeat(301),
|
||||
expect.objectContaining({
|
||||
profile: "p2d-3",
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
textChunkLimit: 1200,
|
||||
textStyles: [{ start: 0, len: 301, st: TextStyle.Bold }],
|
||||
}),
|
||||
);
|
||||
expect(result).toEqual({ ok: true, messageId: "mid-2d-6" });
|
||||
});
|
||||
|
||||
it("sends overflow markdown captions as follow-up text after the media message", async () => {
|
||||
const caption = "\t".repeat(500) + "a".repeat(1500);
|
||||
const formatted = parseZalouserTextStyles(caption);
|
||||
mockSendText
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2e-1" })
|
||||
.mockResolvedValueOnce({ ok: true, messageId: "mid-2e-2" });
|
||||
|
||||
const result = await sendImageZalouser("thread-2e", "https://example.com/long.png", {
|
||||
profile: "p2e",
|
||||
caption,
|
||||
isGroup: false,
|
||||
textMode: "markdown",
|
||||
});
|
||||
|
||||
expect(mockSendText).toHaveBeenCalledTimes(2);
|
||||
expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text);
|
||||
expect(mockSendText).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"thread-2e",
|
||||
expect.any(String),
|
||||
expect.objectContaining({
|
||||
profile: "p2e",
|
||||
caption: undefined,
|
||||
isGroup: false,
|
||||
mediaUrl: "https://example.com/long.png",
|
||||
textMode: "markdown",
|
||||
}),
|
||||
);
|
||||
expect(mockSendText).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
"thread-2e",
|
||||
expect.any(String),
|
||||
expect.not.objectContaining({
|
||||
mediaUrl: "https://example.com/long.png",
|
||||
}),
|
||||
);
|
||||
expect(result).toEqual({ ok: true, messageId: "mid-2e-2" });
|
||||
});
|
||||
|
||||
it("delegates link helper to JS transport", async () => {
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
import { parseZalouserTextStyles } from "./text-styles.js";
|
||||
import type { ZaloEventMessage, ZaloSendOptions, ZaloSendResult } from "./types.js";
|
||||
import {
|
||||
sendZaloDeliveredEvent,
|
||||
@ -7,16 +8,58 @@ import {
|
||||
sendZaloTextMessage,
|
||||
sendZaloTypingEvent,
|
||||
} from "./zalo-js.js";
|
||||
import { TextStyle } from "./zca-client.js";
|
||||
|
||||
// Public aliases so callers depend on this module's names rather than the
// underlying Zalo transport types directly.
export type ZalouserSendOptions = ZaloSendOptions;
export type ZalouserSendResult = ZaloSendResult;

// Maximum characters per outbound message chunk (presumably the Zalo transport
// message-length cap — confirm against the transport docs).
const ZALO_TEXT_LIMIT = 2000;
// Chunking strategy used when the caller does not specify textChunkMode.
const DEFAULT_TEXT_CHUNK_MODE = "length";

// One transport-sized slice of formatted text, paired with the style ranges
// that fall inside it (offsets rebased to the slice's own start).
type StyledTextChunk = {
  text: string;
  styles?: ZaloSendOptions["textStyles"];
};

// Non-optional variant of the transport's chunk-mode option.
type TextChunkMode = NonNullable<ZaloSendOptions["textChunkMode"]>;
|
||||
|
||||
export async function sendMessageZalouser(
|
||||
threadId: string,
|
||||
text: string,
|
||||
options: ZalouserSendOptions = {},
|
||||
): Promise<ZalouserSendResult> {
|
||||
return await sendZaloTextMessage(threadId, text, options);
|
||||
const prepared =
|
||||
options.textMode === "markdown"
|
||||
? parseZalouserTextStyles(text)
|
||||
: { text, styles: options.textStyles };
|
||||
const textChunkLimit = options.textChunkLimit ?? ZALO_TEXT_LIMIT;
|
||||
const chunks = splitStyledText(
|
||||
prepared.text,
|
||||
(prepared.styles?.length ?? 0) > 0 ? prepared.styles : undefined,
|
||||
textChunkLimit,
|
||||
options.textChunkMode,
|
||||
);
|
||||
|
||||
let lastResult: ZalouserSendResult | null = null;
|
||||
for (const [index, chunk] of chunks.entries()) {
|
||||
const chunkOptions =
|
||||
index === 0
|
||||
? { ...options, textStyles: chunk.styles }
|
||||
: {
|
||||
...options,
|
||||
caption: undefined,
|
||||
mediaLocalRoots: undefined,
|
||||
mediaUrl: undefined,
|
||||
textStyles: chunk.styles,
|
||||
};
|
||||
const result = await sendZaloTextMessage(threadId, chunk.text, chunkOptions);
|
||||
if (!result.ok) {
|
||||
return result;
|
||||
}
|
||||
lastResult = result;
|
||||
}
|
||||
|
||||
return lastResult ?? { ok: false, error: "No message content provided" };
|
||||
}
|
||||
|
||||
export async function sendImageZalouser(
|
||||
@ -24,8 +67,9 @@ export async function sendImageZalouser(
|
||||
imageUrl: string,
|
||||
options: ZalouserSendOptions = {},
|
||||
): Promise<ZalouserSendResult> {
|
||||
return await sendZaloTextMessage(threadId, options.caption ?? "", {
|
||||
return await sendMessageZalouser(threadId, options.caption ?? "", {
|
||||
...options,
|
||||
caption: undefined,
|
||||
mediaUrl: imageUrl,
|
||||
});
|
||||
}
|
||||
@ -85,3 +129,144 @@ export async function sendSeenZalouser(params: {
|
||||
}): Promise<void> {
|
||||
await sendZaloSeenEvent(params);
|
||||
}
|
||||
|
||||
function splitStyledText(
|
||||
text: string,
|
||||
styles: ZaloSendOptions["textStyles"],
|
||||
limit: number,
|
||||
mode: ZaloSendOptions["textChunkMode"],
|
||||
): StyledTextChunk[] {
|
||||
if (text.length === 0) {
|
||||
return [{ text, styles: undefined }];
|
||||
}
|
||||
|
||||
const chunks: StyledTextChunk[] = [];
|
||||
for (const range of splitTextRanges(text, limit, mode ?? DEFAULT_TEXT_CHUNK_MODE)) {
|
||||
const { start, end } = range;
|
||||
chunks.push({
|
||||
text: text.slice(start, end),
|
||||
styles: sliceTextStyles(styles, start, end),
|
||||
});
|
||||
}
|
||||
return chunks;
|
||||
}
|
||||
|
||||
function sliceTextStyles(
|
||||
styles: ZaloSendOptions["textStyles"],
|
||||
start: number,
|
||||
end: number,
|
||||
): ZaloSendOptions["textStyles"] {
|
||||
if (!styles || styles.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const chunkStyles = styles
|
||||
.map((style) => {
|
||||
const overlapStart = Math.max(style.start, start);
|
||||
const overlapEnd = Math.min(style.start + style.len, end);
|
||||
if (overlapEnd <= overlapStart) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (style.st === TextStyle.Indent) {
|
||||
return {
|
||||
start: overlapStart - start,
|
||||
len: overlapEnd - overlapStart,
|
||||
st: style.st,
|
||||
indentSize: style.indentSize,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
start: overlapStart - start,
|
||||
len: overlapEnd - overlapStart,
|
||||
st: style.st,
|
||||
};
|
||||
})
|
||||
.filter((style): style is NonNullable<typeof style> => style !== null);
|
||||
|
||||
return chunkStyles.length > 0 ? chunkStyles : undefined;
|
||||
}
|
||||
|
||||
function splitTextRanges(
|
||||
text: string,
|
||||
limit: number,
|
||||
mode: TextChunkMode,
|
||||
): Array<{ start: number; end: number }> {
|
||||
if (mode === "newline") {
|
||||
return splitTextRangesByPreferredBreaks(text, limit);
|
||||
}
|
||||
|
||||
const ranges: Array<{ start: number; end: number }> = [];
|
||||
for (let start = 0; start < text.length; start += limit) {
|
||||
ranges.push({
|
||||
start,
|
||||
end: Math.min(text.length, start + limit),
|
||||
});
|
||||
}
|
||||
return ranges;
|
||||
}
|
||||
|
||||
function splitTextRangesByPreferredBreaks(
|
||||
text: string,
|
||||
limit: number,
|
||||
): Array<{ start: number; end: number }> {
|
||||
const ranges: Array<{ start: number; end: number }> = [];
|
||||
let start = 0;
|
||||
|
||||
while (start < text.length) {
|
||||
const maxEnd = Math.min(text.length, start + limit);
|
||||
let end = maxEnd;
|
||||
if (maxEnd < text.length) {
|
||||
end =
|
||||
findParagraphBreak(text, start, maxEnd) ??
|
||||
findLastBreak(text, "\n", start, maxEnd) ??
|
||||
findLastWhitespaceBreak(text, start, maxEnd) ??
|
||||
maxEnd;
|
||||
}
|
||||
|
||||
if (end <= start) {
|
||||
end = maxEnd;
|
||||
}
|
||||
|
||||
ranges.push({ start, end });
|
||||
start = end;
|
||||
}
|
||||
|
||||
return ranges;
|
||||
}
|
||||
|
||||
function findParagraphBreak(text: string, start: number, end: number): number | undefined {
|
||||
const slice = text.slice(start, end);
|
||||
const matches = slice.matchAll(/\n[\t ]*\n+/g);
|
||||
let lastMatch: RegExpMatchArray | undefined;
|
||||
for (const match of matches) {
|
||||
lastMatch = match;
|
||||
}
|
||||
if (!lastMatch || lastMatch.index === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return start + lastMatch.index + lastMatch[0].length;
|
||||
}
|
||||
|
||||
function findLastBreak(
|
||||
text: string,
|
||||
marker: string,
|
||||
start: number,
|
||||
end: number,
|
||||
): number | undefined {
|
||||
const index = text.lastIndexOf(marker, end - 1);
|
||||
if (index < start) {
|
||||
return undefined;
|
||||
}
|
||||
return index + marker.length;
|
||||
}
|
||||
|
||||
function findLastWhitespaceBreak(text: string, start: number, end: number): number | undefined {
|
||||
for (let index = end - 1; index > start; index -= 1) {
|
||||
if (/\s/.test(text[index])) {
|
||||
return index + 1;
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
203
extensions/zalouser/src/text-styles.test.ts
Normal file
203
extensions/zalouser/src/text-styles.test.ts
Normal file
@ -0,0 +1,203 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { parseZalouserTextStyles } from "./text-styles.js";
|
||||
import { TextStyle } from "./zca-client.js";
|
||||
|
||||
// Behavioral contract for parseZalouserTextStyles: markdown-ish input in,
// plain text plus zca-js Style ranges out. Offsets in `styles` index into the
// returned `text` (post-markdown-stripping), not into the input.
describe("parseZalouserTextStyles", () => {
  it("renders inline markdown emphasis as Zalo style ranges", () => {
    expect(parseZalouserTextStyles("**bold** *italic* ~~strike~~")).toEqual({
      text: "bold italic strike",
      styles: [
        { start: 0, len: 4, st: TextStyle.Bold },
        { start: 5, len: 6, st: TextStyle.Italic },
        { start: 12, len: 6, st: TextStyle.StrikeThrough },
      ],
    });
  });

  it("keeps inline code and plain math markers literal", () => {
    expect(parseZalouserTextStyles("before `inline *code*` after\n2 * 3 * 4")).toEqual({
      text: "before `inline *code*` after\n2 * 3 * 4",
      styles: [],
    });
  });

  it("preserves backslash escapes inside code spans and fenced code blocks", () => {
    expect(parseZalouserTextStyles("before `\\*` after\n```ts\n\\*\\_\\\\\n```")).toEqual({
      text: "before `\\*` after\n\\*\\_\\\\",
      styles: [],
    });
  });

  it("closes fenced code blocks when the input uses CRLF newlines", () => {
    expect(parseZalouserTextStyles("```\r\n*code*\r\n```\r\n**after**")).toEqual({
      text: "*code*\nafter",
      styles: [{ start: 7, len: 5, st: TextStyle.Bold }],
    });
  });

  // Line-level constructs (headings, quotes, lists) become whole-line styles.
  it("maps headings, block quotes, and lists into line styles", () => {
    expect(parseZalouserTextStyles(["# Title", "> quoted", " - nested"].join("\n"))).toEqual({
      text: "Title\nquoted\nnested",
      styles: [
        { start: 0, len: 5, st: TextStyle.Bold },
        { start: 0, len: 5, st: TextStyle.Big },
        { start: 6, len: 6, st: TextStyle.Indent, indentSize: 1 },
        { start: 13, len: 6, st: TextStyle.UnorderedList },
      ],
    });
  });

  it("treats 1-3 leading spaces as markdown padding for headings and lists", () => {
    expect(parseZalouserTextStyles(" # Title\n 1. item\n - bullet")).toEqual({
      text: "Title\nitem\nbullet",
      styles: [
        { start: 0, len: 5, st: TextStyle.Bold },
        { start: 0, len: 5, st: TextStyle.Big },
        { start: 6, len: 4, st: TextStyle.OrderedList },
        { start: 11, len: 6, st: TextStyle.UnorderedList },
      ],
    });
  });

  it("strips fenced code markers and preserves leading indentation with nbsp", () => {
    expect(parseZalouserTextStyles("```ts\n  const x = 1\n\treturn x\n```")).toEqual({
      text: "\u00A0\u00A0const x = 1\n\u00A0\u00A0\u00A0\u00A0return x",
      styles: [],
    });
  });

  it("treats tilde fences as literal code blocks", () => {
    expect(parseZalouserTextStyles("~~~bash\n*cmd*\n~~~")).toEqual({
      text: "*cmd*",
      styles: [],
    });
  });

  it("treats fences indented under list items as literal code blocks", () => {
    expect(parseZalouserTextStyles(" ```\n*cmd*\n ```")).toEqual({
      text: "*cmd*",
      styles: [],
    });
  });

  // Quoted fences: the quote prefix belongs to the fence, not the code.
  it("treats quoted backtick fences as literal code blocks", () => {
    expect(parseZalouserTextStyles("> ```js\n> *cmd*\n> ```")).toEqual({
      text: "*cmd*",
      styles: [],
    });
  });

  it("treats quoted tilde fences as literal code blocks", () => {
    expect(parseZalouserTextStyles("> ~~~\n> *cmd*\n> ~~~")).toEqual({
      text: "*cmd*",
      styles: [],
    });
  });

  it("preserves quote-prefixed lines inside normal fenced code blocks", () => {
    expect(parseZalouserTextStyles("```\n> prompt\n```")).toEqual({
      text: "> prompt",
      styles: [],
    });
  });

  it("does not treat quote-prefixed fence text inside code as a closing fence", () => {
    expect(parseZalouserTextStyles("```\n> ```\n*still code*\n```")).toEqual({
      text: "> ```\n*still code*",
      styles: [],
    });
  });

  it("treats indented blockquotes as quoted lines", () => {
    expect(parseZalouserTextStyles(" > quoted")).toEqual({
      text: "quoted",
      styles: [{ start: 0, len: 6, st: TextStyle.Indent, indentSize: 1 }],
    });
  });

  it("treats spaced nested blockquotes as deeper quoted lines", () => {
    expect(parseZalouserTextStyles("> > quoted")).toEqual({
      text: "quoted",
      styles: [{ start: 0, len: 6, st: TextStyle.Indent, indentSize: 2 }],
    });
  });

  it("treats indented quoted fences as literal code blocks", () => {
    expect(parseZalouserTextStyles(" > ```\n > *cmd*\n > ```")).toEqual({
      text: "*cmd*",
      styles: [],
    });
  });

  it("treats spaced nested quoted fences as literal code blocks", () => {
    expect(parseZalouserTextStyles("> > ```\n> > code\n> > ```")).toEqual({
      text: "code",
      styles: [],
    });
  });

  // Only the fence's own quote depth is stripped; deeper ">" survives.
  it("preserves inner quote markers inside quoted fenced code blocks", () => {
    expect(parseZalouserTextStyles("> ```\n>> prompt\n> ```")).toEqual({
      text: "> prompt",
      styles: [],
    });
  });

  it("keeps quote indentation on heading lines", () => {
    expect(parseZalouserTextStyles("> # Title")).toEqual({
      text: "Title",
      styles: [
        { start: 0, len: 5, st: TextStyle.Bold },
        { start: 0, len: 5, st: TextStyle.Big },
        { start: 0, len: 5, st: TextStyle.Indent, indentSize: 1 },
      ],
    });
  });

  it("keeps unmatched fences literal", () => {
    expect(parseZalouserTextStyles("```python")).toEqual({
      text: "```python",
      styles: [],
    });
  });

  it("keeps unclosed fenced blocks literal until eof", () => {
    expect(parseZalouserTextStyles("```python\n\\*not italic*\n_next_")).toEqual({
      text: "```python\n\\*not italic*\n_next_",
      styles: [],
    });
  });

  it("supports nested markdown and tag styles regardless of order", () => {
    expect(parseZalouserTextStyles("**{red}x{/red}** {red}**y**{/red}")).toEqual({
      text: "x y",
      styles: [
        { start: 0, len: 1, st: TextStyle.Bold },
        { start: 0, len: 1, st: TextStyle.Red },
        { start: 2, len: 1, st: TextStyle.Red },
        { start: 2, len: 1, st: TextStyle.Bold },
      ],
    });
  });

  it("treats small text tags as normal text", () => {
    expect(parseZalouserTextStyles("{small}tiny{/small}")).toEqual({
      text: "tiny",
      styles: [],
    });
  });

  it("keeps escaped markers literal", () => {
    expect(parseZalouserTextStyles("\\*literal\\* \\{underline}tag{/underline}")).toEqual({
      text: "*literal* {underline}tag{/underline}",
      styles: [],
    });
  });

  it("keeps indented code blocks literal", () => {
    expect(parseZalouserTextStyles("    *cmd*")).toEqual({
      text: "\u00A0\u00A0\u00A0\u00A0*cmd*",
      styles: [],
    });
  });
});
|
||||
537
extensions/zalouser/src/text-styles.ts
Normal file
537
extensions/zalouser/src/text-styles.ts
Normal file
@ -0,0 +1,537 @@
|
||||
import { TextStyle, type Style } from "./zca-client.js";
|
||||
|
||||
// Union of the zca-js text style string constants ("b", "i", "c_…", …).
type InlineStyle = (typeof TextStyle)[keyof typeof TextStyle];

// A style that applies to one whole output line (heading/quote/list/indent),
// resolved to absolute offsets only after all lines are emitted.
type LineStyle = {
  lineIndex: number;
  style: InlineStyle;
  indentSize?: number;
};

// A contiguous run of output text carrying a fixed set of inline styles.
type Segment = {
  text: string;
  styles: InlineStyle[];
};

// Declarative description of one inline markdown/tag construct.
type InlineMarker = {
  pattern: RegExp;
  // Text to emit in place of the full match.
  extractText: (match: RegExpExecArray) => string;
  // Styles the construct applies; absent means "no style" (e.g. escapes).
  resolveStyles?: (match: RegExpExecArray) => InlineStyle[];
  // Literal markers (code spans, escapes) are emitted verbatim, no recursion.
  literal?: boolean;
};

// A marker match chosen by findNextInlineMatch, with its tie-break priority.
type ResolvedInlineMatch = {
  match: RegExpExecArray;
  marker: InlineMarker;
  styles: InlineStyle[];
  text: string;
  priority: number;
};

// A parsed code-fence opener: fence character, run length, leading indent.
type FenceMarker = {
  char: "`" | "~";
  length: number;
  indent: number;
};

// An open fence plus the blockquote depth it was opened at (0 = unquoted).
type ActiveFence = FenceMarker & {
  quoteIndent: number;
};

// {tag}…{/tag} names mapped to styles; null means "strip the tag, no style"
// (e.g. {small}, which Zalo text keeps at normal size here).
const TAG_STYLE_MAP: Record<string, InlineStyle | null> = {
  red: TextStyle.Red,
  orange: TextStyle.Orange,
  yellow: TextStyle.Yellow,
  green: TextStyle.Green,
  small: null,
  big: TextStyle.Big,
  underline: TextStyle.Underline,
};

// Inline constructs in priority order (earlier wins ties at the same index).
// Code spans and backslash escapes come first so they shield their contents
// from the emphasis markers below.
const INLINE_MARKERS: InlineMarker[] = [
  {
    // Inline code span: kept verbatim including the backticks.
    pattern: /`([^`\n]+)`/g,
    extractText: (match) => match[0],
    literal: true,
  },
  {
    // Backslash escape: emit the escaped character itself.
    pattern: /\\([*_~#\\{}>+\-`])/g,
    extractText: (match) => match[1],
    literal: true,
  },
  {
    // {red}…{/red}-style tags; the closing tag must match the opener (\1).
    pattern: new RegExp(`\\{(${Object.keys(TAG_STYLE_MAP).join("|")})\\}(.+?)\\{/\\1\\}`, "g"),
    extractText: (match) => match[2],
    resolveStyles: (match) => {
      const style = TAG_STYLE_MAP[match[1]];
      return style ? [style] : [];
    },
  },
  {
    // ***bold italic*** — must precede ** and * below.
    pattern: /(?<!\*)\*\*\*(?=\S)([^\n]*?\S)(?<!\*)\*\*\*(?!\*)/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Bold, TextStyle.Italic],
  },
  {
    // **bold**
    pattern: /(?<!\*)\*\*(?![\s*])([^\n]*?\S)(?<!\*)\*\*(?!\*)/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Bold],
  },
  {
    // __bold__ (not inside words, per markdown underscore rules)
    pattern: /(?<![\w_])__(?![\s_])([^\n]*?\S)(?<!_)__(?![\w_])/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Bold],
  },
  {
    // ~~strikethrough~~
    pattern: /(?<!~)~~(?=\S)([^\n]*?\S)(?<!~)~~(?!~)/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.StrikeThrough],
  },
  {
    // *italic*
    pattern: /(?<!\*)\*(?![\s*])([^\n]*?\S)(?<!\*)\*(?!\*)/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Italic],
  },
  {
    // _italic_ (not inside words)
    pattern: /(?<![\w_])_(?![\s_])([^\n]*?\S)(?<!_)_(?![\w_])/g,
    extractText: (match) => match[1],
    resolveStyles: () => [TextStyle.Italic],
  },
];
||||
|
||||
/**
 * Convert markdown-flavoured `input` into plain text plus zca-js `Style`
 * ranges. Pipeline:
 *   1. Line pass: resolve fenced/indented code blocks, headings, quotes and
 *      lists; record per-line styles; escape literal characters inside code.
 *   2. Inline pass: parse emphasis/tag markers into styled segments.
 *   3. Restore escaped characters, shifting inline style offsets left to
 *      compensate for the sentinel placeholders.
 *   4. Convert recorded line styles into absolute offsets over the final text.
 */
export function parseZalouserTextStyles(input: string): { text: string; styles: Style[] } {
  const allStyles: Style[] = [];

  // Characters hidden during the line pass; entry i is represented in the
  // intermediate text as "\x01" + i + "\x02" and restored in step 3.
  const escapeMap: string[] = [];
  const lines = input.replace(/\r\n?/g, "\n").split("\n"); // normalize CRLF/CR to LF
  const lineStyles: LineStyle[] = [];
  const processedLines: string[] = [];
  let activeFence: ActiveFence | null = null; // fence we are currently inside, if any

  for (let lineIndex = 0; lineIndex < lines.length; lineIndex += 1) {
    const rawLine = lines[lineIndex];
    const { text: unquotedLine, indent: baseIndent } = stripQuotePrefix(rawLine);

    // Inside a fenced code block: emit lines literally until the fence closes.
    if (activeFence) {
      // For a quoted fence strip only up to the fence's own quote depth, so
      // deeper ">" markers inside the code survive verbatim.
      const codeLine =
        activeFence.quoteIndent > 0
          ? stripQuotePrefix(rawLine, activeFence.quoteIndent).text
          : rawLine;
      if (isClosingFence(codeLine, activeFence)) {
        activeFence = null;
        continue;
      }
      processedLines.push(
        escapeLiteralText(
          normalizeCodeBlockLeadingWhitespace(stripCodeFenceIndent(codeLine, activeFence.indent)),
          escapeMap,
        ),
      );
      continue;
    }

    let line = unquotedLine;
    const openingFence = resolveOpeningFence(rawLine);
    if (openingFence) {
      const fenceLine = openingFence.quoteIndent > 0 ? unquotedLine : rawLine;
      // An unmatched opener stays visible as literal text, and everything
      // after it is still treated as code until EOF; a matched opener is
      // swallowed entirely.
      if (!hasClosingFence(lines, lineIndex + 1, openingFence)) {
        processedLines.push(escapeLiteralText(fenceLine, escapeMap));
        activeFence = openingFence;
        continue;
      }
      activeFence = openingFence;
      continue;
    }

    // Index this source line will occupy in the joined output.
    const outputLineIndex = processedLines.length;
    if (isIndentedCodeBlockLine(line)) {
      // 4-space/tab indented code: literal, with quote depth kept as indent.
      if (baseIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: baseIndent,
        });
      }
      processedLines.push(escapeLiteralText(normalizeCodeBlockLeadingWhitespace(line), escapeMap));
      continue;
    }

    // Up to three leading spaces are markdown padding, not indentation.
    const { text: markdownLine, size: markdownPadding } = stripOptionalMarkdownPadding(line);

    const headingMatch = markdownLine.match(/^(#{1,4})\s(.*)$/);
    if (headingMatch) {
      const depth = headingMatch[1].length;
      lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.Bold });
      // Only level-1 headings get the enlarged font.
      if (depth === 1) {
        lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.Big });
      }
      if (baseIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: baseIndent,
        });
      }
      processedLines.push(headingMatch[2]);
      continue;
    }

    // Remaining leading whitespace maps to indent levels (see clampIndent).
    const indentMatch = markdownLine.match(/^(\s+)(.*)$/);
    let indentLevel = 0;
    let content = markdownLine;
    if (indentMatch) {
      indentLevel = clampIndent(indentMatch[1].length);
      content = indentMatch[2];
    }
    // Quote depth and whitespace indent combine, capped at Zalo's max of 5.
    const totalIndent = Math.min(5, baseIndent + indentLevel);

    // Task-list items ("- [ ] x") keep their checkbox text, indent only.
    if (/^[-*+]\s\[[ xX]\]\s/.test(content)) {
      if (totalIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: totalIndent,
        });
      }
      processedLines.push(content);
      continue;
    }

    const orderedListMatch = content.match(/^(\d+)\.\s(.*)$/);
    if (orderedListMatch) {
      if (totalIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: totalIndent,
        });
      }
      lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.OrderedList });
      processedLines.push(orderedListMatch[2]);
      continue;
    }

    const unorderedListMatch = content.match(/^[-*+]\s(.*)$/);
    if (unorderedListMatch) {
      if (totalIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: totalIndent,
        });
      }
      lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.UnorderedList });
      processedLines.push(unorderedListMatch[1]);
      continue;
    }

    // 1-3 space padding with no list/heading: keep the line as-is (padding
    // included) and only carry the quote depth as indent.
    if (markdownPadding > 0) {
      if (baseIndent > 0) {
        lineStyles.push({
          lineIndex: outputLineIndex,
          style: TextStyle.Indent,
          indentSize: baseIndent,
        });
      }
      processedLines.push(line);
      continue;
    }

    if (totalIndent > 0) {
      lineStyles.push({
        lineIndex: outputLineIndex,
        style: TextStyle.Indent,
        indentSize: totalIndent,
      });
      processedLines.push(content);
      continue;
    }

    processedLines.push(line);
  }

  // Step 2: inline parse across the whole (escaped) intermediate text.
  const segments = parseInlineSegments(processedLines.join("\n"));

  let plainText = "";
  for (const segment of segments) {
    const start = plainText.length;
    plainText += segment.text;
    for (const style of segment.styles) {
      allStyles.push({ start, len: segment.text.length, st: style } as Style);
    }
  }

  // Step 3: replace "\x01<n>\x02" sentinels with their original characters.
  // Each sentinel (3+ chars) shrinks to 1 char, so style offsets after a
  // sentinel must shift left by the accumulated length difference.
  if (escapeMap.length > 0) {
    const escapeRegex = /\x01(\d+)\x02/g;
    const shifts: Array<{ pos: number; delta: number }> = [];
    let cumulativeDelta = 0;

    for (const match of plainText.matchAll(escapeRegex)) {
      const escapeIndex = Number.parseInt(match[1], 10);
      cumulativeDelta += match[0].length - escapeMap[escapeIndex].length;
      shifts.push({ pos: (match.index ?? 0) + match[0].length, delta: cumulativeDelta });
    }

    for (const style of allStyles) {
      let startDelta = 0;
      let endDelta = 0;
      const end = style.start + style.len;
      // Apply the last shift at or before each boundary (shifts are sorted).
      for (const shift of shifts) {
        if (shift.pos <= style.start) {
          startDelta = shift.delta;
        }
        if (shift.pos <= end) {
          endDelta = shift.delta;
        }
      }
      style.start -= startDelta;
      style.len -= endDelta - startDelta;
    }

    plainText = plainText.replace(
      escapeRegex,
      (_match, index) => escapeMap[Number.parseInt(index, 10)],
    );
  }

  // Step 4: resolve recorded line styles into absolute offsets.
  const finalLines = plainText.split("\n");
  let offset = 0;
  for (let lineIndex = 0; lineIndex < finalLines.length; lineIndex += 1) {
    const lineLength = finalLines[lineIndex].length;
    // Empty lines get no styles (zero-length ranges would be meaningless).
    if (lineLength > 0) {
      for (const lineStyle of lineStyles) {
        if (lineStyle.lineIndex !== lineIndex) {
          continue;
        }

        if (lineStyle.style === TextStyle.Indent) {
          allStyles.push({
            start: offset,
            len: lineLength,
            st: TextStyle.Indent,
            indentSize: lineStyle.indentSize,
          });
        } else {
          allStyles.push({ start: offset, len: lineLength, st: lineStyle.style } as Style);
        }
      }
    }
    offset += lineLength + 1; // +1 for the "\n" separator
  }

  return { text: plainText, styles: allStyles };
}
|
||||
|
||||
function clampIndent(spaceCount: number): number {
|
||||
return Math.min(5, Math.max(1, Math.floor(spaceCount / 2)));
|
||||
}
|
||||
|
||||
function stripOptionalMarkdownPadding(line: string): { text: string; size: number } {
|
||||
const match = line.match(/^( {1,3})(?=\S)/);
|
||||
if (!match) {
|
||||
return { text: line, size: 0 };
|
||||
}
|
||||
return {
|
||||
text: line.slice(match[1].length),
|
||||
size: match[1].length,
|
||||
};
|
||||
}
|
||||
|
||||
function hasClosingFence(lines: string[], startIndex: number, fence: ActiveFence): boolean {
|
||||
for (let index = startIndex; index < lines.length; index += 1) {
|
||||
const candidate =
|
||||
fence.quoteIndent > 0 ? stripQuotePrefix(lines[index], fence.quoteIndent).text : lines[index];
|
||||
if (isClosingFence(candidate, fence)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
function resolveOpeningFence(line: string): ActiveFence | null {
|
||||
const directFence = parseFenceMarker(line);
|
||||
if (directFence) {
|
||||
return { ...directFence, quoteIndent: 0 };
|
||||
}
|
||||
|
||||
const quoted = stripQuotePrefix(line);
|
||||
if (quoted.indent === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const quotedFence = parseFenceMarker(quoted.text);
|
||||
if (!quotedFence) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
...quotedFence,
|
||||
quoteIndent: quoted.indent,
|
||||
};
|
||||
}
|
||||
|
||||
function stripQuotePrefix(
|
||||
line: string,
|
||||
maxDepth = Number.POSITIVE_INFINITY,
|
||||
): { text: string; indent: number } {
|
||||
let cursor = 0;
|
||||
while (cursor < line.length && cursor < 3 && line[cursor] === " ") {
|
||||
cursor += 1;
|
||||
}
|
||||
|
||||
let removedDepth = 0;
|
||||
let consumedCursor = cursor;
|
||||
while (removedDepth < maxDepth && consumedCursor < line.length && line[consumedCursor] === ">") {
|
||||
removedDepth += 1;
|
||||
consumedCursor += 1;
|
||||
if (line[consumedCursor] === " ") {
|
||||
consumedCursor += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (removedDepth === 0) {
|
||||
return { text: line, indent: 0 };
|
||||
}
|
||||
|
||||
return {
|
||||
text: line.slice(consumedCursor),
|
||||
indent: Math.min(5, removedDepth),
|
||||
};
|
||||
}
|
||||
|
||||
function parseFenceMarker(line: string): FenceMarker | null {
|
||||
const match = line.match(/^([ ]{0,3})(`{3,}|~{3,})(.*)$/);
|
||||
if (!match) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const marker = match[2];
|
||||
const char = marker[0];
|
||||
if (char !== "`" && char !== "~") {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
char,
|
||||
length: marker.length,
|
||||
indent: match[1].length,
|
||||
};
|
||||
}
|
||||
|
||||
function isClosingFence(line: string, fence: FenceMarker): boolean {
|
||||
const match = line.match(/^([ ]{0,3})(`{3,}|~{3,})[ \t]*$/);
|
||||
if (!match) {
|
||||
return false;
|
||||
}
|
||||
return match[2][0] === fence.char && match[2].length >= fence.length;
|
||||
}
|
||||
|
||||
function escapeLiteralText(input: string, escapeMap: string[]): string {
|
||||
return input.replace(/[\\*_~{}`]/g, (ch) => {
|
||||
const index = escapeMap.length;
|
||||
escapeMap.push(ch);
|
||||
return `\x01${index}\x02`;
|
||||
});
|
||||
}
|
||||
|
||||
/**
 * Recursively split `text` into styled segments by repeatedly taking the
 * earliest (ties broken by marker priority) inline marker match.
 * `inheritedStyles` are the styles of the enclosing construct, so nested
 * markers (e.g. bold inside a color tag) accumulate styles.
 */
function parseInlineSegments(text: string, inheritedStyles: InlineStyle[] = []): Segment[] {
  const segments: Segment[] = [];
  let cursor = 0;

  while (cursor < text.length) {
    const nextMatch = findNextInlineMatch(text, cursor);
    if (!nextMatch) {
      // No more markers: the remainder is plain text with inherited styles.
      pushSegment(segments, text.slice(cursor), inheritedStyles);
      break;
    }

    // Plain text between the cursor and the next marker.
    if (nextMatch.match.index > cursor) {
      pushSegment(segments, text.slice(cursor, nextMatch.match.index), inheritedStyles);
    }

    const combinedStyles = [...inheritedStyles, ...nextMatch.styles];
    if (nextMatch.marker.literal) {
      // Literal markers (code spans, escapes) are emitted verbatim.
      pushSegment(segments, nextMatch.text, combinedStyles);
    } else {
      // Styled markers may nest; recurse into the inner text.
      segments.push(...parseInlineSegments(nextMatch.text, combinedStyles));
    }

    cursor = nextMatch.match.index + nextMatch.match[0].length;
  }

  return segments;
}
|
||||
|
||||
function findNextInlineMatch(text: string, startIndex: number): ResolvedInlineMatch | null {
|
||||
let bestMatch: ResolvedInlineMatch | null = null;
|
||||
|
||||
for (const [priority, marker] of INLINE_MARKERS.entries()) {
|
||||
const regex = new RegExp(marker.pattern.source, marker.pattern.flags);
|
||||
regex.lastIndex = startIndex;
|
||||
const match = regex.exec(text);
|
||||
if (!match) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (
|
||||
bestMatch &&
|
||||
(match.index > bestMatch.match.index ||
|
||||
(match.index === bestMatch.match.index && priority > bestMatch.priority))
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
|
||||
bestMatch = {
|
||||
match,
|
||||
marker,
|
||||
text: marker.extractText(match),
|
||||
styles: marker.resolveStyles?.(match) ?? [],
|
||||
priority,
|
||||
};
|
||||
}
|
||||
|
||||
return bestMatch;
|
||||
}
|
||||
|
||||
function pushSegment(segments: Segment[], text: string, styles: InlineStyle[]): void {
|
||||
if (!text) {
|
||||
return;
|
||||
}
|
||||
|
||||
const lastSegment = segments.at(-1);
|
||||
if (lastSegment && sameStyles(lastSegment.styles, styles)) {
|
||||
lastSegment.text += text;
|
||||
return;
|
||||
}
|
||||
|
||||
segments.push({
|
||||
text,
|
||||
styles: [...styles],
|
||||
});
|
||||
}
|
||||
|
||||
function sameStyles(left: InlineStyle[], right: InlineStyle[]): boolean {
|
||||
return left.length === right.length && left.every((style, index) => style === right[index]);
|
||||
}
|
||||
|
||||
function normalizeCodeBlockLeadingWhitespace(line: string): string {
|
||||
return line.replace(/^[ \t]+/, (leadingWhitespace) =>
|
||||
leadingWhitespace.replace(/\t/g, "\u00A0\u00A0\u00A0\u00A0").replace(/ /g, "\u00A0"),
|
||||
);
|
||||
}
|
||||
|
||||
function isIndentedCodeBlockLine(line: string): boolean {
|
||||
return /^(?: {4,}|\t)/.test(line);
|
||||
}
|
||||
|
||||
function stripCodeFenceIndent(line: string, indent: number): string {
|
||||
let consumed = 0;
|
||||
let cursor = 0;
|
||||
|
||||
while (cursor < line.length && consumed < indent && line[cursor] === " ") {
|
||||
cursor += 1;
|
||||
consumed += 1;
|
||||
}
|
||||
|
||||
return line.slice(cursor);
|
||||
}
|
||||
@ -1,3 +1,5 @@
|
||||
import type { Style } from "./zca-client.js";
|
||||
|
||||
export type ZcaFriend = {
|
||||
userId: string;
|
||||
displayName: string;
|
||||
@ -59,6 +61,10 @@ export type ZaloSendOptions = {
|
||||
caption?: string;
|
||||
isGroup?: boolean;
|
||||
mediaLocalRoots?: readonly string[];
|
||||
textMode?: "markdown" | "plain";
|
||||
textChunkMode?: "length" | "newline";
|
||||
textChunkLimit?: number;
|
||||
textStyles?: Style[];
|
||||
};
|
||||
|
||||
export type ZaloSendResult = {
|
||||
|
||||
@ -20,6 +20,7 @@ import type {
|
||||
} from "./types.js";
|
||||
import {
|
||||
LoginQRCallbackEventType,
|
||||
TextStyle,
|
||||
ThreadType,
|
||||
Zalo,
|
||||
type API,
|
||||
@ -136,6 +137,39 @@ function toErrorMessage(error: unknown): string {
|
||||
return String(error);
|
||||
}
|
||||
|
||||
function clampTextStyles(
|
||||
text: string,
|
||||
styles?: ZaloSendOptions["textStyles"],
|
||||
): ZaloSendOptions["textStyles"] {
|
||||
if (!styles || styles.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
const maxLength = text.length;
|
||||
const clamped = styles
|
||||
.map((style) => {
|
||||
const start = Math.max(0, Math.min(style.start, maxLength));
|
||||
const end = Math.min(style.start + style.len, maxLength);
|
||||
if (end <= start) {
|
||||
return null;
|
||||
}
|
||||
if (style.st === TextStyle.Indent) {
|
||||
return {
|
||||
start,
|
||||
len: end - start,
|
||||
st: style.st,
|
||||
indentSize: style.indentSize,
|
||||
};
|
||||
}
|
||||
return {
|
||||
start,
|
||||
len: end - start,
|
||||
st: style.st,
|
||||
};
|
||||
})
|
||||
.filter((style): style is NonNullable<typeof style> => style !== null);
|
||||
return clamped.length > 0 ? clamped : undefined;
|
||||
}
|
||||
|
||||
function toNumberId(value: unknown): string {
|
||||
if (typeof value === "number" && Number.isFinite(value)) {
|
||||
return String(Math.trunc(value));
|
||||
@ -1018,11 +1052,16 @@ export async function sendZaloTextMessage(
|
||||
kind: media.kind,
|
||||
});
|
||||
const payloadText = (text || options.caption || "").slice(0, 2000);
|
||||
const textStyles = clampTextStyles(payloadText, options.textStyles);
|
||||
|
||||
if (media.kind === "audio") {
|
||||
let textMessageId: string | undefined;
|
||||
if (payloadText) {
|
||||
const textResponse = await api.sendMessage(payloadText, trimmedThreadId, type);
|
||||
const textResponse = await api.sendMessage(
|
||||
textStyles ? { msg: payloadText, styles: textStyles } : payloadText,
|
||||
trimmedThreadId,
|
||||
type,
|
||||
);
|
||||
textMessageId = extractSendMessageId(textResponse);
|
||||
}
|
||||
|
||||
@ -1055,6 +1094,7 @@ export async function sendZaloTextMessage(
|
||||
const response = await api.sendMessage(
|
||||
{
|
||||
msg: payloadText,
|
||||
...(textStyles ? { styles: textStyles } : {}),
|
||||
attachments: [
|
||||
{
|
||||
data: media.buffer,
|
||||
@ -1071,7 +1111,13 @@ export async function sendZaloTextMessage(
|
||||
return { ok: true, messageId: extractSendMessageId(response) };
|
||||
}
|
||||
|
||||
const response = await api.sendMessage(text.slice(0, 2000), trimmedThreadId, type);
|
||||
const payloadText = text.slice(0, 2000);
|
||||
const textStyles = clampTextStyles(payloadText, options.textStyles);
|
||||
const response = await api.sendMessage(
|
||||
textStyles ? { msg: payloadText, styles: textStyles } : payloadText,
|
||||
trimmedThreadId,
|
||||
type,
|
||||
);
|
||||
return { ok: true, messageId: extractSendMessageId(response) };
|
||||
} catch (error) {
|
||||
return { ok: false, error: toErrorMessage(error) };
|
||||
|
||||
@ -28,6 +28,39 @@ export const Reactions = ReactionsRuntime as Record<string, string> & {
|
||||
NONE: string;
|
||||
};
|
||||
|
||||
// Mirror zca-js sendMessage style constants locally because the package root
// typing surface does not consistently expose TextStyle/Style to tsgo.
export const TextStyle = {
  Bold: "b",
  Italic: "i",
  Underline: "u",
  StrikeThrough: "s",
  // Color styles use a "c_" + hex-color code.
  Red: "c_db342e",
  Orange: "c_f27806",
  Yellow: "c_f7b503",
  Green: "c_15a85f",
  // NOTE(review): "f_13"/"f_18" presumably encode font sizes — confirm against zca-js.
  Small: "f_13",
  Big: "f_18",
  UnorderedList: "lst_1",
  OrderedList: "lst_2",
  // NOTE(review): "$" appears to be a depth placeholder; the actual depth
  // travels in Style.indentSize — confirm against zca-js.
  Indent: "ind_$",
} as const;

// Union of the style string constants above.
type TextStyleValue = (typeof TextStyle)[keyof typeof TextStyle];

// A styled range over message text. Indent is the only variant that carries
// extra data (its depth), hence the discriminated union.
export type Style =
  | {
      start: number;
      len: number;
      st: Exclude<TextStyleValue, typeof TextStyle.Indent>;
    }
  | {
      start: number;
      len: number;
      st: typeof TextStyle.Indent;
      indentSize?: number;
    };
|
||||
|
||||
export type Credentials = {
|
||||
imei: string;
|
||||
cookie: unknown;
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "openclaw",
|
||||
"version": "2026.3.11-beta.1",
|
||||
"version": "2026.3.11",
|
||||
"description": "Multi-channel AI gateway with extensible messaging integrations",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/openclaw/openclaw#readme",
|
||||
|
||||
@ -274,6 +274,8 @@ describe("failover-error", () => {
|
||||
it("infers timeout from common node error codes", () => {
|
||||
expect(resolveFailoverReasonFromError({ code: "ETIMEDOUT" })).toBe("timeout");
|
||||
expect(resolveFailoverReasonFromError({ code: "ECONNRESET" })).toBe("timeout");
|
||||
expect(resolveFailoverReasonFromError({ code: "EHOSTDOWN" })).toBe("timeout");
|
||||
expect(resolveFailoverReasonFromError({ code: "EPIPE" })).toBe("timeout");
|
||||
});
|
||||
|
||||
it("infers timeout from abort/error stop-reason messages", () => {
|
||||
|
||||
@ -170,7 +170,9 @@ export function resolveFailoverReasonFromError(err: unknown): FailoverReason | n
|
||||
"ECONNREFUSED",
|
||||
"ENETUNREACH",
|
||||
"EHOSTUNREACH",
|
||||
"EHOSTDOWN",
|
||||
"ENETRESET",
|
||||
"EPIPE",
|
||||
"EAI_AGAIN",
|
||||
].includes(code)
|
||||
) {
|
||||
|
||||
@ -6,6 +6,7 @@ import {
|
||||
type ExistingProviderConfig,
|
||||
} from "./models-config.merge.js";
|
||||
import {
|
||||
enforceSourceManagedProviderSecrets,
|
||||
normalizeProviders,
|
||||
resolveImplicitProviders,
|
||||
type ProviderConfig,
|
||||
@ -86,6 +87,7 @@ async function resolveProvidersForMode(params: {
|
||||
|
||||
export async function planOpenClawModelsJson(params: {
|
||||
cfg: OpenClawConfig;
|
||||
sourceConfigForSecrets?: OpenClawConfig;
|
||||
agentDir: string;
|
||||
env: NodeJS.ProcessEnv;
|
||||
existingRaw: string;
|
||||
@ -106,6 +108,8 @@ export async function planOpenClawModelsJson(params: {
|
||||
agentDir,
|
||||
env,
|
||||
secretDefaults: cfg.secrets?.defaults,
|
||||
sourceProviders: params.sourceConfigForSecrets?.models?.providers,
|
||||
sourceSecretDefaults: params.sourceConfigForSecrets?.secrets?.defaults,
|
||||
secretRefManagedProviders,
|
||||
}) ?? providers;
|
||||
const mergedProviders = await resolveProvidersForMode({
|
||||
@ -115,7 +119,14 @@ export async function planOpenClawModelsJson(params: {
|
||||
secretRefManagedProviders,
|
||||
explicitBaseUrlProviders: resolveExplicitBaseUrlProviders(cfg.models),
|
||||
});
|
||||
const nextContents = `${JSON.stringify({ providers: mergedProviders }, null, 2)}\n`;
|
||||
const secretEnforcedProviders =
|
||||
enforceSourceManagedProviderSecrets({
|
||||
providers: mergedProviders,
|
||||
sourceProviders: params.sourceConfigForSecrets?.models?.providers,
|
||||
sourceSecretDefaults: params.sourceConfigForSecrets?.secrets?.defaults,
|
||||
secretRefManagedProviders,
|
||||
}) ?? mergedProviders;
|
||||
const nextContents = `${JSON.stringify({ providers: secretEnforcedProviders }, null, 2)}\n`;
|
||||
|
||||
if (params.existingRaw === nextContents) {
|
||||
return { action: "noop" };
|
||||
|
||||
@ -4,7 +4,10 @@ import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js";
|
||||
import { normalizeProviders } from "./models-config.providers.js";
|
||||
import {
|
||||
enforceSourceManagedProviderSecrets,
|
||||
normalizeProviders,
|
||||
} from "./models-config.providers.js";
|
||||
|
||||
describe("normalizeProviders", () => {
|
||||
it("trims provider keys so image models remain discoverable for custom providers", async () => {
|
||||
@ -136,4 +139,38 @@ describe("normalizeProviders", () => {
|
||||
await fs.rm(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("ignores non-object provider entries during source-managed enforcement", () => {
|
||||
const providers = {
|
||||
openai: null,
|
||||
moonshot: {
|
||||
baseUrl: "https://api.moonshot.ai/v1",
|
||||
api: "openai-completions",
|
||||
apiKey: "sk-runtime-moonshot", // pragma: allowlist secret
|
||||
models: [],
|
||||
},
|
||||
} as unknown as NonNullable<NonNullable<OpenClawConfig["models"]>["providers"]>;
|
||||
|
||||
const sourceProviders: NonNullable<NonNullable<OpenClawConfig["models"]>["providers"]> = {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
models: [],
|
||||
},
|
||||
moonshot: {
|
||||
baseUrl: "https://api.moonshot.ai/v1",
|
||||
api: "openai-completions",
|
||||
apiKey: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" }, // pragma: allowlist secret
|
||||
models: [],
|
||||
},
|
||||
};
|
||||
|
||||
const enforced = enforceSourceManagedProviderSecrets({
|
||||
providers,
|
||||
sourceProviders,
|
||||
});
|
||||
expect((enforced as Record<string, unknown>).openai).toBeNull();
|
||||
expect(enforced?.moonshot?.apiKey).toBe("MOONSHOT_API_KEY"); // pragma: allowlist secret
|
||||
});
|
||||
});
|
||||
|
||||
@ -4,6 +4,7 @@ import {
|
||||
DEFAULT_COPILOT_API_BASE_URL,
|
||||
resolveCopilotApiToken,
|
||||
} from "../providers/github-copilot-token.js";
|
||||
import { isRecord } from "../utils.js";
|
||||
import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js";
|
||||
import { ensureAuthProfileStore, listProfilesForProvider } from "./auth-profiles.js";
|
||||
import { discoverBedrockModels } from "./bedrock-discovery.js";
|
||||
@ -70,6 +71,11 @@ export { resolveOllamaApiBase } from "./models-config.providers.discovery.js";
|
||||
|
||||
type ModelsConfig = NonNullable<OpenClawConfig["models"]>;
|
||||
export type ProviderConfig = NonNullable<ModelsConfig["providers"]>[string];
|
||||
type SecretDefaults = {
|
||||
env?: string;
|
||||
file?: string;
|
||||
exec?: string;
|
||||
};
|
||||
|
||||
const ENV_VAR_NAME_RE = /^[A-Z_][A-Z0-9_]*$/;
|
||||
|
||||
@ -97,13 +103,7 @@ function resolveAwsSdkApiKeyVarName(env: NodeJS.ProcessEnv = process.env): strin
|
||||
|
||||
function normalizeHeaderValues(params: {
|
||||
headers: ProviderConfig["headers"] | undefined;
|
||||
secretDefaults:
|
||||
| {
|
||||
env?: string;
|
||||
file?: string;
|
||||
exec?: string;
|
||||
}
|
||||
| undefined;
|
||||
secretDefaults: SecretDefaults | undefined;
|
||||
}): { headers: ProviderConfig["headers"] | undefined; mutated: boolean } {
|
||||
const { headers } = params;
|
||||
if (!headers) {
|
||||
@ -276,15 +276,155 @@ function normalizeAntigravityProvider(provider: ProviderConfig): ProviderConfig
|
||||
return normalizeProviderModels(provider, normalizeAntigravityModelId);
|
||||
}
|
||||
|
||||
function normalizeSourceProviderLookup(
|
||||
providers: ModelsConfig["providers"] | undefined,
|
||||
): Record<string, ProviderConfig> {
|
||||
if (!providers) {
|
||||
return {};
|
||||
}
|
||||
const out: Record<string, ProviderConfig> = {};
|
||||
for (const [key, provider] of Object.entries(providers)) {
|
||||
const normalizedKey = key.trim();
|
||||
if (!normalizedKey || !isRecord(provider)) {
|
||||
continue;
|
||||
}
|
||||
out[normalizedKey] = provider;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
function resolveSourceManagedApiKeyMarker(params: {
|
||||
sourceProvider: ProviderConfig | undefined;
|
||||
sourceSecretDefaults: SecretDefaults | undefined;
|
||||
}): string | undefined {
|
||||
const sourceApiKeyRef = resolveSecretInputRef({
|
||||
value: params.sourceProvider?.apiKey,
|
||||
defaults: params.sourceSecretDefaults,
|
||||
}).ref;
|
||||
if (!sourceApiKeyRef || !sourceApiKeyRef.id.trim()) {
|
||||
return undefined;
|
||||
}
|
||||
return sourceApiKeyRef.source === "env"
|
||||
? sourceApiKeyRef.id.trim()
|
||||
: resolveNonEnvSecretRefApiKeyMarker(sourceApiKeyRef.source);
|
||||
}
|
||||
|
||||
function resolveSourceManagedHeaderMarkers(params: {
|
||||
sourceProvider: ProviderConfig | undefined;
|
||||
sourceSecretDefaults: SecretDefaults | undefined;
|
||||
}): Record<string, string> {
|
||||
const sourceHeaders = isRecord(params.sourceProvider?.headers)
|
||||
? (params.sourceProvider.headers as Record<string, unknown>)
|
||||
: undefined;
|
||||
if (!sourceHeaders) {
|
||||
return {};
|
||||
}
|
||||
const markers: Record<string, string> = {};
|
||||
for (const [headerName, headerValue] of Object.entries(sourceHeaders)) {
|
||||
const sourceHeaderRef = resolveSecretInputRef({
|
||||
value: headerValue,
|
||||
defaults: params.sourceSecretDefaults,
|
||||
}).ref;
|
||||
if (!sourceHeaderRef || !sourceHeaderRef.id.trim()) {
|
||||
continue;
|
||||
}
|
||||
markers[headerName] =
|
||||
sourceHeaderRef.source === "env"
|
||||
? resolveEnvSecretRefHeaderValueMarker(sourceHeaderRef.id)
|
||||
: resolveNonEnvSecretRefHeaderValueMarker(sourceHeaderRef.source);
|
||||
}
|
||||
return markers;
|
||||
}
|
||||
|
||||
export function enforceSourceManagedProviderSecrets(params: {
|
||||
providers: ModelsConfig["providers"];
|
||||
sourceProviders: ModelsConfig["providers"] | undefined;
|
||||
sourceSecretDefaults?: SecretDefaults;
|
||||
secretRefManagedProviders?: Set<string>;
|
||||
}): ModelsConfig["providers"] {
|
||||
const { providers } = params;
|
||||
if (!providers) {
|
||||
return providers;
|
||||
}
|
||||
const sourceProvidersByKey = normalizeSourceProviderLookup(params.sourceProviders);
|
||||
if (Object.keys(sourceProvidersByKey).length === 0) {
|
||||
return providers;
|
||||
}
|
||||
|
||||
let nextProviders: Record<string, ProviderConfig> | null = null;
|
||||
for (const [providerKey, provider] of Object.entries(providers)) {
|
||||
if (!isRecord(provider)) {
|
||||
continue;
|
||||
}
|
||||
const sourceProvider = sourceProvidersByKey[providerKey.trim()];
|
||||
if (!sourceProvider) {
|
||||
continue;
|
||||
}
|
||||
let nextProvider = provider;
|
||||
let providerMutated = false;
|
||||
|
||||
const sourceApiKeyMarker = resolveSourceManagedApiKeyMarker({
|
||||
sourceProvider,
|
||||
sourceSecretDefaults: params.sourceSecretDefaults,
|
||||
});
|
||||
if (sourceApiKeyMarker) {
|
||||
params.secretRefManagedProviders?.add(providerKey.trim());
|
||||
if (nextProvider.apiKey !== sourceApiKeyMarker) {
|
||||
providerMutated = true;
|
||||
nextProvider = {
|
||||
...nextProvider,
|
||||
apiKey: sourceApiKeyMarker,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
const sourceHeaderMarkers = resolveSourceManagedHeaderMarkers({
|
||||
sourceProvider,
|
||||
sourceSecretDefaults: params.sourceSecretDefaults,
|
||||
});
|
||||
if (Object.keys(sourceHeaderMarkers).length > 0) {
|
||||
const currentHeaders = isRecord(nextProvider.headers)
|
||||
? (nextProvider.headers as Record<string, unknown>)
|
||||
: undefined;
|
||||
const nextHeaders = {
|
||||
...(currentHeaders as Record<string, NonNullable<ProviderConfig["headers"]>[string]>),
|
||||
};
|
||||
let headersMutated = !currentHeaders;
|
||||
for (const [headerName, marker] of Object.entries(sourceHeaderMarkers)) {
|
||||
if (nextHeaders[headerName] === marker) {
|
||||
continue;
|
||||
}
|
||||
headersMutated = true;
|
||||
nextHeaders[headerName] = marker;
|
||||
}
|
||||
if (headersMutated) {
|
||||
providerMutated = true;
|
||||
nextProvider = {
|
||||
...nextProvider,
|
||||
headers: nextHeaders,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if (!providerMutated) {
|
||||
continue;
|
||||
}
|
||||
if (!nextProviders) {
|
||||
nextProviders = { ...providers };
|
||||
}
|
||||
nextProviders[providerKey] = nextProvider;
|
||||
}
|
||||
|
||||
return nextProviders ?? providers;
|
||||
}
|
||||
|
||||
export function normalizeProviders(params: {
|
||||
providers: ModelsConfig["providers"];
|
||||
agentDir: string;
|
||||
env?: NodeJS.ProcessEnv;
|
||||
secretDefaults?: {
|
||||
env?: string;
|
||||
file?: string;
|
||||
exec?: string;
|
||||
};
|
||||
secretDefaults?: SecretDefaults;
|
||||
sourceProviders?: ModelsConfig["providers"];
|
||||
sourceSecretDefaults?: SecretDefaults;
|
||||
secretRefManagedProviders?: Set<string>;
|
||||
}): ModelsConfig["providers"] {
|
||||
const { providers } = params;
|
||||
@ -434,7 +574,13 @@ export function normalizeProviders(params: {
|
||||
next[normalizedKey] = normalizedProvider;
|
||||
}
|
||||
|
||||
return mutated ? next : providers;
|
||||
const normalizedProviders = mutated ? next : providers;
|
||||
return enforceSourceManagedProviderSecrets({
|
||||
providers: normalizedProviders,
|
||||
sourceProviders: params.sourceProviders,
|
||||
sourceSecretDefaults: params.sourceSecretDefaults,
|
||||
secretRefManagedProviders: params.secretRefManagedProviders,
|
||||
});
|
||||
}
|
||||
|
||||
type ImplicitProviderParams = {
|
||||
|
||||
@ -209,4 +209,152 @@ describe("models-config runtime source snapshot", () => {
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const incompatibleCandidate: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(incompatibleCandidate);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: {
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
},
|
||||
"X-Tenant-Token": {
|
||||
source: "file",
|
||||
provider: "vault",
|
||||
id: "/providers/openai/tenantToken",
|
||||
},
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const incompatibleCandidate: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(incompatibleCandidate);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { headers?: Record<string, string> }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.headers?.Authorization).toBe(
|
||||
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
);
|
||||
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@ -42,15 +42,31 @@ async function writeModelsFileAtomic(targetPath: string, contents: string): Prom
|
||||
await fs.rename(tempPath, targetPath);
|
||||
}
|
||||
|
||||
function resolveModelsConfigInput(config?: OpenClawConfig): OpenClawConfig {
|
||||
function resolveModelsConfigInput(config?: OpenClawConfig): {
|
||||
config: OpenClawConfig;
|
||||
sourceConfigForSecrets: OpenClawConfig;
|
||||
} {
|
||||
const runtimeSource = getRuntimeConfigSourceSnapshot();
|
||||
if (!config) {
|
||||
return runtimeSource ?? loadConfig();
|
||||
const loaded = loadConfig();
|
||||
return {
|
||||
config: runtimeSource ?? loaded,
|
||||
sourceConfigForSecrets: runtimeSource ?? loaded,
|
||||
};
|
||||
}
|
||||
if (!runtimeSource) {
|
||||
return config;
|
||||
return {
|
||||
config,
|
||||
sourceConfigForSecrets: config,
|
||||
};
|
||||
}
|
||||
return projectConfigOntoRuntimeSourceSnapshot(config);
|
||||
const projected = projectConfigOntoRuntimeSourceSnapshot(config);
|
||||
return {
|
||||
config: projected,
|
||||
// If projection is skipped (for example incompatible top-level shape),
|
||||
// keep managed secret persistence anchored to the active source snapshot.
|
||||
sourceConfigForSecrets: projected === config ? runtimeSource : projected,
|
||||
};
|
||||
}
|
||||
|
||||
async function withModelsJsonWriteLock<T>(targetPath: string, run: () => Promise<T>): Promise<T> {
|
||||
@ -76,7 +92,8 @@ export async function ensureOpenClawModelsJson(
|
||||
config?: OpenClawConfig,
|
||||
agentDirOverride?: string,
|
||||
): Promise<{ agentDir: string; wrote: boolean }> {
|
||||
const cfg = resolveModelsConfigInput(config);
|
||||
const resolved = resolveModelsConfigInput(config);
|
||||
const cfg = resolved.config;
|
||||
const agentDir = agentDirOverride?.trim() ? agentDirOverride.trim() : resolveOpenClawAgentDir();
|
||||
const targetPath = path.join(agentDir, "models.json");
|
||||
|
||||
@ -87,6 +104,7 @@ export async function ensureOpenClawModelsJson(
|
||||
const existingModelsFile = await readExistingModelsFile(targetPath);
|
||||
const plan = await planOpenClawModelsJson({
|
||||
cfg,
|
||||
sourceConfigForSecrets: resolved.sourceConfigForSecrets,
|
||||
agentDir,
|
||||
env,
|
||||
existingRaw: existingModelsFile.raw,
|
||||
|
||||
@ -2,6 +2,22 @@ import { describe, expect, it, vi } from "vitest";
|
||||
|
||||
const loadSessionStoreMock = vi.fn();
|
||||
const updateSessionStoreMock = vi.fn();
|
||||
const callGatewayMock = vi.fn();
|
||||
|
||||
const createMockConfig = () => ({
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
},
|
||||
},
|
||||
tools: {
|
||||
agentToAgent: { enabled: false },
|
||||
},
|
||||
});
|
||||
|
||||
let mockConfig: Record<string, unknown> = createMockConfig();
|
||||
|
||||
vi.mock("../config/sessions.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/sessions.js")>();
|
||||
@ -22,19 +38,15 @@ vi.mock("../config/sessions.js", async (importOriginal) => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../gateway/call.js", () => ({
|
||||
callGateway: (opts: unknown) => callGatewayMock(opts),
|
||||
}));
|
||||
|
||||
vi.mock("../config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/config.js")>();
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: () => ({
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
},
|
||||
},
|
||||
}),
|
||||
loadConfig: () => mockConfig,
|
||||
};
|
||||
});
|
||||
|
||||
@ -82,13 +94,17 @@ import { createOpenClawTools } from "./openclaw-tools.js";
|
||||
function resetSessionStore(store: Record<string, unknown>) {
|
||||
loadSessionStoreMock.mockClear();
|
||||
updateSessionStoreMock.mockClear();
|
||||
callGatewayMock.mockClear();
|
||||
loadSessionStoreMock.mockReturnValue(store);
|
||||
callGatewayMock.mockResolvedValue({});
|
||||
mockConfig = createMockConfig();
|
||||
}
|
||||
|
||||
function getSessionStatusTool(agentSessionKey = "main") {
|
||||
const tool = createOpenClawTools({ agentSessionKey }).find(
|
||||
(candidate) => candidate.name === "session_status",
|
||||
);
|
||||
function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) {
|
||||
const tool = createOpenClawTools({
|
||||
agentSessionKey,
|
||||
sandboxed: options?.sandboxed,
|
||||
}).find((candidate) => candidate.name === "session_status");
|
||||
expect(tool).toBeDefined();
|
||||
if (!tool) {
|
||||
throw new Error("missing session_status tool");
|
||||
@ -176,6 +192,153 @@ describe("session_status tool", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("blocks sandboxed child session_status access outside its tree before store lookup", async () => {
|
||||
resetSessionStore({
|
||||
"agent:main:subagent:child": {
|
||||
sessionId: "s-child",
|
||||
updatedAt: 20,
|
||||
},
|
||||
"agent:main:main": {
|
||||
sessionId: "s-parent",
|
||||
updatedAt: 10,
|
||||
},
|
||||
});
|
||||
mockConfig = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
tools: {
|
||||
sessions: { visibility: "all" },
|
||||
agentToAgent: { enabled: true, allow: ["*"] },
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
sandbox: { sessionToolsVisibility: "spawned" },
|
||||
},
|
||||
},
|
||||
};
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.list") {
|
||||
return { sessions: [] };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = getSessionStatusTool("agent:main:subagent:child", {
|
||||
sandboxed: true,
|
||||
});
|
||||
const expectedError = "Session status visibility is restricted to the current session tree";
|
||||
|
||||
await expect(
|
||||
tool.execute("call6", {
|
||||
sessionKey: "agent:main:main",
|
||||
model: "anthropic/claude-sonnet-4-5",
|
||||
}),
|
||||
).rejects.toThrow(expectedError);
|
||||
|
||||
await expect(
|
||||
tool.execute("call7", {
|
||||
sessionKey: "agent:main:subagent:missing",
|
||||
}),
|
||||
).rejects.toThrow(expectedError);
|
||||
|
||||
expect(loadSessionStoreMock).not.toHaveBeenCalled();
|
||||
expect(updateSessionStoreMock).not.toHaveBeenCalled();
|
||||
expect(callGatewayMock).toHaveBeenCalledTimes(2);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "agent:main:subagent:child",
|
||||
},
|
||||
});
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "agent:main:subagent:child",
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps legacy main requester keys for sandboxed session tree checks", async () => {
|
||||
resetSessionStore({
|
||||
"agent:main:main": {
|
||||
sessionId: "s-main",
|
||||
updatedAt: 10,
|
||||
},
|
||||
"agent:main:subagent:child": {
|
||||
sessionId: "s-child",
|
||||
updatedAt: 20,
|
||||
},
|
||||
});
|
||||
mockConfig = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
tools: {
|
||||
sessions: { visibility: "all" },
|
||||
agentToAgent: { enabled: true, allow: ["*"] },
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
sandbox: { sessionToolsVisibility: "spawned" },
|
||||
},
|
||||
},
|
||||
};
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.list") {
|
||||
return {
|
||||
sessions:
|
||||
request.params?.spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
|
||||
};
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = getSessionStatusTool("main", {
|
||||
sandboxed: true,
|
||||
});
|
||||
|
||||
const mainResult = await tool.execute("call8", {});
|
||||
const mainDetails = mainResult.details as { ok?: boolean; sessionKey?: string };
|
||||
expect(mainDetails.ok).toBe(true);
|
||||
expect(mainDetails.sessionKey).toBe("agent:main:main");
|
||||
|
||||
const childResult = await tool.execute("call9", {
|
||||
sessionKey: "agent:main:subagent:child",
|
||||
});
|
||||
const childDetails = childResult.details as { ok?: boolean; sessionKey?: string };
|
||||
expect(childDetails.ok).toBe(true);
|
||||
expect(childDetails.sessionKey).toBe("agent:main:subagent:child");
|
||||
|
||||
expect(callGatewayMock).toHaveBeenCalledTimes(2);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "main",
|
||||
},
|
||||
});
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "main",
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("scopes bare session keys to the requester agent", async () => {
|
||||
loadSessionStoreMock.mockClear();
|
||||
updateSessionStoreMock.mockClear();
|
||||
|
||||
@ -85,7 +85,10 @@ describe("sessions_spawn depth + child limits", () => {
|
||||
});
|
||||
|
||||
it("rejects spawning when caller depth reaches maxSpawnDepth", async () => {
|
||||
const tool = createSessionsSpawnTool({ agentSessionKey: "agent:main:subagent:parent" });
|
||||
const tool = createSessionsSpawnTool({
|
||||
agentSessionKey: "agent:main:subagent:parent",
|
||||
workspaceDir: "/parent/workspace",
|
||||
});
|
||||
const result = await tool.execute("call-depth-reject", { task: "hello" });
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
@ -109,8 +112,13 @@ describe("sessions_spawn depth + child limits", () => {
|
||||
const calls = callGatewayMock.mock.calls.map(
|
||||
(call) => call[0] as { method?: string; params?: Record<string, unknown> },
|
||||
);
|
||||
const agentCall = calls.find((entry) => entry.method === "agent");
|
||||
expect(agentCall?.params?.spawnedBy).toBe("agent:main:subagent:parent");
|
||||
const spawnedByPatch = calls.find(
|
||||
(entry) =>
|
||||
entry.method === "sessions.patch" &&
|
||||
entry.params?.spawnedBy === "agent:main:subagent:parent",
|
||||
);
|
||||
expect(spawnedByPatch?.params?.key).toMatch(/^agent:main:subagent:/);
|
||||
expect(typeof spawnedByPatch?.params?.spawnedWorkspaceDir).toBe("string");
|
||||
|
||||
const spawnDepthPatch = calls.find(
|
||||
(entry) => entry.method === "sessions.patch" && entry.params?.spawnDepth === 2,
|
||||
|
||||
@ -200,6 +200,7 @@ export function createOpenClawTools(
|
||||
createSessionStatusTool({
|
||||
agentSessionKey: options?.agentSessionKey,
|
||||
config: options?.config,
|
||||
sandboxed: options?.sandboxed,
|
||||
}),
|
||||
...(webSearchTool ? [webSearchTool] : []),
|
||||
...(webFetchTool ? [webFetchTool] : []),
|
||||
|
||||
@ -535,6 +535,23 @@ describe("isFailoverErrorMessage", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("matches network errno codes in serialized error messages", () => {
|
||||
const samples = [
|
||||
"Error: connect ETIMEDOUT 10.0.0.1:443",
|
||||
"Error: connect ESOCKETTIMEDOUT 10.0.0.1:443",
|
||||
"Error: connect EHOSTUNREACH 10.0.0.1:443",
|
||||
"Error: connect ENETUNREACH 10.0.0.1:443",
|
||||
"Error: write EPIPE",
|
||||
"Error: read ENETRESET",
|
||||
"Error: connect EHOSTDOWN 192.168.1.1:443",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isTimeoutErrorMessage(sample)).toBe(true);
|
||||
expect(classifyFailoverReason(sample)).toBe("timeout");
|
||||
expect(isFailoverErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("does not classify MALFORMED_FUNCTION_CALL as timeout", () => {
|
||||
const sample = "Unhandled stop reason: MALFORMED_FUNCTION_CALL";
|
||||
expect(isTimeoutErrorMessage(sample)).toBe(false);
|
||||
|
||||
@ -37,6 +37,13 @@ const ERROR_PATTERNS = {
|
||||
"fetch failed",
|
||||
"socket hang up",
|
||||
/\beconn(?:refused|reset|aborted)\b/i,
|
||||
/\benetunreach\b/i,
|
||||
/\behostunreach\b/i,
|
||||
/\behostdown\b/i,
|
||||
/\benetreset\b/i,
|
||||
/\betimedout\b/i,
|
||||
/\besockettimedout\b/i,
|
||||
/\bepipe\b/i,
|
||||
/\benotfound\b/i,
|
||||
/\beai_again\b/i,
|
||||
/without sending (?:any )?chunks?/i,
|
||||
|
||||
@ -13,6 +13,7 @@ import {
|
||||
shouldInjectOllamaCompatNumCtx,
|
||||
decodeHtmlEntitiesInObject,
|
||||
wrapOllamaCompatNumCtx,
|
||||
wrapStreamFnRepairMalformedToolCallArguments,
|
||||
wrapStreamFnTrimToolCallNames,
|
||||
} from "./attempt.js";
|
||||
|
||||
@ -430,6 +431,182 @@ describe("wrapStreamFnTrimToolCallNames", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("wrapStreamFnRepairMalformedToolCallArguments", () => {
|
||||
function createFakeStream(params: { events: unknown[]; resultMessage: unknown }): {
|
||||
result: () => Promise<unknown>;
|
||||
[Symbol.asyncIterator]: () => AsyncIterator<unknown>;
|
||||
} {
|
||||
return {
|
||||
async result() {
|
||||
return params.resultMessage;
|
||||
},
|
||||
[Symbol.asyncIterator]() {
|
||||
return (async function* () {
|
||||
for (const event of params.events) {
|
||||
yield event;
|
||||
}
|
||||
})();
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function invokeWrappedStream(baseFn: (...args: never[]) => unknown) {
|
||||
const wrappedFn = wrapStreamFnRepairMalformedToolCallArguments(baseFn as never);
|
||||
return await wrappedFn({} as never, {} as never, {} as never);
|
||||
}
|
||||
|
||||
it("repairs anthropic-compatible tool arguments when trailing junk follows valid JSON", async () => {
|
||||
const partialToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const streamedToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const endMessageToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const finalToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const partialMessage = { role: "assistant", content: [partialToolCall] };
|
||||
const endMessage = { role: "assistant", content: [endMessageToolCall] };
|
||||
const finalMessage = { role: "assistant", content: [finalToolCall] };
|
||||
const baseFn = vi.fn(() =>
|
||||
createFakeStream({
|
||||
events: [
|
||||
{
|
||||
type: "toolcall_delta",
|
||||
contentIndex: 0,
|
||||
delta: '{"path":"/tmp/report.txt"}',
|
||||
partial: partialMessage,
|
||||
},
|
||||
{
|
||||
type: "toolcall_delta",
|
||||
contentIndex: 0,
|
||||
delta: "xx",
|
||||
partial: partialMessage,
|
||||
},
|
||||
{
|
||||
type: "toolcall_end",
|
||||
contentIndex: 0,
|
||||
toolCall: streamedToolCall,
|
||||
partial: partialMessage,
|
||||
message: endMessage,
|
||||
},
|
||||
],
|
||||
resultMessage: finalMessage,
|
||||
}),
|
||||
);
|
||||
|
||||
const stream = await invokeWrappedStream(baseFn);
|
||||
for await (const _item of stream) {
|
||||
// drain
|
||||
}
|
||||
const result = await stream.result();
|
||||
|
||||
expect(partialToolCall.arguments).toEqual({ path: "/tmp/report.txt" });
|
||||
expect(streamedToolCall.arguments).toEqual({ path: "/tmp/report.txt" });
|
||||
expect(endMessageToolCall.arguments).toEqual({ path: "/tmp/report.txt" });
|
||||
expect(finalToolCall.arguments).toEqual({ path: "/tmp/report.txt" });
|
||||
expect(result).toBe(finalMessage);
|
||||
});
|
||||
|
||||
it("keeps incomplete partial JSON unchanged until a complete object exists", async () => {
|
||||
const partialToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const partialMessage = { role: "assistant", content: [partialToolCall] };
|
||||
const baseFn = vi.fn(() =>
|
||||
createFakeStream({
|
||||
events: [
|
||||
{
|
||||
type: "toolcall_delta",
|
||||
contentIndex: 0,
|
||||
delta: '{"path":"/tmp',
|
||||
partial: partialMessage,
|
||||
},
|
||||
],
|
||||
resultMessage: { role: "assistant", content: [partialToolCall] },
|
||||
}),
|
||||
);
|
||||
|
||||
const stream = await invokeWrappedStream(baseFn);
|
||||
for await (const _item of stream) {
|
||||
// drain
|
||||
}
|
||||
|
||||
expect(partialToolCall.arguments).toEqual({});
|
||||
});
|
||||
|
||||
it("does not repair tool arguments when trailing junk exceeds the Kimi-specific allowance", async () => {
|
||||
const partialToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const streamedToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const partialMessage = { role: "assistant", content: [partialToolCall] };
|
||||
const baseFn = vi.fn(() =>
|
||||
createFakeStream({
|
||||
events: [
|
||||
{
|
||||
type: "toolcall_delta",
|
||||
contentIndex: 0,
|
||||
delta: '{"path":"/tmp/report.txt"}oops',
|
||||
partial: partialMessage,
|
||||
},
|
||||
{
|
||||
type: "toolcall_end",
|
||||
contentIndex: 0,
|
||||
toolCall: streamedToolCall,
|
||||
partial: partialMessage,
|
||||
},
|
||||
],
|
||||
resultMessage: { role: "assistant", content: [partialToolCall] },
|
||||
}),
|
||||
);
|
||||
|
||||
const stream = await invokeWrappedStream(baseFn);
|
||||
for await (const _item of stream) {
|
||||
// drain
|
||||
}
|
||||
|
||||
expect(partialToolCall.arguments).toEqual({});
|
||||
expect(streamedToolCall.arguments).toEqual({});
|
||||
});
|
||||
|
||||
it("clears a cached repair when later deltas make the trailing suffix invalid", async () => {
|
||||
const partialToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const streamedToolCall = { type: "toolCall", name: "read", arguments: {} };
|
||||
const partialMessage = { role: "assistant", content: [partialToolCall] };
|
||||
const baseFn = vi.fn(() =>
|
||||
createFakeStream({
|
||||
events: [
|
||||
{
|
||||
type: "toolcall_delta",
|
||||
contentIndex: 0,
|
||||
delta: '{"path":"/tmp/report.txt"}',
|
||||
partial: partialMessage,
|
||||
},
|
||||
{
|
||||
type: "toolcall_delta",
|
||||
contentIndex: 0,
|
||||
delta: "x",
|
||||
partial: partialMessage,
|
||||
},
|
||||
{
|
||||
type: "toolcall_delta",
|
||||
contentIndex: 0,
|
||||
delta: "yzq",
|
||||
partial: partialMessage,
|
||||
},
|
||||
{
|
||||
type: "toolcall_end",
|
||||
contentIndex: 0,
|
||||
toolCall: streamedToolCall,
|
||||
partial: partialMessage,
|
||||
},
|
||||
],
|
||||
resultMessage: { role: "assistant", content: [partialToolCall] },
|
||||
}),
|
||||
);
|
||||
|
||||
const stream = await invokeWrappedStream(baseFn);
|
||||
for await (const _item of stream) {
|
||||
// drain
|
||||
}
|
||||
|
||||
expect(partialToolCall.arguments).toEqual({});
|
||||
expect(streamedToolCall.arguments).toEqual({});
|
||||
});
|
||||
});
|
||||
|
||||
describe("isOllamaCompatProvider", () => {
|
||||
it("detects native ollama provider id", () => {
|
||||
expect(
|
||||
|
||||
@ -436,6 +436,281 @@ export function wrapStreamFnTrimToolCallNames(
|
||||
};
|
||||
}
|
||||
|
||||
function extractBalancedJsonPrefix(raw: string): string | null {
|
||||
let start = 0;
|
||||
while (start < raw.length && /\s/.test(raw[start] ?? "")) {
|
||||
start += 1;
|
||||
}
|
||||
const startChar = raw[start];
|
||||
if (startChar !== "{" && startChar !== "[") {
|
||||
return null;
|
||||
}
|
||||
|
||||
let depth = 0;
|
||||
let inString = false;
|
||||
let escaped = false;
|
||||
for (let i = start; i < raw.length; i += 1) {
|
||||
const char = raw[i];
|
||||
if (char === undefined) {
|
||||
break;
|
||||
}
|
||||
if (inString) {
|
||||
if (escaped) {
|
||||
escaped = false;
|
||||
} else if (char === "\\") {
|
||||
escaped = true;
|
||||
} else if (char === '"') {
|
||||
inString = false;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (char === '"') {
|
||||
inString = true;
|
||||
continue;
|
||||
}
|
||||
if (char === "{" || char === "[") {
|
||||
depth += 1;
|
||||
continue;
|
||||
}
|
||||
if (char === "}" || char === "]") {
|
||||
depth -= 1;
|
||||
if (depth === 0) {
|
||||
return raw.slice(start, i + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
const MAX_TOOLCALL_REPAIR_BUFFER_CHARS = 64_000;
|
||||
const MAX_TOOLCALL_REPAIR_TRAILING_CHARS = 3;
|
||||
const TOOLCALL_REPAIR_ALLOWED_TRAILING_RE = /^[^\s{}[\]":,\\]{1,3}$/;
|
||||
|
||||
function shouldAttemptMalformedToolCallRepair(partialJson: string, delta: string): boolean {
|
||||
if (/[}\]]/.test(delta)) {
|
||||
return true;
|
||||
}
|
||||
const trimmedDelta = delta.trim();
|
||||
return (
|
||||
trimmedDelta.length > 0 &&
|
||||
trimmedDelta.length <= MAX_TOOLCALL_REPAIR_TRAILING_CHARS &&
|
||||
/[}\]]/.test(partialJson)
|
||||
);
|
||||
}
|
||||
|
||||
type ToolCallArgumentRepair = {
|
||||
args: Record<string, unknown>;
|
||||
trailingSuffix: string;
|
||||
};
|
||||
|
||||
function tryParseMalformedToolCallArguments(raw: string): ToolCallArgumentRepair | undefined {
|
||||
if (!raw.trim()) {
|
||||
return undefined;
|
||||
}
|
||||
try {
|
||||
JSON.parse(raw);
|
||||
return undefined;
|
||||
} catch {
|
||||
const jsonPrefix = extractBalancedJsonPrefix(raw);
|
||||
if (!jsonPrefix) {
|
||||
return undefined;
|
||||
}
|
||||
const suffix = raw.slice(raw.indexOf(jsonPrefix) + jsonPrefix.length).trim();
|
||||
if (
|
||||
suffix.length === 0 ||
|
||||
suffix.length > MAX_TOOLCALL_REPAIR_TRAILING_CHARS ||
|
||||
!TOOLCALL_REPAIR_ALLOWED_TRAILING_RE.test(suffix)
|
||||
) {
|
||||
return undefined;
|
||||
}
|
||||
try {
|
||||
const parsed = JSON.parse(jsonPrefix) as unknown;
|
||||
return parsed && typeof parsed === "object" && !Array.isArray(parsed)
|
||||
? { args: parsed as Record<string, unknown>, trailingSuffix: suffix }
|
||||
: undefined;
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function repairToolCallArgumentsInMessage(
|
||||
message: unknown,
|
||||
contentIndex: number,
|
||||
repairedArgs: Record<string, unknown>,
|
||||
): void {
|
||||
if (!message || typeof message !== "object") {
|
||||
return;
|
||||
}
|
||||
const content = (message as { content?: unknown }).content;
|
||||
if (!Array.isArray(content)) {
|
||||
return;
|
||||
}
|
||||
const block = content[contentIndex];
|
||||
if (!block || typeof block !== "object") {
|
||||
return;
|
||||
}
|
||||
const typedBlock = block as { type?: unknown; arguments?: unknown };
|
||||
if (!isToolCallBlockType(typedBlock.type)) {
|
||||
return;
|
||||
}
|
||||
typedBlock.arguments = repairedArgs;
|
||||
}
|
||||
|
||||
function clearToolCallArgumentsInMessage(message: unknown, contentIndex: number): void {
|
||||
if (!message || typeof message !== "object") {
|
||||
return;
|
||||
}
|
||||
const content = (message as { content?: unknown }).content;
|
||||
if (!Array.isArray(content)) {
|
||||
return;
|
||||
}
|
||||
const block = content[contentIndex];
|
||||
if (!block || typeof block !== "object") {
|
||||
return;
|
||||
}
|
||||
const typedBlock = block as { type?: unknown; arguments?: unknown };
|
||||
if (!isToolCallBlockType(typedBlock.type)) {
|
||||
return;
|
||||
}
|
||||
typedBlock.arguments = {};
|
||||
}
|
||||
|
||||
function repairMalformedToolCallArgumentsInMessage(
|
||||
message: unknown,
|
||||
repairedArgsByIndex: Map<number, Record<string, unknown>>,
|
||||
): void {
|
||||
if (!message || typeof message !== "object") {
|
||||
return;
|
||||
}
|
||||
const content = (message as { content?: unknown }).content;
|
||||
if (!Array.isArray(content)) {
|
||||
return;
|
||||
}
|
||||
for (const [index, repairedArgs] of repairedArgsByIndex.entries()) {
|
||||
repairToolCallArgumentsInMessage(message, index, repairedArgs);
|
||||
}
|
||||
}
|
||||
|
||||
function wrapStreamRepairMalformedToolCallArguments(
|
||||
stream: ReturnType<typeof streamSimple>,
|
||||
): ReturnType<typeof streamSimple> {
|
||||
const partialJsonByIndex = new Map<number, string>();
|
||||
const repairedArgsByIndex = new Map<number, Record<string, unknown>>();
|
||||
const disabledIndices = new Set<number>();
|
||||
const loggedRepairIndices = new Set<number>();
|
||||
const originalResult = stream.result.bind(stream);
|
||||
stream.result = async () => {
|
||||
const message = await originalResult();
|
||||
repairMalformedToolCallArgumentsInMessage(message, repairedArgsByIndex);
|
||||
partialJsonByIndex.clear();
|
||||
repairedArgsByIndex.clear();
|
||||
disabledIndices.clear();
|
||||
loggedRepairIndices.clear();
|
||||
return message;
|
||||
};
|
||||
|
||||
const originalAsyncIterator = stream[Symbol.asyncIterator].bind(stream);
|
||||
(stream as { [Symbol.asyncIterator]: typeof originalAsyncIterator })[Symbol.asyncIterator] =
|
||||
function () {
|
||||
const iterator = originalAsyncIterator();
|
||||
return {
|
||||
async next() {
|
||||
const result = await iterator.next();
|
||||
if (!result.done && result.value && typeof result.value === "object") {
|
||||
const event = result.value as {
|
||||
type?: unknown;
|
||||
contentIndex?: unknown;
|
||||
delta?: unknown;
|
||||
partial?: unknown;
|
||||
message?: unknown;
|
||||
toolCall?: unknown;
|
||||
};
|
||||
if (
|
||||
typeof event.contentIndex === "number" &&
|
||||
Number.isInteger(event.contentIndex) &&
|
||||
event.type === "toolcall_delta" &&
|
||||
typeof event.delta === "string"
|
||||
) {
|
||||
if (disabledIndices.has(event.contentIndex)) {
|
||||
return result;
|
||||
}
|
||||
const nextPartialJson =
|
||||
(partialJsonByIndex.get(event.contentIndex) ?? "") + event.delta;
|
||||
if (nextPartialJson.length > MAX_TOOLCALL_REPAIR_BUFFER_CHARS) {
|
||||
partialJsonByIndex.delete(event.contentIndex);
|
||||
repairedArgsByIndex.delete(event.contentIndex);
|
||||
disabledIndices.add(event.contentIndex);
|
||||
return result;
|
||||
}
|
||||
partialJsonByIndex.set(event.contentIndex, nextPartialJson);
|
||||
if (shouldAttemptMalformedToolCallRepair(nextPartialJson, event.delta)) {
|
||||
const repair = tryParseMalformedToolCallArguments(nextPartialJson);
|
||||
if (repair) {
|
||||
repairedArgsByIndex.set(event.contentIndex, repair.args);
|
||||
repairToolCallArgumentsInMessage(event.partial, event.contentIndex, repair.args);
|
||||
repairToolCallArgumentsInMessage(event.message, event.contentIndex, repair.args);
|
||||
if (!loggedRepairIndices.has(event.contentIndex)) {
|
||||
loggedRepairIndices.add(event.contentIndex);
|
||||
log.warn(
|
||||
`repairing kimi-coding tool call arguments after ${repair.trailingSuffix.length} trailing chars`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
repairedArgsByIndex.delete(event.contentIndex);
|
||||
clearToolCallArgumentsInMessage(event.partial, event.contentIndex);
|
||||
clearToolCallArgumentsInMessage(event.message, event.contentIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (
|
||||
typeof event.contentIndex === "number" &&
|
||||
Number.isInteger(event.contentIndex) &&
|
||||
event.type === "toolcall_end"
|
||||
) {
|
||||
const repairedArgs = repairedArgsByIndex.get(event.contentIndex);
|
||||
if (repairedArgs) {
|
||||
if (event.toolCall && typeof event.toolCall === "object") {
|
||||
(event.toolCall as { arguments?: unknown }).arguments = repairedArgs;
|
||||
}
|
||||
repairToolCallArgumentsInMessage(event.partial, event.contentIndex, repairedArgs);
|
||||
repairToolCallArgumentsInMessage(event.message, event.contentIndex, repairedArgs);
|
||||
}
|
||||
partialJsonByIndex.delete(event.contentIndex);
|
||||
disabledIndices.delete(event.contentIndex);
|
||||
loggedRepairIndices.delete(event.contentIndex);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
},
|
||||
async return(value?: unknown) {
|
||||
return iterator.return?.(value) ?? { done: true as const, value: undefined };
|
||||
},
|
||||
async throw(error?: unknown) {
|
||||
return iterator.throw?.(error) ?? { done: true as const, value: undefined };
|
||||
},
|
||||
};
|
||||
};
|
||||
|
||||
return stream;
|
||||
}
|
||||
|
||||
export function wrapStreamFnRepairMalformedToolCallArguments(baseFn: StreamFn): StreamFn {
|
||||
return (model, context, options) => {
|
||||
const maybeStream = baseFn(model, context, options);
|
||||
if (maybeStream && typeof maybeStream === "object" && "then" in maybeStream) {
|
||||
return Promise.resolve(maybeStream).then((stream) =>
|
||||
wrapStreamRepairMalformedToolCallArguments(stream),
|
||||
);
|
||||
}
|
||||
return wrapStreamRepairMalformedToolCallArguments(maybeStream);
|
||||
};
|
||||
}
|
||||
|
||||
function shouldRepairMalformedAnthropicToolCallArguments(provider?: string): boolean {
|
||||
return normalizeProviderId(provider ?? "") === "kimi-coding";
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// xAI / Grok: decode HTML entities in tool call arguments
|
||||
// ---------------------------------------------------------------------------
|
||||
@ -1379,6 +1654,15 @@ export async function runEmbeddedAttempt(
|
||||
allowedToolNames,
|
||||
);
|
||||
|
||||
if (
|
||||
params.model.api === "anthropic-messages" &&
|
||||
shouldRepairMalformedAnthropicToolCallArguments(params.provider)
|
||||
) {
|
||||
activeSession.agent.streamFn = wrapStreamFnRepairMalformedToolCallArguments(
|
||||
activeSession.agent.streamFn,
|
||||
);
|
||||
}
|
||||
|
||||
if (isXaiProvider(params.provider, params.modelId)) {
|
||||
activeSession.agent.streamFn = wrapStreamFnDecodeXaiToolCallArguments(
|
||||
activeSession.agent.streamFn,
|
||||
|
||||
@ -3,7 +3,10 @@ import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { SANDBOX_PINNED_MUTATION_PYTHON } from "./fs-bridge-mutation-helper.js";
|
||||
import {
|
||||
buildPinnedWritePlan,
|
||||
SANDBOX_PINNED_MUTATION_PYTHON,
|
||||
} from "./fs-bridge-mutation-helper.js";
|
||||
|
||||
async function withTempRoot<T>(prefix: string, run: (root: string) => Promise<T>): Promise<T> {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
@ -22,6 +25,35 @@ function runMutation(args: string[], input?: string) {
|
||||
});
|
||||
}
|
||||
|
||||
function runWritePlan(args: string[], input?: string) {
|
||||
const plan = buildPinnedWritePlan({
|
||||
check: {
|
||||
target: {
|
||||
hostPath: args[1] ?? "",
|
||||
containerPath: args[1] ?? "",
|
||||
relativePath: path.posix.join(args[2] ?? "", args[3] ?? ""),
|
||||
writable: true,
|
||||
},
|
||||
options: {
|
||||
action: "write files",
|
||||
requireWritable: true,
|
||||
},
|
||||
},
|
||||
pinned: {
|
||||
mountRootPath: args[1] ?? "",
|
||||
relativeParentPath: args[2] ?? "",
|
||||
basename: args[3] ?? "",
|
||||
},
|
||||
mkdir: args[4] === "1",
|
||||
});
|
||||
|
||||
return spawnSync("sh", ["-c", plan.script, "moltbot-sandbox-fs", ...(plan.args ?? [])], {
|
||||
input,
|
||||
encoding: "utf8",
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
}
|
||||
|
||||
describe("sandbox pinned mutation helper", () => {
|
||||
it("writes through a pinned directory fd", async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
@ -37,6 +69,26 @@ describe("sandbox pinned mutation helper", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"preserves stdin payload bytes when the pinned write plan runs through sh",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
|
||||
const result = runWritePlan(
|
||||
["write", workspace, "nested/deeper", "note.txt", "1"],
|
||||
"hello",
|
||||
);
|
||||
|
||||
expect(result.status).toBe(0);
|
||||
await expect(
|
||||
fs.readFile(path.join(workspace, "nested", "deeper", "note.txt"), "utf8"),
|
||||
).resolves.toBe("hello");
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"rejects symlink-parent writes instead of materializing a temp file outside the mount",
|
||||
async () => {
|
||||
|
||||
@ -257,7 +257,13 @@ function buildPinnedMutationPlan(params: {
|
||||
return {
|
||||
checks: params.checks,
|
||||
recheckBeforeCommand: true,
|
||||
script: ["set -eu", "python3 - \"$@\" <<'PY'", SANDBOX_PINNED_MUTATION_PYTHON, "PY"].join("\n"),
|
||||
// Feed the helper source over fd 3 so stdin stays available for write payload bytes.
|
||||
script: [
|
||||
"set -eu",
|
||||
"python3 /dev/fd/3 \"$@\" 3<<'PY'",
|
||||
SANDBOX_PINNED_MUTATION_PYTHON,
|
||||
"PY",
|
||||
].join("\n"),
|
||||
args: params.args,
|
||||
};
|
||||
}
|
||||
|
||||
@ -120,7 +120,7 @@ describe("sandbox fs bridge anchored ops", () => {
|
||||
const opCall = mockedExecDockerRaw.mock.calls.find(
|
||||
([args]) =>
|
||||
typeof args[5] === "string" &&
|
||||
args[5].includes("python3 - \"$@\" <<'PY'") &&
|
||||
args[5].includes("python3 /dev/fd/3 \"$@\" 3<<'PY'") &&
|
||||
getDockerArg(args, 1) === testCase.expectedArgs[0],
|
||||
);
|
||||
expect(opCall).toBeDefined();
|
||||
|
||||
@ -129,6 +129,10 @@ describe("sandbox fs bridge shell compatibility", () => {
|
||||
await bridge.writeFile({ filePath: "b.txt", data: "hello" });
|
||||
|
||||
const scripts = getScriptsFromCalls();
|
||||
expect(scripts.some((script) => script.includes("python3 - \"$@\" <<'PY'"))).toBe(false);
|
||||
expect(scripts.some((script) => script.includes("python3 /dev/fd/3 \"$@\" 3<<'PY'"))).toBe(
|
||||
true,
|
||||
);
|
||||
expect(scripts.some((script) => script.includes('cat >"$1"'))).toBe(false);
|
||||
expect(scripts.some((script) => script.includes('cat >"$tmp"'))).toBe(false);
|
||||
expect(scripts.some((script) => script.includes("os.replace("))).toBe(true);
|
||||
|
||||
@ -380,4 +380,36 @@ describe("sessions_spawn subagent lifecycle hooks", () => {
|
||||
emitLifecycleHooks: true,
|
||||
});
|
||||
});
|
||||
|
||||
it("cleans up the provisional session when lineage patching fails after thread binding", async () => {
|
||||
const callGatewayMock = getCallGatewayMock();
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.patch" && typeof request.params?.spawnedBy === "string") {
|
||||
throw new Error("lineage patch failed");
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const result = await executeDiscordThreadSessionSpawn("call9");
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "error",
|
||||
error: "lineage patch failed",
|
||||
});
|
||||
expect(hookRunnerMocks.runSubagentSpawned).not.toHaveBeenCalled();
|
||||
expect(hookRunnerMocks.runSubagentEnded).not.toHaveBeenCalled();
|
||||
const methods = getGatewayMethods();
|
||||
expect(methods).toContain("sessions.delete");
|
||||
expect(methods).not.toContain("agent");
|
||||
const deleteCall = findGatewayRequest("sessions.delete");
|
||||
expect(deleteCall?.params).toMatchObject({
|
||||
key: (result.details as { childSessionKey?: string }).childSessionKey,
|
||||
deleteTranscript: true,
|
||||
emitLifecycleHooks: true,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { resetSubagentRegistryForTests } from "./subagent-registry.js";
|
||||
import { decodeStrictBase64, spawnSubagentDirect } from "./subagent-spawn.js";
|
||||
|
||||
@ -31,6 +32,7 @@ let configOverride: Record<string, unknown> = {
|
||||
},
|
||||
},
|
||||
};
|
||||
let workspaceDirOverride = "";
|
||||
|
||||
vi.mock("../config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/config.js")>();
|
||||
@ -61,7 +63,7 @@ vi.mock("./agent-scope.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("./agent-scope.js")>();
|
||||
return {
|
||||
...actual,
|
||||
resolveAgentWorkspaceDir: () => path.join(os.tmpdir(), "agent-workspace"),
|
||||
resolveAgentWorkspaceDir: () => workspaceDirOverride,
|
||||
};
|
||||
});
|
||||
|
||||
@ -145,6 +147,16 @@ describe("spawnSubagentDirect filename validation", () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockClear();
|
||||
setupGatewayMock();
|
||||
workspaceDirOverride = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), `openclaw-subagent-attachments-${process.pid}-${Date.now()}-`),
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (workspaceDirOverride) {
|
||||
fs.rmSync(workspaceDirOverride, { recursive: true, force: true });
|
||||
workspaceDirOverride = "";
|
||||
}
|
||||
});
|
||||
|
||||
const ctx = {
|
||||
@ -210,4 +222,43 @@ describe("spawnSubagentDirect filename validation", () => {
|
||||
expect(result.status).toBe("error");
|
||||
expect(result.error).toMatch(/attachments_invalid_name/);
|
||||
});
|
||||
|
||||
it("removes materialized attachments when lineage patching fails", async () => {
|
||||
const calls: Array<{ method?: string; params?: Record<string, unknown> }> = [];
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch" && typeof request.params?.spawnedBy === "string") {
|
||||
throw new Error("lineage patch failed");
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const result = await spawnSubagentDirect(
|
||||
{
|
||||
task: "test",
|
||||
attachments: [{ name: "file.txt", content: validContent, encoding: "base64" }],
|
||||
},
|
||||
ctx,
|
||||
);
|
||||
|
||||
expect(result).toMatchObject({
|
||||
status: "error",
|
||||
error: "lineage patch failed",
|
||||
});
|
||||
const attachmentsRoot = path.join(workspaceDirOverride, ".openclaw", "attachments");
|
||||
const retainedDirs = fs.existsSync(attachmentsRoot)
|
||||
? fs.readdirSync(attachmentsRoot).filter((entry) => !entry.startsWith("."))
|
||||
: [];
|
||||
expect(retainedDirs).toHaveLength(0);
|
||||
const deleteCall = calls.find((entry) => entry.method === "sessions.delete");
|
||||
expect(deleteCall?.params).toMatchObject({
|
||||
key: expect.stringMatching(/^agent:main:subagent:/),
|
||||
deleteTranscript: true,
|
||||
emitLifecycleHooks: false,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@ -153,6 +153,25 @@ async function cleanupProvisionalSession(
|
||||
}
|
||||
}
|
||||
|
||||
async function cleanupFailedSpawnBeforeAgentStart(params: {
|
||||
childSessionKey: string;
|
||||
attachmentAbsDir?: string;
|
||||
emitLifecycleHooks?: boolean;
|
||||
deleteTranscript?: boolean;
|
||||
}): Promise<void> {
|
||||
if (params.attachmentAbsDir) {
|
||||
try {
|
||||
await fs.rm(params.attachmentAbsDir, { recursive: true, force: true });
|
||||
} catch {
|
||||
// Best-effort cleanup only.
|
||||
}
|
||||
}
|
||||
await cleanupProvisionalSession(params.childSessionKey, {
|
||||
emitLifecycleHooks: params.emitLifecycleHooks,
|
||||
deleteTranscript: params.deleteTranscript,
|
||||
});
|
||||
}
|
||||
|
||||
function resolveSpawnMode(params: {
|
||||
requestedMode?: SpawnSubagentMode;
|
||||
threadRequested: boolean;
|
||||
@ -561,10 +580,32 @@ export async function spawnSubagentDirect(
|
||||
explicitWorkspaceDir: toolSpawnMetadata.workspaceDir,
|
||||
}),
|
||||
});
|
||||
const spawnLineagePatchError = await patchChildSession({
|
||||
spawnedBy: spawnedByKey,
|
||||
...(spawnedMetadata.workspaceDir ? { spawnedWorkspaceDir: spawnedMetadata.workspaceDir } : {}),
|
||||
});
|
||||
if (spawnLineagePatchError) {
|
||||
await cleanupFailedSpawnBeforeAgentStart({
|
||||
childSessionKey,
|
||||
attachmentAbsDir,
|
||||
emitLifecycleHooks: threadBindingReady,
|
||||
deleteTranscript: true,
|
||||
});
|
||||
return {
|
||||
status: "error",
|
||||
error: spawnLineagePatchError,
|
||||
childSessionKey,
|
||||
};
|
||||
}
|
||||
|
||||
const childIdem = crypto.randomUUID();
|
||||
let childRunId: string = childIdem;
|
||||
try {
|
||||
const {
|
||||
spawnedBy: _spawnedBy,
|
||||
workspaceDir: _workspaceDir,
|
||||
...publicSpawnedMetadata
|
||||
} = spawnedMetadata;
|
||||
const response = await callGateway<{ runId: string }>({
|
||||
method: "agent",
|
||||
params: {
|
||||
@ -581,7 +622,7 @@ export async function spawnSubagentDirect(
|
||||
thinking: thinkingOverride,
|
||||
timeout: runTimeoutSeconds,
|
||||
label: label || undefined,
|
||||
...spawnedMetadata,
|
||||
...publicSpawnedMetadata,
|
||||
},
|
||||
timeoutMs: 10_000,
|
||||
});
|
||||
|
||||
@ -19,6 +19,7 @@ import {
|
||||
import {
|
||||
buildAgentMainSessionKey,
|
||||
DEFAULT_AGENT_ID,
|
||||
parseAgentSessionKey,
|
||||
resolveAgentIdFromSessionKey,
|
||||
} from "../../routing/session-key.js";
|
||||
import { applyModelOverrideToSessionEntry } from "../../sessions/model-overrides.js";
|
||||
@ -36,10 +37,12 @@ import {
|
||||
import type { AnyAgentTool } from "./common.js";
|
||||
import { readStringParam } from "./common.js";
|
||||
import {
|
||||
createSessionVisibilityGuard,
|
||||
shouldResolveSessionIdInput,
|
||||
resolveInternalSessionKey,
|
||||
resolveMainSessionAlias,
|
||||
createAgentToAgentPolicy,
|
||||
resolveEffectiveSessionToolsVisibility,
|
||||
resolveInternalSessionKey,
|
||||
resolveSandboxedSessionToolContext,
|
||||
} from "./sessions-helpers.js";
|
||||
|
||||
const SessionStatusToolSchema = Type.Object({
|
||||
@ -175,6 +178,7 @@ async function resolveModelOverride(params: {
|
||||
export function createSessionStatusTool(opts?: {
|
||||
agentSessionKey?: string;
|
||||
config?: OpenClawConfig;
|
||||
sandboxed?: boolean;
|
||||
}): AnyAgentTool {
|
||||
return {
|
||||
label: "Session Status",
|
||||
@ -185,18 +189,70 @@ export function createSessionStatusTool(opts?: {
|
||||
execute: async (_toolCallId, args) => {
|
||||
const params = args as Record<string, unknown>;
|
||||
const cfg = opts?.config ?? loadConfig();
|
||||
const { mainKey, alias } = resolveMainSessionAlias(cfg);
|
||||
const { mainKey, alias, effectiveRequesterKey } = resolveSandboxedSessionToolContext({
|
||||
cfg,
|
||||
agentSessionKey: opts?.agentSessionKey,
|
||||
sandboxed: opts?.sandboxed,
|
||||
});
|
||||
const a2aPolicy = createAgentToAgentPolicy(cfg);
|
||||
const requesterAgentId = resolveAgentIdFromSessionKey(
|
||||
opts?.agentSessionKey ?? effectiveRequesterKey,
|
||||
);
|
||||
const visibilityRequesterKey = effectiveRequesterKey.trim();
|
||||
const usesLegacyMainAlias = alias === mainKey;
|
||||
const isLegacyMainVisibilityKey = (sessionKey: string) => {
|
||||
const trimmed = sessionKey.trim();
|
||||
return usesLegacyMainAlias && (trimmed === "main" || trimmed === mainKey);
|
||||
};
|
||||
const resolveVisibilityMainSessionKey = (sessionAgentId: string) => {
|
||||
const requesterParsed = parseAgentSessionKey(visibilityRequesterKey);
|
||||
if (
|
||||
resolveAgentIdFromSessionKey(visibilityRequesterKey) === sessionAgentId &&
|
||||
(requesterParsed?.rest === mainKey || isLegacyMainVisibilityKey(visibilityRequesterKey))
|
||||
) {
|
||||
return visibilityRequesterKey;
|
||||
}
|
||||
return buildAgentMainSessionKey({
|
||||
agentId: sessionAgentId,
|
||||
mainKey,
|
||||
});
|
||||
};
|
||||
const normalizeVisibilityTargetSessionKey = (sessionKey: string, sessionAgentId: string) => {
|
||||
const trimmed = sessionKey.trim();
|
||||
if (!trimmed) {
|
||||
return trimmed;
|
||||
}
|
||||
if (trimmed.startsWith("agent:")) {
|
||||
const parsed = parseAgentSessionKey(trimmed);
|
||||
if (parsed?.rest === mainKey) {
|
||||
return resolveVisibilityMainSessionKey(sessionAgentId);
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
// Preserve legacy bare main keys for requester tree checks.
|
||||
if (isLegacyMainVisibilityKey(trimmed)) {
|
||||
return resolveVisibilityMainSessionKey(sessionAgentId);
|
||||
}
|
||||
return trimmed;
|
||||
};
|
||||
const visibilityGuard =
|
||||
opts?.sandboxed === true
|
||||
? await createSessionVisibilityGuard({
|
||||
action: "status",
|
||||
requesterSessionKey: visibilityRequesterKey,
|
||||
visibility: resolveEffectiveSessionToolsVisibility({
|
||||
cfg,
|
||||
sandboxed: true,
|
||||
}),
|
||||
a2aPolicy,
|
||||
})
|
||||
: null;
|
||||
|
||||
const requestedKeyParam = readStringParam(params, "sessionKey");
|
||||
let requestedKeyRaw = requestedKeyParam ?? opts?.agentSessionKey;
|
||||
if (!requestedKeyRaw?.trim()) {
|
||||
throw new Error("sessionKey required");
|
||||
}
|
||||
|
||||
const requesterAgentId = resolveAgentIdFromSessionKey(
|
||||
opts?.agentSessionKey ?? requestedKeyRaw,
|
||||
);
|
||||
const ensureAgentAccess = (targetAgentId: string) => {
|
||||
if (targetAgentId === requesterAgentId) {
|
||||
return;
|
||||
@ -213,7 +269,14 @@ export function createSessionStatusTool(opts?: {
|
||||
};
|
||||
|
||||
if (requestedKeyRaw.startsWith("agent:")) {
|
||||
ensureAgentAccess(resolveAgentIdFromSessionKey(requestedKeyRaw));
|
||||
const requestedAgentId = resolveAgentIdFromSessionKey(requestedKeyRaw);
|
||||
ensureAgentAccess(requestedAgentId);
|
||||
const access = visibilityGuard?.check(
|
||||
normalizeVisibilityTargetSessionKey(requestedKeyRaw, requestedAgentId),
|
||||
);
|
||||
if (access && !access.allowed) {
|
||||
throw new Error(access.error);
|
||||
}
|
||||
}
|
||||
|
||||
const isExplicitAgentKey = requestedKeyRaw.startsWith("agent:");
|
||||
@ -258,6 +321,15 @@ export function createSessionStatusTool(opts?: {
|
||||
throw new Error(`Unknown ${kind}: ${requestedKeyRaw}`);
|
||||
}
|
||||
|
||||
if (visibilityGuard && !requestedKeyRaw.startsWith("agent:")) {
|
||||
const access = visibilityGuard.check(
|
||||
normalizeVisibilityTargetSessionKey(resolved.key, agentId),
|
||||
);
|
||||
if (!access.allowed) {
|
||||
throw new Error(access.error);
|
||||
}
|
||||
}
|
||||
|
||||
const configured = resolveDefaultModelForAgent({ cfg, agentId });
|
||||
const modelRaw = readStringParam(params, "model");
|
||||
let changedModel = false;
|
||||
|
||||
@ -14,7 +14,7 @@ export type AgentToAgentPolicy = {
|
||||
isAllowed: (requesterAgentId: string, targetAgentId: string) => boolean;
|
||||
};
|
||||
|
||||
export type SessionAccessAction = "history" | "send" | "list";
|
||||
export type SessionAccessAction = "history" | "send" | "list" | "status";
|
||||
|
||||
export type SessionAccessResult =
|
||||
| { allowed: true }
|
||||
@ -130,6 +130,9 @@ function actionPrefix(action: SessionAccessAction): string {
|
||||
if (action === "send") {
|
||||
return "Session send";
|
||||
}
|
||||
if (action === "status") {
|
||||
return "Session status";
|
||||
}
|
||||
return "Session list";
|
||||
}
|
||||
|
||||
@ -140,6 +143,9 @@ function a2aDisabledMessage(action: SessionAccessAction): string {
|
||||
if (action === "send") {
|
||||
return "Agent-to-agent messaging is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent sends.";
|
||||
}
|
||||
if (action === "status") {
|
||||
return "Agent-to-agent status is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent access.";
|
||||
}
|
||||
return "Agent-to-agent listing is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent visibility.";
|
||||
}
|
||||
|
||||
@ -150,6 +156,9 @@ function a2aDeniedMessage(action: SessionAccessAction): string {
|
||||
if (action === "send") {
|
||||
return "Agent-to-agent messaging denied by tools.agentToAgent.allow.";
|
||||
}
|
||||
if (action === "status") {
|
||||
return "Agent-to-agent status denied by tools.agentToAgent.allow.";
|
||||
}
|
||||
return "Agent-to-agent listing denied by tools.agentToAgent.allow.";
|
||||
}
|
||||
|
||||
@ -160,6 +169,9 @@ function crossVisibilityMessage(action: SessionAccessAction): string {
|
||||
if (action === "send") {
|
||||
return "Session send visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access.";
|
||||
}
|
||||
if (action === "status") {
|
||||
return "Session status visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access.";
|
||||
}
|
||||
return "Session list visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access.";
|
||||
}
|
||||
|
||||
|
||||
@ -169,6 +169,50 @@ describe("buildReplyPayloads media filter integration", () => {
|
||||
expect(replyPayloads).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("drops all final payloads when block pipeline streamed successfully", async () => {
|
||||
const pipeline: Parameters<typeof buildReplyPayloads>[0]["blockReplyPipeline"] = {
|
||||
didStream: () => true,
|
||||
isAborted: () => false,
|
||||
hasSentPayload: () => false,
|
||||
enqueue: () => {},
|
||||
flush: async () => {},
|
||||
stop: () => {},
|
||||
hasBuffered: () => false,
|
||||
};
|
||||
// shouldDropFinalPayloads short-circuits to [] when the pipeline streamed
|
||||
// without aborting, so hasSentPayload is never reached.
|
||||
const { replyPayloads } = await buildReplyPayloads({
|
||||
...baseParams,
|
||||
blockStreamingEnabled: true,
|
||||
blockReplyPipeline: pipeline,
|
||||
replyToMode: "all",
|
||||
payloads: [{ text: "response", replyToId: "post-123" }],
|
||||
});
|
||||
|
||||
expect(replyPayloads).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("deduplicates final payloads against directly sent block keys regardless of replyToId", async () => {
|
||||
// When block streaming is not active but directlySentBlockKeys has entries
|
||||
// (e.g. from pre-tool flush), the key should match even if replyToId differs.
|
||||
const { createBlockReplyContentKey } = await import("./block-reply-pipeline.js");
|
||||
const directlySentBlockKeys = new Set<string>();
|
||||
directlySentBlockKeys.add(
|
||||
createBlockReplyContentKey({ text: "response", replyToId: "post-1" }),
|
||||
);
|
||||
|
||||
const { replyPayloads } = await buildReplyPayloads({
|
||||
...baseParams,
|
||||
blockStreamingEnabled: false,
|
||||
blockReplyPipeline: null,
|
||||
directlySentBlockKeys,
|
||||
replyToMode: "off",
|
||||
payloads: [{ text: "response" }],
|
||||
});
|
||||
|
||||
expect(replyPayloads).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("does not suppress same-target replies when accountId differs", async () => {
|
||||
const { replyPayloads } = await buildReplyPayloads({
|
||||
...baseParams,
|
||||
|
||||
@ -5,7 +5,7 @@ import type { OriginatingChannelType } from "../templating.js";
|
||||
import { SILENT_REPLY_TOKEN } from "../tokens.js";
|
||||
import type { ReplyPayload } from "../types.js";
|
||||
import { formatBunFetchSocketError, isBunFetchSocketError } from "./agent-runner-utils.js";
|
||||
import { createBlockReplyPayloadKey, type BlockReplyPipeline } from "./block-reply-pipeline.js";
|
||||
import { createBlockReplyContentKey, type BlockReplyPipeline } from "./block-reply-pipeline.js";
|
||||
import {
|
||||
resolveOriginAccountId,
|
||||
resolveOriginMessageProvider,
|
||||
@ -213,7 +213,7 @@ export async function buildReplyPayloads(params: {
|
||||
)
|
||||
: params.directlySentBlockKeys?.size
|
||||
? mediaFilteredPayloads.filter(
|
||||
(payload) => !params.directlySentBlockKeys!.has(createBlockReplyPayloadKey(payload)),
|
||||
(payload) => !params.directlySentBlockKeys!.has(createBlockReplyContentKey(payload)),
|
||||
)
|
||||
: mediaFilteredPayloads;
|
||||
const replyPayloads = suppressMessagingToolReplies ? [] : filteredPayloads;
|
||||
|
||||
79
src/auto-reply/reply/block-reply-pipeline.test.ts
Normal file
79
src/auto-reply/reply/block-reply-pipeline.test.ts
Normal file
@ -0,0 +1,79 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
createBlockReplyContentKey,
|
||||
createBlockReplyPayloadKey,
|
||||
createBlockReplyPipeline,
|
||||
} from "./block-reply-pipeline.js";
|
||||
|
||||
describe("createBlockReplyPayloadKey", () => {
|
||||
it("produces different keys for payloads differing only by replyToId", () => {
|
||||
const a = createBlockReplyPayloadKey({ text: "hello world", replyToId: "post-1" });
|
||||
const b = createBlockReplyPayloadKey({ text: "hello world", replyToId: "post-2" });
|
||||
const c = createBlockReplyPayloadKey({ text: "hello world" });
|
||||
expect(a).not.toBe(b);
|
||||
expect(a).not.toBe(c);
|
||||
});
|
||||
|
||||
it("produces different keys for payloads with different text", () => {
|
||||
const a = createBlockReplyPayloadKey({ text: "hello" });
|
||||
const b = createBlockReplyPayloadKey({ text: "world" });
|
||||
expect(a).not.toBe(b);
|
||||
});
|
||||
|
||||
it("produces different keys for payloads with different media", () => {
|
||||
const a = createBlockReplyPayloadKey({ text: "hello", mediaUrl: "file:///a.png" });
|
||||
const b = createBlockReplyPayloadKey({ text: "hello", mediaUrl: "file:///b.png" });
|
||||
expect(a).not.toBe(b);
|
||||
});
|
||||
|
||||
it("trims whitespace from text for key comparison", () => {
|
||||
const a = createBlockReplyPayloadKey({ text: " hello " });
|
||||
const b = createBlockReplyPayloadKey({ text: "hello" });
|
||||
expect(a).toBe(b);
|
||||
});
|
||||
});
|
||||
|
||||
describe("createBlockReplyContentKey", () => {
|
||||
it("produces the same key for payloads differing only by replyToId", () => {
|
||||
const a = createBlockReplyContentKey({ text: "hello world", replyToId: "post-1" });
|
||||
const b = createBlockReplyContentKey({ text: "hello world", replyToId: "post-2" });
|
||||
const c = createBlockReplyContentKey({ text: "hello world" });
|
||||
expect(a).toBe(b);
|
||||
expect(a).toBe(c);
|
||||
});
|
||||
});
|
||||
|
||||
describe("createBlockReplyPipeline dedup with threading", () => {
|
||||
it("keeps separate deliveries for same text with different replyToId", async () => {
|
||||
const sent: Array<{ text?: string; replyToId?: string }> = [];
|
||||
const pipeline = createBlockReplyPipeline({
|
||||
onBlockReply: async (payload) => {
|
||||
sent.push({ text: payload.text, replyToId: payload.replyToId });
|
||||
},
|
||||
timeoutMs: 5000,
|
||||
});
|
||||
|
||||
pipeline.enqueue({ text: "response text", replyToId: "thread-root-1" });
|
||||
pipeline.enqueue({ text: "response text", replyToId: undefined });
|
||||
await pipeline.flush();
|
||||
|
||||
expect(sent).toEqual([
|
||||
{ text: "response text", replyToId: "thread-root-1" },
|
||||
{ text: "response text", replyToId: undefined },
|
||||
]);
|
||||
});
|
||||
|
||||
it("hasSentPayload matches regardless of replyToId", async () => {
|
||||
const pipeline = createBlockReplyPipeline({
|
||||
onBlockReply: async () => {},
|
||||
timeoutMs: 5000,
|
||||
});
|
||||
|
||||
pipeline.enqueue({ text: "response text", replyToId: "thread-root-1" });
|
||||
await pipeline.flush();
|
||||
|
||||
// Final payload with no replyToId should be recognized as already sent
|
||||
expect(pipeline.hasSentPayload({ text: "response text" })).toBe(true);
|
||||
expect(pipeline.hasSentPayload({ text: "response text", replyToId: "other-id" })).toBe(true);
|
||||
});
|
||||
});
|
||||
@ -48,6 +48,19 @@ export function createBlockReplyPayloadKey(payload: ReplyPayload): string {
|
||||
});
|
||||
}
|
||||
|
||||
export function createBlockReplyContentKey(payload: ReplyPayload): string {
|
||||
const text = payload.text?.trim() ?? "";
|
||||
const mediaList = payload.mediaUrls?.length
|
||||
? payload.mediaUrls
|
||||
: payload.mediaUrl
|
||||
? [payload.mediaUrl]
|
||||
: [];
|
||||
// Content-only key used for final-payload suppression after block streaming.
|
||||
// This intentionally ignores replyToId so a streamed threaded payload and the
|
||||
// later final payload still collapse when they carry the same content.
|
||||
return JSON.stringify({ text, mediaList });
|
||||
}
|
||||
|
||||
const withTimeout = async <T>(
|
||||
promise: Promise<T>,
|
||||
timeoutMs: number,
|
||||
@ -80,6 +93,7 @@ export function createBlockReplyPipeline(params: {
|
||||
}): BlockReplyPipeline {
|
||||
const { onBlockReply, timeoutMs, coalescing, buffer } = params;
|
||||
const sentKeys = new Set<string>();
|
||||
const sentContentKeys = new Set<string>();
|
||||
const pendingKeys = new Set<string>();
|
||||
const seenKeys = new Set<string>();
|
||||
const bufferedKeys = new Set<string>();
|
||||
@ -95,6 +109,7 @@ export function createBlockReplyPipeline(params: {
|
||||
return;
|
||||
}
|
||||
const payloadKey = createBlockReplyPayloadKey(payload);
|
||||
const contentKey = createBlockReplyContentKey(payload);
|
||||
if (!bypassSeenCheck) {
|
||||
if (seenKeys.has(payloadKey)) {
|
||||
return;
|
||||
@ -130,6 +145,7 @@ export function createBlockReplyPipeline(params: {
|
||||
return;
|
||||
}
|
||||
sentKeys.add(payloadKey);
|
||||
sentContentKeys.add(contentKey);
|
||||
didStream = true;
|
||||
})
|
||||
.catch((err) => {
|
||||
@ -238,8 +254,8 @@ export function createBlockReplyPipeline(params: {
|
||||
didStream: () => didStream,
|
||||
isAborted: () => aborted,
|
||||
hasSentPayload: (payload) => {
|
||||
const payloadKey = createBlockReplyPayloadKey(payload);
|
||||
return sentKeys.has(payloadKey);
|
||||
const payloadKey = createBlockReplyContentKey(payload);
|
||||
return sentContentKeys.has(payloadKey);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
@ -2,7 +2,7 @@ import { logVerbose } from "../../globals.js";
|
||||
import { SILENT_REPLY_TOKEN } from "../tokens.js";
|
||||
import type { BlockReplyContext, ReplyPayload } from "../types.js";
|
||||
import type { BlockReplyPipeline } from "./block-reply-pipeline.js";
|
||||
import { createBlockReplyPayloadKey } from "./block-reply-pipeline.js";
|
||||
import { createBlockReplyContentKey } from "./block-reply-pipeline.js";
|
||||
import { parseReplyDirectives } from "./reply-directives.js";
|
||||
import { applyReplyTagsToPayload, isRenderablePayload } from "./reply-payloads.js";
|
||||
import type { TypingSignaler } from "./typing-mode.js";
|
||||
@ -128,7 +128,7 @@ export function createBlockReplyDeliveryHandler(params: {
|
||||
} else if (params.blockStreamingEnabled) {
|
||||
// Send directly when flushing before tool execution (no pipeline but streaming enabled).
|
||||
// Track sent key to avoid duplicate in final payloads.
|
||||
params.directlySentBlockKeys.add(createBlockReplyPayloadKey(blockPayload));
|
||||
params.directlySentBlockKeys.add(createBlockReplyContentKey(blockPayload));
|
||||
await params.onBlockReply(blockPayload);
|
||||
}
|
||||
// When streaming is disabled entirely, blocks are accumulated in final text instead.
|
||||
|
||||
54
src/browser/proxy-files.test.ts
Normal file
54
src/browser/proxy-files.test.ts
Normal file
@ -0,0 +1,54 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
import { MEDIA_MAX_BYTES } from "../media/store.js";
|
||||
import { createTempHomeEnv, type TempHomeEnv } from "../test-utils/temp-home.js";
|
||||
import { persistBrowserProxyFiles } from "./proxy-files.js";
|
||||
|
||||
describe("persistBrowserProxyFiles", () => {
|
||||
let tempHome: TempHomeEnv;
|
||||
|
||||
beforeEach(async () => {
|
||||
tempHome = await createTempHomeEnv("openclaw-browser-proxy-files-");
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await tempHome.restore();
|
||||
});
|
||||
|
||||
it("persists browser proxy files under the shared media store", async () => {
|
||||
const sourcePath = "/tmp/proxy-file.txt";
|
||||
const mapping = await persistBrowserProxyFiles([
|
||||
{
|
||||
path: sourcePath,
|
||||
base64: Buffer.from("hello from browser proxy").toString("base64"),
|
||||
mimeType: "text/plain",
|
||||
},
|
||||
]);
|
||||
|
||||
const savedPath = mapping.get(sourcePath);
|
||||
expect(typeof savedPath).toBe("string");
|
||||
expect(path.normalize(savedPath ?? "")).toContain(
|
||||
`${path.sep}.openclaw${path.sep}media${path.sep}browser${path.sep}`,
|
||||
);
|
||||
await expect(fs.readFile(savedPath ?? "", "utf8")).resolves.toBe("hello from browser proxy");
|
||||
});
|
||||
|
||||
it("rejects browser proxy files that exceed the shared media size limit", async () => {
|
||||
const oversized = Buffer.alloc(MEDIA_MAX_BYTES + 1, 0x41);
|
||||
|
||||
await expect(
|
||||
persistBrowserProxyFiles([
|
||||
{
|
||||
path: "/tmp/oversized.bin",
|
||||
base64: oversized.toString("base64"),
|
||||
mimeType: "application/octet-stream",
|
||||
},
|
||||
]),
|
||||
).rejects.toThrow("Media exceeds 5MB limit");
|
||||
|
||||
await expect(
|
||||
fs.stat(path.join(tempHome.home, ".openclaw", "media", "browser")),
|
||||
).rejects.toThrow();
|
||||
});
|
||||
});
|
||||
@ -13,7 +13,7 @@ export async function persistBrowserProxyFiles(files: BrowserProxyFile[] | undef
|
||||
const mapping = new Map<string, string>();
|
||||
for (const file of files) {
|
||||
const buffer = Buffer.from(file.base64, "base64");
|
||||
const saved = await saveMediaBuffer(buffer, file.mimeType, "browser", buffer.byteLength);
|
||||
const saved = await saveMediaBuffer(buffer, file.mimeType, "browser");
|
||||
mapping.set(file.path, saved.path);
|
||||
}
|
||||
return mapping;
|
||||
|
||||
@ -164,11 +164,11 @@ export function setAccountAllowFromForChannel(params: {
|
||||
});
|
||||
}
|
||||
|
||||
export function setTopLevelChannelAllowFrom(params: {
|
||||
function patchTopLevelChannelConfig(params: {
|
||||
cfg: OpenClawConfig;
|
||||
channel: string;
|
||||
allowFrom: string[];
|
||||
enabled?: boolean;
|
||||
patch: Record<string, unknown>;
|
||||
}): OpenClawConfig {
|
||||
const channelConfig =
|
||||
(params.cfg.channels?.[params.channel] as Record<string, unknown> | undefined) ?? {};
|
||||
@ -179,12 +179,26 @@ export function setTopLevelChannelAllowFrom(params: {
|
||||
[params.channel]: {
|
||||
...channelConfig,
|
||||
...(params.enabled ? { enabled: true } : {}),
|
||||
allowFrom: params.allowFrom,
|
||||
...params.patch,
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function setTopLevelChannelAllowFrom(params: {
|
||||
cfg: OpenClawConfig;
|
||||
channel: string;
|
||||
allowFrom: string[];
|
||||
enabled?: boolean;
|
||||
}): OpenClawConfig {
|
||||
return patchTopLevelChannelConfig({
|
||||
cfg: params.cfg,
|
||||
channel: params.channel,
|
||||
enabled: params.enabled,
|
||||
patch: { allowFrom: params.allowFrom },
|
||||
});
|
||||
}
|
||||
|
||||
export function setTopLevelChannelDmPolicyWithAllowFrom(params: {
|
||||
cfg: OpenClawConfig;
|
||||
channel: string;
|
||||
@ -199,17 +213,14 @@ export function setTopLevelChannelDmPolicyWithAllowFrom(params: {
|
||||
undefined;
|
||||
const allowFrom =
|
||||
params.dmPolicy === "open" ? addWildcardAllowFrom(existingAllowFrom) : undefined;
|
||||
return {
|
||||
...params.cfg,
|
||||
channels: {
|
||||
...params.cfg.channels,
|
||||
[params.channel]: {
|
||||
...channelConfig,
|
||||
dmPolicy: params.dmPolicy,
|
||||
...(allowFrom ? { allowFrom } : {}),
|
||||
},
|
||||
return patchTopLevelChannelConfig({
|
||||
cfg: params.cfg,
|
||||
channel: params.channel,
|
||||
patch: {
|
||||
dmPolicy: params.dmPolicy,
|
||||
...(allowFrom ? { allowFrom } : {}),
|
||||
},
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
export function setTopLevelChannelGroupPolicy(params: {
|
||||
@ -218,19 +229,12 @@ export function setTopLevelChannelGroupPolicy(params: {
|
||||
groupPolicy: GroupPolicy;
|
||||
enabled?: boolean;
|
||||
}): OpenClawConfig {
|
||||
const channelConfig =
|
||||
(params.cfg.channels?.[params.channel] as Record<string, unknown> | undefined) ?? {};
|
||||
return {
|
||||
...params.cfg,
|
||||
channels: {
|
||||
...params.cfg.channels,
|
||||
[params.channel]: {
|
||||
...channelConfig,
|
||||
...(params.enabled ? { enabled: true } : {}),
|
||||
groupPolicy: params.groupPolicy,
|
||||
},
|
||||
},
|
||||
};
|
||||
return patchTopLevelChannelConfig({
|
||||
cfg: params.cfg,
|
||||
channel: params.channel,
|
||||
enabled: params.enabled,
|
||||
patch: { groupPolicy: params.groupPolicy },
|
||||
});
|
||||
}
|
||||
|
||||
export function setChannelDmPolicyWithAllowFrom(params: {
|
||||
|
||||
@ -69,7 +69,6 @@ function sanitizeJsonValue(value: unknown): unknown {
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
function formatSkillName(skill: SkillStatusEntry): string {
|
||||
const emoji = normalizeSkillEmoji(skill.emoji);
|
||||
return `${emoji} ${theme.command(skill.name)}`;
|
||||
|
||||
@ -78,6 +78,8 @@ export type SessionEntry = {
|
||||
sessionFile?: string;
|
||||
/** Parent session key that spawned this session (used for sandbox session-tool scoping). */
|
||||
spawnedBy?: string;
|
||||
/** Workspace inherited by spawned sessions and reused on later turns for the same child session. */
|
||||
spawnedWorkspaceDir?: string;
|
||||
/** True after a thread/topic session has been forked from its parent transcript once. */
|
||||
forkedFromParent?: boolean;
|
||||
/** Subagent spawn depth (0 = main, 1 = sub-agent, 2 = sub-sub-agent). */
|
||||
|
||||
@ -186,6 +186,8 @@ export type GatewayTailscaleConfig = {
|
||||
};
|
||||
|
||||
export type GatewayRemoteConfig = {
|
||||
/** Whether remote gateway surfaces are enabled. Default: true when absent. */
|
||||
enabled?: boolean;
|
||||
/** Remote Gateway WebSocket URL (ws:// or wss://). */
|
||||
url?: string;
|
||||
/** Transport for macOS remote connections (ssh tunnel or direct WS). */
|
||||
|
||||
@ -217,6 +217,9 @@ describe("dispatchCronDelivery — double-announce guard", () => {
|
||||
payloads: [{ text: "Detailed child result, everything finished successfully." }],
|
||||
}),
|
||||
);
|
||||
expect(deliverOutboundPayloads).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ skipQueue: true }),
|
||||
);
|
||||
});
|
||||
|
||||
it("normal text delivery sends exactly once and sets deliveryAttempted=true", async () => {
|
||||
@ -304,4 +307,69 @@ describe("dispatchCronDelivery — double-announce guard", () => {
|
||||
expect(deliverOutboundPayloads).not.toHaveBeenCalled();
|
||||
expect(state.deliveryAttempted).toBe(false);
|
||||
});
|
||||
|
||||
it("text delivery always bypasses the write-ahead queue", async () => {
|
||||
vi.mocked(countActiveDescendantRuns).mockReturnValue(0);
|
||||
vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false);
|
||||
vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]);
|
||||
|
||||
const params = makeBaseParams({ synthesizedText: "Daily digest ready." });
|
||||
const state = await dispatchCronDelivery(params);
|
||||
|
||||
expect(state.delivered).toBe(true);
|
||||
expect(state.deliveryAttempted).toBe(true);
|
||||
expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1);
|
||||
|
||||
expect(deliverOutboundPayloads).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
channel: "telegram",
|
||||
to: "123456",
|
||||
payloads: [{ text: "Daily digest ready." }],
|
||||
skipQueue: true,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("structured/thread delivery also bypasses the write-ahead queue", async () => {
|
||||
vi.mocked(countActiveDescendantRuns).mockReturnValue(0);
|
||||
vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false);
|
||||
vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]);
|
||||
|
||||
const params = makeBaseParams({ synthesizedText: "Report attached." });
|
||||
// Simulate structured content so useDirectDelivery path is taken (no retryTransient)
|
||||
(params as Record<string, unknown>).deliveryPayloadHasStructuredContent = true;
|
||||
await dispatchCronDelivery(params);
|
||||
|
||||
expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1);
|
||||
expect(deliverOutboundPayloads).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ skipQueue: true }),
|
||||
);
|
||||
});
|
||||
|
||||
it("transient retry delivers exactly once with skipQueue on both attempts", async () => {
|
||||
vi.mocked(countActiveDescendantRuns).mockReturnValue(0);
|
||||
vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false);
|
||||
|
||||
// First call throws a transient error, second call succeeds.
|
||||
vi.mocked(deliverOutboundPayloads)
|
||||
.mockRejectedValueOnce(new Error("gateway timeout"))
|
||||
.mockResolvedValueOnce([{ ok: true } as never]);
|
||||
|
||||
vi.stubEnv("OPENCLAW_TEST_FAST", "1");
|
||||
try {
|
||||
const params = makeBaseParams({ synthesizedText: "Retry test." });
|
||||
const state = await dispatchCronDelivery(params);
|
||||
|
||||
expect(state.delivered).toBe(true);
|
||||
expect(state.deliveryAttempted).toBe(true);
|
||||
// Two calls total: first failed transiently, second succeeded.
|
||||
expect(deliverOutboundPayloads).toHaveBeenCalledTimes(2);
|
||||
|
||||
const calls = vi.mocked(deliverOutboundPayloads).mock.calls;
|
||||
expect(calls[0][0]).toEqual(expect.objectContaining({ skipQueue: true }));
|
||||
expect(calls[1][0]).toEqual(expect.objectContaining({ skipQueue: true }));
|
||||
} finally {
|
||||
vi.unstubAllEnvs();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@ -157,7 +157,9 @@ function isTransientDirectCronDeliveryError(error: unknown): boolean {
|
||||
}
|
||||
|
||||
function resolveDirectCronRetryDelaysMs(): readonly number[] {
|
||||
return process.env.OPENCLAW_TEST_FAST === "1" ? [8, 16, 32] : [5_000, 10_000, 20_000];
|
||||
return process.env.NODE_ENV === "test" && process.env.OPENCLAW_TEST_FAST === "1"
|
||||
? [8, 16, 32]
|
||||
: [5_000, 10_000, 20_000];
|
||||
}
|
||||
|
||||
async function retryTransientDirectCronDelivery<T>(params: {
|
||||
@ -256,6 +258,12 @@ export async function dispatchCronDelivery(
|
||||
bestEffort: params.deliveryBestEffort,
|
||||
deps: createOutboundSendDeps(params.deps),
|
||||
abortSignal: params.abortSignal,
|
||||
// Isolated cron direct delivery uses its own transient retry loop.
|
||||
// Keep all attempts out of the write-ahead delivery queue so a
|
||||
// late-successful first send cannot leave behind a failed queue
|
||||
// entry that replays on the next restart.
|
||||
// See: https://github.com/openclaw/openclaw/issues/40545
|
||||
skipQueue: true,
|
||||
});
|
||||
const deliveryResults = options?.retryTransient
|
||||
? await retryTransientDirectCronDelivery({
|
||||
|
||||
@ -6,6 +6,8 @@ export type ChatAbortControllerEntry = {
|
||||
sessionKey: string;
|
||||
startedAtMs: number;
|
||||
expiresAtMs: number;
|
||||
ownerConnId?: string;
|
||||
ownerDeviceId?: string;
|
||||
};
|
||||
|
||||
export function isChatStopCommandText(text: string): boolean {
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { INPUT_PROVENANCE_KIND_VALUES } from "../../../sessions/input-provenance.js";
|
||||
import { NonEmptyString, SessionLabelString } from "./primitives.js";
|
||||
import { InputProvenanceSchema, NonEmptyString, SessionLabelString } from "./primitives.js";
|
||||
|
||||
export const AgentInternalEventSchema = Type.Object(
|
||||
{
|
||||
@ -96,22 +95,9 @@ export const AgentParamsSchema = Type.Object(
|
||||
lane: Type.Optional(Type.String()),
|
||||
extraSystemPrompt: Type.Optional(Type.String()),
|
||||
internalEvents: Type.Optional(Type.Array(AgentInternalEventSchema)),
|
||||
inputProvenance: Type.Optional(
|
||||
Type.Object(
|
||||
{
|
||||
kind: Type.String({ enum: [...INPUT_PROVENANCE_KIND_VALUES] }),
|
||||
originSessionId: Type.Optional(Type.String()),
|
||||
sourceSessionKey: Type.Optional(Type.String()),
|
||||
sourceChannel: Type.Optional(Type.String()),
|
||||
sourceTool: Type.Optional(Type.String()),
|
||||
},
|
||||
{ additionalProperties: false },
|
||||
),
|
||||
),
|
||||
inputProvenance: Type.Optional(InputProvenanceSchema),
|
||||
idempotencyKey: NonEmptyString,
|
||||
label: Type.Optional(SessionLabelString),
|
||||
spawnedBy: Type.Optional(Type.String()),
|
||||
workspaceDir: Type.Optional(Type.String()),
|
||||
},
|
||||
{ additionalProperties: false },
|
||||
);
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { INPUT_PROVENANCE_KIND_VALUES } from "../../../sessions/input-provenance.js";
|
||||
import { ChatSendSessionKeyString, NonEmptyString } from "./primitives.js";
|
||||
import { ChatSendSessionKeyString, InputProvenanceSchema, NonEmptyString } from "./primitives.js";
|
||||
|
||||
export const LogsTailParamsSchema = Type.Object(
|
||||
{
|
||||
@ -40,18 +39,7 @@ export const ChatSendParamsSchema = Type.Object(
|
||||
deliver: Type.Optional(Type.Boolean()),
|
||||
attachments: Type.Optional(Type.Array(Type.Unknown())),
|
||||
timeoutMs: Type.Optional(Type.Integer({ minimum: 0 })),
|
||||
systemInputProvenance: Type.Optional(
|
||||
Type.Object(
|
||||
{
|
||||
kind: Type.String({ enum: [...INPUT_PROVENANCE_KIND_VALUES] }),
|
||||
originSessionId: Type.Optional(Type.String()),
|
||||
sourceSessionKey: Type.Optional(Type.String()),
|
||||
sourceChannel: Type.Optional(Type.String()),
|
||||
sourceTool: Type.Optional(Type.String()),
|
||||
},
|
||||
{ additionalProperties: false },
|
||||
),
|
||||
),
|
||||
systemInputProvenance: Type.Optional(InputProvenanceSchema),
|
||||
systemProvenanceReceipt: Type.Optional(Type.String()),
|
||||
idempotencyKey: NonEmptyString,
|
||||
},
|
||||
|
||||
@ -5,6 +5,7 @@ import {
|
||||
FILE_SECRET_REF_ID_PATTERN,
|
||||
SECRET_PROVIDER_ALIAS_PATTERN,
|
||||
} from "../../../secrets/ref-contract.js";
|
||||
import { INPUT_PROVENANCE_KIND_VALUES } from "../../../sessions/input-provenance.js";
|
||||
import { SESSION_LABEL_MAX_LENGTH } from "../../../sessions/session-label.js";
|
||||
import { GATEWAY_CLIENT_IDS, GATEWAY_CLIENT_MODES } from "../client-info.js";
|
||||
|
||||
@ -18,6 +19,16 @@ export const SessionLabelString = Type.String({
|
||||
minLength: 1,
|
||||
maxLength: SESSION_LABEL_MAX_LENGTH,
|
||||
});
|
||||
export const InputProvenanceSchema = Type.Object(
|
||||
{
|
||||
kind: Type.String({ enum: [...INPUT_PROVENANCE_KIND_VALUES] }),
|
||||
originSessionId: Type.Optional(Type.String()),
|
||||
sourceSessionKey: Type.Optional(Type.String()),
|
||||
sourceChannel: Type.Optional(Type.String()),
|
||||
sourceTool: Type.Optional(Type.String()),
|
||||
},
|
||||
{ additionalProperties: false },
|
||||
);
|
||||
|
||||
export const GatewayClientIdSchema = Type.Union(
|
||||
Object.values(GATEWAY_CLIENT_IDS).map((value) => Type.Literal(value)),
|
||||
|
||||
@ -71,6 +71,7 @@ export const SessionsPatchParamsSchema = Type.Object(
|
||||
execNode: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
|
||||
model: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
|
||||
spawnedBy: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
|
||||
spawnedWorkspaceDir: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
|
||||
spawnDepth: Type.Optional(Type.Union([Type.Integer({ minimum: 0 }), Type.Null()])),
|
||||
subagentRole: Type.Optional(
|
||||
Type.Union([Type.Literal("orchestrator"), Type.Literal("leaf"), Type.Null()]),
|
||||
|
||||
@ -405,30 +405,53 @@ describe("gateway agent handler", () => {
|
||||
expect(callArgs.bestEffortDeliver).toBe(false);
|
||||
});
|
||||
|
||||
it("only forwards workspaceDir for spawned subagent runs", async () => {
|
||||
it("rejects public spawned-run metadata fields", async () => {
|
||||
primeMainAgentRun();
|
||||
mocks.agentCommand.mockClear();
|
||||
|
||||
await invokeAgent(
|
||||
{
|
||||
message: "normal run",
|
||||
sessionKey: "agent:main:main",
|
||||
workspaceDir: "/tmp/ignored",
|
||||
idempotencyKey: "workspace-ignored",
|
||||
},
|
||||
{ reqId: "workspace-ignored-1" },
|
||||
);
|
||||
await vi.waitFor(() => expect(mocks.agentCommand).toHaveBeenCalled());
|
||||
const normalCall = mocks.agentCommand.mock.calls.at(-1)?.[0] as { workspaceDir?: string };
|
||||
expect(normalCall.workspaceDir).toBeUndefined();
|
||||
mocks.agentCommand.mockClear();
|
||||
const respond = vi.fn();
|
||||
|
||||
await invokeAgent(
|
||||
{
|
||||
message: "spawned run",
|
||||
sessionKey: "agent:main:main",
|
||||
spawnedBy: "agent:main:subagent:parent",
|
||||
workspaceDir: "/tmp/inherited",
|
||||
workspaceDir: "/tmp/injected",
|
||||
idempotencyKey: "workspace-rejected",
|
||||
} as AgentParams,
|
||||
{ reqId: "workspace-rejected-1", respond },
|
||||
);
|
||||
|
||||
expect(mocks.agentCommand).not.toHaveBeenCalled();
|
||||
expect(respond).toHaveBeenCalledWith(
|
||||
false,
|
||||
undefined,
|
||||
expect.objectContaining({
|
||||
message: expect.stringContaining("invalid agent params"),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("only forwards workspaceDir for spawned sessions with stored workspace inheritance", async () => {
|
||||
primeMainAgentRun();
|
||||
mockMainSessionEntry({
|
||||
spawnedBy: "agent:main:subagent:parent",
|
||||
spawnedWorkspaceDir: "/tmp/inherited",
|
||||
});
|
||||
mocks.updateSessionStore.mockImplementation(async (_path, updater) => {
|
||||
const store: Record<string, unknown> = {
|
||||
"agent:main:main": buildExistingMainStoreEntry({
|
||||
spawnedBy: "agent:main:subagent:parent",
|
||||
spawnedWorkspaceDir: "/tmp/inherited",
|
||||
}),
|
||||
};
|
||||
return await updater(store);
|
||||
});
|
||||
mocks.agentCommand.mockClear();
|
||||
|
||||
await invokeAgent(
|
||||
{
|
||||
message: "spawned run",
|
||||
sessionKey: "agent:main:main",
|
||||
idempotencyKey: "workspace-forwarded",
|
||||
},
|
||||
{ reqId: "workspace-forwarded-1" },
|
||||
|
||||
@ -190,24 +190,20 @@ export const agentHandlers: GatewayRequestHandlers = {
|
||||
timeout?: number;
|
||||
bestEffortDeliver?: boolean;
|
||||
label?: string;
|
||||
spawnedBy?: string;
|
||||
inputProvenance?: InputProvenance;
|
||||
workspaceDir?: string;
|
||||
};
|
||||
const senderIsOwner = resolveSenderIsOwnerFromClient(client);
|
||||
const cfg = loadConfig();
|
||||
const idem = request.idempotencyKey;
|
||||
const normalizedSpawned = normalizeSpawnedRunMetadata({
|
||||
spawnedBy: request.spawnedBy,
|
||||
groupId: request.groupId,
|
||||
groupChannel: request.groupChannel,
|
||||
groupSpace: request.groupSpace,
|
||||
workspaceDir: request.workspaceDir,
|
||||
});
|
||||
let resolvedGroupId: string | undefined = normalizedSpawned.groupId;
|
||||
let resolvedGroupChannel: string | undefined = normalizedSpawned.groupChannel;
|
||||
let resolvedGroupSpace: string | undefined = normalizedSpawned.groupSpace;
|
||||
let spawnedByValue = normalizedSpawned.spawnedBy;
|
||||
let spawnedByValue: string | undefined;
|
||||
const inputProvenance = normalizeInputProvenance(request.inputProvenance);
|
||||
const cached = context.dedupe.get(`agent:${idem}`);
|
||||
if (cached) {
|
||||
@ -359,11 +355,7 @@ export const agentHandlers: GatewayRequestHandlers = {
|
||||
const sessionId = entry?.sessionId ?? randomUUID();
|
||||
const labelValue = request.label?.trim() || entry?.label;
|
||||
const sessionAgent = resolveAgentIdFromSessionKey(canonicalKey);
|
||||
spawnedByValue = canonicalizeSpawnedByForAgent(
|
||||
cfg,
|
||||
sessionAgent,
|
||||
spawnedByValue || entry?.spawnedBy,
|
||||
);
|
||||
spawnedByValue = canonicalizeSpawnedByForAgent(cfg, sessionAgent, entry?.spawnedBy);
|
||||
let inheritedGroup:
|
||||
| { groupId?: string; groupChannel?: string; groupSpace?: string }
|
||||
| undefined;
|
||||
@ -400,6 +392,7 @@ export const agentHandlers: GatewayRequestHandlers = {
|
||||
providerOverride: entry?.providerOverride,
|
||||
label: labelValue,
|
||||
spawnedBy: spawnedByValue,
|
||||
spawnedWorkspaceDir: entry?.spawnedWorkspaceDir,
|
||||
spawnDepth: entry?.spawnDepth,
|
||||
channel: entry?.channel ?? request.channel?.trim(),
|
||||
groupId: resolvedGroupId ?? entry?.groupId,
|
||||
@ -628,7 +621,7 @@ export const agentHandlers: GatewayRequestHandlers = {
|
||||
// Internal-only: allow workspace override for spawned subagent runs.
|
||||
workspaceDir: resolveIngressWorkspaceOverrideForSpawnedRun({
|
||||
spawnedBy: spawnedByValue,
|
||||
workspaceDir: request.workspaceDir,
|
||||
workspaceDir: sessionEntry?.spawnedWorkspaceDir,
|
||||
}),
|
||||
senderIsOwner,
|
||||
},
|
||||
|
||||
@ -100,4 +100,42 @@ describe("browser.request profile selection", () => {
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
method: "POST",
|
||||
path: "/profiles/create",
|
||||
body: { name: "poc", cdpUrl: "http://10.0.0.42:9222" },
|
||||
},
|
||||
{
|
||||
method: "DELETE",
|
||||
path: "/profiles/poc",
|
||||
body: undefined,
|
||||
},
|
||||
{
|
||||
method: "POST",
|
||||
path: "profiles/create",
|
||||
body: { name: "poc", cdpUrl: "http://10.0.0.42:9222" },
|
||||
},
|
||||
{
|
||||
method: "DELETE",
|
||||
path: "profiles/poc",
|
||||
body: undefined,
|
||||
},
|
||||
])("blocks persistent profile mutations for $method $path", async ({ method, path, body }) => {
|
||||
const { respond, nodeRegistry } = await runBrowserRequest({
|
||||
method,
|
||||
path,
|
||||
body,
|
||||
});
|
||||
|
||||
expect(nodeRegistry.invoke).not.toHaveBeenCalled();
|
||||
expect(respond).toHaveBeenCalledWith(
|
||||
false,
|
||||
undefined,
|
||||
expect.objectContaining({
|
||||
message: "browser.request cannot create or delete persistent browser profiles",
|
||||
}),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@ -20,6 +20,26 @@ type BrowserRequestParams = {
|
||||
timeoutMs?: number;
|
||||
};
|
||||
|
||||
function normalizeBrowserRequestPath(value: string): string {
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) {
|
||||
return trimmed;
|
||||
}
|
||||
const withLeadingSlash = trimmed.startsWith("/") ? trimmed : `/${trimmed}`;
|
||||
if (withLeadingSlash.length <= 1) {
|
||||
return withLeadingSlash;
|
||||
}
|
||||
return withLeadingSlash.replace(/\/+$/, "");
|
||||
}
|
||||
|
||||
function isPersistentBrowserProfileMutation(method: string, path: string): boolean {
|
||||
const normalizedPath = normalizeBrowserRequestPath(path);
|
||||
if (method === "POST" && normalizedPath === "/profiles/create") {
|
||||
return true;
|
||||
}
|
||||
return method === "DELETE" && /^\/profiles\/[^/]+$/.test(normalizedPath);
|
||||
}
|
||||
|
||||
function resolveRequestedProfile(params: {
|
||||
query?: Record<string, unknown>;
|
||||
body?: unknown;
|
||||
@ -167,6 +187,17 @@ export const browserHandlers: GatewayRequestHandlers = {
|
||||
);
|
||||
return;
|
||||
}
|
||||
if (isPersistentBrowserProfileMutation(methodRaw, path)) {
|
||||
respond(
|
||||
false,
|
||||
undefined,
|
||||
errorShape(
|
||||
ErrorCodes.INVALID_REQUEST,
|
||||
"browser.request cannot create or delete persistent browser profiles",
|
||||
),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const cfg = loadConfig();
|
||||
let nodeTarget: NodeSession | null = null;
|
||||
|
||||
147
src/gateway/server-methods/chat.abort-authorization.test.ts
Normal file
147
src/gateway/server-methods/chat.abort-authorization.test.ts
Normal file
@ -0,0 +1,147 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { chatHandlers } from "./chat.js";
|
||||
|
||||
function createActiveRun(sessionKey: string, owner?: { connId?: string; deviceId?: string }) {
|
||||
const now = Date.now();
|
||||
return {
|
||||
controller: new AbortController(),
|
||||
sessionId: `${sessionKey}-session`,
|
||||
sessionKey,
|
||||
startedAtMs: now,
|
||||
expiresAtMs: now + 30_000,
|
||||
ownerConnId: owner?.connId,
|
||||
ownerDeviceId: owner?.deviceId,
|
||||
};
|
||||
}
|
||||
|
||||
function createContext(overrides: Record<string, unknown> = {}) {
|
||||
return {
|
||||
chatAbortControllers: new Map(),
|
||||
chatRunBuffers: new Map(),
|
||||
chatDeltaSentAt: new Map(),
|
||||
chatAbortedRuns: new Map<string, number>(),
|
||||
removeChatRun: vi
|
||||
.fn()
|
||||
.mockImplementation((run: string) => ({ sessionKey: "main", clientRunId: run })),
|
||||
agentRunSeq: new Map<string, number>(),
|
||||
broadcast: vi.fn(),
|
||||
nodeSendToSession: vi.fn(),
|
||||
logGateway: { warn: vi.fn() },
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
async function invokeChatAbort(params: {
|
||||
context: ReturnType<typeof createContext>;
|
||||
request: { sessionKey: string; runId?: string };
|
||||
client?: {
|
||||
connId?: string;
|
||||
connect?: {
|
||||
device?: { id?: string };
|
||||
scopes?: string[];
|
||||
};
|
||||
} | null;
|
||||
}) {
|
||||
const respond = vi.fn();
|
||||
await chatHandlers["chat.abort"]({
|
||||
params: params.request,
|
||||
respond: respond as never,
|
||||
context: params.context as never,
|
||||
req: {} as never,
|
||||
client: (params.client ?? null) as never,
|
||||
isWebchatConnect: () => false,
|
||||
});
|
||||
return respond;
|
||||
}
|
||||
|
||||
describe("chat.abort authorization", () => {
|
||||
it("rejects explicit run aborts from other clients", async () => {
|
||||
const context = createContext({
|
||||
chatAbortControllers: new Map([
|
||||
["run-1", createActiveRun("main", { connId: "conn-owner", deviceId: "dev-owner" })],
|
||||
]),
|
||||
});
|
||||
|
||||
const respond = await invokeChatAbort({
|
||||
context,
|
||||
request: { sessionKey: "main", runId: "run-1" },
|
||||
client: {
|
||||
connId: "conn-other",
|
||||
connect: { device: { id: "dev-other" }, scopes: ["operator.write"] },
|
||||
},
|
||||
});
|
||||
|
||||
const [ok, payload, error] = respond.mock.calls.at(-1) ?? [];
|
||||
expect(ok).toBe(false);
|
||||
expect(payload).toBeUndefined();
|
||||
expect(error).toMatchObject({ code: "INVALID_REQUEST", message: "unauthorized" });
|
||||
expect(context.chatAbortControllers.has("run-1")).toBe(true);
|
||||
});
|
||||
|
||||
it("allows the same paired device to abort after reconnecting", async () => {
|
||||
const context = createContext({
|
||||
chatAbortControllers: new Map([
|
||||
["run-1", createActiveRun("main", { connId: "conn-old", deviceId: "dev-1" })],
|
||||
]),
|
||||
});
|
||||
|
||||
const respond = await invokeChatAbort({
|
||||
context,
|
||||
request: { sessionKey: "main", runId: "run-1" },
|
||||
client: {
|
||||
connId: "conn-new",
|
||||
connect: { device: { id: "dev-1" }, scopes: ["operator.write"] },
|
||||
},
|
||||
});
|
||||
|
||||
const [ok, payload] = respond.mock.calls.at(-1) ?? [];
|
||||
expect(ok).toBe(true);
|
||||
expect(payload).toMatchObject({ aborted: true, runIds: ["run-1"] });
|
||||
expect(context.chatAbortControllers.has("run-1")).toBe(false);
|
||||
});
|
||||
|
||||
it("only aborts session-scoped runs owned by the requester", async () => {
|
||||
const context = createContext({
|
||||
chatAbortControllers: new Map([
|
||||
["run-mine", createActiveRun("main", { deviceId: "dev-1" })],
|
||||
["run-other", createActiveRun("main", { deviceId: "dev-2" })],
|
||||
]),
|
||||
});
|
||||
|
||||
const respond = await invokeChatAbort({
|
||||
context,
|
||||
request: { sessionKey: "main" },
|
||||
client: {
|
||||
connId: "conn-1",
|
||||
connect: { device: { id: "dev-1" }, scopes: ["operator.write"] },
|
||||
},
|
||||
});
|
||||
|
||||
const [ok, payload] = respond.mock.calls.at(-1) ?? [];
|
||||
expect(ok).toBe(true);
|
||||
expect(payload).toMatchObject({ aborted: true, runIds: ["run-mine"] });
|
||||
expect(context.chatAbortControllers.has("run-mine")).toBe(false);
|
||||
expect(context.chatAbortControllers.has("run-other")).toBe(true);
|
||||
});
|
||||
|
||||
it("allows operator.admin clients to bypass owner checks", async () => {
|
||||
const context = createContext({
|
||||
chatAbortControllers: new Map([
|
||||
["run-1", createActiveRun("main", { connId: "conn-owner", deviceId: "dev-owner" })],
|
||||
]),
|
||||
});
|
||||
|
||||
const respond = await invokeChatAbort({
|
||||
context,
|
||||
request: { sessionKey: "main", runId: "run-1" },
|
||||
client: {
|
||||
connId: "conn-admin",
|
||||
connect: { device: { id: "dev-admin" }, scopes: ["operator.admin"] },
|
||||
},
|
||||
});
|
||||
|
||||
const [ok, payload] = respond.mock.calls.at(-1) ?? [];
|
||||
expect(ok).toBe(true);
|
||||
expect(payload).toMatchObject({ aborted: true, runIds: ["run-1"] });
|
||||
});
|
||||
});
|
||||
@ -25,7 +25,6 @@ import {
|
||||
} from "../../utils/message-channel.js";
|
||||
import {
|
||||
abortChatRunById,
|
||||
abortChatRunsForSessionKey,
|
||||
type ChatAbortControllerEntry,
|
||||
type ChatAbortOps,
|
||||
isChatStopCommandText,
|
||||
@ -33,6 +32,7 @@ import {
|
||||
} from "../chat-abort.js";
|
||||
import { type ChatImageContent, parseMessageWithAttachments } from "../chat-attachments.js";
|
||||
import { stripEnvelopeFromMessage, stripEnvelopeFromMessages } from "../chat-sanitize.js";
|
||||
import { ADMIN_SCOPE } from "../method-scopes.js";
|
||||
import {
|
||||
GATEWAY_CLIENT_CAPS,
|
||||
GATEWAY_CLIENT_MODES,
|
||||
@ -83,6 +83,12 @@ type AbortedPartialSnapshot = {
|
||||
abortOrigin: AbortOrigin;
|
||||
};
|
||||
|
||||
type ChatAbortRequester = {
|
||||
connId?: string;
|
||||
deviceId?: string;
|
||||
isAdmin: boolean;
|
||||
};
|
||||
|
||||
const CHAT_HISTORY_TEXT_MAX_CHARS = 12_000;
|
||||
const CHAT_HISTORY_MAX_SINGLE_MESSAGE_BYTES = 128 * 1024;
|
||||
const CHAT_HISTORY_OVERSIZED_PLACEHOLDER = "[chat.history omitted: message too large]";
|
||||
@ -314,6 +320,68 @@ function sanitizeChatHistoryContentBlock(block: unknown): { block: unknown; chan
|
||||
return { block: changed ? entry : block, changed };
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate that a value is a finite number, returning undefined otherwise.
|
||||
*/
|
||||
function toFiniteNumber(x: unknown): number | undefined {
|
||||
return typeof x === "number" && Number.isFinite(x) ? x : undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sanitize usage metadata to ensure only finite numeric fields are included.
|
||||
* Prevents UI crashes from malformed transcript JSON.
|
||||
*/
|
||||
function sanitizeUsage(raw: unknown): Record<string, number> | undefined {
|
||||
if (!raw || typeof raw !== "object") {
|
||||
return undefined;
|
||||
}
|
||||
const u = raw as Record<string, unknown>;
|
||||
const out: Record<string, number> = {};
|
||||
|
||||
// Whitelist known usage fields and validate they're finite numbers
|
||||
const knownFields = [
|
||||
"input",
|
||||
"output",
|
||||
"totalTokens",
|
||||
"inputTokens",
|
||||
"outputTokens",
|
||||
"cacheRead",
|
||||
"cacheWrite",
|
||||
"cache_read_input_tokens",
|
||||
"cache_creation_input_tokens",
|
||||
];
|
||||
|
||||
for (const k of knownFields) {
|
||||
const n = toFiniteNumber(u[k]);
|
||||
if (n !== undefined) {
|
||||
out[k] = n;
|
||||
}
|
||||
}
|
||||
|
||||
// Preserve nested usage.cost when present
|
||||
if ("cost" in u && u.cost != null && typeof u.cost === "object") {
|
||||
const sanitizedCost = sanitizeCost(u.cost);
|
||||
if (sanitizedCost) {
|
||||
(out as Record<string, unknown>).cost = sanitizedCost;
|
||||
}
|
||||
}
|
||||
|
||||
return Object.keys(out).length > 0 ? out : undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sanitize cost metadata to ensure only finite numeric fields are included.
|
||||
* Prevents UI crashes from calling .toFixed() on non-numbers.
|
||||
*/
|
||||
function sanitizeCost(raw: unknown): { total?: number } | undefined {
|
||||
if (!raw || typeof raw !== "object") {
|
||||
return undefined;
|
||||
}
|
||||
const c = raw as Record<string, unknown>;
|
||||
const total = toFiniteNumber(c.total);
|
||||
return total !== undefined ? { total } : undefined;
|
||||
}
|
||||
|
||||
function sanitizeChatHistoryMessage(message: unknown): { message: unknown; changed: boolean } {
|
||||
if (!message || typeof message !== "object") {
|
||||
return { message, changed: false };
|
||||
@ -325,13 +393,38 @@ function sanitizeChatHistoryMessage(message: unknown): { message: unknown; chang
|
||||
delete entry.details;
|
||||
changed = true;
|
||||
}
|
||||
if ("usage" in entry) {
|
||||
delete entry.usage;
|
||||
changed = true;
|
||||
}
|
||||
if ("cost" in entry) {
|
||||
delete entry.cost;
|
||||
changed = true;
|
||||
|
||||
// Keep usage/cost so the chat UI can render per-message token and cost badges.
|
||||
// Only retain usage/cost on assistant messages and validate numeric fields to prevent UI crashes.
|
||||
if (entry.role !== "assistant") {
|
||||
if ("usage" in entry) {
|
||||
delete entry.usage;
|
||||
changed = true;
|
||||
}
|
||||
if ("cost" in entry) {
|
||||
delete entry.cost;
|
||||
changed = true;
|
||||
}
|
||||
} else {
|
||||
// Validate and sanitize usage/cost for assistant messages
|
||||
if ("usage" in entry) {
|
||||
const sanitized = sanitizeUsage(entry.usage);
|
||||
if (sanitized) {
|
||||
entry.usage = sanitized;
|
||||
} else {
|
||||
delete entry.usage;
|
||||
}
|
||||
changed = true;
|
||||
}
|
||||
if ("cost" in entry) {
|
||||
const sanitized = sanitizeCost(entry.cost);
|
||||
if (sanitized) {
|
||||
entry.cost = sanitized;
|
||||
} else {
|
||||
delete entry.cost;
|
||||
}
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (typeof entry.content === "string") {
|
||||
@ -597,12 +690,12 @@ function appendAssistantTranscriptMessage(params: {
|
||||
function collectSessionAbortPartials(params: {
|
||||
chatAbortControllers: Map<string, ChatAbortControllerEntry>;
|
||||
chatRunBuffers: Map<string, string>;
|
||||
sessionKey: string;
|
||||
runIds: ReadonlySet<string>;
|
||||
abortOrigin: AbortOrigin;
|
||||
}): AbortedPartialSnapshot[] {
|
||||
const out: AbortedPartialSnapshot[] = [];
|
||||
for (const [runId, active] of params.chatAbortControllers) {
|
||||
if (active.sessionKey !== params.sessionKey) {
|
||||
if (!params.runIds.has(runId)) {
|
||||
continue;
|
||||
}
|
||||
const text = params.chatRunBuffers.get(runId);
|
||||
@ -664,23 +757,104 @@ function createChatAbortOps(context: GatewayRequestContext): ChatAbortOps {
|
||||
};
|
||||
}
|
||||
|
||||
function normalizeOptionalText(value?: string | null): string | undefined {
|
||||
const trimmed = value?.trim();
|
||||
return trimmed || undefined;
|
||||
}
|
||||
|
||||
function resolveChatAbortRequester(
|
||||
client: GatewayRequestHandlerOptions["client"],
|
||||
): ChatAbortRequester {
|
||||
const scopes = Array.isArray(client?.connect?.scopes) ? client.connect.scopes : [];
|
||||
return {
|
||||
connId: normalizeOptionalText(client?.connId),
|
||||
deviceId: normalizeOptionalText(client?.connect?.device?.id),
|
||||
isAdmin: scopes.includes(ADMIN_SCOPE),
|
||||
};
|
||||
}
|
||||
|
||||
function canRequesterAbortChatRun(
|
||||
entry: ChatAbortControllerEntry,
|
||||
requester: ChatAbortRequester,
|
||||
): boolean {
|
||||
if (requester.isAdmin) {
|
||||
return true;
|
||||
}
|
||||
const ownerDeviceId = normalizeOptionalText(entry.ownerDeviceId);
|
||||
const ownerConnId = normalizeOptionalText(entry.ownerConnId);
|
||||
if (!ownerDeviceId && !ownerConnId) {
|
||||
return true;
|
||||
}
|
||||
if (ownerDeviceId && requester.deviceId && ownerDeviceId === requester.deviceId) {
|
||||
return true;
|
||||
}
|
||||
if (ownerConnId && requester.connId && ownerConnId === requester.connId) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
function resolveAuthorizedRunIdsForSession(params: {
|
||||
chatAbortControllers: Map<string, ChatAbortControllerEntry>;
|
||||
sessionKey: string;
|
||||
requester: ChatAbortRequester;
|
||||
}) {
|
||||
const authorizedRunIds: string[] = [];
|
||||
let matchedSessionRuns = 0;
|
||||
for (const [runId, active] of params.chatAbortControllers) {
|
||||
if (active.sessionKey !== params.sessionKey) {
|
||||
continue;
|
||||
}
|
||||
matchedSessionRuns += 1;
|
||||
if (canRequesterAbortChatRun(active, params.requester)) {
|
||||
authorizedRunIds.push(runId);
|
||||
}
|
||||
}
|
||||
return {
|
||||
matchedSessionRuns,
|
||||
authorizedRunIds,
|
||||
};
|
||||
}
|
||||
|
||||
function abortChatRunsForSessionKeyWithPartials(params: {
|
||||
context: GatewayRequestContext;
|
||||
ops: ChatAbortOps;
|
||||
sessionKey: string;
|
||||
abortOrigin: AbortOrigin;
|
||||
stopReason?: string;
|
||||
requester: ChatAbortRequester;
|
||||
}) {
|
||||
const { matchedSessionRuns, authorizedRunIds } = resolveAuthorizedRunIdsForSession({
|
||||
chatAbortControllers: params.context.chatAbortControllers,
|
||||
sessionKey: params.sessionKey,
|
||||
requester: params.requester,
|
||||
});
|
||||
if (authorizedRunIds.length === 0) {
|
||||
return {
|
||||
aborted: false,
|
||||
runIds: [],
|
||||
unauthorized: matchedSessionRuns > 0,
|
||||
};
|
||||
}
|
||||
const authorizedRunIdSet = new Set(authorizedRunIds);
|
||||
const snapshots = collectSessionAbortPartials({
|
||||
chatAbortControllers: params.context.chatAbortControllers,
|
||||
chatRunBuffers: params.context.chatRunBuffers,
|
||||
sessionKey: params.sessionKey,
|
||||
runIds: authorizedRunIdSet,
|
||||
abortOrigin: params.abortOrigin,
|
||||
});
|
||||
const res = abortChatRunsForSessionKey(params.ops, {
|
||||
sessionKey: params.sessionKey,
|
||||
stopReason: params.stopReason,
|
||||
});
|
||||
const runIds: string[] = [];
|
||||
for (const runId of authorizedRunIds) {
|
||||
const res = abortChatRunById(params.ops, {
|
||||
runId,
|
||||
sessionKey: params.sessionKey,
|
||||
stopReason: params.stopReason,
|
||||
});
|
||||
if (res.aborted) {
|
||||
runIds.push(runId);
|
||||
}
|
||||
}
|
||||
const res = { aborted: runIds.length > 0, runIds, unauthorized: false };
|
||||
if (res.aborted) {
|
||||
persistAbortedPartials({
|
||||
context: params.context,
|
||||
@ -802,7 +976,7 @@ export const chatHandlers: GatewayRequestHandlers = {
|
||||
verboseLevel,
|
||||
});
|
||||
},
|
||||
"chat.abort": ({ params, respond, context }) => {
|
||||
"chat.abort": ({ params, respond, context, client }) => {
|
||||
if (!validateChatAbortParams(params)) {
|
||||
respond(
|
||||
false,
|
||||
@ -820,6 +994,7 @@ export const chatHandlers: GatewayRequestHandlers = {
|
||||
};
|
||||
|
||||
const ops = createChatAbortOps(context);
|
||||
const requester = resolveChatAbortRequester(client);
|
||||
|
||||
if (!runId) {
|
||||
const res = abortChatRunsForSessionKeyWithPartials({
|
||||
@ -828,7 +1003,12 @@ export const chatHandlers: GatewayRequestHandlers = {
|
||||
sessionKey: rawSessionKey,
|
||||
abortOrigin: "rpc",
|
||||
stopReason: "rpc",
|
||||
requester,
|
||||
});
|
||||
if (res.unauthorized) {
|
||||
respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized"));
|
||||
return;
|
||||
}
|
||||
respond(true, { ok: true, aborted: res.aborted, runIds: res.runIds });
|
||||
return;
|
||||
}
|
||||
@ -846,6 +1026,10 @@ export const chatHandlers: GatewayRequestHandlers = {
|
||||
);
|
||||
return;
|
||||
}
|
||||
if (!canRequesterAbortChatRun(active, requester)) {
|
||||
respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized"));
|
||||
return;
|
||||
}
|
||||
|
||||
const partialText = context.chatRunBuffers.get(runId);
|
||||
const res = abortChatRunById(ops, {
|
||||
@ -987,7 +1171,12 @@ export const chatHandlers: GatewayRequestHandlers = {
|
||||
sessionKey: rawSessionKey,
|
||||
abortOrigin: "stop-command",
|
||||
stopReason: "stop",
|
||||
requester: resolveChatAbortRequester(client),
|
||||
});
|
||||
if (res.unauthorized) {
|
||||
respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "unauthorized"));
|
||||
return;
|
||||
}
|
||||
respond(true, { ok: true, aborted: res.aborted, runIds: res.runIds });
|
||||
return;
|
||||
}
|
||||
@ -1017,6 +1206,8 @@ export const chatHandlers: GatewayRequestHandlers = {
|
||||
sessionKey: rawSessionKey,
|
||||
startedAtMs: now,
|
||||
expiresAtMs: resolveChatRunExpiresAtMs({ now, timeoutMs }),
|
||||
ownerConnId: normalizeOptionalText(client?.connId),
|
||||
ownerDeviceId: normalizeOptionalText(client?.connect?.device?.id),
|
||||
});
|
||||
const ackPayload = {
|
||||
runId: clientRunId,
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
import { sanitizeExecApprovalDisplayText } from "../../infra/exec-approval-command-display.js";
|
||||
import type { ExecApprovalForwarder } from "../../infra/exec-approval-forwarder.js";
|
||||
import {
|
||||
DEFAULT_EXEC_APPROVAL_TIMEOUT_MS,
|
||||
@ -125,8 +126,11 @@ export function createExecApprovalHandlers(
|
||||
return;
|
||||
}
|
||||
const request = {
|
||||
command: effectiveCommandText,
|
||||
commandPreview: host === "node" ? undefined : approvalContext.commandPreview,
|
||||
command: sanitizeExecApprovalDisplayText(effectiveCommandText),
|
||||
commandPreview:
|
||||
host === "node" || !approvalContext.commandPreview
|
||||
? undefined
|
||||
: sanitizeExecApprovalDisplayText(approvalContext.commandPreview),
|
||||
commandArgv: host === "node" ? undefined : effectiveCommandArgv,
|
||||
envKeys: systemRunBinding?.envKeys?.length ? systemRunBinding.envKeys : undefined,
|
||||
systemRunBinding: systemRunBinding?.binding ?? null,
|
||||
|
||||
@ -641,6 +641,34 @@ describe("exec approval handlers", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("sanitizes invisible Unicode format chars in approval display text without changing node bindings", async () => {
|
||||
const { handlers, broadcasts, respond, context } = createExecApprovalFixture();
|
||||
await requestExecApproval({
|
||||
handlers,
|
||||
respond,
|
||||
context,
|
||||
params: {
|
||||
timeoutMs: 10,
|
||||
command: "bash safe\u200B.sh",
|
||||
commandArgv: ["bash", "safe\u200B.sh"],
|
||||
systemRunPlan: {
|
||||
argv: ["bash", "safe\u200B.sh"],
|
||||
cwd: "/real/cwd",
|
||||
commandText: "bash safe\u200B.sh",
|
||||
agentId: "main",
|
||||
sessionKey: "agent:main:main",
|
||||
},
|
||||
},
|
||||
});
|
||||
const requested = broadcasts.find((entry) => entry.event === "exec.approval.requested");
|
||||
expect(requested).toBeTruthy();
|
||||
const request = (requested?.payload as { request?: Record<string, unknown> })?.request ?? {};
|
||||
expect(request["command"]).toBe("bash safe\\u{200B}.sh");
|
||||
expect((request["systemRunPlan"] as { commandText?: string }).commandText).toBe(
|
||||
"bash safe\u200B.sh",
|
||||
);
|
||||
});
|
||||
|
||||
it("accepts resolve during broadcast", async () => {
|
||||
const manager = new ExecApprovalManager();
|
||||
const handlers = createExecApprovalHandlers(manager);
|
||||
|
||||
@ -273,6 +273,37 @@ describe("gateway server chat", () => {
|
||||
});
|
||||
});
|
||||
|
||||
test("chat.history preserves usage and cost metadata for assistant messages", async () => {
|
||||
await withGatewayChatHarness(async ({ ws, createSessionDir }) => {
|
||||
await connectOk(ws);
|
||||
|
||||
const sessionDir = await createSessionDir();
|
||||
await writeMainSessionStore();
|
||||
|
||||
await writeMainSessionTranscript(sessionDir, [
|
||||
JSON.stringify({
|
||||
message: {
|
||||
role: "assistant",
|
||||
timestamp: Date.now(),
|
||||
content: [{ type: "text", text: "hello" }],
|
||||
usage: { input: 12, output: 5, totalTokens: 17 },
|
||||
cost: { total: 0.0123 },
|
||||
details: { debug: true },
|
||||
},
|
||||
}),
|
||||
]);
|
||||
|
||||
const messages = await fetchHistoryMessages(ws);
|
||||
expect(messages).toHaveLength(1);
|
||||
expect(messages[0]).toMatchObject({
|
||||
role: "assistant",
|
||||
usage: { input: 12, output: 5, totalTokens: 17 },
|
||||
cost: { total: 0.0123 },
|
||||
});
|
||||
expect(messages[0]).not.toHaveProperty("details");
|
||||
});
|
||||
});
|
||||
|
||||
test("chat.history strips inline directives from displayed message text", async () => {
|
||||
await withGatewayChatHarness(async ({ ws, createSessionDir }) => {
|
||||
await connectOk(ws);
|
||||
|
||||
@ -810,6 +810,7 @@ export function listSessionsFromStore(params: {
|
||||
const model = resolvedModel.model ?? DEFAULT_MODEL;
|
||||
return {
|
||||
key,
|
||||
spawnedBy: entry?.spawnedBy,
|
||||
entry,
|
||||
kind: classifySessionKey(key, entry),
|
||||
label: entry?.label,
|
||||
|
||||
@ -15,6 +15,7 @@ export type GatewaySessionsDefaults = {
|
||||
|
||||
export type GatewaySessionRow = {
|
||||
key: string;
|
||||
spawnedBy?: string;
|
||||
kind: "direct" | "group" | "global" | "unknown";
|
||||
label?: string;
|
||||
displayName?: string;
|
||||
|
||||
@ -265,6 +265,19 @@ describe("gateway sessions patch", () => {
|
||||
expect(entry.spawnedBy).toBe("agent:main:main");
|
||||
});
|
||||
|
||||
test("sets spawnedWorkspaceDir for subagent sessions", async () => {
|
||||
const entry = expectPatchOk(
|
||||
await runPatch({
|
||||
storeKey: "agent:main:subagent:child",
|
||||
patch: {
|
||||
key: "agent:main:subagent:child",
|
||||
spawnedWorkspaceDir: "/tmp/subagent-workspace",
|
||||
},
|
||||
}),
|
||||
);
|
||||
expect(entry.spawnedWorkspaceDir).toBe("/tmp/subagent-workspace");
|
||||
});
|
||||
|
||||
test("sets spawnDepth for ACP sessions", async () => {
|
||||
const entry = expectPatchOk(
|
||||
await runPatch({
|
||||
@ -282,6 +295,13 @@ describe("gateway sessions patch", () => {
|
||||
expectPatchError(result, "spawnDepth is only supported");
|
||||
});
|
||||
|
||||
test("rejects spawnedWorkspaceDir on non-subagent sessions", async () => {
|
||||
const result = await runPatch({
|
||||
patch: { key: MAIN_SESSION_KEY, spawnedWorkspaceDir: "/tmp/nope" },
|
||||
});
|
||||
expectPatchError(result, "spawnedWorkspaceDir is only supported");
|
||||
});
|
||||
|
||||
test("normalizes exec/send/group patches", async () => {
|
||||
const entry = expectPatchOk(
|
||||
await runPatch({
|
||||
|
||||
@ -128,6 +128,27 @@ export async function applySessionsPatchToStore(params: {
|
||||
}
|
||||
}
|
||||
|
||||
if ("spawnedWorkspaceDir" in patch) {
|
||||
const raw = patch.spawnedWorkspaceDir;
|
||||
if (raw === null) {
|
||||
if (existing?.spawnedWorkspaceDir) {
|
||||
return invalid("spawnedWorkspaceDir cannot be cleared once set");
|
||||
}
|
||||
} else if (raw !== undefined) {
|
||||
if (!supportsSpawnLineage(storeKey)) {
|
||||
return invalid("spawnedWorkspaceDir is only supported for subagent:* or acp:* sessions");
|
||||
}
|
||||
const trimmed = String(raw).trim();
|
||||
if (!trimmed) {
|
||||
return invalid("invalid spawnedWorkspaceDir: empty");
|
||||
}
|
||||
if (existing?.spawnedWorkspaceDir && existing.spawnedWorkspaceDir !== trimmed) {
|
||||
return invalid("spawnedWorkspaceDir cannot be changed once set");
|
||||
}
|
||||
next.spawnedWorkspaceDir = trimmed;
|
||||
}
|
||||
}
|
||||
|
||||
if ("spawnDepth" in patch) {
|
||||
const raw = patch.spawnDepth;
|
||||
if (raw === null) {
|
||||
|
||||
@ -1,9 +1,11 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
import { sanitizeTerminalText } from "../../terminal/safe-text.js";
|
||||
import {
|
||||
describeIMessageEchoDropLog,
|
||||
resolveIMessageInboundDecision,
|
||||
} from "./inbound-processing.js";
|
||||
import { createSelfChatCache } from "./self-chat-cache.js";
|
||||
|
||||
describe("resolveIMessageInboundDecision echo detection", () => {
|
||||
const cfg = {} as OpenClawConfig;
|
||||
@ -46,6 +48,324 @@ describe("resolveIMessageInboundDecision echo detection", () => {
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("drops reflected self-chat duplicates after seeing the from-me copy", () => {
|
||||
const selfChatCache = createSelfChatCache();
|
||||
const createdAt = "2026-03-02T20:58:10.649Z";
|
||||
|
||||
expect(
|
||||
resolveIMessageInboundDecision({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9641,
|
||||
sender: "+15555550123",
|
||||
text: "Do you want to report this issue?",
|
||||
created_at: createdAt,
|
||||
is_from_me: true,
|
||||
is_group: false,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: "Do you want to report this issue?",
|
||||
bodyText: "Do you want to report this issue?",
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose: undefined,
|
||||
}),
|
||||
).toEqual({ kind: "drop", reason: "from me" });
|
||||
|
||||
expect(
|
||||
resolveIMessageInboundDecision({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9642,
|
||||
sender: "+15555550123",
|
||||
text: "Do you want to report this issue?",
|
||||
created_at: createdAt,
|
||||
is_from_me: false,
|
||||
is_group: false,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: "Do you want to report this issue?",
|
||||
bodyText: "Do you want to report this issue?",
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose: undefined,
|
||||
}),
|
||||
).toEqual({ kind: "drop", reason: "self-chat echo" });
|
||||
});
|
||||
|
||||
it("does not drop same-text messages when created_at differs", () => {
|
||||
const selfChatCache = createSelfChatCache();
|
||||
|
||||
resolveIMessageInboundDecision({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9641,
|
||||
sender: "+15555550123",
|
||||
text: "ok",
|
||||
created_at: "2026-03-02T20:58:10.649Z",
|
||||
is_from_me: true,
|
||||
is_group: false,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: "ok",
|
||||
bodyText: "ok",
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose: undefined,
|
||||
});
|
||||
|
||||
const decision = resolveIMessageInboundDecision({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9642,
|
||||
sender: "+15555550123",
|
||||
text: "ok",
|
||||
created_at: "2026-03-02T20:58:11.649Z",
|
||||
is_from_me: false,
|
||||
is_group: false,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: "ok",
|
||||
bodyText: "ok",
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose: undefined,
|
||||
});
|
||||
|
||||
expect(decision.kind).toBe("dispatch");
|
||||
});
|
||||
|
||||
it("keeps self-chat cache scoped to configured group threads", () => {
|
||||
const selfChatCache = createSelfChatCache();
|
||||
const groupedCfg = {
|
||||
channels: {
|
||||
imessage: {
|
||||
groups: {
|
||||
"123": {},
|
||||
"456": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig;
|
||||
const createdAt = "2026-03-02T20:58:10.649Z";
|
||||
|
||||
expect(
|
||||
resolveIMessageInboundDecision({
|
||||
cfg: groupedCfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9701,
|
||||
chat_id: 123,
|
||||
sender: "+15555550123",
|
||||
text: "same text",
|
||||
created_at: createdAt,
|
||||
is_from_me: true,
|
||||
is_group: false,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: "same text",
|
||||
bodyText: "same text",
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose: undefined,
|
||||
}),
|
||||
).toEqual({ kind: "drop", reason: "from me" });
|
||||
|
||||
const decision = resolveIMessageInboundDecision({
|
||||
cfg: groupedCfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9702,
|
||||
chat_id: 456,
|
||||
sender: "+15555550123",
|
||||
text: "same text",
|
||||
created_at: createdAt,
|
||||
is_from_me: false,
|
||||
is_group: false,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: "same text",
|
||||
bodyText: "same text",
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose: undefined,
|
||||
});
|
||||
|
||||
expect(decision.kind).toBe("dispatch");
|
||||
});
|
||||
|
||||
it("does not drop other participants in the same group thread", () => {
|
||||
const selfChatCache = createSelfChatCache();
|
||||
const createdAt = "2026-03-02T20:58:10.649Z";
|
||||
|
||||
expect(
|
||||
resolveIMessageInboundDecision({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9751,
|
||||
chat_id: 123,
|
||||
sender: "+15555550123",
|
||||
text: "same text",
|
||||
created_at: createdAt,
|
||||
is_from_me: true,
|
||||
is_group: true,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: "same text",
|
||||
bodyText: "same text",
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose: undefined,
|
||||
}),
|
||||
).toEqual({ kind: "drop", reason: "from me" });
|
||||
|
||||
const decision = resolveIMessageInboundDecision({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9752,
|
||||
chat_id: 123,
|
||||
sender: "+15555550999",
|
||||
text: "same text",
|
||||
created_at: createdAt,
|
||||
is_from_me: false,
|
||||
is_group: true,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: "same text",
|
||||
bodyText: "same text",
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose: undefined,
|
||||
});
|
||||
|
||||
expect(decision.kind).toBe("dispatch");
|
||||
});
|
||||
|
||||
it("sanitizes reflected duplicate previews before logging", () => {
|
||||
const selfChatCache = createSelfChatCache();
|
||||
const logVerbose = vi.fn();
|
||||
const createdAt = "2026-03-02T20:58:10.649Z";
|
||||
const bodyText = "line-1\nline-2\t\u001b[31mred";
|
||||
|
||||
resolveIMessageInboundDecision({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9801,
|
||||
sender: "+15555550123",
|
||||
text: bodyText,
|
||||
created_at: createdAt,
|
||||
is_from_me: true,
|
||||
is_group: false,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: bodyText,
|
||||
bodyText,
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose,
|
||||
});
|
||||
|
||||
resolveIMessageInboundDecision({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
message: {
|
||||
id: 9802,
|
||||
sender: "+15555550123",
|
||||
text: bodyText,
|
||||
created_at: createdAt,
|
||||
is_from_me: false,
|
||||
is_group: false,
|
||||
},
|
||||
opts: undefined,
|
||||
messageText: bodyText,
|
||||
bodyText,
|
||||
allowFrom: [],
|
||||
groupAllowFrom: [],
|
||||
groupPolicy: "open",
|
||||
dmPolicy: "open",
|
||||
storeAllowFrom: [],
|
||||
historyLimit: 0,
|
||||
groupHistories: new Map(),
|
||||
echoCache: undefined,
|
||||
selfChatCache,
|
||||
logVerbose,
|
||||
});
|
||||
|
||||
expect(logVerbose).toHaveBeenCalledWith(
|
||||
`imessage: dropping self-chat reflected duplicate: "${sanitizeTerminalText(bodyText)}"`,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("describeIMessageEchoDropLog", () => {
|
||||
|
||||
@ -24,6 +24,7 @@ import {
|
||||
DM_GROUP_ACCESS_REASON,
|
||||
resolveDmGroupAccessWithLists,
|
||||
} from "../../security/dm-policy-shared.js";
|
||||
import { sanitizeTerminalText } from "../../terminal/safe-text.js";
|
||||
import { truncateUtf16Safe } from "../../utils.js";
|
||||
import {
|
||||
formatIMessageChatTarget,
|
||||
@ -31,6 +32,7 @@ import {
|
||||
normalizeIMessageHandle,
|
||||
} from "../targets.js";
|
||||
import { detectReflectedContent } from "./reflection-guard.js";
|
||||
import type { SelfChatCache } from "./self-chat-cache.js";
|
||||
import type { MonitorIMessageOpts, IMessagePayload } from "./types.js";
|
||||
|
||||
type IMessageReplyContext = {
|
||||
@ -101,6 +103,7 @@ export function resolveIMessageInboundDecision(params: {
|
||||
historyLimit: number;
|
||||
groupHistories: Map<string, HistoryEntry[]>;
|
||||
echoCache?: { has: (scope: string, lookup: { text?: string; messageId?: string }) => boolean };
|
||||
selfChatCache?: SelfChatCache;
|
||||
logVerbose?: (msg: string) => void;
|
||||
}): IMessageInboundDecision {
|
||||
const senderRaw = params.message.sender ?? "";
|
||||
@ -109,13 +112,10 @@ export function resolveIMessageInboundDecision(params: {
|
||||
return { kind: "drop", reason: "missing sender" };
|
||||
}
|
||||
const senderNormalized = normalizeIMessageHandle(sender);
|
||||
if (params.message.is_from_me) {
|
||||
return { kind: "drop", reason: "from me" };
|
||||
}
|
||||
|
||||
const chatId = params.message.chat_id ?? undefined;
|
||||
const chatGuid = params.message.chat_guid ?? undefined;
|
||||
const chatIdentifier = params.message.chat_identifier ?? undefined;
|
||||
const createdAt = params.message.created_at ? Date.parse(params.message.created_at) : undefined;
|
||||
|
||||
const groupIdCandidate = chatId !== undefined ? String(chatId) : undefined;
|
||||
const groupListPolicy = groupIdCandidate
|
||||
@ -138,6 +138,18 @@ export function resolveIMessageInboundDecision(params: {
|
||||
groupIdCandidate && groupListPolicy.allowlistEnabled && groupListPolicy.groupConfig,
|
||||
);
|
||||
const isGroup = Boolean(params.message.is_group) || treatAsGroupByConfig;
|
||||
const selfChatLookup = {
|
||||
accountId: params.accountId,
|
||||
isGroup,
|
||||
chatId,
|
||||
sender,
|
||||
text: params.bodyText,
|
||||
createdAt,
|
||||
};
|
||||
if (params.message.is_from_me) {
|
||||
params.selfChatCache?.remember(selfChatLookup);
|
||||
return { kind: "drop", reason: "from me" };
|
||||
}
|
||||
if (isGroup && !chatId) {
|
||||
return { kind: "drop", reason: "group without chat_id" };
|
||||
}
|
||||
@ -215,6 +227,17 @@ export function resolveIMessageInboundDecision(params: {
|
||||
return { kind: "drop", reason: "empty body" };
|
||||
}
|
||||
|
||||
if (
|
||||
params.selfChatCache?.has({
|
||||
...selfChatLookup,
|
||||
text: bodyText,
|
||||
})
|
||||
) {
|
||||
const preview = sanitizeTerminalText(truncateUtf16Safe(bodyText, 50));
|
||||
params.logVerbose?.(`imessage: dropping self-chat reflected duplicate: "${preview}"`);
|
||||
return { kind: "drop", reason: "self-chat echo" };
|
||||
}
|
||||
|
||||
// Echo detection: check if the received message matches a recently sent message.
|
||||
// Scope by conversation so same text in different chats is not conflated.
|
||||
const inboundMessageId = params.message.id != null ? String(params.message.id) : undefined;
|
||||
@ -250,7 +273,6 @@ export function resolveIMessageInboundDecision(params: {
|
||||
}
|
||||
|
||||
const replyContext = describeReplyContext(params.message);
|
||||
const createdAt = params.message.created_at ? Date.parse(params.message.created_at) : undefined;
|
||||
const historyKey = isGroup
|
||||
? String(chatId ?? chatGuid ?? chatIdentifier ?? "unknown")
|
||||
: undefined;
|
||||
|
||||
@ -53,6 +53,7 @@ import {
|
||||
import { createLoopRateLimiter } from "./loop-rate-limiter.js";
|
||||
import { parseIMessageNotification } from "./parse-notification.js";
|
||||
import { normalizeAllowList, resolveRuntime } from "./runtime.js";
|
||||
import { createSelfChatCache } from "./self-chat-cache.js";
|
||||
import type { IMessagePayload, MonitorIMessageOpts } from "./types.js";
|
||||
|
||||
/**
|
||||
@ -99,6 +100,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P
|
||||
);
|
||||
const groupHistories = new Map<string, HistoryEntry[]>();
|
||||
const sentMessageCache = createSentMessageCache();
|
||||
const selfChatCache = createSelfChatCache();
|
||||
const loopRateLimiter = createLoopRateLimiter();
|
||||
const textLimit = resolveTextChunkLimit(cfg, "imessage", accountInfo.accountId);
|
||||
const allowFrom = normalizeAllowList(opts.allowFrom ?? imessageCfg.allowFrom);
|
||||
@ -252,6 +254,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P
|
||||
historyLimit,
|
||||
groupHistories,
|
||||
echoCache: sentMessageCache,
|
||||
selfChatCache,
|
||||
logVerbose,
|
||||
});
|
||||
|
||||
@ -267,6 +270,7 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P
|
||||
// are normal and should not escalate.
|
||||
const isLoopDrop =
|
||||
decision.reason === "echo" ||
|
||||
decision.reason === "self-chat echo" ||
|
||||
decision.reason === "reflected assistant content" ||
|
||||
decision.reason === "from me";
|
||||
if (isLoopDrop) {
|
||||
|
||||
76
src/imessage/monitor/self-chat-cache.test.ts
Normal file
76
src/imessage/monitor/self-chat-cache.test.ts
Normal file
@ -0,0 +1,76 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { createSelfChatCache } from "./self-chat-cache.js";
|
||||
|
||||
describe("createSelfChatCache", () => {
|
||||
const directLookup = {
|
||||
accountId: "default",
|
||||
sender: "+15555550123",
|
||||
isGroup: false,
|
||||
} as const;
|
||||
|
||||
it("matches repeated lookups for the same scope, timestamp, and text", () => {
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
|
||||
|
||||
const cache = createSelfChatCache();
|
||||
cache.remember({
|
||||
...directLookup,
|
||||
text: " hello\r\nworld ",
|
||||
createdAt: 123,
|
||||
});
|
||||
|
||||
expect(
|
||||
cache.has({
|
||||
...directLookup,
|
||||
text: "hello\nworld",
|
||||
createdAt: 123,
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("expires entries after the ttl window", () => {
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
|
||||
|
||||
const cache = createSelfChatCache();
|
||||
cache.remember({ ...directLookup, text: "hello", createdAt: 123 });
|
||||
|
||||
vi.advanceTimersByTime(11_001);
|
||||
|
||||
expect(cache.has({ ...directLookup, text: "hello", createdAt: 123 })).toBe(false);
|
||||
});
|
||||
|
||||
it("evicts older entries when the cache exceeds its cap", () => {
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
|
||||
|
||||
const cache = createSelfChatCache();
|
||||
for (let i = 0; i < 513; i += 1) {
|
||||
cache.remember({
|
||||
...directLookup,
|
||||
text: `message-${i}`,
|
||||
createdAt: i,
|
||||
});
|
||||
vi.advanceTimersByTime(1_001);
|
||||
}
|
||||
|
||||
expect(cache.has({ ...directLookup, text: "message-0", createdAt: 0 })).toBe(false);
|
||||
expect(cache.has({ ...directLookup, text: "message-512", createdAt: 512 })).toBe(true);
|
||||
});
|
||||
|
||||
it("does not collide long texts that differ only in the middle", () => {
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(new Date("2026-03-07T00:00:00Z"));
|
||||
|
||||
const cache = createSelfChatCache();
|
||||
const prefix = "a".repeat(256);
|
||||
const suffix = "b".repeat(256);
|
||||
const longTextA = `${prefix}${"x".repeat(300)}${suffix}`;
|
||||
const longTextB = `${prefix}${"y".repeat(300)}${suffix}`;
|
||||
|
||||
cache.remember({ ...directLookup, text: longTextA, createdAt: 123 });
|
||||
|
||||
expect(cache.has({ ...directLookup, text: longTextA, createdAt: 123 })).toBe(true);
|
||||
expect(cache.has({ ...directLookup, text: longTextB, createdAt: 123 })).toBe(false);
|
||||
});
|
||||
});
|
||||
103
src/imessage/monitor/self-chat-cache.ts
Normal file
103
src/imessage/monitor/self-chat-cache.ts
Normal file
@ -0,0 +1,103 @@
|
||||
import { createHash } from "node:crypto";
|
||||
import { formatIMessageChatTarget } from "../targets.js";
|
||||
|
||||
type SelfChatCacheKeyParts = {
|
||||
accountId: string;
|
||||
sender: string;
|
||||
isGroup: boolean;
|
||||
chatId?: number;
|
||||
};
|
||||
|
||||
export type SelfChatLookup = SelfChatCacheKeyParts & {
|
||||
text?: string;
|
||||
createdAt?: number;
|
||||
};
|
||||
|
||||
export type SelfChatCache = {
|
||||
remember: (lookup: SelfChatLookup) => void;
|
||||
has: (lookup: SelfChatLookup) => boolean;
|
||||
};
|
||||
|
||||
const SELF_CHAT_TTL_MS = 10_000;
|
||||
const MAX_SELF_CHAT_CACHE_ENTRIES = 512;
|
||||
const CLEANUP_MIN_INTERVAL_MS = 1_000;
|
||||
|
||||
function normalizeText(text: string | undefined): string | null {
|
||||
if (!text) {
|
||||
return null;
|
||||
}
|
||||
const normalized = text.replace(/\r\n?/g, "\n").trim();
|
||||
return normalized ? normalized : null;
|
||||
}
|
||||
|
||||
function isUsableTimestamp(createdAt: number | undefined): createdAt is number {
|
||||
return typeof createdAt === "number" && Number.isFinite(createdAt);
|
||||
}
|
||||
|
||||
function digestText(text: string): string {
|
||||
return createHash("sha256").update(text).digest("hex");
|
||||
}
|
||||
|
||||
function buildScope(parts: SelfChatCacheKeyParts): string {
|
||||
if (!parts.isGroup) {
|
||||
return `${parts.accountId}:imessage:${parts.sender}`;
|
||||
}
|
||||
const chatTarget = formatIMessageChatTarget(parts.chatId) || "chat_id:unknown";
|
||||
return `${parts.accountId}:${chatTarget}:imessage:${parts.sender}`;
|
||||
}
|
||||
|
||||
class DefaultSelfChatCache implements SelfChatCache {
|
||||
private cache = new Map<string, number>();
|
||||
private lastCleanupAt = 0;
|
||||
|
||||
private buildKey(lookup: SelfChatLookup): string | null {
|
||||
const text = normalizeText(lookup.text);
|
||||
if (!text || !isUsableTimestamp(lookup.createdAt)) {
|
||||
return null;
|
||||
}
|
||||
return `${buildScope(lookup)}:${lookup.createdAt}:${digestText(text)}`;
|
||||
}
|
||||
|
||||
remember(lookup: SelfChatLookup): void {
|
||||
const key = this.buildKey(lookup);
|
||||
if (!key) {
|
||||
return;
|
||||
}
|
||||
this.cache.set(key, Date.now());
|
||||
this.maybeCleanup();
|
||||
}
|
||||
|
||||
has(lookup: SelfChatLookup): boolean {
|
||||
this.maybeCleanup();
|
||||
const key = this.buildKey(lookup);
|
||||
if (!key) {
|
||||
return false;
|
||||
}
|
||||
const timestamp = this.cache.get(key);
|
||||
return typeof timestamp === "number" && Date.now() - timestamp <= SELF_CHAT_TTL_MS;
|
||||
}
|
||||
|
||||
private maybeCleanup(): void {
|
||||
const now = Date.now();
|
||||
if (now - this.lastCleanupAt < CLEANUP_MIN_INTERVAL_MS) {
|
||||
return;
|
||||
}
|
||||
this.lastCleanupAt = now;
|
||||
for (const [key, timestamp] of this.cache.entries()) {
|
||||
if (now - timestamp > SELF_CHAT_TTL_MS) {
|
||||
this.cache.delete(key);
|
||||
}
|
||||
}
|
||||
while (this.cache.size > MAX_SELF_CHAT_CACHE_ENTRIES) {
|
||||
const oldestKey = this.cache.keys().next().value;
|
||||
if (typeof oldestKey !== "string") {
|
||||
break;
|
||||
}
|
||||
this.cache.delete(oldestKey);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function createSelfChatCache(): SelfChatCache {
|
||||
return new DefaultSelfChatCache();
|
||||
}
|
||||
@ -1,16 +1,19 @@
|
||||
import { mkdtemp } from "node:fs/promises";
|
||||
import { mkdtemp, readFile, writeFile } from "node:fs/promises";
|
||||
import { tmpdir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { describe, expect, test } from "vitest";
|
||||
import {
|
||||
approveDevicePairing,
|
||||
clearDevicePairing,
|
||||
ensureDeviceToken,
|
||||
getPairedDevice,
|
||||
removePairedDevice,
|
||||
requestDevicePairing,
|
||||
rotateDeviceToken,
|
||||
verifyDeviceToken,
|
||||
type PairedDevice,
|
||||
} from "./device-pairing.js";
|
||||
import { resolvePairingPaths } from "./pairing-files.js";
|
||||
|
||||
async function setupPairedOperatorDevice(baseDir: string, scopes: string[]) {
|
||||
const request = await requestDevicePairing(
|
||||
@ -51,6 +54,43 @@ function requireToken(token: string | undefined): string {
|
||||
return token;
|
||||
}
|
||||
|
||||
async function overwritePairedOperatorTokenScopes(baseDir: string, scopes: string[]) {
|
||||
const { pairedPath } = resolvePairingPaths(baseDir, "devices");
|
||||
const pairedByDeviceId = JSON.parse(await readFile(pairedPath, "utf8")) as Record<
|
||||
string,
|
||||
PairedDevice
|
||||
>;
|
||||
const device = pairedByDeviceId["device-1"];
|
||||
expect(device?.tokens?.operator).toBeDefined();
|
||||
if (!device?.tokens?.operator) {
|
||||
throw new Error("expected paired operator token");
|
||||
}
|
||||
device.tokens.operator.scopes = scopes;
|
||||
await writeFile(pairedPath, JSON.stringify(pairedByDeviceId, null, 2));
|
||||
}
|
||||
|
||||
async function mutatePairedOperatorDevice(baseDir: string, mutate: (device: PairedDevice) => void) {
|
||||
const { pairedPath } = resolvePairingPaths(baseDir, "devices");
|
||||
const pairedByDeviceId = JSON.parse(await readFile(pairedPath, "utf8")) as Record<
|
||||
string,
|
||||
PairedDevice
|
||||
>;
|
||||
const device = pairedByDeviceId["device-1"];
|
||||
expect(device).toBeDefined();
|
||||
if (!device) {
|
||||
throw new Error("expected paired operator device");
|
||||
}
|
||||
mutate(device);
|
||||
await writeFile(pairedPath, JSON.stringify(pairedByDeviceId, null, 2));
|
||||
}
|
||||
|
||||
async function clearPairedOperatorApprovalBaseline(baseDir: string) {
|
||||
await mutatePairedOperatorDevice(baseDir, (device) => {
|
||||
delete device.approvedScopes;
|
||||
delete device.scopes;
|
||||
});
|
||||
}
|
||||
|
||||
describe("device pairing tokens", () => {
|
||||
test("reuses existing pending requests for the same device", async () => {
|
||||
const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-"));
|
||||
@ -180,6 +220,26 @@ describe("device pairing tokens", () => {
|
||||
expect(after?.approvedScopes).toEqual(["operator.read"]);
|
||||
});
|
||||
|
||||
test("rejects scope escalation when ensuring a token and leaves state unchanged", async () => {
|
||||
const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-"));
|
||||
await setupPairedOperatorDevice(baseDir, ["operator.read"]);
|
||||
const before = await getPairedDevice("device-1", baseDir);
|
||||
|
||||
const ensured = await ensureDeviceToken({
|
||||
deviceId: "device-1",
|
||||
role: "operator",
|
||||
scopes: ["operator.admin"],
|
||||
baseDir,
|
||||
});
|
||||
expect(ensured).toBeNull();
|
||||
|
||||
const after = await getPairedDevice("device-1", baseDir);
|
||||
expect(after?.tokens?.operator?.token).toEqual(before?.tokens?.operator?.token);
|
||||
expect(after?.tokens?.operator?.scopes).toEqual(["operator.read"]);
|
||||
expect(after?.scopes).toEqual(["operator.read"]);
|
||||
expect(after?.approvedScopes).toEqual(["operator.read"]);
|
||||
});
|
||||
|
||||
test("verifies token and rejects mismatches", async () => {
|
||||
const { baseDir, token } = await setupOperatorToken(["operator.read"]);
|
||||
|
||||
@ -199,6 +259,32 @@ describe("device pairing tokens", () => {
|
||||
expect(mismatch.reason).toBe("token-mismatch");
|
||||
});
|
||||
|
||||
test("rejects persisted tokens whose scopes exceed the approved scope baseline", async () => {
|
||||
const { baseDir, token } = await setupOperatorToken(["operator.read"]);
|
||||
await overwritePairedOperatorTokenScopes(baseDir, ["operator.admin"]);
|
||||
|
||||
await expect(
|
||||
verifyOperatorToken({
|
||||
baseDir,
|
||||
token,
|
||||
scopes: ["operator.admin"],
|
||||
}),
|
||||
).resolves.toEqual({ ok: false, reason: "scope-mismatch" });
|
||||
});
|
||||
|
||||
test("fails closed when the paired device approval baseline is missing during verification", async () => {
|
||||
const { baseDir, token } = await setupOperatorToken(["operator.read"]);
|
||||
await clearPairedOperatorApprovalBaseline(baseDir);
|
||||
|
||||
await expect(
|
||||
verifyOperatorToken({
|
||||
baseDir,
|
||||
token,
|
||||
scopes: ["operator.read"],
|
||||
}),
|
||||
).resolves.toEqual({ ok: false, reason: "scope-mismatch" });
|
||||
});
|
||||
|
||||
test("accepts operator.read/operator.write requests with an operator.admin token scope", async () => {
|
||||
const { baseDir, token } = await setupOperatorToken(["operator.admin"]);
|
||||
|
||||
@ -217,6 +303,57 @@ describe("device pairing tokens", () => {
|
||||
expect(writeOk.ok).toBe(true);
|
||||
});
|
||||
|
||||
test("accepts custom operator scopes under an operator.admin approval baseline", async () => {
|
||||
const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-"));
|
||||
await setupPairedOperatorDevice(baseDir, ["operator.admin"]);
|
||||
|
||||
const rotated = await rotateDeviceToken({
|
||||
deviceId: "device-1",
|
||||
role: "operator",
|
||||
scopes: ["operator.talk.secrets"],
|
||||
baseDir,
|
||||
});
|
||||
expect(rotated?.scopes).toEqual(["operator.talk.secrets"]);
|
||||
|
||||
await expect(
|
||||
verifyOperatorToken({
|
||||
baseDir,
|
||||
token: requireToken(rotated?.token),
|
||||
scopes: ["operator.talk.secrets"],
|
||||
}),
|
||||
).resolves.toEqual({ ok: true });
|
||||
});
|
||||
|
||||
test("fails closed when the paired device approval baseline is missing during ensure", async () => {
|
||||
const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-"));
|
||||
await setupPairedOperatorDevice(baseDir, ["operator.admin"]);
|
||||
await clearPairedOperatorApprovalBaseline(baseDir);
|
||||
|
||||
await expect(
|
||||
ensureDeviceToken({
|
||||
deviceId: "device-1",
|
||||
role: "operator",
|
||||
scopes: ["operator.admin"],
|
||||
baseDir,
|
||||
}),
|
||||
).resolves.toBeNull();
|
||||
});
|
||||
|
||||
test("fails closed when the paired device approval baseline is missing during rotation", async () => {
|
||||
const baseDir = await mkdtemp(join(tmpdir(), "openclaw-device-pairing-"));
|
||||
await setupPairedOperatorDevice(baseDir, ["operator.admin"]);
|
||||
await clearPairedOperatorApprovalBaseline(baseDir);
|
||||
|
||||
await expect(
|
||||
rotateDeviceToken({
|
||||
deviceId: "device-1",
|
||||
role: "operator",
|
||||
scopes: ["operator.admin"],
|
||||
baseDir,
|
||||
}),
|
||||
).resolves.toBeNull();
|
||||
});
|
||||
|
||||
test("treats multibyte same-length token input as mismatch without throwing", async () => {
|
||||
const { baseDir, token } = await setupOperatorToken(["operator.read"]);
|
||||
const multibyteToken = "é".repeat(token.length);
|
||||
|
||||
@ -181,44 +181,6 @@ function mergePendingDevicePairingRequest(
|
||||
};
|
||||
}
|
||||
|
||||
function scopesAllow(requested: string[], allowed: string[]): boolean {
|
||||
if (requested.length === 0) {
|
||||
return true;
|
||||
}
|
||||
if (allowed.length === 0) {
|
||||
return false;
|
||||
}
|
||||
const allowedSet = new Set(allowed);
|
||||
return requested.every((scope) => allowedSet.has(scope));
|
||||
}
|
||||
|
||||
const DEVICE_SCOPE_IMPLICATIONS: Readonly<Record<string, readonly string[]>> = {
|
||||
"operator.admin": ["operator.read", "operator.write", "operator.approvals", "operator.pairing"],
|
||||
"operator.write": ["operator.read"],
|
||||
};
|
||||
|
||||
function expandScopeImplications(scopes: string[]): string[] {
|
||||
const expanded = new Set(scopes);
|
||||
const queue = [...scopes];
|
||||
while (queue.length > 0) {
|
||||
const scope = queue.pop();
|
||||
if (!scope) {
|
||||
continue;
|
||||
}
|
||||
for (const impliedScope of DEVICE_SCOPE_IMPLICATIONS[scope] ?? []) {
|
||||
if (!expanded.has(impliedScope)) {
|
||||
expanded.add(impliedScope);
|
||||
queue.push(impliedScope);
|
||||
}
|
||||
}
|
||||
}
|
||||
return [...expanded];
|
||||
}
|
||||
|
||||
function scopesAllowWithImplications(requested: string[], allowed: string[]): boolean {
|
||||
return scopesAllow(expandScopeImplications(requested), expandScopeImplications(allowed));
|
||||
}
|
||||
|
||||
function newToken() {
|
||||
return generatePairingToken();
|
||||
}
|
||||
@ -252,6 +214,29 @@ function buildDeviceAuthToken(params: {
|
||||
};
|
||||
}
|
||||
|
||||
function resolveApprovedDeviceScopeBaseline(device: PairedDevice): string[] | null {
|
||||
const baseline = device.approvedScopes ?? device.scopes;
|
||||
if (!Array.isArray(baseline)) {
|
||||
return null;
|
||||
}
|
||||
return normalizeDeviceAuthScopes(baseline);
|
||||
}
|
||||
|
||||
function scopesWithinApprovedDeviceBaseline(params: {
|
||||
role: string;
|
||||
scopes: readonly string[];
|
||||
approvedScopes: readonly string[] | null;
|
||||
}): boolean {
|
||||
if (!params.approvedScopes) {
|
||||
return false;
|
||||
}
|
||||
return roleScopesAllow({
|
||||
role: params.role,
|
||||
requestedScopes: params.scopes,
|
||||
allowedScopes: params.approvedScopes,
|
||||
});
|
||||
}
|
||||
|
||||
export async function listDevicePairing(baseDir?: string): Promise<DevicePairingList> {
|
||||
const state = await loadState(baseDir);
|
||||
const pending = Object.values(state.pendingById).toSorted((a, b) => b.ts - a.ts);
|
||||
@ -494,6 +479,16 @@ export async function verifyDeviceToken(params: {
|
||||
if (!verifyPairingToken(params.token, entry.token)) {
|
||||
return { ok: false, reason: "token-mismatch" };
|
||||
}
|
||||
const approvedScopes = resolveApprovedDeviceScopeBaseline(device);
|
||||
if (
|
||||
!scopesWithinApprovedDeviceBaseline({
|
||||
role,
|
||||
scopes: entry.scopes,
|
||||
approvedScopes,
|
||||
})
|
||||
) {
|
||||
return { ok: false, reason: "scope-mismatch" };
|
||||
}
|
||||
const requestedScopes = normalizeDeviceAuthScopes(params.scopes);
|
||||
if (!roleScopesAllow({ role, requestedScopes, allowedScopes: entry.scopes })) {
|
||||
return { ok: false, reason: "scope-mismatch" };
|
||||
@ -525,8 +520,26 @@ export async function ensureDeviceToken(params: {
|
||||
return null;
|
||||
}
|
||||
const { device, role, tokens, existing } = context;
|
||||
const approvedScopes = resolveApprovedDeviceScopeBaseline(device);
|
||||
if (
|
||||
!scopesWithinApprovedDeviceBaseline({
|
||||
role,
|
||||
scopes: requestedScopes,
|
||||
approvedScopes,
|
||||
})
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
if (existing && !existing.revokedAtMs) {
|
||||
if (roleScopesAllow({ role, requestedScopes, allowedScopes: existing.scopes })) {
|
||||
const existingWithinApproved = scopesWithinApprovedDeviceBaseline({
|
||||
role,
|
||||
scopes: existing.scopes,
|
||||
approvedScopes,
|
||||
});
|
||||
if (
|
||||
existingWithinApproved &&
|
||||
roleScopesAllow({ role, requestedScopes, allowedScopes: existing.scopes })
|
||||
) {
|
||||
return existing;
|
||||
}
|
||||
}
|
||||
@ -589,10 +602,14 @@ export async function rotateDeviceToken(params: {
|
||||
const requestedScopes = normalizeDeviceAuthScopes(
|
||||
params.scopes ?? existing?.scopes ?? device.scopes,
|
||||
);
|
||||
const approvedScopes = normalizeDeviceAuthScopes(
|
||||
device.approvedScopes ?? device.scopes ?? existing?.scopes,
|
||||
);
|
||||
if (!scopesAllowWithImplications(requestedScopes, approvedScopes)) {
|
||||
const approvedScopes = resolveApprovedDeviceScopeBaseline(device);
|
||||
if (
|
||||
!scopesWithinApprovedDeviceBaseline({
|
||||
role,
|
||||
scopes: requestedScopes,
|
||||
approvedScopes,
|
||||
})
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
const now = Date.now();
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user