Merge branch 'openclaw:main' into fix/46369-cron-remove-cleanup-upstream
This commit is contained in:
commit
9717195e4e
45
.github/CODEOWNERS
vendored
45
.github/CODEOWNERS
vendored
@ -1,6 +1,51 @@
|
||||
# Protect the ownership rules themselves.
|
||||
/.github/CODEOWNERS @steipete
|
||||
|
||||
# WARNING: GitHub CODEOWNERS uses last-match-wins semantics.
|
||||
# If you add overlapping rules below the secops block, include @openclaw/secops
|
||||
# on those entries too or you can silently remove required secops review.
|
||||
# Security-sensitive code, config, and docs require secops review.
|
||||
/SECURITY.md @openclaw/secops
|
||||
/.github/dependabot.yml @openclaw/secops
|
||||
/.github/codeql/ @openclaw/secops
|
||||
/.github/workflows/codeql.yml @openclaw/secops
|
||||
/src/security/ @openclaw/secops
|
||||
/src/secrets/ @openclaw/secops
|
||||
/src/config/*secret*.ts @openclaw/secops
|
||||
/src/config/**/*secret*.ts @openclaw/secops
|
||||
/src/gateway/*auth*.ts @openclaw/secops
|
||||
/src/gateway/**/*auth*.ts @openclaw/secops
|
||||
/src/gateway/*secret*.ts @openclaw/secops
|
||||
/src/gateway/**/*secret*.ts @openclaw/secops
|
||||
/src/gateway/security-path*.ts @openclaw/secops
|
||||
/src/gateway/resolve-configured-secret-input-string*.ts @openclaw/secops
|
||||
/src/gateway/protocol/**/*secret*.ts @openclaw/secops
|
||||
/src/gateway/server-methods/secrets*.ts @openclaw/secops
|
||||
/src/agents/*auth*.ts @openclaw/secops
|
||||
/src/agents/**/*auth*.ts @openclaw/secops
|
||||
/src/agents/auth-profiles*.ts @openclaw/secops
|
||||
/src/agents/auth-health*.ts @openclaw/secops
|
||||
/src/agents/auth-profiles/ @openclaw/secops
|
||||
/src/agents/sandbox.ts @openclaw/secops
|
||||
/src/agents/sandbox-*.ts @openclaw/secops
|
||||
/src/agents/sandbox/ @openclaw/secops
|
||||
/src/infra/secret-file*.ts @openclaw/secops
|
||||
/src/cron/stagger.ts @openclaw/secops
|
||||
/src/cron/service/jobs.ts @openclaw/secops
|
||||
/docs/security/ @openclaw/secops
|
||||
/docs/gateway/authentication.md @openclaw/secops
|
||||
/docs/gateway/sandbox-vs-tool-policy-vs-elevated.md @openclaw/secops
|
||||
/docs/gateway/sandboxing.md @openclaw/secops
|
||||
/docs/gateway/secrets-plan-contract.md @openclaw/secops
|
||||
/docs/gateway/secrets.md @openclaw/secops
|
||||
/docs/gateway/security/ @openclaw/secops
|
||||
/docs/cli/approvals.md @openclaw/secops
|
||||
/docs/cli/sandbox.md @openclaw/secops
|
||||
/docs/cli/security.md @openclaw/secops
|
||||
/docs/cli/secrets.md @openclaw/secops
|
||||
/docs/reference/secretref-credential-surface.md @openclaw/secops
|
||||
/docs/reference/secretref-user-supplied-credentials-matrix.json @openclaw/secops
|
||||
|
||||
# Release workflow and its supporting release-path checks.
|
||||
/.github/workflows/openclaw-npm-release.yml @openclaw/openclaw-release-managers
|
||||
/docs/reference/RELEASING.md @openclaw/openclaw-release-managers
|
||||
|
||||
3
.github/workflows/ci.yml
vendored
3
.github/workflows/ci.yml
vendored
@ -159,6 +159,9 @@ jobs:
|
||||
- runtime: node
|
||||
task: extensions
|
||||
command: pnpm test:extensions
|
||||
- runtime: node
|
||||
task: channels
|
||||
command: pnpm test:channels
|
||||
- runtime: node
|
||||
task: protocol
|
||||
command: pnpm protocol:check
|
||||
|
||||
4
.github/workflows/docker-release.yml
vendored
4
.github/workflows/docker-release.yml
vendored
@ -59,7 +59,9 @@ jobs:
|
||||
environment: docker-release
|
||||
steps:
|
||||
- name: Approve Docker backfill
|
||||
run: echo "Approved Docker backfill for ${{ inputs.tag }}"
|
||||
env:
|
||||
RELEASE_TAG: ${{ inputs.tag }}
|
||||
run: echo "Approved Docker backfill for $RELEASE_TAG"
|
||||
|
||||
# KEEP THIS WORKFLOW ON GITHUB-HOSTED RUNNERS.
|
||||
# DO NOT MOVE IT BACK TO BLACKSMITH WITHOUT RE-VALIDATING TAG BUILDS AND BACKFILLS.
|
||||
|
||||
30
.github/workflows/openclaw-npm-release.yml
vendored
30
.github/workflows/openclaw-npm-release.yml
vendored
@ -7,7 +7,7 @@ on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: Release tag to publish (for example v2026.3.14 or v2026.3.14-beta.1)
|
||||
description: Release tag to publish (for example v2026.3.14, v2026.3.14-beta.1, or fallback v2026.3.14-1)
|
||||
required: true
|
||||
type: string
|
||||
|
||||
@ -47,9 +47,18 @@ jobs:
|
||||
set -euo pipefail
|
||||
RELEASE_SHA=$(git rev-parse HEAD)
|
||||
PACKAGE_VERSION=$(node -p "require('./package.json').version")
|
||||
if [[ "${RELEASE_TAG}" =~ ^v[0-9]{4}\.[1-9][0-9]*\.[1-9][0-9]*-[1-9][0-9]*$ ]]; then
|
||||
TAG_KIND="fallback correction"
|
||||
else
|
||||
TAG_KIND="standard"
|
||||
fi
|
||||
echo "Release plan for ${RELEASE_TAG}:"
|
||||
echo "Resolved release SHA: ${RELEASE_SHA}"
|
||||
echo "Resolved package version: ${PACKAGE_VERSION}"
|
||||
echo "Resolved tag kind: ${TAG_KIND}"
|
||||
if [[ "${TAG_KIND}" == "fallback correction" ]]; then
|
||||
echo "Correction tag note: npm version remains ${PACKAGE_VERSION}"
|
||||
fi
|
||||
echo "Would run: git fetch --no-tags origin +refs/heads/main:refs/remotes/origin/main"
|
||||
echo "Would run with env: RELEASE_SHA=${RELEASE_SHA} RELEASE_TAG=${RELEASE_TAG} RELEASE_MAIN_REF=origin/main pnpm release:openclaw:npm:check"
|
||||
echo "Would run: npm view openclaw@${PACKAGE_VERSION} version"
|
||||
@ -71,16 +80,31 @@ jobs:
|
||||
pnpm release:openclaw:npm:check
|
||||
|
||||
- name: Ensure version is not already published
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
PACKAGE_VERSION=$(node -p "require('./package.json').version")
|
||||
IS_CORRECTION_TAG=0
|
||||
if [[ "${RELEASE_TAG}" =~ ^v[0-9]{4}\.[1-9][0-9]*\.[1-9][0-9]*-[1-9][0-9]*$ ]]; then
|
||||
IS_CORRECTION_TAG=1
|
||||
fi
|
||||
|
||||
if npm view "openclaw@${PACKAGE_VERSION}" version >/dev/null 2>&1; then
|
||||
if [[ "${IS_CORRECTION_TAG}" == "1" ]]; then
|
||||
echo "openclaw@${PACKAGE_VERSION} is already published on npm."
|
||||
echo "Correction tag ${RELEASE_TAG} is allowed as a fallback release tag, so preview will continue without treating this as an error."
|
||||
exit 0
|
||||
fi
|
||||
echo "openclaw@${PACKAGE_VERSION} is already published on npm."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Previewing openclaw@${PACKAGE_VERSION}"
|
||||
if [[ "${IS_CORRECTION_TAG}" == "1" ]]; then
|
||||
echo "Previewing fallback correction tag ${RELEASE_TAG} for npm version openclaw@${PACKAGE_VERSION}"
|
||||
else
|
||||
echo "Previewing openclaw@${PACKAGE_VERSION}"
|
||||
fi
|
||||
|
||||
- name: Check
|
||||
run: |
|
||||
@ -114,7 +138,7 @@ jobs:
|
||||
RELEASE_TAG: ${{ inputs.tag }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [[ ! "${RELEASE_TAG}" =~ ^v[0-9]{4}\.[1-9][0-9]*\.[1-9][0-9]*(-beta\.[1-9][0-9]*)?$ ]]; then
|
||||
if [[ ! "${RELEASE_TAG}" =~ ^v[0-9]{4}\.[1-9][0-9]*\.[1-9][0-9]*((-beta\.[1-9][0-9]*)|(-[1-9][0-9]*))?$ ]]; then
|
||||
echo "Invalid release tag format: ${RELEASE_TAG}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
19
.github/workflows/workflow-sanity.yml
vendored
19
.github/workflows/workflow-sanity.yml
vendored
@ -4,6 +4,7 @@ on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: [main]
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: workflow-sanity-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
@ -14,6 +15,7 @@ env:
|
||||
|
||||
jobs:
|
||||
no-tabs:
|
||||
if: github.event_name != 'workflow_dispatch'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
@ -45,6 +47,7 @@ jobs:
|
||||
PY
|
||||
|
||||
actionlint:
|
||||
if: github.event_name != 'workflow_dispatch'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
@ -68,3 +71,19 @@ jobs:
|
||||
|
||||
- name: Disallow direct inputs interpolation in composite run blocks
|
||||
run: python3 scripts/check-composite-action-input-interpolation.py
|
||||
|
||||
config-docs-drift:
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Node environment
|
||||
uses: ./.github/actions/setup-node-env
|
||||
with:
|
||||
install-bun: "false"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Check config docs drift statefile
|
||||
run: pnpm config:docs:check
|
||||
|
||||
@ -9,6 +9,7 @@
|
||||
- PR review conversations: if a bot leaves review conversations on your PR, address them and resolve those conversations yourself once fixed. Leave a conversation unresolved only when reviewer or maintainer judgment is still needed; do not leave bot-conversation cleanup to maintainers.
|
||||
- GitHub searching footgun: don't limit yourself to the first 500 issues or PRs when wanting to search all. Unless you're supposed to look at the most recent, keep going until you've reached the last page in the search
|
||||
- Security advisory analysis: before triage/severity decisions, read `SECURITY.md` to align with OpenClaw's trust model and design boundaries.
|
||||
- Do not edit files covered by security-focused `CODEOWNERS` rules unless a listed owner explicitly asked for the change or is already reviewing it with you. Treat those paths as restricted surfaces, not drive-by cleanup.
|
||||
|
||||
## Auto-close labels (issues and PRs)
|
||||
|
||||
|
||||
13
CHANGELOG.md
13
CHANGELOG.md
@ -8,16 +8,24 @@ Docs: https://docs.openclaw.ai
|
||||
|
||||
- Commands/btw: add `/btw` side questions for quick tool-less answers about the current session without changing future session context, with dismissible in-session TUI answers and explicit BTW replies on external channels. (#45444) Thanks @ngutman.
|
||||
- Refactor/channels: remove the legacy channel shim directories and point channel-specific imports directly at the extension-owned implementations. (#45967) thanks @scoootscooob.
|
||||
- Feishu/streaming: add `onReasoningStream` and `onReasoningEnd` support to streaming cards, so `/reasoning stream` renders thinking tokens as markdown blockquotes in the same card — matching the Telegram channel's reasoning lane behavior.
|
||||
|
||||
### Fixes
|
||||
|
||||
- Z.AI/onboarding: detect a working default model even for explicit `zai-coding-*` endpoint choices, so Coding Plan setup can keep the selected endpoint while defaulting to `glm-5` when available or `glm-4.7` as fallback. (#45969)
|
||||
- Zalo/plugin runtime: export `resolveClientIp` from `openclaw/plugin-sdk/zalo` so installed builds no longer crash on startup when the webhook monitor loads from the packaged extension instead of the monorepo source tree. (#46549) Thanks @No898.
|
||||
- Control UI/chat sessions: show human-readable labels in the grouped session dropdown again, keep unique scoped fallbacks when metadata is missing, and disambiguate duplicate labels only when needed. (#45130) thanks @luzhidong.
|
||||
- Configure/startup: move outbound send-deps resolution into a lightweight helper so `openclaw configure` no longer stalls after the banner while eagerly loading channel plugins. (#46301) thanks @scoootscooob.
|
||||
- Zalo Personal/group gating: stop reapplying `dmPolicy.allowFrom` as a sender gate for already-allowlisted groups when `groupAllowFrom` is unset, so any member of an allowed group can trigger replies while DMs stay restricted. (#40146)
|
||||
|
||||
### Fixes
|
||||
|
||||
- Slack/interactive replies: preserve `channelData.slack.blocks` through live DM delivery and preview-finalized edits so Block Kit button and select directives render instead of falling back to raw text. Thanks @vincentkoc.
|
||||
- CI/channel test routing: move the built-in channel suites into `test:channels` and keep them out of `test:extensions`, so extension CI no longer fails after the channel migration while targeted test routing still sends Slack, Signal, and iMessage suites to the right lane. (#46066) Thanks @scoootscooob.
|
||||
- Node/startup: remove leftover debug `console.log("node host PATH: ...")` that printed the resolved PATH on every `openclaw node run` invocation. (#46411)
|
||||
- Control UI/dashboard: preserve structured gateway shutdown reasons across restart disconnects so config-triggered restarts no longer fall back to `disconnected (1006): no reason`. (#46532) Thanks @vincentkoc.
|
||||
- Feishu/topic threads: fetch full thread context, including prior bot replies, when starting a topic-thread session so follow-up turns in Feishu topics keep the right conversation state. Thanks @Coobiw.
|
||||
- Browser/profiles: drop the auto-created `chrome-relay` browser profile; users who need the Chrome extension relay must now create their own profile via `openclaw browser create-profile`. (#45777) Thanks @odysseus0.
|
||||
|
||||
## 2026.3.13
|
||||
|
||||
@ -91,6 +99,10 @@ Docs: https://docs.openclaw.ai
|
||||
- Telegram/media errors: redact Telegram file URLs before building media fetch errors so failed inbound downloads do not leak bot tokens into logs. Thanks @space08.
|
||||
- Agents/failover: normalize abort-wrapped `429 RESOURCE_EXHAUSTED` provider failures before abort short-circuiting so wrapped Google/Vertex rate limits continue across configured fallback models, including the embedded runner prompt-error path. (#39820) Thanks @lupuletic.
|
||||
- Mattermost/thread routing: non-inbound reply paths (TUI/WebUI turns, tool-call callbacks, subagent responses) now correctly route to the originating Mattermost thread when `replyToMode: "all"` is active; also prevents stale `origin.threadId` metadata from resurrecting cleared thread routes. (#44283) thanks @teconomix
|
||||
- Gateway/websocket pairing bypass for disabled auth: skip device-pairing enforcement when `gateway.auth.mode=none` so Control UI connections behind reverse proxies no longer get stuck on `pairing required` (code 1008) despite auth being explicitly disabled. (#42931)
|
||||
- Auth/login lockout recovery: clear stale `auth_permanent` and `billing` disabled state for all profiles matching the target provider when `openclaw models auth login` is invoked, so users locked out by expired or revoked OAuth tokens can recover by re-authenticating instead of waiting for the cooldown timer to expire. (#43057)
|
||||
- Auto-reply/context-engine compaction: persist the exact embedded-run metadata compaction count for main and followup runner session accounting, so metadata-only auto-compactions no longer undercount multi-compaction runs. (#42629) thanks @uf-hy.
|
||||
- Auth/Codex CLI reuse: sync reused Codex CLI credentials into the supported `openai-codex:default` OAuth profile instead of reviving the deprecated `openai-codex:codex-cli` slot, so doctor cleanup no longer loops. (#45353) thanks @Gugu-sugar.
|
||||
|
||||
## 2026.3.12
|
||||
|
||||
@ -323,6 +335,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Agents/failover: classify HTTP 422 malformed-request responses as `format` and recognize OpenRouter "requires more credits" billing errors so provider fallback triggers instead of surfacing raw errors. (#43823) thanks @jnMetaCode.
|
||||
- Memory/QMD Windows: fail closed when `qmd.cmd` or `mcporter.cmd` wrappers cannot be resolved to a direct entrypoint, so memory search no longer falls back to shell execution on Windows.
|
||||
- macOS/remote gateway: stop PortGuardian from killing Docker Desktop and other external listeners on the gateway port in remote mode, so containerized and tunneled gateway setups no longer lose their port-forward owner on app startup. (#6755) Thanks @teslamint.
|
||||
- Feishu/streaming recovery: clear stale `streamingStartPromise` when card creation fails (HTTP 400) so subsequent messages can retry streaming instead of silently dropping all future replies. Fixes #43322.
|
||||
|
||||
## 2026.3.8
|
||||
|
||||
|
||||
@ -76,6 +76,9 @@ Welcome to the lobster tank! 🦞
|
||||
- **Tengji (George) Zhang** - Chinese model APIs, cloud, pi
|
||||
- GitHub: [@odysseus0](https://github.com/odysseus0) · X: [@odysseus0z](https://x.com/odysseus0z)
|
||||
|
||||
- **Andrew (Bubbles) Demczuk** - Agents/Gateway/TTS/VTT
|
||||
- GitHub: [@ademczuk](https://github.com/ademczuk) · X: [@ademczuk](https://x.com/ademczuk)
|
||||
|
||||
## How to Contribute
|
||||
|
||||
1. **Bugs & small fixes** → Open a PR!
|
||||
@ -93,6 +96,7 @@ Welcome to the lobster tank! 🦞
|
||||
- Reply to or resolve bot review conversations you addressed before asking for review again
|
||||
- **Include screenshots** — one showing the problem/before, one showing the fix/after (for UI or visual changes)
|
||||
- Use American English spelling and grammar in code, comments, docs, and UI strings
|
||||
- Do not edit files covered by `CODEOWNERS` security ownership unless a listed owner explicitly asked for the change or is already reviewing it with you. Treat those paths as restricted review surfaces, not opportunistic cleanup targets.
|
||||
|
||||
## Review Conversations Are Author-Owned
|
||||
|
||||
|
||||
8
docs/.generated/README.md
Normal file
8
docs/.generated/README.md
Normal file
@ -0,0 +1,8 @@
|
||||
# Generated Docs Artifacts
|
||||
|
||||
These baseline artifacts are generated from the repo-owned OpenClaw config schema and bundled channel/plugin metadata.
|
||||
|
||||
- Do not edit `config-baseline.json` by hand.
|
||||
- Do not edit `config-baseline.jsonl` by hand.
|
||||
- Regenerate it with `pnpm config:docs:gen`.
|
||||
- Validate it in CI or locally with `pnpm config:docs:check`.
|
||||
49887
docs/.generated/config-baseline.json
Normal file
49887
docs/.generated/config-baseline.json
Normal file
File diff suppressed because it is too large
Load Diff
4734
docs/.generated/config-baseline.jsonl
Normal file
4734
docs/.generated/config-baseline.jsonl
Normal file
File diff suppressed because it is too large
Load Diff
@ -1242,7 +1242,6 @@
|
||||
"group": "Security",
|
||||
"pages": [
|
||||
"security/formal-verification",
|
||||
"security/README",
|
||||
"security/THREAT-MODEL-ATLAS",
|
||||
"security/CONTRIBUTING-THREAT-MODEL"
|
||||
]
|
||||
@ -1598,7 +1597,6 @@
|
||||
"zh-CN/tools/apply-patch",
|
||||
"zh-CN/brave-search",
|
||||
"zh-CN/perplexity",
|
||||
"zh-CN/tools/diffs",
|
||||
"zh-CN/tools/elevated",
|
||||
"zh-CN/tools/exec",
|
||||
"zh-CN/tools/exec-approvals",
|
||||
|
||||
@ -975,6 +975,7 @@ Periodic heartbeat runs.
|
||||
model: "openai/gpt-5.2-mini",
|
||||
includeReasoning: false,
|
||||
lightContext: false, // default: false; true keeps only HEARTBEAT.md from workspace bootstrap files
|
||||
isolatedSession: false, // default: false; true runs each heartbeat in a fresh session (no conversation history)
|
||||
session: "main",
|
||||
to: "+15555550123",
|
||||
directPolicy: "allow", // allow (default) | block
|
||||
@ -992,6 +993,7 @@ Periodic heartbeat runs.
|
||||
- `suppressToolErrorWarnings`: when true, suppresses tool error warning payloads during heartbeat runs.
|
||||
- `directPolicy`: direct/DM delivery policy. `allow` (default) permits direct-target delivery. `block` suppresses direct-target delivery and emits `reason=dm-blocked`.
|
||||
- `lightContext`: when true, heartbeat runs use lightweight bootstrap context and keep only `HEARTBEAT.md` from workspace bootstrap files.
|
||||
- `isolatedSession`: when true, each heartbeat runs in a fresh session with no prior conversation history. Same isolation pattern as cron `sessionTarget: "isolated"`. Reduces per-heartbeat token cost from ~100K to ~2-5K tokens.
|
||||
- Per-agent: set `agents.list[].heartbeat`. When any agent defines `heartbeat`, **only those agents** run heartbeats.
|
||||
- Heartbeats run full agent turns — shorter intervals burn more tokens.
|
||||
|
||||
|
||||
@ -22,7 +22,8 @@ Troubleshooting: [/automation/troubleshooting](/automation/troubleshooting)
|
||||
3. Decide where heartbeat messages should go (`target: "none"` is the default; set `target: "last"` to route to the last contact).
|
||||
4. Optional: enable heartbeat reasoning delivery for transparency.
|
||||
5. Optional: use lightweight bootstrap context if heartbeat runs only need `HEARTBEAT.md`.
|
||||
6. Optional: restrict heartbeats to active hours (local time).
|
||||
6. Optional: enable isolated sessions to avoid sending full conversation history each heartbeat.
|
||||
7. Optional: restrict heartbeats to active hours (local time).
|
||||
|
||||
Example config:
|
||||
|
||||
@ -35,6 +36,7 @@ Example config:
|
||||
target: "last", // explicit delivery to last contact (default is "none")
|
||||
directPolicy: "allow", // default: allow direct/DM targets; set "block" to suppress
|
||||
lightContext: true, // optional: only inject HEARTBEAT.md from bootstrap files
|
||||
isolatedSession: true, // optional: fresh session each run (no conversation history)
|
||||
// activeHours: { start: "08:00", end: "24:00" },
|
||||
// includeReasoning: true, // optional: send separate `Reasoning:` message too
|
||||
},
|
||||
@ -91,6 +93,7 @@ and logged; a message that is only `HEARTBEAT_OK` is dropped.
|
||||
model: "anthropic/claude-opus-4-6",
|
||||
includeReasoning: false, // default: false (deliver separate Reasoning: message when available)
|
||||
lightContext: false, // default: false; true keeps only HEARTBEAT.md from workspace bootstrap files
|
||||
isolatedSession: false, // default: false; true runs each heartbeat in a fresh session (no conversation history)
|
||||
target: "last", // default: none | options: last | none | <channel id> (core or plugin, e.g. "bluebubbles")
|
||||
to: "+15551234567", // optional channel-specific override
|
||||
accountId: "ops-bot", // optional multi-account channel id
|
||||
@ -212,6 +215,7 @@ Use `accountId` to target a specific account on multi-account channels like Tele
|
||||
- `model`: optional model override for heartbeat runs (`provider/model`).
|
||||
- `includeReasoning`: when enabled, also deliver the separate `Reasoning:` message when available (same shape as `/reasoning on`).
|
||||
- `lightContext`: when true, heartbeat runs use lightweight bootstrap context and keep only `HEARTBEAT.md` from workspace bootstrap files.
|
||||
- `isolatedSession`: when true, each heartbeat runs in a fresh session with no prior conversation history. Uses the same isolation pattern as cron `sessionTarget: "isolated"`. Dramatically reduces per-heartbeat token cost. Combine with `lightContext: true` for maximum savings. Delivery routing still uses the main session context.
|
||||
- `session`: optional session key for heartbeat runs.
|
||||
- `main` (default): agent main session.
|
||||
- Explicit session key (copy from `openclaw sessions --json` or the [sessions CLI](/cli/sessions)).
|
||||
@ -380,6 +384,10 @@ off in group chats.
|
||||
|
||||
## Cost awareness
|
||||
|
||||
Heartbeats run full agent turns. Shorter intervals burn more tokens. Keep
|
||||
`HEARTBEAT.md` small and consider a cheaper `model` or `target: "none"` if you
|
||||
only want internal state updates.
|
||||
Heartbeats run full agent turns. Shorter intervals burn more tokens. To reduce cost:
|
||||
|
||||
- Use `isolatedSession: true` to avoid sending full conversation history (~100K tokens down to ~2-5K per run).
|
||||
- Use `lightContext: true` to limit bootstrap files to just `HEARTBEAT.md`.
|
||||
- Set a cheaper `model` (e.g. `ollama/llama3.2:1b`).
|
||||
- Keep `HEARTBEAT.md` small.
|
||||
- Use `target: "none"` if you only want internal state updates.
|
||||
|
||||
@ -289,7 +289,7 @@ Look for:
|
||||
|
||||
- Valid browser executable path.
|
||||
- CDP profile reachability.
|
||||
- Extension relay tab attachment for `profile="chrome-relay"`.
|
||||
- Extension relay tab attachment (if an extension relay profile is configured).
|
||||
|
||||
Common signatures:
|
||||
|
||||
|
||||
@ -29,6 +29,10 @@ Current OpenClaw releases use date-based versioning.
|
||||
- Beta prerelease version: `YYYY.M.D-beta.N`
|
||||
- Git tag: `vYYYY.M.D-beta.N`
|
||||
- Examples from repo history: `v2026.2.15-beta.1`, `v2026.3.8-beta.1`
|
||||
- Fallback correction tag: `vYYYY.M.D-N`
|
||||
- Use only as a last-resort recovery tag when a published immutable release burned the original stable tag and you cannot reuse it.
|
||||
- The npm package version stays `YYYY.M.D`; the `-N` suffix is only for the git tag and GitHub release.
|
||||
- Prefer betas for normal pre-release iteration, then cut a clean stable tag once ready.
|
||||
- Use the same version string everywhere, minus the leading `v` where Git tags are not used:
|
||||
- `package.json`: `2026.3.8`
|
||||
- Git tag: `v2026.3.8`
|
||||
@ -38,12 +42,12 @@ Current OpenClaw releases use date-based versioning.
|
||||
- `latest` = stable
|
||||
- `beta` = prerelease/testing
|
||||
- Dev is the moving head of `main`, not a normal git-tagged release.
|
||||
- The tag-triggered preview run enforces the current stable/beta tag formats and rejects versions whose CalVer date is more than 2 UTC calendar days away from the release date.
|
||||
- The tag-triggered preview run accepts stable, beta, and fallback correction tags, and rejects versions whose CalVer date is more than 2 UTC calendar days away from the release date.
|
||||
|
||||
Historical note:
|
||||
|
||||
- Older tags such as `v2026.1.11-1`, `v2026.2.6-3`, and `v2.0.0-beta2` exist in repo history.
|
||||
- Treat those as legacy tag patterns. New releases should use `vYYYY.M.D` for stable and `vYYYY.M.D-beta.N` for beta.
|
||||
- Treat correction tags as a fallback-only escape hatch. New releases should still use `vYYYY.M.D` for stable and `vYYYY.M.D-beta.N` for beta.
|
||||
|
||||
1. **Version & metadata**
|
||||
|
||||
@ -72,6 +76,7 @@ Historical note:
|
||||
- [ ] `pnpm check`
|
||||
- [ ] `pnpm test` (or `pnpm test:coverage` if you need coverage output)
|
||||
- [ ] `pnpm release:check` (verifies npm pack contents)
|
||||
- [ ] If `pnpm config:docs:check` fails as part of release validation and the config-surface change is intentional, run `pnpm config:docs:gen`, review `docs/.generated/config-baseline.json` and `docs/.generated/config-baseline.jsonl`, commit the updated baselines, then rerun `pnpm release:check`.
|
||||
- [ ] `OPENCLAW_INSTALL_SMOKE_SKIP_NONROOT=1 pnpm test:install:smoke` (Docker install smoke test, fast path; required before release)
|
||||
- If the immediate previous npm release is known broken, set `OPENCLAW_INSTALL_SMOKE_PREVIOUS=<last-good-version>` or `OPENCLAW_INSTALL_SMOKE_SKIP_PREVIOUS=1` for the preinstall step.
|
||||
- [ ] (Optional) Full installer smoke (adds non-root + CLI coverage): `pnpm test:install:smoke`
|
||||
@ -99,7 +104,9 @@ Historical note:
|
||||
- [ ] Run `OpenClaw NPM Release` manually with the same tag to publish after `npm-release` environment approval.
|
||||
- Stable tags publish to npm `latest`.
|
||||
- Beta tags publish to npm `beta`.
|
||||
- Both the preview run and the manual publish run reject tags that do not match `package.json`, are not on `main`, or whose CalVer date is more than 2 UTC calendar days away from the release date.
|
||||
- Fallback correction tags like `v2026.3.13-1` map to npm version `2026.3.13`.
|
||||
- Both the preview run and the manual publish run reject tags that do not map back to `package.json`, are not on `main`, or whose CalVer date is more than 2 UTC calendar days away from the release date.
|
||||
- If `openclaw@YYYY.M.D` is already published, a fallback correction tag is still useful for GitHub release and Docker recovery, but npm publish will not republish that version.
|
||||
- [ ] Verify the registry: `npm view openclaw version`, `npm view openclaw dist-tags`, and `npx -y openclaw@X.Y.Z --version` (or `--help`).
|
||||
|
||||
### Troubleshooting (notes from 2.0.0-beta2 release)
|
||||
@ -109,8 +116,9 @@ Historical note:
|
||||
- `NPM_CONFIG_AUTH_TYPE=legacy npm dist-tag add openclaw@X.Y.Z latest`
|
||||
- **`npx` verification fails with `ECOMPROMISED: Lock compromised`**: retry with a fresh cache:
|
||||
- `NPM_CONFIG_CACHE=/tmp/npm-cache-$(date +%s) npx -y openclaw@X.Y.Z --version`
|
||||
- **Tag needs repointing after a late fix**: force-update and push the tag, then ensure the GitHub release assets still match:
|
||||
- `git tag -f vX.Y.Z && git push -f origin vX.Y.Z`
|
||||
- **Tag needs recovery after a late fix**: if the original stable tag is tied to an immutable GitHub release, mint a fallback correction tag like `vX.Y.Z-1` instead of trying to force-update `vX.Y.Z`.
|
||||
- Keep the npm package version at `X.Y.Z`; the correction suffix is for the git tag and GitHub release only.
|
||||
- Use this only as a last resort. For normal iteration, prefer beta tags and then cut a clean stable release.
|
||||
|
||||
7. **GitHub release + appcast**
|
||||
|
||||
|
||||
@ -1,17 +0,0 @@
|
||||
# OpenClaw Security & Trust
|
||||
|
||||
**Live:** [trust.openclaw.ai](https://trust.openclaw.ai)
|
||||
|
||||
## Documents
|
||||
|
||||
- [Threat Model](/security/THREAT-MODEL-ATLAS) - MITRE ATLAS-based threat model for the OpenClaw ecosystem
|
||||
- [Contributing to the Threat Model](/security/CONTRIBUTING-THREAT-MODEL) - How to add threats, mitigations, and attack chains
|
||||
|
||||
## Reporting Vulnerabilities
|
||||
|
||||
See the [Trust page](https://trust.openclaw.ai) for full reporting instructions covering all repos.
|
||||
|
||||
## Contact
|
||||
|
||||
- **Jamieson O'Reilly** ([@theonejvo](https://twitter.com/theonejvo)) - Security & Trust
|
||||
- Discord: #security channel
|
||||
@ -25,7 +25,7 @@ Note, selecting 'chromium-browser' instead of 'chromium'
|
||||
chromium-browser is already the newest version (2:1snap1-0ubuntu2).
|
||||
```
|
||||
|
||||
This is NOT a real browser — it's just a wrapper.
|
||||
This is NOT a real browser - it's just a wrapper.
|
||||
|
||||
### Solution 1: Install Google Chrome (Recommended)
|
||||
|
||||
@ -123,7 +123,7 @@ curl -s http://127.0.0.1:18791/tabs
|
||||
|
||||
### Problem: "Chrome extension relay is running, but no tab is connected"
|
||||
|
||||
You’re using the `chrome-relay` profile (extension relay). It expects the OpenClaw
|
||||
You're using an extension relay profile. It expects the OpenClaw
|
||||
browser extension to be attached to a live tab.
|
||||
|
||||
Fix options:
|
||||
|
||||
@ -62,19 +62,14 @@ After upgrading OpenClaw:
|
||||
|
||||
## Use it (set gateway token once)
|
||||
|
||||
OpenClaw ships with a built-in browser profile named `chrome-relay` that targets the extension relay on the default port.
|
||||
To use the extension relay, create a browser profile for it:
|
||||
|
||||
Before first attach, open extension Options and set:
|
||||
|
||||
- `Port` (default `18792`)
|
||||
- `Gateway token` (must match `gateway.auth.token` / `OPENCLAW_GATEWAY_TOKEN`)
|
||||
|
||||
Use it:
|
||||
|
||||
- CLI: `openclaw browser --browser-profile chrome-relay tabs`
|
||||
- Agent tool: `browser` with `profile="chrome-relay"`
|
||||
|
||||
If you want a different name or a different relay port, create your own profile:
|
||||
Then create a profile:
|
||||
|
||||
```bash
|
||||
openclaw browser create-profile \
|
||||
@ -84,6 +79,11 @@ openclaw browser create-profile \
|
||||
--color "#00AA00"
|
||||
```
|
||||
|
||||
Use it:
|
||||
|
||||
- CLI: `openclaw browser --browser-profile my-chrome tabs`
|
||||
- Agent tool: `browser` with `profile="my-chrome"`
|
||||
|
||||
### Custom Gateway ports
|
||||
|
||||
If you're using a custom gateway port, the extension relay port is automatically derived:
|
||||
|
||||
@ -15,9 +15,12 @@ const {
|
||||
mockCreateFeishuReplyDispatcher,
|
||||
mockSendMessageFeishu,
|
||||
mockGetMessageFeishu,
|
||||
mockListFeishuThreadMessages,
|
||||
mockDownloadMessageResourceFeishu,
|
||||
mockCreateFeishuClient,
|
||||
mockResolveAgentRoute,
|
||||
mockReadSessionUpdatedAt,
|
||||
mockResolveStorePath,
|
||||
} = vi.hoisted(() => ({
|
||||
mockCreateFeishuReplyDispatcher: vi.fn(() => ({
|
||||
dispatcher: vi.fn(),
|
||||
@ -26,6 +29,7 @@ const {
|
||||
})),
|
||||
mockSendMessageFeishu: vi.fn().mockResolvedValue({ messageId: "pairing-msg", chatId: "oc-dm" }),
|
||||
mockGetMessageFeishu: vi.fn().mockResolvedValue(null),
|
||||
mockListFeishuThreadMessages: vi.fn().mockResolvedValue([]),
|
||||
mockDownloadMessageResourceFeishu: vi.fn().mockResolvedValue({
|
||||
buffer: Buffer.from("video"),
|
||||
contentType: "video/mp4",
|
||||
@ -40,6 +44,8 @@ const {
|
||||
mainSessionKey: "agent:main:main",
|
||||
matchedBy: "default",
|
||||
})),
|
||||
mockReadSessionUpdatedAt: vi.fn(),
|
||||
mockResolveStorePath: vi.fn(() => "/tmp/feishu-sessions.json"),
|
||||
}));
|
||||
|
||||
vi.mock("./reply-dispatcher.js", () => ({
|
||||
@ -49,6 +55,7 @@ vi.mock("./reply-dispatcher.js", () => ({
|
||||
vi.mock("./send.js", () => ({
|
||||
sendMessageFeishu: mockSendMessageFeishu,
|
||||
getMessageFeishu: mockGetMessageFeishu,
|
||||
listFeishuThreadMessages: mockListFeishuThreadMessages,
|
||||
}));
|
||||
|
||||
vi.mock("./media.js", () => ({
|
||||
@ -70,11 +77,13 @@ function createRuntimeEnv(): RuntimeEnv {
|
||||
}
|
||||
|
||||
async function dispatchMessage(params: { cfg: ClawdbotConfig; event: FeishuMessageEvent }) {
|
||||
const runtime = createRuntimeEnv();
|
||||
await handleFeishuMessage({
|
||||
cfg: params.cfg,
|
||||
event: params.event,
|
||||
runtime: createRuntimeEnv(),
|
||||
runtime,
|
||||
});
|
||||
return runtime;
|
||||
}
|
||||
|
||||
describe("buildFeishuAgentBody", () => {
|
||||
@ -140,6 +149,10 @@ describe("handleFeishuMessage command authorization", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
mockShouldComputeCommandAuthorized.mockReset().mockReturnValue(true);
|
||||
mockGetMessageFeishu.mockReset().mockResolvedValue(null);
|
||||
mockListFeishuThreadMessages.mockReset().mockResolvedValue([]);
|
||||
mockReadSessionUpdatedAt.mockReturnValue(undefined);
|
||||
mockResolveStorePath.mockReturnValue("/tmp/feishu-sessions.json");
|
||||
mockResolveAgentRoute.mockReturnValue({
|
||||
agentId: "main",
|
||||
channel: "feishu",
|
||||
@ -166,6 +179,12 @@ describe("handleFeishuMessage command authorization", () => {
|
||||
resolveAgentRoute:
|
||||
mockResolveAgentRoute as unknown as PluginRuntime["channel"]["routing"]["resolveAgentRoute"],
|
||||
},
|
||||
session: {
|
||||
readSessionUpdatedAt:
|
||||
mockReadSessionUpdatedAt as unknown as PluginRuntime["channel"]["session"]["readSessionUpdatedAt"],
|
||||
resolveStorePath:
|
||||
mockResolveStorePath as unknown as PluginRuntime["channel"]["session"]["resolveStorePath"],
|
||||
},
|
||||
reply: {
|
||||
resolveEnvelopeFormatOptions: vi.fn(
|
||||
() => ({}),
|
||||
@ -1709,6 +1728,193 @@ describe("handleFeishuMessage command authorization", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("bootstraps topic thread context only for a new thread session", async () => {
|
||||
mockShouldComputeCommandAuthorized.mockReturnValue(false);
|
||||
mockGetMessageFeishu.mockResolvedValue({
|
||||
messageId: "om_topic_root",
|
||||
chatId: "oc-group",
|
||||
content: "root starter",
|
||||
contentType: "text",
|
||||
threadId: "omt_topic_1",
|
||||
});
|
||||
mockListFeishuThreadMessages.mockResolvedValue([
|
||||
{
|
||||
messageId: "om_bot_reply",
|
||||
senderId: "app_1",
|
||||
senderType: "app",
|
||||
content: "assistant reply",
|
||||
contentType: "text",
|
||||
createTime: 1710000000000,
|
||||
},
|
||||
{
|
||||
messageId: "om_follow_up",
|
||||
senderId: "ou-topic-user",
|
||||
senderType: "user",
|
||||
content: "follow-up question",
|
||||
contentType: "text",
|
||||
createTime: 1710000001000,
|
||||
},
|
||||
]);
|
||||
|
||||
const cfg: ClawdbotConfig = {
|
||||
channels: {
|
||||
feishu: {
|
||||
groups: {
|
||||
"oc-group": {
|
||||
requireMention: false,
|
||||
groupSessionScope: "group_topic",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
|
||||
const event: FeishuMessageEvent = {
|
||||
sender: { sender_id: { open_id: "ou-topic-user" } },
|
||||
message: {
|
||||
message_id: "om_topic_followup_existing_session",
|
||||
root_id: "om_topic_root",
|
||||
chat_id: "oc-group",
|
||||
chat_type: "group",
|
||||
message_type: "text",
|
||||
content: JSON.stringify({ text: "current turn" }),
|
||||
},
|
||||
};
|
||||
|
||||
await dispatchMessage({ cfg, event });
|
||||
|
||||
expect(mockReadSessionUpdatedAt).toHaveBeenCalledWith({
|
||||
storePath: "/tmp/feishu-sessions.json",
|
||||
sessionKey: "agent:main:feishu:dm:ou-attacker",
|
||||
});
|
||||
expect(mockListFeishuThreadMessages).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
rootMessageId: "om_topic_root",
|
||||
}),
|
||||
);
|
||||
expect(mockFinalizeInboundContext).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
ThreadStarterBody: "root starter",
|
||||
ThreadHistoryBody: "assistant reply\n\nfollow-up question",
|
||||
ThreadLabel: "Feishu thread in oc-group",
|
||||
MessageThreadId: "om_topic_root",
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("skips topic thread bootstrap when the thread session already exists", async () => {
|
||||
mockShouldComputeCommandAuthorized.mockReturnValue(false);
|
||||
mockReadSessionUpdatedAt.mockReturnValue(1710000000000);
|
||||
|
||||
const cfg: ClawdbotConfig = {
|
||||
channels: {
|
||||
feishu: {
|
||||
groups: {
|
||||
"oc-group": {
|
||||
requireMention: false,
|
||||
groupSessionScope: "group_topic",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
|
||||
const event: FeishuMessageEvent = {
|
||||
sender: { sender_id: { open_id: "ou-topic-user" } },
|
||||
message: {
|
||||
message_id: "om_topic_followup",
|
||||
root_id: "om_topic_root",
|
||||
chat_id: "oc-group",
|
||||
chat_type: "group",
|
||||
message_type: "text",
|
||||
content: JSON.stringify({ text: "current turn" }),
|
||||
},
|
||||
};
|
||||
|
||||
await dispatchMessage({ cfg, event });
|
||||
|
||||
expect(mockGetMessageFeishu).not.toHaveBeenCalled();
|
||||
expect(mockListFeishuThreadMessages).not.toHaveBeenCalled();
|
||||
expect(mockFinalizeInboundContext).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
ThreadStarterBody: undefined,
|
||||
ThreadHistoryBody: undefined,
|
||||
ThreadLabel: "Feishu thread in oc-group",
|
||||
MessageThreadId: "om_topic_root",
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("keeps sender-scoped thread history when the inbound event and thread history use different sender ids", async () => {
|
||||
mockShouldComputeCommandAuthorized.mockReturnValue(false);
|
||||
mockGetMessageFeishu.mockResolvedValue({
|
||||
messageId: "om_topic_root",
|
||||
chatId: "oc-group",
|
||||
content: "root starter",
|
||||
contentType: "text",
|
||||
threadId: "omt_topic_1",
|
||||
});
|
||||
mockListFeishuThreadMessages.mockResolvedValue([
|
||||
{
|
||||
messageId: "om_bot_reply",
|
||||
senderId: "app_1",
|
||||
senderType: "app",
|
||||
content: "assistant reply",
|
||||
contentType: "text",
|
||||
createTime: 1710000000000,
|
||||
},
|
||||
{
|
||||
messageId: "om_follow_up",
|
||||
senderId: "user_topic_1",
|
||||
senderType: "user",
|
||||
content: "follow-up question",
|
||||
contentType: "text",
|
||||
createTime: 1710000001000,
|
||||
},
|
||||
]);
|
||||
|
||||
const cfg: ClawdbotConfig = {
|
||||
channels: {
|
||||
feishu: {
|
||||
groups: {
|
||||
"oc-group": {
|
||||
requireMention: false,
|
||||
groupSessionScope: "group_topic_sender",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
|
||||
const event: FeishuMessageEvent = {
|
||||
sender: {
|
||||
sender_id: {
|
||||
open_id: "ou-topic-user",
|
||||
user_id: "user_topic_1",
|
||||
},
|
||||
},
|
||||
message: {
|
||||
message_id: "om_topic_followup_mixed_ids",
|
||||
root_id: "om_topic_root",
|
||||
chat_id: "oc-group",
|
||||
chat_type: "group",
|
||||
message_type: "text",
|
||||
content: JSON.stringify({ text: "current turn" }),
|
||||
},
|
||||
};
|
||||
|
||||
await dispatchMessage({ cfg, event });
|
||||
|
||||
expect(mockFinalizeInboundContext).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
ThreadStarterBody: "root starter",
|
||||
ThreadHistoryBody: "assistant reply\n\nfollow-up question",
|
||||
ThreadLabel: "Feishu thread in oc-group",
|
||||
MessageThreadId: "om_topic_root",
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("does not dispatch twice for the same image message_id (concurrent dedupe)", async () => {
|
||||
mockShouldComputeCommandAuthorized.mockReturnValue(false);
|
||||
|
||||
|
||||
@ -29,7 +29,7 @@ import {
|
||||
import { parsePostContent } from "./post.js";
|
||||
import { createFeishuReplyDispatcher } from "./reply-dispatcher.js";
|
||||
import { getFeishuRuntime } from "./runtime.js";
|
||||
import { getMessageFeishu, sendMessageFeishu } from "./send.js";
|
||||
import { getMessageFeishu, listFeishuThreadMessages, sendMessageFeishu } from "./send.js";
|
||||
import type { FeishuMessageContext, FeishuMediaInfo, ResolvedFeishuAccount } from "./types.js";
|
||||
import type { DynamicAgentCreationConfig } from "./types.js";
|
||||
|
||||
@ -1239,16 +1239,17 @@ export async function handleFeishuMessage(params: {
|
||||
const mediaPayload = buildAgentMediaPayload(mediaList);
|
||||
|
||||
// Fetch quoted/replied message content if parentId exists
|
||||
let quotedMessageInfo: Awaited<ReturnType<typeof getMessageFeishu>> = null;
|
||||
let quotedContent: string | undefined;
|
||||
if (ctx.parentId) {
|
||||
try {
|
||||
const quotedMsg = await getMessageFeishu({
|
||||
quotedMessageInfo = await getMessageFeishu({
|
||||
cfg,
|
||||
messageId: ctx.parentId,
|
||||
accountId: account.accountId,
|
||||
});
|
||||
if (quotedMsg) {
|
||||
quotedContent = quotedMsg.content;
|
||||
if (quotedMessageInfo) {
|
||||
quotedContent = quotedMessageInfo.content;
|
||||
log(
|
||||
`feishu[${account.accountId}]: fetched quoted message: ${quotedContent?.slice(0, 100)}`,
|
||||
);
|
||||
@ -1258,6 +1259,11 @@ export async function handleFeishuMessage(params: {
|
||||
}
|
||||
}
|
||||
|
||||
const isTopicSessionForThread =
|
||||
isGroup &&
|
||||
(groupSession?.groupSessionScope === "group_topic" ||
|
||||
groupSession?.groupSessionScope === "group_topic_sender");
|
||||
|
||||
const envelopeOptions = core.channel.reply.resolveEnvelopeFormatOptions(cfg);
|
||||
const messageBody = buildFeishuAgentBody({
|
||||
ctx,
|
||||
@ -1309,13 +1315,150 @@ export async function handleFeishuMessage(params: {
|
||||
}))
|
||||
: undefined;
|
||||
|
||||
const threadContextBySessionKey = new Map<
|
||||
string,
|
||||
{
|
||||
threadStarterBody?: string;
|
||||
threadHistoryBody?: string;
|
||||
threadLabel?: string;
|
||||
}
|
||||
>();
|
||||
let rootMessageInfo: Awaited<ReturnType<typeof getMessageFeishu>> | undefined;
|
||||
let rootMessageFetched = false;
|
||||
const getRootMessageInfo = async () => {
|
||||
if (!ctx.rootId) {
|
||||
return null;
|
||||
}
|
||||
if (!rootMessageFetched) {
|
||||
rootMessageFetched = true;
|
||||
if (ctx.rootId === ctx.parentId && quotedMessageInfo) {
|
||||
rootMessageInfo = quotedMessageInfo;
|
||||
} else {
|
||||
try {
|
||||
rootMessageInfo = await getMessageFeishu({
|
||||
cfg,
|
||||
messageId: ctx.rootId,
|
||||
accountId: account.accountId,
|
||||
});
|
||||
} catch (err) {
|
||||
log(`feishu[${account.accountId}]: failed to fetch root message: ${String(err)}`);
|
||||
rootMessageInfo = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
return rootMessageInfo ?? null;
|
||||
};
|
||||
const resolveThreadContextForAgent = async (agentId: string, agentSessionKey: string) => {
|
||||
const cached = threadContextBySessionKey.get(agentSessionKey);
|
||||
if (cached) {
|
||||
return cached;
|
||||
}
|
||||
|
||||
const threadContext: {
|
||||
threadStarterBody?: string;
|
||||
threadHistoryBody?: string;
|
||||
threadLabel?: string;
|
||||
} = {
|
||||
threadLabel:
|
||||
(ctx.rootId || ctx.threadId) && isTopicSessionForThread
|
||||
? `Feishu thread in ${ctx.chatId}`
|
||||
: undefined,
|
||||
};
|
||||
|
||||
if (!(ctx.rootId || ctx.threadId) || !isTopicSessionForThread) {
|
||||
threadContextBySessionKey.set(agentSessionKey, threadContext);
|
||||
return threadContext;
|
||||
}
|
||||
|
||||
const storePath = core.channel.session.resolveStorePath(cfg.session?.store, { agentId });
|
||||
const previousThreadSessionTimestamp = core.channel.session.readSessionUpdatedAt({
|
||||
storePath,
|
||||
sessionKey: agentSessionKey,
|
||||
});
|
||||
if (previousThreadSessionTimestamp) {
|
||||
log(
|
||||
`feishu[${account.accountId}]: skipping thread bootstrap for existing session ${agentSessionKey}`,
|
||||
);
|
||||
threadContextBySessionKey.set(agentSessionKey, threadContext);
|
||||
return threadContext;
|
||||
}
|
||||
|
||||
const rootMsg = await getRootMessageInfo();
|
||||
let feishuThreadId = ctx.threadId ?? rootMsg?.threadId;
|
||||
if (feishuThreadId) {
|
||||
log(`feishu[${account.accountId}]: resolved thread ID: ${feishuThreadId}`);
|
||||
}
|
||||
if (!feishuThreadId) {
|
||||
log(
|
||||
`feishu[${account.accountId}]: no threadId found for root message ${ctx.rootId ?? "none"}, skipping thread history`,
|
||||
);
|
||||
threadContextBySessionKey.set(agentSessionKey, threadContext);
|
||||
return threadContext;
|
||||
}
|
||||
|
||||
try {
|
||||
const threadMessages = await listFeishuThreadMessages({
|
||||
cfg,
|
||||
threadId: feishuThreadId,
|
||||
currentMessageId: ctx.messageId,
|
||||
rootMessageId: ctx.rootId,
|
||||
limit: 20,
|
||||
accountId: account.accountId,
|
||||
});
|
||||
const senderScoped = groupSession?.groupSessionScope === "group_topic_sender";
|
||||
const senderIds = new Set(
|
||||
[ctx.senderOpenId, senderUserId]
|
||||
.map((id) => id?.trim())
|
||||
.filter((id): id is string => id !== undefined && id.length > 0),
|
||||
);
|
||||
const relevantMessages =
|
||||
(senderScoped
|
||||
? threadMessages.filter(
|
||||
(msg) =>
|
||||
msg.senderType === "app" ||
|
||||
(msg.senderId !== undefined && senderIds.has(msg.senderId.trim())),
|
||||
)
|
||||
: threadMessages) ?? [];
|
||||
|
||||
const threadStarterBody = rootMsg?.content ?? relevantMessages[0]?.content;
|
||||
const includeStarterInHistory = Boolean(rootMsg?.content || ctx.rootId);
|
||||
const historyMessages = includeStarterInHistory
|
||||
? relevantMessages
|
||||
: relevantMessages.slice(1);
|
||||
const historyParts = historyMessages.map((msg) => {
|
||||
const role = msg.senderType === "app" ? "assistant" : "user";
|
||||
return core.channel.reply.formatAgentEnvelope({
|
||||
channel: "Feishu",
|
||||
from: `${msg.senderId ?? "Unknown"} (${role})`,
|
||||
timestamp: msg.createTime,
|
||||
body: msg.content,
|
||||
envelope: envelopeOptions,
|
||||
});
|
||||
});
|
||||
|
||||
threadContext.threadStarterBody = threadStarterBody;
|
||||
threadContext.threadHistoryBody =
|
||||
historyParts.length > 0 ? historyParts.join("\n\n") : undefined;
|
||||
log(
|
||||
`feishu[${account.accountId}]: populated thread bootstrap with starter=${threadStarterBody ? "yes" : "no"} history=${historyMessages.length}`,
|
||||
);
|
||||
} catch (err) {
|
||||
log(`feishu[${account.accountId}]: failed to fetch thread history: ${String(err)}`);
|
||||
}
|
||||
|
||||
threadContextBySessionKey.set(agentSessionKey, threadContext);
|
||||
return threadContext;
|
||||
};
|
||||
|
||||
// --- Shared context builder for dispatch ---
|
||||
const buildCtxPayloadForAgent = (
|
||||
const buildCtxPayloadForAgent = async (
|
||||
agentId: string,
|
||||
agentSessionKey: string,
|
||||
agentAccountId: string,
|
||||
wasMentioned: boolean,
|
||||
) =>
|
||||
core.channel.reply.finalizeInboundContext({
|
||||
) => {
|
||||
const threadContext = await resolveThreadContextForAgent(agentId, agentSessionKey);
|
||||
return core.channel.reply.finalizeInboundContext({
|
||||
Body: combinedBody,
|
||||
BodyForAgent: messageBody,
|
||||
InboundHistory: inboundHistory,
|
||||
@ -1335,6 +1478,12 @@ export async function handleFeishuMessage(params: {
|
||||
Surface: "feishu" as const,
|
||||
MessageSid: ctx.messageId,
|
||||
ReplyToBody: quotedContent ?? undefined,
|
||||
ThreadStarterBody: threadContext.threadStarterBody,
|
||||
ThreadHistoryBody: threadContext.threadHistoryBody,
|
||||
ThreadLabel: threadContext.threadLabel,
|
||||
// Only use rootId (om_* message anchor) — threadId (omt_*) is a container
|
||||
// ID and would produce invalid reply targets downstream.
|
||||
MessageThreadId: ctx.rootId && isTopicSessionForThread ? ctx.rootId : undefined,
|
||||
Timestamp: Date.now(),
|
||||
WasMentioned: wasMentioned,
|
||||
CommandAuthorized: commandAuthorized,
|
||||
@ -1343,6 +1492,7 @@ export async function handleFeishuMessage(params: {
|
||||
GroupSystemPrompt: isGroup ? groupConfig?.systemPrompt?.trim() || undefined : undefined,
|
||||
...mediaPayload,
|
||||
});
|
||||
};
|
||||
|
||||
// Parse message create_time (Feishu uses millisecond epoch string).
|
||||
const messageCreateTimeMs = event.message.create_time
|
||||
@ -1402,7 +1552,8 @@ export async function handleFeishuMessage(params: {
|
||||
}
|
||||
|
||||
const agentSessionKey = buildBroadcastSessionKey(route.sessionKey, route.agentId, agentId);
|
||||
const agentCtx = buildCtxPayloadForAgent(
|
||||
const agentCtx = await buildCtxPayloadForAgent(
|
||||
agentId,
|
||||
agentSessionKey,
|
||||
route.accountId,
|
||||
ctx.mentionedBot && agentId === activeAgentId,
|
||||
@ -1502,7 +1653,8 @@ export async function handleFeishuMessage(params: {
|
||||
);
|
||||
} else {
|
||||
// --- Single-agent dispatch (existing behavior) ---
|
||||
const ctxPayload = buildCtxPayloadForAgent(
|
||||
const ctxPayload = await buildCtxPayloadForAgent(
|
||||
route.agentId,
|
||||
route.sessionKey,
|
||||
route.accountId,
|
||||
ctx.mentionedBot,
|
||||
|
||||
@ -462,6 +462,126 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("streams reasoning content as blockquote before answer", async () => {
|
||||
const { result, options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
await options.onReplyStart?.();
|
||||
// Core agent sends pre-formatted text from formatReasoningMessage
|
||||
result.replyOptions.onReasoningStream?.({ text: "Reasoning:\n_thinking step 1_" });
|
||||
result.replyOptions.onReasoningStream?.({
|
||||
text: "Reasoning:\n_thinking step 1_\n_step 2_",
|
||||
});
|
||||
result.replyOptions.onPartialReply?.({ text: "answer part" });
|
||||
result.replyOptions.onReasoningEnd?.();
|
||||
await options.deliver({ text: "answer part final" }, { kind: "final" });
|
||||
|
||||
expect(streamingInstances).toHaveLength(1);
|
||||
const updateCalls = streamingInstances[0].update.mock.calls.map((c: unknown[]) => c[0]);
|
||||
const reasoningUpdate = updateCalls.find((c: string) => c.includes("Thinking"));
|
||||
expect(reasoningUpdate).toContain("> 💭 **Thinking**");
|
||||
// formatReasoningPrefix strips "Reasoning:" prefix and italic markers
|
||||
expect(reasoningUpdate).toContain("> thinking step");
|
||||
expect(reasoningUpdate).not.toContain("Reasoning:");
|
||||
expect(reasoningUpdate).not.toMatch(/> _.*_/);
|
||||
|
||||
const combinedUpdate = updateCalls.find(
|
||||
(c: string) => c.includes("Thinking") && c.includes("---"),
|
||||
);
|
||||
expect(combinedUpdate).toBeDefined();
|
||||
|
||||
expect(streamingInstances[0].close).toHaveBeenCalledTimes(1);
|
||||
const closeArg = streamingInstances[0].close.mock.calls[0][0] as string;
|
||||
expect(closeArg).toContain("> 💭 **Thinking**");
|
||||
expect(closeArg).toContain("---");
|
||||
expect(closeArg).toContain("answer part final");
|
||||
});
|
||||
|
||||
it("provides onReasoningStream and onReasoningEnd when streaming is enabled", () => {
|
||||
const { result } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
expect(result.replyOptions.onReasoningStream).toBeTypeOf("function");
|
||||
expect(result.replyOptions.onReasoningEnd).toBeTypeOf("function");
|
||||
});
|
||||
|
||||
it("omits reasoning callbacks when streaming is disabled", () => {
|
||||
resolveFeishuAccountMock.mockReturnValue({
|
||||
accountId: "main",
|
||||
appId: "app_id",
|
||||
appSecret: "app_secret",
|
||||
domain: "feishu",
|
||||
config: {
|
||||
renderMode: "auto",
|
||||
streaming: false,
|
||||
},
|
||||
});
|
||||
|
||||
const { result } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
expect(result.replyOptions.onReasoningStream).toBeUndefined();
|
||||
expect(result.replyOptions.onReasoningEnd).toBeUndefined();
|
||||
});
|
||||
|
||||
it("renders reasoning-only card when no answer text arrives", async () => {
|
||||
const { result, options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
await options.onReplyStart?.();
|
||||
result.replyOptions.onReasoningStream?.({ text: "Reasoning:\n_deep thought_" });
|
||||
result.replyOptions.onReasoningEnd?.();
|
||||
await options.onIdle?.();
|
||||
|
||||
expect(streamingInstances).toHaveLength(1);
|
||||
expect(streamingInstances[0].close).toHaveBeenCalledTimes(1);
|
||||
const closeArg = streamingInstances[0].close.mock.calls[0][0] as string;
|
||||
expect(closeArg).toContain("> 💭 **Thinking**");
|
||||
expect(closeArg).toContain("> deep thought");
|
||||
expect(closeArg).not.toContain("Reasoning:");
|
||||
expect(closeArg).not.toContain("---");
|
||||
});
|
||||
|
||||
it("ignores empty reasoning payloads", async () => {
|
||||
const { result, options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
await options.onReplyStart?.();
|
||||
result.replyOptions.onReasoningStream?.({ text: "" });
|
||||
result.replyOptions.onPartialReply?.({ text: "```ts\ncode\n```" });
|
||||
await options.deliver({ text: "```ts\ncode\n```" }, { kind: "final" });
|
||||
|
||||
expect(streamingInstances).toHaveLength(1);
|
||||
const closeArg = streamingInstances[0].close.mock.calls[0][0] as string;
|
||||
expect(closeArg).not.toContain("Thinking");
|
||||
expect(closeArg).toBe("```ts\ncode\n```");
|
||||
});
|
||||
|
||||
it("deduplicates final text by raw answer payload, not combined card text", async () => {
|
||||
const { result, options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
await options.onReplyStart?.();
|
||||
result.replyOptions.onReasoningStream?.({ text: "Reasoning:\n_thought_" });
|
||||
result.replyOptions.onReasoningEnd?.();
|
||||
await options.deliver({ text: "```ts\nfinal answer\n```" }, { kind: "final" });
|
||||
|
||||
expect(streamingInstances).toHaveLength(1);
|
||||
expect(streamingInstances[0].close).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Deliver the same raw answer text again — should be deduped
|
||||
await options.deliver({ text: "```ts\nfinal answer\n```" }, { kind: "final" });
|
||||
|
||||
// No second streaming session since the raw answer text matches
|
||||
expect(streamingInstances).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("passes replyToMessageId and replyInThread to streaming.start()", async () => {
|
||||
const { options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
@ -510,4 +630,50 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("recovers streaming after start() throws (HTTP 400)", async () => {
|
||||
const errorMock = vi.fn();
|
||||
let shouldFailStart = true;
|
||||
|
||||
// Intercept streaming instance creation to make first start() reject
|
||||
const origPush = streamingInstances.push;
|
||||
streamingInstances.push = function (this: any[], ...args: any[]) {
|
||||
if (shouldFailStart) {
|
||||
args[0].start = vi
|
||||
.fn()
|
||||
.mockRejectedValue(new Error("Create card request failed with HTTP 400"));
|
||||
shouldFailStart = false;
|
||||
}
|
||||
return origPush.apply(this, args);
|
||||
} as any;
|
||||
|
||||
try {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: errorMock } as never,
|
||||
chatId: "oc_chat",
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
|
||||
// First deliver with markdown triggers startStreaming - which will fail
|
||||
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "block" });
|
||||
|
||||
// Wait for the async error to propagate
|
||||
await vi.waitFor(() => {
|
||||
expect(errorMock).toHaveBeenCalledWith(expect.stringContaining("streaming start failed"));
|
||||
});
|
||||
|
||||
// Second deliver should create a NEW streaming session (not stuck)
|
||||
await options.deliver({ text: "```ts\nconst y = 2\n```" }, { kind: "final" });
|
||||
|
||||
// Two instances created: first failed, second succeeded and closed
|
||||
expect(streamingInstances).toHaveLength(2);
|
||||
expect(streamingInstances[1].start).toHaveBeenCalled();
|
||||
expect(streamingInstances[1].close).toHaveBeenCalled();
|
||||
} finally {
|
||||
streamingInstances.push = origPush;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@ -143,11 +143,39 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
|
||||
let streaming: FeishuStreamingSession | null = null;
|
||||
let streamText = "";
|
||||
let lastPartial = "";
|
||||
let reasoningText = "";
|
||||
const deliveredFinalTexts = new Set<string>();
|
||||
let partialUpdateQueue: Promise<void> = Promise.resolve();
|
||||
let streamingStartPromise: Promise<void> | null = null;
|
||||
type StreamTextUpdateMode = "snapshot" | "delta";
|
||||
|
||||
const formatReasoningPrefix = (thinking: string): string => {
|
||||
if (!thinking) return "";
|
||||
const withoutLabel = thinking.replace(/^Reasoning:\n/, "");
|
||||
const plain = withoutLabel.replace(/^_(.*)_$/gm, "$1");
|
||||
const lines = plain.split("\n").map((line) => `> ${line}`);
|
||||
return `> 💭 **Thinking**\n${lines.join("\n")}`;
|
||||
};
|
||||
|
||||
const buildCombinedStreamText = (thinking: string, answer: string): string => {
|
||||
const parts: string[] = [];
|
||||
if (thinking) parts.push(formatReasoningPrefix(thinking));
|
||||
if (thinking && answer) parts.push("\n\n---\n\n");
|
||||
if (answer) parts.push(answer);
|
||||
return parts.join("");
|
||||
};
|
||||
|
||||
const flushStreamingCardUpdate = (combined: string) => {
|
||||
partialUpdateQueue = partialUpdateQueue.then(async () => {
|
||||
if (streamingStartPromise) {
|
||||
await streamingStartPromise;
|
||||
}
|
||||
if (streaming?.isActive()) {
|
||||
await streaming.update(combined);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
const queueStreamingUpdate = (
|
||||
nextText: string,
|
||||
options?: {
|
||||
@ -167,14 +195,13 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
|
||||
const mode = options?.mode ?? "snapshot";
|
||||
streamText =
|
||||
mode === "delta" ? `${streamText}${nextText}` : mergeStreamingText(streamText, nextText);
|
||||
partialUpdateQueue = partialUpdateQueue.then(async () => {
|
||||
if (streamingStartPromise) {
|
||||
await streamingStartPromise;
|
||||
}
|
||||
if (streaming?.isActive()) {
|
||||
await streaming.update(streamText);
|
||||
}
|
||||
});
|
||||
flushStreamingCardUpdate(buildCombinedStreamText(reasoningText, streamText));
|
||||
};
|
||||
|
||||
const queueReasoningUpdate = (nextThinking: string) => {
|
||||
if (!nextThinking) return;
|
||||
reasoningText = nextThinking;
|
||||
flushStreamingCardUpdate(buildCombinedStreamText(reasoningText, streamText));
|
||||
};
|
||||
|
||||
const startStreaming = () => {
|
||||
@ -202,6 +229,7 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
|
||||
} catch (error) {
|
||||
params.runtime.error?.(`feishu: streaming start failed: ${String(error)}`);
|
||||
streaming = null;
|
||||
streamingStartPromise = null; // allow retry on next deliver
|
||||
}
|
||||
})();
|
||||
};
|
||||
@ -212,7 +240,7 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
|
||||
}
|
||||
await partialUpdateQueue;
|
||||
if (streaming?.isActive()) {
|
||||
let text = streamText;
|
||||
let text = buildCombinedStreamText(reasoningText, streamText);
|
||||
if (mentionTargets?.length) {
|
||||
text = buildMentionedCardContent(mentionTargets, text);
|
||||
}
|
||||
@ -222,6 +250,7 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
|
||||
streamingStartPromise = null;
|
||||
streamText = "";
|
||||
lastPartial = "";
|
||||
reasoningText = "";
|
||||
};
|
||||
|
||||
const sendChunkedTextReply = async (params: {
|
||||
@ -391,6 +420,16 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
|
||||
});
|
||||
}
|
||||
: undefined,
|
||||
onReasoningStream: streamingEnabled
|
||||
? (payload: ReplyPayload) => {
|
||||
if (!payload.text) {
|
||||
return;
|
||||
}
|
||||
startStreaming();
|
||||
queueReasoningUpdate(payload.text);
|
||||
}
|
||||
: undefined,
|
||||
onReasoningEnd: streamingEnabled ? () => {} : undefined,
|
||||
},
|
||||
markDispatchIdle,
|
||||
};
|
||||
|
||||
@ -1,12 +1,14 @@
|
||||
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { getMessageFeishu } from "./send.js";
|
||||
import { getMessageFeishu, listFeishuThreadMessages } from "./send.js";
|
||||
|
||||
const { mockClientGet, mockCreateFeishuClient, mockResolveFeishuAccount } = vi.hoisted(() => ({
|
||||
mockClientGet: vi.fn(),
|
||||
mockCreateFeishuClient: vi.fn(),
|
||||
mockResolveFeishuAccount: vi.fn(),
|
||||
}));
|
||||
const { mockClientGet, mockClientList, mockCreateFeishuClient, mockResolveFeishuAccount } =
|
||||
vi.hoisted(() => ({
|
||||
mockClientGet: vi.fn(),
|
||||
mockClientList: vi.fn(),
|
||||
mockCreateFeishuClient: vi.fn(),
|
||||
mockResolveFeishuAccount: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("./client.js", () => ({
|
||||
createFeishuClient: mockCreateFeishuClient,
|
||||
@ -27,6 +29,7 @@ describe("getMessageFeishu", () => {
|
||||
im: {
|
||||
message: {
|
||||
get: mockClientGet,
|
||||
list: mockClientList,
|
||||
},
|
||||
},
|
||||
});
|
||||
@ -165,4 +168,68 @@ describe("getMessageFeishu", () => {
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("reuses the same content parsing for thread history messages", async () => {
|
||||
mockClientList.mockResolvedValueOnce({
|
||||
code: 0,
|
||||
data: {
|
||||
items: [
|
||||
{
|
||||
message_id: "om_root",
|
||||
msg_type: "text",
|
||||
body: {
|
||||
content: JSON.stringify({ text: "root starter" }),
|
||||
},
|
||||
},
|
||||
{
|
||||
message_id: "om_card",
|
||||
msg_type: "interactive",
|
||||
body: {
|
||||
content: JSON.stringify({
|
||||
body: {
|
||||
elements: [{ tag: "markdown", content: "hello from card 2.0" }],
|
||||
},
|
||||
}),
|
||||
},
|
||||
sender: {
|
||||
id: "app_1",
|
||||
sender_type: "app",
|
||||
},
|
||||
create_time: "1710000000000",
|
||||
},
|
||||
{
|
||||
message_id: "om_file",
|
||||
msg_type: "file",
|
||||
body: {
|
||||
content: JSON.stringify({ file_key: "file_v3_123" }),
|
||||
},
|
||||
sender: {
|
||||
id: "ou_1",
|
||||
sender_type: "user",
|
||||
},
|
||||
create_time: "1710000001000",
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
||||
|
||||
const result = await listFeishuThreadMessages({
|
||||
cfg: {} as ClawdbotConfig,
|
||||
threadId: "omt_1",
|
||||
rootMessageId: "om_root",
|
||||
});
|
||||
|
||||
expect(result).toEqual([
|
||||
expect.objectContaining({
|
||||
messageId: "om_file",
|
||||
contentType: "file",
|
||||
content: "[file message]",
|
||||
}),
|
||||
expect.objectContaining({
|
||||
messageId: "om_card",
|
||||
contentType: "interactive",
|
||||
content: "hello from card 2.0",
|
||||
}),
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
@ -65,6 +65,7 @@ type FeishuMessageGetItem = {
|
||||
message_id?: string;
|
||||
chat_id?: string;
|
||||
chat_type?: FeishuChatType;
|
||||
thread_id?: string;
|
||||
msg_type?: string;
|
||||
body?: { content?: string };
|
||||
sender?: FeishuMessageSender;
|
||||
@ -151,13 +152,19 @@ function parseInteractiveCardContent(parsed: unknown): string {
|
||||
return "[Interactive Card]";
|
||||
}
|
||||
|
||||
const candidate = parsed as { elements?: unknown };
|
||||
if (!Array.isArray(candidate.elements)) {
|
||||
// Support both schema 1.0 (top-level `elements`) and 2.0 (`body.elements`).
|
||||
const candidate = parsed as { elements?: unknown; body?: { elements?: unknown } };
|
||||
const elements = Array.isArray(candidate.elements)
|
||||
? candidate.elements
|
||||
: Array.isArray(candidate.body?.elements)
|
||||
? candidate.body!.elements
|
||||
: null;
|
||||
if (!elements) {
|
||||
return "[Interactive Card]";
|
||||
}
|
||||
|
||||
const texts: string[] = [];
|
||||
for (const element of candidate.elements) {
|
||||
for (const element of elements) {
|
||||
if (!element || typeof element !== "object") {
|
||||
continue;
|
||||
}
|
||||
@ -177,7 +184,7 @@ function parseInteractiveCardContent(parsed: unknown): string {
|
||||
return texts.join("\n").trim() || "[Interactive Card]";
|
||||
}
|
||||
|
||||
function parseQuotedMessageContent(rawContent: string, msgType: string): string {
|
||||
function parseFeishuMessageContent(rawContent: string, msgType: string): string {
|
||||
if (!rawContent) {
|
||||
return "";
|
||||
}
|
||||
@ -218,6 +225,30 @@ function parseQuotedMessageContent(rawContent: string, msgType: string): string
|
||||
return `[${msgType || "unknown"} message]`;
|
||||
}
|
||||
|
||||
function parseFeishuMessageItem(
|
||||
item: FeishuMessageGetItem,
|
||||
fallbackMessageId?: string,
|
||||
): FeishuMessageInfo {
|
||||
const msgType = item.msg_type ?? "text";
|
||||
const rawContent = item.body?.content ?? "";
|
||||
|
||||
return {
|
||||
messageId: item.message_id ?? fallbackMessageId ?? "",
|
||||
chatId: item.chat_id ?? "",
|
||||
chatType:
|
||||
item.chat_type === "group" || item.chat_type === "private" || item.chat_type === "p2p"
|
||||
? item.chat_type
|
||||
: undefined,
|
||||
senderId: item.sender?.id,
|
||||
senderOpenId: item.sender?.id_type === "open_id" ? item.sender?.id : undefined,
|
||||
senderType: item.sender?.sender_type,
|
||||
content: parseFeishuMessageContent(rawContent, msgType),
|
||||
contentType: msgType,
|
||||
createTime: item.create_time ? parseInt(String(item.create_time), 10) : undefined,
|
||||
threadId: item.thread_id || undefined,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a message by its ID.
|
||||
* Useful for fetching quoted/replied message content.
|
||||
@ -255,29 +286,98 @@ export async function getMessageFeishu(params: {
|
||||
return null;
|
||||
}
|
||||
|
||||
const msgType = item.msg_type ?? "text";
|
||||
const rawContent = item.body?.content ?? "";
|
||||
const content = parseQuotedMessageContent(rawContent, msgType);
|
||||
|
||||
return {
|
||||
messageId: item.message_id ?? messageId,
|
||||
chatId: item.chat_id ?? "",
|
||||
chatType:
|
||||
item.chat_type === "group" || item.chat_type === "private" || item.chat_type === "p2p"
|
||||
? item.chat_type
|
||||
: undefined,
|
||||
senderId: item.sender?.id,
|
||||
senderOpenId: item.sender?.id_type === "open_id" ? item.sender?.id : undefined,
|
||||
senderType: item.sender?.sender_type,
|
||||
content,
|
||||
contentType: msgType,
|
||||
createTime: item.create_time ? parseInt(String(item.create_time), 10) : undefined,
|
||||
};
|
||||
return parseFeishuMessageItem(item, messageId);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export type FeishuThreadMessageInfo = {
|
||||
messageId: string;
|
||||
senderId?: string;
|
||||
senderType?: string;
|
||||
content: string;
|
||||
contentType: string;
|
||||
createTime?: number;
|
||||
};
|
||||
|
||||
/**
|
||||
* List messages in a Feishu thread (topic).
|
||||
* Uses container_id_type=thread to directly query thread messages,
|
||||
* which includes both the root message and all replies (including bot replies).
|
||||
*/
|
||||
export async function listFeishuThreadMessages(params: {
|
||||
cfg: ClawdbotConfig;
|
||||
threadId: string;
|
||||
currentMessageId?: string;
|
||||
/** Exclude the root message (already provided separately as ThreadStarterBody). */
|
||||
rootMessageId?: string;
|
||||
limit?: number;
|
||||
accountId?: string;
|
||||
}): Promise<FeishuThreadMessageInfo[]> {
|
||||
const { cfg, threadId, currentMessageId, rootMessageId, limit = 20, accountId } = params;
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient(account);
|
||||
|
||||
const response = (await client.im.message.list({
|
||||
params: {
|
||||
container_id_type: "thread",
|
||||
container_id: threadId,
|
||||
// Fetch newest messages first so long threads keep the most recent turns.
|
||||
// Results are reversed below to restore chronological order.
|
||||
sort_type: "ByCreateTimeDesc",
|
||||
page_size: Math.min(limit + 1, 50),
|
||||
},
|
||||
})) as {
|
||||
code?: number;
|
||||
msg?: string;
|
||||
data?: {
|
||||
items?: Array<
|
||||
{
|
||||
message_id?: string;
|
||||
root_id?: string;
|
||||
parent_id?: string;
|
||||
} & FeishuMessageGetItem
|
||||
>;
|
||||
};
|
||||
};
|
||||
|
||||
if (response.code !== 0) {
|
||||
throw new Error(
|
||||
`Feishu thread list failed: code=${response.code} msg=${response.msg ?? "unknown"}`,
|
||||
);
|
||||
}
|
||||
|
||||
const items = response.data?.items ?? [];
|
||||
const results: FeishuThreadMessageInfo[] = [];
|
||||
|
||||
for (const item of items) {
|
||||
if (currentMessageId && item.message_id === currentMessageId) continue;
|
||||
if (rootMessageId && item.message_id === rootMessageId) continue;
|
||||
|
||||
const parsed = parseFeishuMessageItem(item);
|
||||
|
||||
results.push({
|
||||
messageId: parsed.messageId,
|
||||
senderId: parsed.senderId,
|
||||
senderType: parsed.senderType,
|
||||
content: parsed.content,
|
||||
contentType: parsed.contentType,
|
||||
createTime: parsed.createTime,
|
||||
});
|
||||
|
||||
if (results.length >= limit) break;
|
||||
}
|
||||
|
||||
// Restore chronological order (oldest first) since we fetched newest-first.
|
||||
results.reverse();
|
||||
return results;
|
||||
}
|
||||
|
||||
export type SendFeishuMessageParams = {
|
||||
cfg: ClawdbotConfig;
|
||||
to: string;
|
||||
|
||||
@ -72,6 +72,8 @@ export type FeishuMessageInfo = {
|
||||
content: string;
|
||||
contentType: string;
|
||||
createTime?: number;
|
||||
/** Feishu thread ID (omt_xxx) — present when the message belongs to a topic thread. */
|
||||
threadId?: string;
|
||||
};
|
||||
|
||||
export type FeishuProbeResult = BaseProbeResult<string> & {
|
||||
|
||||
@ -74,7 +74,10 @@ function createAutoAbortController() {
|
||||
}
|
||||
|
||||
async function runMonitorWithMocks(opts: MonitorSignalProviderOptions) {
|
||||
return monitorSignalProvider(opts);
|
||||
return monitorSignalProvider({
|
||||
config: config as OpenClawConfig,
|
||||
...opts,
|
||||
});
|
||||
}
|
||||
|
||||
async function receiveSignalPayloads(params: {
|
||||
@ -304,7 +307,9 @@ describe("monitorSignalProvider tool results", () => {
|
||||
],
|
||||
});
|
||||
|
||||
expect(sendMock).toHaveBeenCalledTimes(1);
|
||||
await vi.waitFor(() => {
|
||||
expect(sendMock).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
expect(sendMock.mock.calls[0][1]).toBe("PFX final reply");
|
||||
});
|
||||
|
||||
@ -460,8 +465,9 @@ describe("monitorSignalProvider tool results", () => {
|
||||
],
|
||||
});
|
||||
|
||||
expect(sendMock).toHaveBeenCalledTimes(1);
|
||||
expect(updateLastRouteMock).toHaveBeenCalled();
|
||||
await vi.waitFor(() => {
|
||||
expect(sendMock).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
|
||||
it("does not resend pairing code when a request is already pending", async () => {
|
||||
|
||||
@ -5,6 +5,7 @@ type SlackProviderMonitor = (params: {
|
||||
botToken: string;
|
||||
appToken: string;
|
||||
abortSignal: AbortSignal;
|
||||
config?: Record<string, unknown>;
|
||||
}) => Promise<unknown>;
|
||||
|
||||
type SlackTestState = {
|
||||
@ -49,14 +50,51 @@ type SlackClient = {
|
||||
};
|
||||
};
|
||||
|
||||
export const getSlackHandlers = () =>
|
||||
(
|
||||
globalThis as {
|
||||
__slackHandlers?: Map<string, SlackHandler>;
|
||||
}
|
||||
).__slackHandlers;
|
||||
export const getSlackHandlers = () => ensureSlackTestRuntime().handlers;
|
||||
|
||||
export const getSlackClient = () => (globalThis as { __slackClient?: SlackClient }).__slackClient;
|
||||
export const getSlackClient = () => ensureSlackTestRuntime().client;
|
||||
|
||||
function ensureSlackTestRuntime(): {
|
||||
handlers: Map<string, SlackHandler>;
|
||||
client: SlackClient;
|
||||
} {
|
||||
const globalState = globalThis as {
|
||||
__slackHandlers?: Map<string, SlackHandler>;
|
||||
__slackClient?: SlackClient;
|
||||
};
|
||||
if (!globalState.__slackHandlers) {
|
||||
globalState.__slackHandlers = new Map<string, SlackHandler>();
|
||||
}
|
||||
if (!globalState.__slackClient) {
|
||||
globalState.__slackClient = {
|
||||
auth: { test: vi.fn().mockResolvedValue({ user_id: "bot-user" }) },
|
||||
conversations: {
|
||||
info: vi.fn().mockResolvedValue({
|
||||
channel: { name: "dm", is_im: true },
|
||||
}),
|
||||
replies: vi.fn().mockResolvedValue({ messages: [] }),
|
||||
history: vi.fn().mockResolvedValue({ messages: [] }),
|
||||
},
|
||||
users: {
|
||||
info: vi.fn().mockResolvedValue({
|
||||
user: { profile: { display_name: "Ada" } },
|
||||
}),
|
||||
},
|
||||
assistant: {
|
||||
threads: {
|
||||
setStatus: vi.fn().mockResolvedValue({ ok: true }),
|
||||
},
|
||||
},
|
||||
reactions: {
|
||||
add: (...args: unknown[]) => slackTestState.reactMock(...args),
|
||||
},
|
||||
};
|
||||
}
|
||||
return {
|
||||
handlers: globalState.__slackHandlers,
|
||||
client: globalState.__slackClient,
|
||||
};
|
||||
}
|
||||
|
||||
export const flush = () => new Promise((resolve) => setTimeout(resolve, 0));
|
||||
|
||||
@ -78,6 +116,7 @@ export function startSlackMonitor(
|
||||
botToken: opts?.botToken ?? "bot-token",
|
||||
appToken: opts?.appToken ?? "app-token",
|
||||
abortSignal: controller.signal,
|
||||
config: slackTestState.config,
|
||||
});
|
||||
return { controller, run };
|
||||
}
|
||||
@ -193,34 +232,9 @@ vi.mock("../../../src/config/sessions.js", async (importOriginal) => {
|
||||
});
|
||||
|
||||
vi.mock("@slack/bolt", () => {
|
||||
const handlers = new Map<string, SlackHandler>();
|
||||
(globalThis as { __slackHandlers?: typeof handlers }).__slackHandlers = handlers;
|
||||
const client = {
|
||||
auth: { test: vi.fn().mockResolvedValue({ user_id: "bot-user" }) },
|
||||
conversations: {
|
||||
info: vi.fn().mockResolvedValue({
|
||||
channel: { name: "dm", is_im: true },
|
||||
}),
|
||||
replies: vi.fn().mockResolvedValue({ messages: [] }),
|
||||
history: vi.fn().mockResolvedValue({ messages: [] }),
|
||||
},
|
||||
users: {
|
||||
info: vi.fn().mockResolvedValue({
|
||||
user: { profile: { display_name: "Ada" } },
|
||||
}),
|
||||
},
|
||||
assistant: {
|
||||
threads: {
|
||||
setStatus: vi.fn().mockResolvedValue({ ok: true }),
|
||||
},
|
||||
},
|
||||
reactions: {
|
||||
add: (...args: unknown[]) => slackTestState.reactMock(...args),
|
||||
},
|
||||
};
|
||||
(globalThis as { __slackClient?: typeof client }).__slackClient = client;
|
||||
const { handlers, client: slackClient } = ensureSlackTestRuntime();
|
||||
class App {
|
||||
client = client;
|
||||
client = slackClient;
|
||||
event(name: string, handler: SlackHandler) {
|
||||
handlers.set(name, handler);
|
||||
}
|
||||
|
||||
@ -1,7 +1,4 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { HISTORY_CONTEXT_MARKER } from "../../../src/auto-reply/reply/history.js";
|
||||
import { resetInboundDedupe } from "../../../src/auto-reply/reply/inbound-dedupe.js";
|
||||
import { CURRENT_MESSAGE_MARKER } from "../../../src/auto-reply/reply/mentions.js";
|
||||
import {
|
||||
defaultSlackTestConfig,
|
||||
getSlackTestState,
|
||||
@ -15,6 +12,9 @@ import {
|
||||
stopSlackMonitor,
|
||||
} from "./monitor.test-helpers.js";
|
||||
|
||||
const { resetInboundDedupe } = await import("../../../src/auto-reply/reply/inbound-dedupe.js");
|
||||
const { HISTORY_CONTEXT_MARKER } = await import("../../../src/auto-reply/reply/history.js");
|
||||
const { CURRENT_MESSAGE_MARKER } = await import("../../../src/auto-reply/reply/mentions.js");
|
||||
const { monitorSlackProvider } = await import("./monitor.js");
|
||||
|
||||
const slackTestState = getSlackTestState();
|
||||
@ -209,7 +209,9 @@ describe("monitorSlackProvider tool results", () => {
|
||||
|
||||
function expectSingleSendWithThread(threadTs: string | undefined) {
|
||||
expect(sendMock).toHaveBeenCalledTimes(1);
|
||||
expect(sendMock.mock.calls[0][2]).toMatchObject({ threadTs });
|
||||
expect((sendMock.mock.calls[0]?.[2] as { threadTs?: string } | undefined)?.threadTs).toBe(
|
||||
threadTs,
|
||||
);
|
||||
}
|
||||
|
||||
async function runDefaultMessageAndExpectSentText(expectedText: string) {
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { loadConfig } from "../../../src/config/config.js";
|
||||
import { buildTelegramMessageContextForTest } from "./bot-message-context.test-harness.js";
|
||||
|
||||
const { defaultRouteConfig } = vi.hoisted(() => ({
|
||||
defaultRouteConfig: {
|
||||
@ -20,6 +19,9 @@ vi.mock("../../../src/config/config.js", async (importOriginal) => {
|
||||
};
|
||||
});
|
||||
|
||||
const { buildTelegramMessageContextForTest } =
|
||||
await import("./bot-message-context.test-harness.js");
|
||||
|
||||
describe("buildTelegramMessageContext per-topic agentId routing", () => {
|
||||
function buildForumMessage(threadId = 3) {
|
||||
return {
|
||||
@ -98,7 +100,7 @@ describe("buildTelegramMessageContext per-topic agentId routing", () => {
|
||||
expect(ctx?.ctxPayload?.SessionKey).toContain("agent:main:");
|
||||
});
|
||||
|
||||
it("falls back to default agent when topic agentId does not exist", async () => {
|
||||
it("preserves an unknown topic agentId in the session key", async () => {
|
||||
vi.mocked(loadConfig).mockReturnValue({
|
||||
agents: {
|
||||
list: [{ id: "main", default: true }, { id: "zu" }],
|
||||
@ -110,7 +112,7 @@ describe("buildTelegramMessageContext per-topic agentId routing", () => {
|
||||
const ctx = await buildForumContext({ topicConfig: { agentId: "ghost" } });
|
||||
|
||||
expect(ctx).not.toBeNull();
|
||||
expect(ctx?.ctxPayload?.SessionKey).toContain("agent:main:");
|
||||
expect(ctx?.ctxPayload?.SessionKey).toContain("agent:ghost:");
|
||||
});
|
||||
|
||||
it("routes DM topic to specific agent when agentId is set", async () => {
|
||||
|
||||
@ -102,73 +102,81 @@ vi.mock("./sent-message-cache.js", () => ({
|
||||
clearSentMessageCache: vi.fn(),
|
||||
}));
|
||||
|
||||
export const useSpy: MockFn<(arg: unknown) => void> = vi.fn();
|
||||
export const middlewareUseSpy: AnyMock = vi.fn();
|
||||
export const onSpy: AnyMock = vi.fn();
|
||||
export const stopSpy: AnyMock = vi.fn();
|
||||
export const commandSpy: AnyMock = vi.fn();
|
||||
export const botCtorSpy: AnyMock = vi.fn();
|
||||
export const answerCallbackQuerySpy: AnyAsyncMock = vi.fn(async () => undefined);
|
||||
export const sendChatActionSpy: AnyMock = vi.fn();
|
||||
export const editMessageTextSpy: AnyAsyncMock = vi.fn(async () => ({ message_id: 88 }));
|
||||
export const editMessageReplyMarkupSpy: AnyAsyncMock = vi.fn(async () => ({ message_id: 88 }));
|
||||
export const sendMessageDraftSpy: AnyAsyncMock = vi.fn(async () => true);
|
||||
export const setMessageReactionSpy: AnyAsyncMock = vi.fn(async () => undefined);
|
||||
export const setMyCommandsSpy: AnyAsyncMock = vi.fn(async () => undefined);
|
||||
export const getMeSpy: AnyAsyncMock = vi.fn(async () => ({
|
||||
username: "openclaw_bot",
|
||||
has_topics_enabled: true,
|
||||
// All spy variables used inside vi.mock("grammy", ...) must be created via
|
||||
// vi.hoisted() so they are available when the hoisted factory runs, regardless
|
||||
// of module evaluation order across different test files.
|
||||
const grammySpies = vi.hoisted(() => ({
|
||||
useSpy: vi.fn() as MockFn<(arg: unknown) => void>,
|
||||
middlewareUseSpy: vi.fn() as AnyMock,
|
||||
onSpy: vi.fn() as AnyMock,
|
||||
stopSpy: vi.fn() as AnyMock,
|
||||
commandSpy: vi.fn() as AnyMock,
|
||||
botCtorSpy: vi.fn() as AnyMock,
|
||||
answerCallbackQuerySpy: vi.fn(async () => undefined) as AnyAsyncMock,
|
||||
sendChatActionSpy: vi.fn() as AnyMock,
|
||||
editMessageTextSpy: vi.fn(async () => ({ message_id: 88 })) as AnyAsyncMock,
|
||||
editMessageReplyMarkupSpy: vi.fn(async () => ({ message_id: 88 })) as AnyAsyncMock,
|
||||
sendMessageDraftSpy: vi.fn(async () => true) as AnyAsyncMock,
|
||||
setMessageReactionSpy: vi.fn(async () => undefined) as AnyAsyncMock,
|
||||
setMyCommandsSpy: vi.fn(async () => undefined) as AnyAsyncMock,
|
||||
getMeSpy: vi.fn(async () => ({
|
||||
username: "openclaw_bot",
|
||||
has_topics_enabled: true,
|
||||
})) as AnyAsyncMock,
|
||||
sendMessageSpy: vi.fn(async () => ({ message_id: 77 })) as AnyAsyncMock,
|
||||
sendAnimationSpy: vi.fn(async () => ({ message_id: 78 })) as AnyAsyncMock,
|
||||
sendPhotoSpy: vi.fn(async () => ({ message_id: 79 })) as AnyAsyncMock,
|
||||
getFileSpy: vi.fn(async () => ({ file_path: "media/file.jpg" })) as AnyAsyncMock,
|
||||
}));
|
||||
export const sendMessageSpy: AnyAsyncMock = vi.fn(async () => ({ message_id: 77 }));
|
||||
export const sendAnimationSpy: AnyAsyncMock = vi.fn(async () => ({ message_id: 78 }));
|
||||
export const sendPhotoSpy: AnyAsyncMock = vi.fn(async () => ({ message_id: 79 }));
|
||||
export const getFileSpy: AnyAsyncMock = vi.fn(async () => ({ file_path: "media/file.jpg" }));
|
||||
|
||||
type ApiStub = {
|
||||
config: { use: (arg: unknown) => void };
|
||||
answerCallbackQuery: typeof answerCallbackQuerySpy;
|
||||
sendChatAction: typeof sendChatActionSpy;
|
||||
editMessageText: typeof editMessageTextSpy;
|
||||
editMessageReplyMarkup: typeof editMessageReplyMarkupSpy;
|
||||
sendMessageDraft: typeof sendMessageDraftSpy;
|
||||
setMessageReaction: typeof setMessageReactionSpy;
|
||||
setMyCommands: typeof setMyCommandsSpy;
|
||||
getMe: typeof getMeSpy;
|
||||
sendMessage: typeof sendMessageSpy;
|
||||
sendAnimation: typeof sendAnimationSpy;
|
||||
sendPhoto: typeof sendPhotoSpy;
|
||||
getFile: typeof getFileSpy;
|
||||
};
|
||||
|
||||
const apiStub: ApiStub = {
|
||||
config: { use: useSpy },
|
||||
answerCallbackQuery: answerCallbackQuerySpy,
|
||||
sendChatAction: sendChatActionSpy,
|
||||
editMessageText: editMessageTextSpy,
|
||||
editMessageReplyMarkup: editMessageReplyMarkupSpy,
|
||||
sendMessageDraft: sendMessageDraftSpy,
|
||||
setMessageReaction: setMessageReactionSpy,
|
||||
setMyCommands: setMyCommandsSpy,
|
||||
getMe: getMeSpy,
|
||||
sendMessage: sendMessageSpy,
|
||||
sendAnimation: sendAnimationSpy,
|
||||
sendPhoto: sendPhotoSpy,
|
||||
getFile: getFileSpy,
|
||||
};
|
||||
export const {
|
||||
useSpy,
|
||||
middlewareUseSpy,
|
||||
onSpy,
|
||||
stopSpy,
|
||||
commandSpy,
|
||||
botCtorSpy,
|
||||
answerCallbackQuerySpy,
|
||||
sendChatActionSpy,
|
||||
editMessageTextSpy,
|
||||
editMessageReplyMarkupSpy,
|
||||
sendMessageDraftSpy,
|
||||
setMessageReactionSpy,
|
||||
setMyCommandsSpy,
|
||||
getMeSpy,
|
||||
sendMessageSpy,
|
||||
sendAnimationSpy,
|
||||
sendPhotoSpy,
|
||||
getFileSpy,
|
||||
} = grammySpies;
|
||||
|
||||
vi.mock("grammy", () => ({
|
||||
Bot: class {
|
||||
api = apiStub;
|
||||
use = middlewareUseSpy;
|
||||
on = onSpy;
|
||||
stop = stopSpy;
|
||||
command = commandSpy;
|
||||
api = {
|
||||
config: { use: grammySpies.useSpy },
|
||||
answerCallbackQuery: grammySpies.answerCallbackQuerySpy,
|
||||
sendChatAction: grammySpies.sendChatActionSpy,
|
||||
editMessageText: grammySpies.editMessageTextSpy,
|
||||
editMessageReplyMarkup: grammySpies.editMessageReplyMarkupSpy,
|
||||
sendMessageDraft: grammySpies.sendMessageDraftSpy,
|
||||
setMessageReaction: grammySpies.setMessageReactionSpy,
|
||||
setMyCommands: grammySpies.setMyCommandsSpy,
|
||||
getMe: grammySpies.getMeSpy,
|
||||
sendMessage: grammySpies.sendMessageSpy,
|
||||
sendAnimation: grammySpies.sendAnimationSpy,
|
||||
sendPhoto: grammySpies.sendPhotoSpy,
|
||||
getFile: grammySpies.getFileSpy,
|
||||
};
|
||||
use = grammySpies.middlewareUseSpy;
|
||||
on = grammySpies.onSpy;
|
||||
stop = grammySpies.stopSpy;
|
||||
command = grammySpies.commandSpy;
|
||||
catch = vi.fn();
|
||||
constructor(
|
||||
public token: string,
|
||||
public options?: { client?: { fetch?: typeof fetch } },
|
||||
) {
|
||||
botCtorSpy(token, options);
|
||||
grammySpies.botCtorSpy(token, options);
|
||||
}
|
||||
},
|
||||
InputFile: class {},
|
||||
|
||||
@ -29,9 +29,11 @@ import {
|
||||
throttlerSpy,
|
||||
useSpy,
|
||||
} from "./bot.create-telegram-bot.test-harness.js";
|
||||
import { createTelegramBot, getTelegramSequentialKey } from "./bot.js";
|
||||
import { resolveTelegramFetch } from "./fetch.js";
|
||||
|
||||
// Import after the harness registers `vi.mock(...)` for grammY and Telegram internals.
|
||||
const { createTelegramBot, getTelegramSequentialKey } = await import("./bot.js");
|
||||
|
||||
const loadConfig = getLoadConfigMock();
|
||||
const loadWebMedia = getLoadWebMediaMock();
|
||||
const readChannelAllowFromStore = getReadChannelAllowFromStoreMock();
|
||||
@ -813,7 +815,7 @@ describe("createTelegramBot", () => {
|
||||
expect(payload.SessionKey).toBe("agent:opie:main");
|
||||
});
|
||||
|
||||
it("drops non-default account DMs without explicit bindings", async () => {
|
||||
it("routes non-default account DMs to the per-account fallback session without explicit bindings", async () => {
|
||||
loadConfig.mockReturnValue({
|
||||
channels: {
|
||||
telegram: {
|
||||
@ -842,7 +844,10 @@ describe("createTelegramBot", () => {
|
||||
getFile: async () => ({ download: async () => new Uint8Array() }),
|
||||
});
|
||||
|
||||
expect(replySpy).not.toHaveBeenCalled();
|
||||
expect(replySpy).toHaveBeenCalledTimes(1);
|
||||
const payload = replySpy.mock.calls[0]?.[0];
|
||||
expect(payload.AccountId).toBe("opie");
|
||||
expect(payload.SessionKey).toContain("agent:main:telegram:opie:");
|
||||
});
|
||||
|
||||
it("applies group mention overrides and fallback behavior", async () => {
|
||||
@ -1909,9 +1914,8 @@ describe("createTelegramBot", () => {
|
||||
await flushTimer?.();
|
||||
|
||||
expect(replySpy).toHaveBeenCalledTimes(1);
|
||||
const payload = replySpy.mock.calls[0]?.[0] as { Body?: string; MediaPaths?: string[] };
|
||||
const payload = replySpy.mock.calls[0]?.[0] as { Body?: string };
|
||||
expect(payload.Body).toContain("album caption");
|
||||
expect(payload.MediaPaths).toHaveLength(2);
|
||||
} finally {
|
||||
setTimeoutSpy.mockRestore();
|
||||
fetchSpy.mockRestore();
|
||||
@ -2137,9 +2141,8 @@ describe("createTelegramBot", () => {
|
||||
await flushTimer?.();
|
||||
|
||||
expect(replySpy).toHaveBeenCalledTimes(1);
|
||||
const payload = replySpy.mock.calls[0]?.[0] as { Body?: string; MediaPaths?: string[] };
|
||||
const payload = replySpy.mock.calls[0]?.[0] as { Body?: string };
|
||||
expect(payload.Body).toContain("partial album");
|
||||
expect(payload.MediaPaths).toHaveLength(1);
|
||||
} finally {
|
||||
setTimeoutSpy.mockRestore();
|
||||
fetchSpy.mockRestore();
|
||||
|
||||
@ -1,11 +1,5 @@
|
||||
import { rm } from "node:fs/promises";
|
||||
import { afterAll, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
listNativeCommandSpecs,
|
||||
listNativeCommandSpecsForConfig,
|
||||
} from "../../../src/auto-reply/commands-registry.js";
|
||||
import { loadSessionStore } from "../../../src/config/sessions.js";
|
||||
import { normalizeTelegramCommandName } from "../../../src/config/telegram-custom-commands.js";
|
||||
import { escapeRegExp, formatEnvelopeTimestamp } from "../../../test/helpers/envelope-timestamp.js";
|
||||
import { expectInboundContextContract } from "../../../test/helpers/inbound-contract.js";
|
||||
import {
|
||||
@ -25,7 +19,14 @@ import {
|
||||
setMyCommandsSpy,
|
||||
wasSentByBot,
|
||||
} from "./bot.create-telegram-bot.test-harness.js";
|
||||
import { createTelegramBot } from "./bot.js";
|
||||
|
||||
// Import after the harness registers `vi.mock(...)` for grammY and Telegram internals.
|
||||
const { listNativeCommandSpecs, listNativeCommandSpecsForConfig } =
|
||||
await import("../../../src/auto-reply/commands-registry.js");
|
||||
const { loadSessionStore } = await import("../../../src/config/sessions.js");
|
||||
const { normalizeTelegramCommandName } =
|
||||
await import("../../../src/config/telegram-custom-commands.js");
|
||||
const { createTelegramBot } = await import("./bot.js");
|
||||
|
||||
const loadConfig = getLoadConfigMock();
|
||||
const readChannelAllowFromStore = getReadChannelAllowFromStoreMock();
|
||||
@ -833,8 +834,6 @@ describe("createTelegramBot", () => {
|
||||
ReplyToBody?: string;
|
||||
};
|
||||
expect(payload.ReplyToBody).toBe("<media:image>");
|
||||
expect(payload.MediaPaths).toHaveLength(1);
|
||||
expect(payload.MediaPath).toBe(payload.MediaPaths?.[0]);
|
||||
expect(getFileSpy).toHaveBeenCalledWith("reply-photo-1");
|
||||
} finally {
|
||||
fetchSpy.mockRestore();
|
||||
|
||||
@ -775,10 +775,11 @@ describe("sendMessageTelegram", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("retries on transient errors with retry_after", async () => {
|
||||
it("retries pre-connect send errors and honors retry_after when present", async () => {
|
||||
vi.useFakeTimers();
|
||||
const chatId = "123";
|
||||
const err = Object.assign(new Error("429"), {
|
||||
const err = Object.assign(new Error("getaddrinfo ENOTFOUND api.telegram.org"), {
|
||||
code: "ENOTFOUND",
|
||||
parameters: { retry_after: 0.5 },
|
||||
});
|
||||
const sendMessage = vi
|
||||
@ -823,29 +824,25 @@ describe("sendMessageTelegram", () => {
|
||||
expect(sendMessage).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("retries when grammY network envelope message includes failed-after wording", async () => {
|
||||
it("does not retry generic grammY failed-after envelopes for non-idempotent sends", async () => {
|
||||
const chatId = "123";
|
||||
const sendMessage = vi
|
||||
.fn()
|
||||
.mockRejectedValueOnce(
|
||||
new Error("Network request for 'sendMessage' failed after 1 attempts."),
|
||||
)
|
||||
.mockResolvedValueOnce({
|
||||
message_id: 7,
|
||||
chat: { id: chatId },
|
||||
});
|
||||
);
|
||||
const api = { sendMessage } as unknown as {
|
||||
sendMessage: typeof sendMessage;
|
||||
};
|
||||
|
||||
const result = await sendMessageTelegram(chatId, "hi", {
|
||||
token: "tok",
|
||||
api,
|
||||
retry: { attempts: 2, minDelayMs: 0, maxDelayMs: 0, jitter: 0 },
|
||||
});
|
||||
|
||||
expect(sendMessage).toHaveBeenCalledTimes(2);
|
||||
expect(result).toEqual({ messageId: "7", chatId });
|
||||
await expect(
|
||||
sendMessageTelegram(chatId, "hi", {
|
||||
token: "tok",
|
||||
api,
|
||||
retry: { attempts: 2, minDelayMs: 0, maxDelayMs: 0, jitter: 0 },
|
||||
}),
|
||||
).rejects.toThrow(/failed after 1 attempts/i);
|
||||
expect(sendMessage).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("sends GIF media as animation", async () => {
|
||||
|
||||
@ -15,8 +15,8 @@ import {
|
||||
withResolvedWebhookRequestPipeline,
|
||||
WEBHOOK_ANOMALY_COUNTER_DEFAULTS,
|
||||
WEBHOOK_RATE_LIMIT_DEFAULTS,
|
||||
resolveClientIp,
|
||||
} from "openclaw/plugin-sdk/zalo";
|
||||
import { resolveClientIp } from "../../../src/gateway/net.js";
|
||||
import type { ResolvedZaloAccount } from "./accounts.js";
|
||||
import type { ZaloFetch, ZaloUpdate } from "./api.js";
|
||||
import type { ZaloRuntimeEnv } from "./monitor.js";
|
||||
|
||||
@ -477,7 +477,37 @@ describe("zalouser monitor group mention gating", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("blocks group messages when sender is not in groupAllowFrom/allowFrom", async () => {
|
||||
it("allows allowlisted group replies without inheriting the DM allowlist", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: false,
|
||||
replyPayload: { text: "ok" },
|
||||
});
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
content: "ping @bot",
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
senderId: "456",
|
||||
}),
|
||||
account: {
|
||||
...createAccount(),
|
||||
config: {
|
||||
...createAccount().config,
|
||||
groupPolicy: "allowlist",
|
||||
allowFrom: ["123"],
|
||||
groups: {
|
||||
"group:g-1": { allow: true, requireMention: true },
|
||||
},
|
||||
},
|
||||
},
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
});
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("blocks group messages when sender is not in groupAllowFrom", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: false,
|
||||
});
|
||||
@ -493,6 +523,7 @@ describe("zalouser monitor group mention gating", () => {
|
||||
...createAccount().config,
|
||||
groupPolicy: "allowlist",
|
||||
allowFrom: ["999"],
|
||||
groupAllowFrom: ["999"],
|
||||
},
|
||||
},
|
||||
config: createConfig(),
|
||||
|
||||
@ -27,6 +27,7 @@ import {
|
||||
resolveOpenProviderRuntimeGroupPolicy,
|
||||
resolveDefaultGroupPolicy,
|
||||
resolveSenderCommandAuthorization,
|
||||
resolveSenderScopedGroupPolicy,
|
||||
sendMediaWithLeadingCaption,
|
||||
summarizeMapping,
|
||||
warnMissingProviderGroupPolicyFallbackOnce,
|
||||
@ -349,6 +350,10 @@ async function processMessage(
|
||||
const dmPolicy = account.config.dmPolicy ?? "pairing";
|
||||
const configAllowFrom = (account.config.allowFrom ?? []).map((v) => String(v));
|
||||
const configGroupAllowFrom = (account.config.groupAllowFrom ?? []).map((v) => String(v));
|
||||
const senderGroupPolicy = resolveSenderScopedGroupPolicy({
|
||||
groupPolicy,
|
||||
groupAllowFrom: configGroupAllowFrom,
|
||||
});
|
||||
const shouldComputeCommandAuth = core.channel.commands.shouldComputeCommandAuthorized(
|
||||
commandBody,
|
||||
config,
|
||||
@ -360,10 +365,11 @@ async function processMessage(
|
||||
const accessDecision = resolveDmGroupAccessWithLists({
|
||||
isGroup,
|
||||
dmPolicy,
|
||||
groupPolicy,
|
||||
groupPolicy: senderGroupPolicy,
|
||||
allowFrom: configAllowFrom,
|
||||
groupAllowFrom: configGroupAllowFrom,
|
||||
storeAllowFrom,
|
||||
groupAllowFromFallbackToAllowFrom: false,
|
||||
isSenderAllowed: (allowFrom) => isSenderAllowed(senderId, allowFrom),
|
||||
});
|
||||
if (isGroup && accessDecision.decision !== "allow") {
|
||||
|
||||
@ -233,6 +233,8 @@
|
||||
"check:docs": "pnpm format:docs:check && pnpm lint:docs && pnpm docs:check-links",
|
||||
"check:host-env-policy:swift": "node scripts/generate-host-env-security-policy-swift.mjs --check",
|
||||
"check:loc": "node --import tsx scripts/check-ts-max-loc.ts --max 500",
|
||||
"config:docs:check": "node --import tsx scripts/generate-config-doc-baseline.ts --check",
|
||||
"config:docs:gen": "node --import tsx scripts/generate-config-doc-baseline.ts --write",
|
||||
"deadcode:ci": "pnpm deadcode:report:ci:knip",
|
||||
"deadcode:knip": "pnpm dlx knip --config knip.config.ts --isolate-workspaces --production --no-progress --reporter compact --files --dependencies",
|
||||
"deadcode:report": "pnpm deadcode:knip; pnpm deadcode:ts-prune; pnpm deadcode:ts-unused",
|
||||
@ -298,7 +300,7 @@
|
||||
"protocol:check": "pnpm protocol:gen && pnpm protocol:gen:swift && git diff --exit-code -- dist/protocol.schema.json apps/macos/Sources/OpenClawProtocol/GatewayModels.swift apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift",
|
||||
"protocol:gen": "node --import tsx scripts/protocol-gen.ts",
|
||||
"protocol:gen:swift": "node --import tsx scripts/protocol-gen-swift.ts",
|
||||
"release:check": "node --import tsx scripts/release-check.ts",
|
||||
"release:check": "pnpm config:docs:check && node --import tsx scripts/release-check.ts",
|
||||
"release:openclaw:npm:check": "node --import tsx scripts/openclaw-npm-release-check.ts",
|
||||
"start": "node scripts/run-node.mjs",
|
||||
"test": "node scripts/test-parallel.mjs",
|
||||
@ -449,7 +451,8 @@
|
||||
"node-domexception": "npm:@nolyfill/domexception@^1.0.28",
|
||||
"@sinclair/typebox": "0.34.48",
|
||||
"tar": "7.5.11",
|
||||
"tough-cookie": "4.1.3"
|
||||
"tough-cookie": "4.1.3",
|
||||
"yauzl": "3.2.1"
|
||||
},
|
||||
"onlyBuiltDependencies": [
|
||||
"@lydell/node-pty",
|
||||
|
||||
19
pnpm-lock.yaml
generated
19
pnpm-lock.yaml
generated
@ -18,6 +18,7 @@ overrides:
|
||||
'@sinclair/typebox': 0.34.48
|
||||
tar: 7.5.11
|
||||
tough-cookie: 4.1.3
|
||||
yauzl: 3.2.1
|
||||
|
||||
packageExtensionsChecksum: sha256-n+P/SQo4Pf+dHYpYn1Y6wL4cJEVoVzZ835N0OEp4TM8=
|
||||
|
||||
@ -4440,9 +4441,6 @@ packages:
|
||||
fastq@1.20.1:
|
||||
resolution: {integrity: sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==}
|
||||
|
||||
fd-slicer@1.1.0:
|
||||
resolution: {integrity: sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==}
|
||||
|
||||
fdir@6.5.0:
|
||||
resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==}
|
||||
engines: {node: '>=12.0.0'}
|
||||
@ -6805,8 +6803,9 @@ packages:
|
||||
resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
yauzl@2.10.0:
|
||||
resolution: {integrity: sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==}
|
||||
yauzl@3.2.1:
|
||||
resolution: {integrity: sha512-k1isifdbpNSFEHFJ1ZY4YDewv0IH9FR61lDetaRMD3j2ae3bIXGV+7c+LHCqtQGofSd8PIyV4X6+dHMAnSr60A==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
yoctocolors@2.1.2:
|
||||
resolution: {integrity: sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==}
|
||||
@ -11574,7 +11573,7 @@ snapshots:
|
||||
dependencies:
|
||||
debug: 4.4.3
|
||||
get-stream: 5.2.0
|
||||
yauzl: 2.10.0
|
||||
yauzl: 3.2.1
|
||||
optionalDependencies:
|
||||
'@types/yauzl': 2.10.3
|
||||
transitivePeerDependencies:
|
||||
@ -11606,10 +11605,6 @@ snapshots:
|
||||
dependencies:
|
||||
reusify: 1.1.0
|
||||
|
||||
fd-slicer@1.1.0:
|
||||
dependencies:
|
||||
pend: 1.2.0
|
||||
|
||||
fdir@6.5.0(picomatch@4.0.3):
|
||||
optionalDependencies:
|
||||
picomatch: 4.0.3
|
||||
@ -14279,10 +14274,10 @@ snapshots:
|
||||
y18n: 5.0.8
|
||||
yargs-parser: 21.1.1
|
||||
|
||||
yauzl@2.10.0:
|
||||
yauzl@3.2.1:
|
||||
dependencies:
|
||||
buffer-crc32: 0.2.13
|
||||
fd-slicer: 1.1.0
|
||||
pend: 1.2.0
|
||||
|
||||
yoctocolors@2.1.2: {}
|
||||
|
||||
|
||||
@ -88,6 +88,11 @@ fi
|
||||
pnpm -s exec tsc -p "$A2UI_RENDERER_DIR/tsconfig.json"
|
||||
if command -v rolldown >/dev/null 2>&1 && rolldown --version >/dev/null 2>&1; then
|
||||
rolldown -c "$A2UI_APP_DIR/rolldown.config.mjs"
|
||||
elif [[ -f "$ROOT_DIR/node_modules/.pnpm/node_modules/rolldown/bin/cli.mjs" ]]; then
|
||||
node "$ROOT_DIR/node_modules/.pnpm/node_modules/rolldown/bin/cli.mjs" -c "$A2UI_APP_DIR/rolldown.config.mjs"
|
||||
elif [[ -f "$ROOT_DIR/node_modules/.pnpm/rolldown@1.0.0-rc.9/node_modules/rolldown/bin/cli.mjs" ]]; then
|
||||
node "$ROOT_DIR/node_modules/.pnpm/rolldown@1.0.0-rc.9/node_modules/rolldown/bin/cli.mjs" \
|
||||
-c "$A2UI_APP_DIR/rolldown.config.mjs"
|
||||
else
|
||||
pnpm -s dlx rolldown -c "$A2UI_APP_DIR/rolldown.config.mjs"
|
||||
fi
|
||||
|
||||
@ -113,6 +113,41 @@ function resolveRoute(route) {
|
||||
return { ok: routes.has(current), terminal: current };
|
||||
}
|
||||
|
||||
/** @param {unknown} node */
|
||||
function collectNavPageEntries(node) {
|
||||
/** @type {string[]} */
|
||||
const entries = [];
|
||||
if (Array.isArray(node)) {
|
||||
for (const item of node) {
|
||||
entries.push(...collectNavPageEntries(item));
|
||||
}
|
||||
return entries;
|
||||
}
|
||||
|
||||
if (!node || typeof node !== "object") {
|
||||
return entries;
|
||||
}
|
||||
|
||||
const record = /** @type {Record<string, unknown>} */ (node);
|
||||
if (Array.isArray(record.pages)) {
|
||||
for (const page of record.pages) {
|
||||
if (typeof page === "string") {
|
||||
entries.push(page);
|
||||
} else {
|
||||
entries.push(...collectNavPageEntries(page));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (const value of Object.values(record)) {
|
||||
if (value !== record.pages) {
|
||||
entries.push(...collectNavPageEntries(value));
|
||||
}
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
const markdownLinkRegex = /!?\[[^\]]*\]\(([^)]+)\)/g;
|
||||
|
||||
/** @type {{file: string; line: number; link: string; reason: string}[]} */
|
||||
@ -221,6 +256,22 @@ for (const abs of markdownFiles) {
|
||||
}
|
||||
}
|
||||
|
||||
for (const page of collectNavPageEntries(docsConfig.navigation || [])) {
|
||||
checked++;
|
||||
const route = normalizeRoute(page);
|
||||
const resolvedRoute = resolveRoute(route);
|
||||
if (resolvedRoute.ok) {
|
||||
continue;
|
||||
}
|
||||
|
||||
broken.push({
|
||||
file: "docs.json",
|
||||
line: 0,
|
||||
link: page,
|
||||
reason: `navigation page not published (terminal: ${resolvedRoute.terminal})`,
|
||||
});
|
||||
}
|
||||
|
||||
console.log(`checked_internal_links=${checked}`);
|
||||
console.log(`broken_links=${broken.length}`);
|
||||
|
||||
|
||||
44
scripts/generate-config-doc-baseline.ts
Normal file
44
scripts/generate-config-doc-baseline.ts
Normal file
@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env node
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import { writeConfigDocBaselineStatefile } from "../src/config/doc-baseline.js";
|
||||
|
||||
const args = new Set(process.argv.slice(2));
|
||||
const checkOnly = args.has("--check");
|
||||
|
||||
if (checkOnly && args.has("--write")) {
|
||||
console.error("Use either --check or --write, not both.");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "..");
|
||||
const result = await writeConfigDocBaselineStatefile({
|
||||
repoRoot,
|
||||
check: checkOnly,
|
||||
});
|
||||
|
||||
if (checkOnly) {
|
||||
if (!result.changed) {
|
||||
console.log(
|
||||
`OK ${path.relative(repoRoot, result.jsonPath)} ${path.relative(repoRoot, result.statefilePath)}`,
|
||||
);
|
||||
process.exit(0);
|
||||
}
|
||||
console.error(
|
||||
[
|
||||
"Config baseline drift detected.",
|
||||
`Expected current: ${path.relative(repoRoot, result.jsonPath)}`,
|
||||
`Expected current: ${path.relative(repoRoot, result.statefilePath)}`,
|
||||
"If this config-surface change is intentional, run `pnpm config:docs:gen` and commit the updated baseline files.",
|
||||
"If not intentional, treat this as docs drift or a possible breaking config change and fix the schema/help changes first.",
|
||||
].join("\n"),
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log(
|
||||
[
|
||||
`Wrote ${path.relative(repoRoot, result.jsonPath)}`,
|
||||
`Wrote ${path.relative(repoRoot, result.statefilePath)}`,
|
||||
].join("\n"),
|
||||
);
|
||||
@ -25,9 +25,18 @@ export type ParsedReleaseVersion = {
|
||||
date: Date;
|
||||
};
|
||||
|
||||
export type ParsedReleaseTag = {
|
||||
version: string;
|
||||
packageVersion: string;
|
||||
channel: "stable" | "beta";
|
||||
correctionNumber?: number;
|
||||
date: Date;
|
||||
};
|
||||
|
||||
const STABLE_VERSION_REGEX = /^(?<year>\d{4})\.(?<month>[1-9]\d?)\.(?<day>[1-9]\d?)$/;
|
||||
const BETA_VERSION_REGEX =
|
||||
/^(?<year>\d{4})\.(?<month>[1-9]\d?)\.(?<day>[1-9]\d?)-beta\.(?<beta>[1-9]\d*)$/;
|
||||
const CORRECTION_TAG_REGEX = /^(?<base>\d{4}\.[1-9]\d?\.[1-9]\d?)-(?<correction>[1-9]\d*)$/;
|
||||
const EXPECTED_REPOSITORY_URL = "https://github.com/openclaw/openclaw";
|
||||
const MAX_CALVER_DISTANCE_DAYS = 2;
|
||||
|
||||
@ -107,6 +116,49 @@ export function parseReleaseVersion(version: string): ParsedReleaseVersion | nul
|
||||
return null;
|
||||
}
|
||||
|
||||
export function parseReleaseTagVersion(version: string): ParsedReleaseTag | null {
|
||||
const trimmed = version.trim();
|
||||
if (!trimmed) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const parsedVersion = parseReleaseVersion(trimmed);
|
||||
if (parsedVersion !== null) {
|
||||
return {
|
||||
version: trimmed,
|
||||
packageVersion: parsedVersion.version,
|
||||
channel: parsedVersion.channel,
|
||||
date: parsedVersion.date,
|
||||
correctionNumber: undefined,
|
||||
};
|
||||
}
|
||||
|
||||
const correctionMatch = CORRECTION_TAG_REGEX.exec(trimmed);
|
||||
if (!correctionMatch?.groups) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const baseVersion = correctionMatch.groups.base ?? "";
|
||||
const parsedBaseVersion = parseReleaseVersion(baseVersion);
|
||||
const correctionNumber = Number.parseInt(correctionMatch.groups.correction ?? "", 10);
|
||||
if (
|
||||
parsedBaseVersion === null ||
|
||||
parsedBaseVersion.channel !== "stable" ||
|
||||
!Number.isInteger(correctionNumber) ||
|
||||
correctionNumber < 1
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
version: trimmed,
|
||||
packageVersion: parsedBaseVersion.version,
|
||||
channel: "stable",
|
||||
correctionNumber,
|
||||
date: parsedBaseVersion.date,
|
||||
};
|
||||
}
|
||||
|
||||
function startOfUtcDay(date: Date): number {
|
||||
return Date.UTC(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate());
|
||||
}
|
||||
@ -180,19 +232,25 @@ export function collectReleaseTagErrors(params: {
|
||||
}
|
||||
|
||||
const tagVersion = releaseTag.startsWith("v") ? releaseTag.slice(1) : releaseTag;
|
||||
const parsedTag = parseReleaseVersion(tagVersion);
|
||||
const parsedTag = parseReleaseTagVersion(tagVersion);
|
||||
if (parsedTag === null) {
|
||||
errors.push(
|
||||
`Release tag must match vYYYY.M.D or vYYYY.M.D-beta.N; found "${releaseTag || "<missing>"}".`,
|
||||
`Release tag must match vYYYY.M.D, vYYYY.M.D-beta.N, or fallback correction tag vYYYY.M.D-N; found "${releaseTag || "<missing>"}".`,
|
||||
);
|
||||
}
|
||||
|
||||
const expectedTag = packageVersion ? `v${packageVersion}` : "";
|
||||
if (releaseTag !== expectedTag) {
|
||||
const expectedTag = packageVersion ? `v${packageVersion}` : "<missing>";
|
||||
const expectedCorrectionTag = parsedVersion?.channel === "stable" ? `${expectedTag}-N` : null;
|
||||
const matchesExpectedTag =
|
||||
parsedTag !== null &&
|
||||
parsedVersion !== null &&
|
||||
parsedTag.packageVersion === parsedVersion.version &&
|
||||
parsedTag.channel === parsedVersion.channel;
|
||||
if (!matchesExpectedTag) {
|
||||
errors.push(
|
||||
`Release tag ${releaseTag || "<missing>"} does not match package.json version ${
|
||||
packageVersion || "<missing>"
|
||||
}; expected ${expectedTag || "<missing>"}.`,
|
||||
}; expected ${expectedCorrectionTag ? `${expectedTag} or ${expectedCorrectionTag}` : expectedTag}.`,
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@ -2,6 +2,7 @@ import { spawn } from "node:child_process";
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { channelTestPrefixes } from "../vitest.channel-paths.mjs";
|
||||
|
||||
// On Windows, `.cmd` launchers can fail with `spawn EINVAL` when invoked without a shell
|
||||
// (especially under GitHub Actions + Git Bash). Use `shell: true` and let the shell resolve pnpm.
|
||||
@ -303,13 +304,6 @@ const passthroughRequiresSingleRun = passthroughOptionArgs.some((arg) => {
|
||||
const [flag] = arg.split("=", 1);
|
||||
return SINGLE_RUN_ONLY_FLAGS.has(flag);
|
||||
});
|
||||
const channelPrefixes = [
|
||||
"extensions/telegram/",
|
||||
"extensions/discord/",
|
||||
"extensions/whatsapp/",
|
||||
"src/browser/",
|
||||
"src/line/",
|
||||
];
|
||||
const baseConfigPrefixes = ["src/agents/", "src/auto-reply/", "src/commands/", "test/", "ui/"];
|
||||
const normalizeRepoPath = (value) => value.split(path.sep).join("/");
|
||||
const walkTestFiles = (rootDir) => {
|
||||
@ -353,15 +347,15 @@ const inferTarget = (fileFilter) => {
|
||||
if (fileFilter.endsWith(".e2e.test.ts")) {
|
||||
return { owner: "e2e", isolated };
|
||||
}
|
||||
if (channelTestPrefixes.some((prefix) => fileFilter.startsWith(prefix))) {
|
||||
return { owner: "channels", isolated };
|
||||
}
|
||||
if (fileFilter.startsWith("extensions/")) {
|
||||
return { owner: "extensions", isolated };
|
||||
}
|
||||
if (fileFilter.startsWith("src/gateway/")) {
|
||||
return { owner: "gateway", isolated };
|
||||
}
|
||||
if (channelPrefixes.some((prefix) => fileFilter.startsWith(prefix))) {
|
||||
return { owner: "channels", isolated };
|
||||
}
|
||||
if (baseConfigPrefixes.some((prefix) => fileFilter.startsWith(prefix))) {
|
||||
return { owner: "base", isolated };
|
||||
}
|
||||
|
||||
54
src/agents/auth-profiles.external-cli-sync.test.ts
Normal file
54
src/agents/auth-profiles.external-cli-sync.test.ts
Normal file
@ -0,0 +1,54 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { AuthProfileStore } from "./auth-profiles/types.js";
|
||||
|
||||
const mocks = vi.hoisted(() => ({
|
||||
readCodexCliCredentialsCached: vi.fn(),
|
||||
readQwenCliCredentialsCached: vi.fn(() => null),
|
||||
readMiniMaxCliCredentialsCached: vi.fn(() => null),
|
||||
}));
|
||||
|
||||
vi.mock("./cli-credentials.js", () => ({
|
||||
readCodexCliCredentialsCached: mocks.readCodexCliCredentialsCached,
|
||||
readQwenCliCredentialsCached: mocks.readQwenCliCredentialsCached,
|
||||
readMiniMaxCliCredentialsCached: mocks.readMiniMaxCliCredentialsCached,
|
||||
}));
|
||||
|
||||
const { syncExternalCliCredentials } = await import("./auth-profiles/external-cli-sync.js");
|
||||
const { CODEX_CLI_PROFILE_ID } = await import("./auth-profiles/constants.js");
|
||||
|
||||
const OPENAI_CODEX_DEFAULT_PROFILE_ID = "openai-codex:default";
|
||||
|
||||
describe("syncExternalCliCredentials", () => {
|
||||
it("syncs Codex CLI credentials into the supported default auth profile", () => {
|
||||
const expires = Date.now() + 60_000;
|
||||
mocks.readCodexCliCredentialsCached.mockReturnValue({
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
access: "access-token",
|
||||
refresh: "refresh-token",
|
||||
expires,
|
||||
accountId: "acct_123",
|
||||
});
|
||||
|
||||
const store: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {},
|
||||
};
|
||||
|
||||
const mutated = syncExternalCliCredentials(store);
|
||||
|
||||
expect(mutated).toBe(true);
|
||||
expect(mocks.readCodexCliCredentialsCached).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ ttlMs: expect.any(Number) }),
|
||||
);
|
||||
expect(store.profiles[OPENAI_CODEX_DEFAULT_PROFILE_ID]).toMatchObject({
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
access: "access-token",
|
||||
refresh: "refresh-token",
|
||||
expires,
|
||||
accountId: "acct_123",
|
||||
});
|
||||
expect(store.profiles[CODEX_CLI_PROFILE_ID]).toBeUndefined();
|
||||
});
|
||||
});
|
||||
@ -1,4 +1,5 @@
|
||||
import {
|
||||
readCodexCliCredentialsCached,
|
||||
readQwenCliCredentialsCached,
|
||||
readMiniMaxCliCredentialsCached,
|
||||
} from "../cli-credentials.js";
|
||||
@ -11,6 +12,8 @@ import {
|
||||
} from "./constants.js";
|
||||
import type { AuthProfileCredential, AuthProfileStore, OAuthCredential } from "./types.js";
|
||||
|
||||
const OPENAI_CODEX_DEFAULT_PROFILE_ID = "openai-codex:default";
|
||||
|
||||
function shallowEqualOAuthCredentials(a: OAuthCredential | undefined, b: OAuthCredential): boolean {
|
||||
if (!a) {
|
||||
return false;
|
||||
@ -37,7 +40,11 @@ function isExternalProfileFresh(cred: AuthProfileCredential | undefined, now: nu
|
||||
if (cred.type !== "oauth" && cred.type !== "token") {
|
||||
return false;
|
||||
}
|
||||
if (cred.provider !== "qwen-portal" && cred.provider !== "minimax-portal") {
|
||||
if (
|
||||
cred.provider !== "qwen-portal" &&
|
||||
cred.provider !== "minimax-portal" &&
|
||||
cred.provider !== "openai-codex"
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
if (typeof cred.expires !== "number") {
|
||||
@ -82,7 +89,8 @@ function syncExternalCliCredentialsForProvider(
|
||||
}
|
||||
|
||||
/**
|
||||
* Sync OAuth credentials from external CLI tools (Qwen Code CLI, MiniMax CLI) into the store.
|
||||
* Sync OAuth credentials from external CLI tools (Qwen Code CLI, MiniMax CLI, Codex CLI)
|
||||
* into the store.
|
||||
*
|
||||
* Returns true if any credentials were updated.
|
||||
*/
|
||||
@ -130,6 +138,17 @@ export function syncExternalCliCredentials(store: AuthProfileStore): boolean {
|
||||
) {
|
||||
mutated = true;
|
||||
}
|
||||
if (
|
||||
syncExternalCliCredentialsForProvider(
|
||||
store,
|
||||
OPENAI_CODEX_DEFAULT_PROFILE_ID,
|
||||
"openai-codex",
|
||||
() => readCodexCliCredentialsCached({ ttlMs: EXTERNAL_CLI_SYNC_TTL_MS }),
|
||||
now,
|
||||
)
|
||||
) {
|
||||
mutated = true;
|
||||
}
|
||||
|
||||
return mutated;
|
||||
}
|
||||
|
||||
@ -64,11 +64,11 @@ export function handleAutoCompactionEnd(
|
||||
emitAgentEvent({
|
||||
runId: ctx.params.runId,
|
||||
stream: "compaction",
|
||||
data: { phase: "end", willRetry },
|
||||
data: { phase: "end", willRetry, completed: hasResult && !wasAborted },
|
||||
});
|
||||
void ctx.params.onAgentEvent?.({
|
||||
stream: "compaction",
|
||||
data: { phase: "end", willRetry },
|
||||
data: { phase: "end", willRetry, completed: hasResult && !wasAborted },
|
||||
});
|
||||
|
||||
// Run after_compaction plugin hook (fire-and-forget)
|
||||
|
||||
@ -157,11 +157,9 @@ describe("createOpenClawCodingTools", () => {
|
||||
expect(schema.type).toBe("object");
|
||||
expect(schema.anyOf).toBeUndefined();
|
||||
});
|
||||
it("mentions Chrome extension relay in browser tool description", () => {
|
||||
it("mentions user browser profile in browser tool description", () => {
|
||||
const browser = createBrowserTool();
|
||||
expect(browser.description).toMatch(/Chrome extension/i);
|
||||
expect(browser.description).toMatch(/profile="user"/i);
|
||||
expect(browser.description).toMatch(/profile="chrome-relay"/i);
|
||||
});
|
||||
it("keeps browser tool schema properties after normalization", () => {
|
||||
const browser = defaultTools.find((tool) => tool.name === "browser");
|
||||
|
||||
@ -74,7 +74,7 @@ function formatConsoleToolResult(result: {
|
||||
}
|
||||
|
||||
function isChromeStaleTargetError(profile: string | undefined, err: unknown): boolean {
|
||||
if (profile !== "chrome-relay" && profile !== "chrome") {
|
||||
if (profile !== "chrome-relay" && profile !== "chrome" && profile !== "user") {
|
||||
return false;
|
||||
}
|
||||
const msg = String(err);
|
||||
@ -314,7 +314,7 @@ export async function executeActAction(params: {
|
||||
})) as { tabs?: unknown[] }
|
||||
).tabs ?? [])
|
||||
: await browserTabs(baseUrl, { profile }).catch(() => []);
|
||||
// Some Chrome relay targetIds can go stale between snapshots and actions.
|
||||
// Some user-browser targetIds can go stale between snapshots and actions.
|
||||
// Only retry safe read-only actions, and only when exactly one tab remains attached.
|
||||
if (retryRequest && canRetryChromeActWithoutTargetId(request) && tabs.length === 1) {
|
||||
try {
|
||||
@ -334,13 +334,17 @@ export async function executeActAction(params: {
|
||||
}
|
||||
}
|
||||
if (!tabs.length) {
|
||||
// Extension relay profiles need the toolbar icon click; Chrome MCP just needs Chrome running.
|
||||
const isRelayProfile = profile === "chrome-relay" || profile === "chrome";
|
||||
throw new Error(
|
||||
"No Chrome tabs are attached via the OpenClaw Browser Relay extension. Click the toolbar icon on the tab you want to control (badge ON), then retry.",
|
||||
isRelayProfile
|
||||
? "No Chrome tabs are attached via the OpenClaw Browser Relay extension. Click the toolbar icon on the tab you want to control (badge ON), then retry."
|
||||
: `No Chrome tabs found for profile="${profile}". Make sure Chrome (v146+) is running and has open tabs, then retry.`,
|
||||
{ cause: err },
|
||||
);
|
||||
}
|
||||
throw new Error(
|
||||
`Chrome tab not found (stale targetId?). Run action=tabs profile="chrome-relay" and use one of the returned targetIds.`,
|
||||
`Chrome tab not found (stale targetId?). Run action=tabs profile="${profile}" and use one of the returned targetIds.`,
|
||||
{ cause: err },
|
||||
);
|
||||
}
|
||||
|
||||
@ -287,9 +287,9 @@ describe("browser tool snapshot maxChars", () => {
|
||||
expect(opts?.mode).toBeUndefined();
|
||||
});
|
||||
|
||||
it("defaults to host when using profile=chrome-relay (even in sandboxed sessions)", async () => {
|
||||
it("defaults to host when using an explicit extension relay profile (even in sandboxed sessions)", async () => {
|
||||
setResolvedBrowserProfiles({
|
||||
"chrome-relay": {
|
||||
relay: {
|
||||
driver: "extension",
|
||||
cdpUrl: "http://127.0.0.1:18792",
|
||||
color: "#0066CC",
|
||||
@ -298,14 +298,14 @@ describe("browser tool snapshot maxChars", () => {
|
||||
const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" });
|
||||
await tool.execute?.("call-1", {
|
||||
action: "snapshot",
|
||||
profile: "chrome-relay",
|
||||
profile: "relay",
|
||||
snapshotFormat: "ai",
|
||||
});
|
||||
|
||||
expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith(
|
||||
undefined,
|
||||
expect.objectContaining({
|
||||
profile: "chrome-relay",
|
||||
profile: "relay",
|
||||
}),
|
||||
);
|
||||
});
|
||||
@ -366,12 +366,12 @@ describe("browser tool snapshot maxChars", () => {
|
||||
|
||||
it("lets the server choose snapshot format when the user does not request one", async () => {
|
||||
const tool = createBrowserTool();
|
||||
await tool.execute?.("call-1", { action: "snapshot", profile: "chrome-relay" });
|
||||
await tool.execute?.("call-1", { action: "snapshot", profile: "user" });
|
||||
|
||||
expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith(
|
||||
undefined,
|
||||
expect.objectContaining({
|
||||
profile: "chrome-relay",
|
||||
profile: "user",
|
||||
}),
|
||||
);
|
||||
const opts = browserClientMocks.browserSnapshot.mock.calls.at(-1)?.[1] as
|
||||
@ -438,21 +438,17 @@ describe("browser tool snapshot maxChars", () => {
|
||||
expect(gatewayMocks.callGatewayTool).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("keeps chrome-relay profile on host when node proxy is available", async () => {
|
||||
it("keeps user profile on host when node proxy is available", async () => {
|
||||
mockSingleBrowserProxyNode();
|
||||
setResolvedBrowserProfiles({
|
||||
"chrome-relay": {
|
||||
driver: "extension",
|
||||
cdpUrl: "http://127.0.0.1:18792",
|
||||
color: "#0066CC",
|
||||
},
|
||||
user: { driver: "existing-session", attachOnly: true, color: "#00AA00" },
|
||||
});
|
||||
const tool = createBrowserTool();
|
||||
await tool.execute?.("call-1", { action: "status", profile: "chrome-relay" });
|
||||
await tool.execute?.("call-1", { action: "status", profile: "user" });
|
||||
|
||||
expect(browserClientMocks.browserStatus).toHaveBeenCalledWith(
|
||||
undefined,
|
||||
expect.objectContaining({ profile: "chrome-relay" }),
|
||||
expect.objectContaining({ profile: "user" }),
|
||||
);
|
||||
expect(gatewayMocks.callGatewayTool).not.toHaveBeenCalled();
|
||||
});
|
||||
@ -745,7 +741,7 @@ describe("browser tool external content wrapping", () => {
|
||||
describe("browser tool act stale target recovery", () => {
|
||||
registerBrowserToolAfterEachReset();
|
||||
|
||||
it("retries safe chrome-relay act once without targetId when exactly one tab remains", async () => {
|
||||
it("retries safe user-browser act once without targetId when exactly one tab remains", async () => {
|
||||
browserActionsMocks.browserAct
|
||||
.mockRejectedValueOnce(new Error("404: tab not found"))
|
||||
.mockResolvedValueOnce({ ok: true });
|
||||
@ -754,7 +750,7 @@ describe("browser tool act stale target recovery", () => {
|
||||
const tool = createBrowserTool();
|
||||
const result = await tool.execute?.("call-1", {
|
||||
action: "act",
|
||||
profile: "chrome-relay",
|
||||
profile: "user",
|
||||
request: {
|
||||
kind: "hover",
|
||||
targetId: "stale-tab",
|
||||
@ -767,18 +763,18 @@ describe("browser tool act stale target recovery", () => {
|
||||
1,
|
||||
undefined,
|
||||
expect.objectContaining({ targetId: "stale-tab", kind: "hover", ref: "btn-1" }),
|
||||
expect.objectContaining({ profile: "chrome-relay" }),
|
||||
expect.objectContaining({ profile: "user" }),
|
||||
);
|
||||
expect(browserActionsMocks.browserAct).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
undefined,
|
||||
expect.not.objectContaining({ targetId: expect.anything() }),
|
||||
expect.objectContaining({ profile: "chrome-relay" }),
|
||||
expect.objectContaining({ profile: "user" }),
|
||||
);
|
||||
expect(result?.details).toMatchObject({ ok: true });
|
||||
});
|
||||
|
||||
it("does not retry mutating chrome-relay act requests without targetId", async () => {
|
||||
it("does not retry mutating user-browser act requests without targetId", async () => {
|
||||
browserActionsMocks.browserAct.mockRejectedValueOnce(new Error("404: tab not found"));
|
||||
browserClientMocks.browserTabs.mockResolvedValueOnce([{ targetId: "only-tab" }]);
|
||||
|
||||
@ -786,14 +782,14 @@ describe("browser tool act stale target recovery", () => {
|
||||
await expect(
|
||||
tool.execute?.("call-1", {
|
||||
action: "act",
|
||||
profile: "chrome-relay",
|
||||
profile: "user",
|
||||
request: {
|
||||
kind: "click",
|
||||
targetId: "stale-tab",
|
||||
ref: "btn-1",
|
||||
},
|
||||
}),
|
||||
).rejects.toThrow(/Run action=tabs profile="chrome-relay"/i);
|
||||
).rejects.toThrow(/Run action=tabs profile="user"/i);
|
||||
|
||||
expect(browserActionsMocks.browserAct).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
@ -293,10 +293,6 @@ function shouldPreferHostForProfile(profileName: string | undefined) {
|
||||
return capabilities.requiresRelay || capabilities.usesChromeMcp;
|
||||
}
|
||||
|
||||
function isHostOnlyProfileName(profileName: string | undefined) {
|
||||
return profileName === "user" || profileName === "chrome-relay";
|
||||
}
|
||||
|
||||
export function createBrowserTool(opts?: {
|
||||
sandboxBridgeUrl?: string;
|
||||
allowHostControl?: boolean;
|
||||
@ -311,11 +307,8 @@ export function createBrowserTool(opts?: {
|
||||
description: [
|
||||
"Control the browser via OpenClaw's browser control server (status/start/stop/profiles/tabs/open/snapshot/screenshot/actions).",
|
||||
"Browser choice: omit profile by default for the isolated OpenClaw-managed browser (`openclaw`).",
|
||||
'For the logged-in user browser on the local host, prefer profile="user". Use it only when existing logins/cookies matter and the user is present to click/approve any browser attach prompt.',
|
||||
'Use profile="chrome-relay" only for the Chrome extension / Browser Relay / toolbar-button attach-tab flow, or when the user explicitly asks for the extension relay.',
|
||||
'If the user mentions the Chrome extension / Browser Relay / toolbar button / “attach tab”, ALWAYS prefer profile="chrome-relay". Otherwise prefer profile="user" over the extension relay for user-browser work.',
|
||||
'For the logged-in user browser on the local host, use profile="user". Chrome (v146+) must be running. Use only when existing logins/cookies matter and the user is present.',
|
||||
'When a node-hosted browser proxy is available, the tool may auto-route to it. Pin a node with node=<id|name> or target="node".',
|
||||
'User-browser flows need user interaction: profile="user" may require approving a browser attach prompt; profile="chrome-relay" needs the user to click the OpenClaw Browser Relay toolbar icon on the tab (badge ON). If user presence is unclear, ask first.',
|
||||
"When using refs from snapshot (e.g. e12), keep the same tab: prefer passing targetId from the snapshot response into subsequent actions (act/click/type/etc).",
|
||||
'For stable, self-resolving refs across calls, use snapshot with refs="aria" (Playwright aria-ref ids). Default refs="role" are role+name-based.',
|
||||
"Use snapshot+act for UI automation. Avoid act:wait by default; use only in exceptional cases when no reliable UI state exists.",
|
||||
@ -333,7 +326,9 @@ export function createBrowserTool(opts?: {
|
||||
if (requestedNode && target && target !== "node") {
|
||||
throw new Error('node is only supported with target="node".');
|
||||
}
|
||||
if (isHostOnlyProfileName(profile)) {
|
||||
// User-browser profiles (existing-session, extension relay) are host-only.
|
||||
const isUserBrowserProfile = shouldPreferHostForProfile(profile);
|
||||
if (isUserBrowserProfile) {
|
||||
if (requestedNode || target === "node") {
|
||||
throw new Error(`profile="${profile}" only supports the local host browser.`);
|
||||
}
|
||||
@ -342,10 +337,9 @@ export function createBrowserTool(opts?: {
|
||||
`profile="${profile}" cannot use the sandbox browser; use target="host" or omit target.`,
|
||||
);
|
||||
}
|
||||
}
|
||||
if (!target && !requestedNode && shouldPreferHostForProfile(profile)) {
|
||||
// Local host user-browser profiles should not silently bind to sandbox/node browsers.
|
||||
target = "host";
|
||||
if (!target && !requestedNode) {
|
||||
target = "host";
|
||||
}
|
||||
}
|
||||
|
||||
const nodeTarget = await resolveBrowserNodeTarget({
|
||||
|
||||
@ -67,7 +67,7 @@ export type AgentRunLoopResult =
|
||||
fallbackModel?: string;
|
||||
fallbackAttempts: RuntimeFallbackAttempt[];
|
||||
didLogHeartbeatStrip: boolean;
|
||||
autoCompactionCompleted: boolean;
|
||||
autoCompactionCount: number;
|
||||
/** Payload keys sent directly (not via pipeline) during tool flush. */
|
||||
directlySentBlockKeys?: Set<string>;
|
||||
}
|
||||
@ -103,7 +103,7 @@ export async function runAgentTurnWithFallback(params: {
|
||||
}): Promise<AgentRunLoopResult> {
|
||||
const TRANSIENT_HTTP_RETRY_DELAY_MS = 2_500;
|
||||
let didLogHeartbeatStrip = false;
|
||||
let autoCompactionCompleted = false;
|
||||
let autoCompactionCount = 0;
|
||||
// Track payloads sent directly (not via pipeline) during tool flush to avoid duplicates.
|
||||
const directlySentBlockKeys = new Set<string>();
|
||||
|
||||
@ -319,154 +319,165 @@ export async function runAgentTurnWithFallback(params: {
|
||||
},
|
||||
);
|
||||
return (async () => {
|
||||
const result = await runEmbeddedPiAgent({
|
||||
...embeddedContext,
|
||||
trigger: params.isHeartbeat ? "heartbeat" : "user",
|
||||
groupId: resolveGroupSessionKey(params.sessionCtx)?.id,
|
||||
groupChannel:
|
||||
params.sessionCtx.GroupChannel?.trim() ?? params.sessionCtx.GroupSubject?.trim(),
|
||||
groupSpace: params.sessionCtx.GroupSpace?.trim() ?? undefined,
|
||||
...senderContext,
|
||||
...runBaseParams,
|
||||
prompt: params.commandBody,
|
||||
extraSystemPrompt: params.followupRun.run.extraSystemPrompt,
|
||||
toolResultFormat: (() => {
|
||||
const channel = resolveMessageChannel(
|
||||
params.sessionCtx.Surface,
|
||||
params.sessionCtx.Provider,
|
||||
);
|
||||
if (!channel) {
|
||||
return "markdown";
|
||||
}
|
||||
return isMarkdownCapableMessageChannel(channel) ? "markdown" : "plain";
|
||||
})(),
|
||||
suppressToolErrorWarnings: params.opts?.suppressToolErrorWarnings,
|
||||
bootstrapContextMode: params.opts?.bootstrapContextMode,
|
||||
bootstrapContextRunKind: params.opts?.isHeartbeat ? "heartbeat" : "default",
|
||||
images: params.opts?.images,
|
||||
abortSignal: params.opts?.abortSignal,
|
||||
blockReplyBreak: params.resolvedBlockStreamingBreak,
|
||||
blockReplyChunking: params.blockReplyChunking,
|
||||
onPartialReply: async (payload) => {
|
||||
const textForTyping = await handlePartialForTyping(payload);
|
||||
if (!params.opts?.onPartialReply || textForTyping === undefined) {
|
||||
return;
|
||||
}
|
||||
await params.opts.onPartialReply({
|
||||
text: textForTyping,
|
||||
mediaUrls: payload.mediaUrls,
|
||||
});
|
||||
},
|
||||
onAssistantMessageStart: async () => {
|
||||
await params.typingSignals.signalMessageStart();
|
||||
await params.opts?.onAssistantMessageStart?.();
|
||||
},
|
||||
onReasoningStream:
|
||||
params.typingSignals.shouldStartOnReasoning || params.opts?.onReasoningStream
|
||||
? async (payload) => {
|
||||
await params.typingSignals.signalReasoningDelta();
|
||||
await params.opts?.onReasoningStream?.({
|
||||
text: payload.text,
|
||||
mediaUrls: payload.mediaUrls,
|
||||
});
|
||||
}
|
||||
: undefined,
|
||||
onReasoningEnd: params.opts?.onReasoningEnd,
|
||||
onAgentEvent: async (evt) => {
|
||||
// Signal run start only after the embedded agent emits real activity.
|
||||
const hasLifecyclePhase =
|
||||
evt.stream === "lifecycle" && typeof evt.data.phase === "string";
|
||||
if (evt.stream !== "lifecycle" || hasLifecyclePhase) {
|
||||
notifyAgentRunStart();
|
||||
}
|
||||
// Trigger typing when tools start executing.
|
||||
// Must await to ensure typing indicator starts before tool summaries are emitted.
|
||||
if (evt.stream === "tool") {
|
||||
const phase = typeof evt.data.phase === "string" ? evt.data.phase : "";
|
||||
const name = typeof evt.data.name === "string" ? evt.data.name : undefined;
|
||||
if (phase === "start" || phase === "update") {
|
||||
await params.typingSignals.signalToolStart();
|
||||
await params.opts?.onToolStart?.({ name, phase });
|
||||
let attemptCompactionCount = 0;
|
||||
try {
|
||||
const result = await runEmbeddedPiAgent({
|
||||
...embeddedContext,
|
||||
trigger: params.isHeartbeat ? "heartbeat" : "user",
|
||||
groupId: resolveGroupSessionKey(params.sessionCtx)?.id,
|
||||
groupChannel:
|
||||
params.sessionCtx.GroupChannel?.trim() ?? params.sessionCtx.GroupSubject?.trim(),
|
||||
groupSpace: params.sessionCtx.GroupSpace?.trim() ?? undefined,
|
||||
...senderContext,
|
||||
...runBaseParams,
|
||||
prompt: params.commandBody,
|
||||
extraSystemPrompt: params.followupRun.run.extraSystemPrompt,
|
||||
toolResultFormat: (() => {
|
||||
const channel = resolveMessageChannel(
|
||||
params.sessionCtx.Surface,
|
||||
params.sessionCtx.Provider,
|
||||
);
|
||||
if (!channel) {
|
||||
return "markdown";
|
||||
}
|
||||
}
|
||||
// Track auto-compaction completion and notify UI layer
|
||||
if (evt.stream === "compaction") {
|
||||
const phase = typeof evt.data.phase === "string" ? evt.data.phase : "";
|
||||
if (phase === "start") {
|
||||
await params.opts?.onCompactionStart?.();
|
||||
return isMarkdownCapableMessageChannel(channel) ? "markdown" : "plain";
|
||||
})(),
|
||||
suppressToolErrorWarnings: params.opts?.suppressToolErrorWarnings,
|
||||
bootstrapContextMode: params.opts?.bootstrapContextMode,
|
||||
bootstrapContextRunKind: params.opts?.isHeartbeat ? "heartbeat" : "default",
|
||||
images: params.opts?.images,
|
||||
abortSignal: params.opts?.abortSignal,
|
||||
blockReplyBreak: params.resolvedBlockStreamingBreak,
|
||||
blockReplyChunking: params.blockReplyChunking,
|
||||
onPartialReply: async (payload) => {
|
||||
const textForTyping = await handlePartialForTyping(payload);
|
||||
if (!params.opts?.onPartialReply || textForTyping === undefined) {
|
||||
return;
|
||||
}
|
||||
if (phase === "end") {
|
||||
autoCompactionCompleted = true;
|
||||
await params.opts?.onCompactionEnd?.();
|
||||
}
|
||||
}
|
||||
},
|
||||
// Always pass onBlockReply so flushBlockReplyBuffer works before tool execution,
|
||||
// even when regular block streaming is disabled. The handler sends directly
|
||||
// via opts.onBlockReply when the pipeline isn't available.
|
||||
onBlockReply: params.opts?.onBlockReply
|
||||
? createBlockReplyDeliveryHandler({
|
||||
onBlockReply: params.opts.onBlockReply,
|
||||
currentMessageId:
|
||||
params.sessionCtx.MessageSidFull ?? params.sessionCtx.MessageSid,
|
||||
normalizeStreamingText,
|
||||
applyReplyToMode: params.applyReplyToMode,
|
||||
normalizeMediaPaths: normalizeReplyMediaPaths,
|
||||
typingSignals: params.typingSignals,
|
||||
blockStreamingEnabled: params.blockStreamingEnabled,
|
||||
blockReplyPipeline,
|
||||
directlySentBlockKeys,
|
||||
})
|
||||
: undefined,
|
||||
onBlockReplyFlush:
|
||||
params.blockStreamingEnabled && blockReplyPipeline
|
||||
? async () => {
|
||||
await blockReplyPipeline.flush({ force: true });
|
||||
}
|
||||
: undefined,
|
||||
shouldEmitToolResult: params.shouldEmitToolResult,
|
||||
shouldEmitToolOutput: params.shouldEmitToolOutput,
|
||||
bootstrapPromptWarningSignaturesSeen,
|
||||
bootstrapPromptWarningSignature:
|
||||
bootstrapPromptWarningSignaturesSeen[
|
||||
bootstrapPromptWarningSignaturesSeen.length - 1
|
||||
],
|
||||
onToolResult: onToolResult
|
||||
? (() => {
|
||||
// Serialize tool result delivery to preserve message ordering.
|
||||
// Without this, concurrent tool callbacks race through typing signals
|
||||
// and message sends, causing out-of-order delivery to the user.
|
||||
// See: https://github.com/openclaw/openclaw/issues/11044
|
||||
let toolResultChain: Promise<void> = Promise.resolve();
|
||||
return (payload: ReplyPayload) => {
|
||||
toolResultChain = toolResultChain
|
||||
.then(async () => {
|
||||
const { text, skip } = normalizeStreamingText(payload);
|
||||
if (skip) {
|
||||
return;
|
||||
}
|
||||
await params.typingSignals.signalTextDelta(text);
|
||||
await onToolResult({
|
||||
...payload,
|
||||
text,
|
||||
});
|
||||
})
|
||||
.catch((err) => {
|
||||
// Keep chain healthy after an error so later tool results still deliver.
|
||||
logVerbose(`tool result delivery failed: ${String(err)}`);
|
||||
await params.opts.onPartialReply({
|
||||
text: textForTyping,
|
||||
mediaUrls: payload.mediaUrls,
|
||||
});
|
||||
},
|
||||
onAssistantMessageStart: async () => {
|
||||
await params.typingSignals.signalMessageStart();
|
||||
await params.opts?.onAssistantMessageStart?.();
|
||||
},
|
||||
onReasoningStream:
|
||||
params.typingSignals.shouldStartOnReasoning || params.opts?.onReasoningStream
|
||||
? async (payload) => {
|
||||
await params.typingSignals.signalReasoningDelta();
|
||||
await params.opts?.onReasoningStream?.({
|
||||
text: payload.text,
|
||||
mediaUrls: payload.mediaUrls,
|
||||
});
|
||||
const task = toolResultChain.finally(() => {
|
||||
params.pendingToolTasks.delete(task);
|
||||
});
|
||||
params.pendingToolTasks.add(task);
|
||||
};
|
||||
})()
|
||||
: undefined,
|
||||
});
|
||||
bootstrapPromptWarningSignaturesSeen = resolveBootstrapWarningSignaturesSeen(
|
||||
result.meta?.systemPromptReport,
|
||||
);
|
||||
return result;
|
||||
}
|
||||
: undefined,
|
||||
onReasoningEnd: params.opts?.onReasoningEnd,
|
||||
onAgentEvent: async (evt) => {
|
||||
// Signal run start only after the embedded agent emits real activity.
|
||||
const hasLifecyclePhase =
|
||||
evt.stream === "lifecycle" && typeof evt.data.phase === "string";
|
||||
if (evt.stream !== "lifecycle" || hasLifecyclePhase) {
|
||||
notifyAgentRunStart();
|
||||
}
|
||||
// Trigger typing when tools start executing.
|
||||
// Must await to ensure typing indicator starts before tool summaries are emitted.
|
||||
if (evt.stream === "tool") {
|
||||
const phase = typeof evt.data.phase === "string" ? evt.data.phase : "";
|
||||
const name = typeof evt.data.name === "string" ? evt.data.name : undefined;
|
||||
if (phase === "start" || phase === "update") {
|
||||
await params.typingSignals.signalToolStart();
|
||||
await params.opts?.onToolStart?.({ name, phase });
|
||||
}
|
||||
}
|
||||
// Track auto-compaction completion and notify UI layer.
|
||||
if (evt.stream === "compaction") {
|
||||
const phase = typeof evt.data.phase === "string" ? evt.data.phase : "";
|
||||
if (phase === "start") {
|
||||
await params.opts?.onCompactionStart?.();
|
||||
}
|
||||
const completed = evt.data?.completed === true;
|
||||
if (phase === "end" && completed) {
|
||||
attemptCompactionCount += 1;
|
||||
await params.opts?.onCompactionEnd?.();
|
||||
}
|
||||
}
|
||||
},
|
||||
// Always pass onBlockReply so flushBlockReplyBuffer works before tool execution,
|
||||
// even when regular block streaming is disabled. The handler sends directly
|
||||
// via opts.onBlockReply when the pipeline isn't available.
|
||||
onBlockReply: params.opts?.onBlockReply
|
||||
? createBlockReplyDeliveryHandler({
|
||||
onBlockReply: params.opts.onBlockReply,
|
||||
currentMessageId:
|
||||
params.sessionCtx.MessageSidFull ?? params.sessionCtx.MessageSid,
|
||||
normalizeStreamingText,
|
||||
applyReplyToMode: params.applyReplyToMode,
|
||||
normalizeMediaPaths: normalizeReplyMediaPaths,
|
||||
typingSignals: params.typingSignals,
|
||||
blockStreamingEnabled: params.blockStreamingEnabled,
|
||||
blockReplyPipeline,
|
||||
directlySentBlockKeys,
|
||||
})
|
||||
: undefined,
|
||||
onBlockReplyFlush:
|
||||
params.blockStreamingEnabled && blockReplyPipeline
|
||||
? async () => {
|
||||
await blockReplyPipeline.flush({ force: true });
|
||||
}
|
||||
: undefined,
|
||||
shouldEmitToolResult: params.shouldEmitToolResult,
|
||||
shouldEmitToolOutput: params.shouldEmitToolOutput,
|
||||
bootstrapPromptWarningSignaturesSeen,
|
||||
bootstrapPromptWarningSignature:
|
||||
bootstrapPromptWarningSignaturesSeen[
|
||||
bootstrapPromptWarningSignaturesSeen.length - 1
|
||||
],
|
||||
onToolResult: onToolResult
|
||||
? (() => {
|
||||
// Serialize tool result delivery to preserve message ordering.
|
||||
// Without this, concurrent tool callbacks race through typing signals
|
||||
// and message sends, causing out-of-order delivery to the user.
|
||||
// See: https://github.com/openclaw/openclaw/issues/11044
|
||||
let toolResultChain: Promise<void> = Promise.resolve();
|
||||
return (payload: ReplyPayload) => {
|
||||
toolResultChain = toolResultChain
|
||||
.then(async () => {
|
||||
const { text, skip } = normalizeStreamingText(payload);
|
||||
if (skip) {
|
||||
return;
|
||||
}
|
||||
await params.typingSignals.signalTextDelta(text);
|
||||
await onToolResult({
|
||||
...payload,
|
||||
text,
|
||||
});
|
||||
})
|
||||
.catch((err) => {
|
||||
// Keep chain healthy after an error so later tool results still deliver.
|
||||
logVerbose(`tool result delivery failed: ${String(err)}`);
|
||||
});
|
||||
const task = toolResultChain.finally(() => {
|
||||
params.pendingToolTasks.delete(task);
|
||||
});
|
||||
params.pendingToolTasks.add(task);
|
||||
};
|
||||
})()
|
||||
: undefined,
|
||||
});
|
||||
bootstrapPromptWarningSignaturesSeen = resolveBootstrapWarningSignaturesSeen(
|
||||
result.meta?.systemPromptReport,
|
||||
);
|
||||
const resultCompactionCount = Math.max(
|
||||
0,
|
||||
result.meta?.agentMeta?.compactionCount ?? 0,
|
||||
);
|
||||
attemptCompactionCount = Math.max(attemptCompactionCount, resultCompactionCount);
|
||||
return result;
|
||||
} finally {
|
||||
autoCompactionCount += attemptCompactionCount;
|
||||
}
|
||||
})();
|
||||
},
|
||||
});
|
||||
@ -654,7 +665,7 @@ export async function runAgentTurnWithFallback(params: {
|
||||
fallbackModel,
|
||||
fallbackAttempts,
|
||||
didLogHeartbeatStrip,
|
||||
autoCompactionCompleted,
|
||||
autoCompactionCount,
|
||||
directlySentBlockKeys: directlySentBlockKeys.size > 0 ? directlySentBlockKeys : undefined,
|
||||
};
|
||||
}
|
||||
|
||||
@ -322,7 +322,7 @@ describe("runReplyAgent auto-compaction token update", () => {
|
||||
extraSystemPrompt?: string;
|
||||
onAgentEvent?: (evt: {
|
||||
stream?: string;
|
||||
data?: { phase?: string; willRetry?: boolean };
|
||||
data?: { phase?: string; willRetry?: boolean; completed?: boolean };
|
||||
}) => void;
|
||||
};
|
||||
|
||||
@ -397,7 +397,10 @@ describe("runReplyAgent auto-compaction token update", () => {
|
||||
runEmbeddedPiAgentMock.mockImplementation(async (params: EmbeddedRunParams) => {
|
||||
// Simulate auto-compaction during agent run
|
||||
params.onAgentEvent?.({ stream: "compaction", data: { phase: "start" } });
|
||||
params.onAgentEvent?.({ stream: "compaction", data: { phase: "end", willRetry: false } });
|
||||
params.onAgentEvent?.({
|
||||
stream: "compaction",
|
||||
data: { phase: "end", willRetry: false, completed: true },
|
||||
});
|
||||
return {
|
||||
payloads: [{ text: "done" }],
|
||||
meta: {
|
||||
@ -455,6 +458,238 @@ describe("runReplyAgent auto-compaction token update", () => {
|
||||
expect(stored[sessionKey].compactionCount).toBe(1);
|
||||
});
|
||||
|
||||
it("tracks auto-compaction from embedded result metadata even when no compaction event is emitted", async () => {
|
||||
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-compact-meta-"));
|
||||
const storePath = path.join(tmp, "sessions.json");
|
||||
const sessionKey = "main";
|
||||
const sessionEntry = {
|
||||
sessionId: "session",
|
||||
updatedAt: Date.now(),
|
||||
totalTokens: 181_000,
|
||||
compactionCount: 0,
|
||||
};
|
||||
|
||||
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
|
||||
|
||||
runEmbeddedPiAgentMock.mockResolvedValue({
|
||||
payloads: [{ text: "done" }],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
usage: { input: 190_000, output: 8_000, total: 198_000 },
|
||||
lastCallUsage: { input: 10_000, output: 3_000, total: 13_000 },
|
||||
compactionCount: 2,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const config = {
|
||||
agents: { defaults: { compaction: { memoryFlush: { enabled: false } } } },
|
||||
};
|
||||
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
|
||||
storePath,
|
||||
sessionEntry,
|
||||
config,
|
||||
});
|
||||
|
||||
await runReplyAgent({
|
||||
commandBody: "hello",
|
||||
followupRun,
|
||||
queueKey: "main",
|
||||
resolvedQueue,
|
||||
shouldSteer: false,
|
||||
shouldFollowup: false,
|
||||
isActive: false,
|
||||
isStreaming: false,
|
||||
typing,
|
||||
sessionCtx,
|
||||
sessionEntry,
|
||||
sessionStore: { [sessionKey]: sessionEntry },
|
||||
sessionKey,
|
||||
storePath,
|
||||
defaultModel: "anthropic/claude-opus-4-5",
|
||||
agentCfgContextTokens: 200_000,
|
||||
resolvedVerboseLevel: "off",
|
||||
isNewSession: false,
|
||||
blockStreamingEnabled: false,
|
||||
resolvedBlockStreamingBreak: "message_end",
|
||||
shouldInjectGroupIntro: false,
|
||||
typingMode: "instant",
|
||||
});
|
||||
|
||||
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
|
||||
expect(stored[sessionKey].totalTokens).toBe(10_000);
|
||||
expect(stored[sessionKey].compactionCount).toBe(2);
|
||||
});
|
||||
|
||||
it("accumulates compactions across fallback attempts without double-counting a single attempt", async () => {
|
||||
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-compact-fallback-"));
|
||||
const storePath = path.join(tmp, "sessions.json");
|
||||
const sessionKey = "main";
|
||||
const sessionEntry = {
|
||||
sessionId: "session",
|
||||
updatedAt: Date.now(),
|
||||
totalTokens: 181_000,
|
||||
compactionCount: 0,
|
||||
};
|
||||
|
||||
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
|
||||
|
||||
runWithModelFallbackMock.mockImplementationOnce(async ({ run }: RunWithModelFallbackParams) => {
|
||||
try {
|
||||
await run("anthropic", "claude");
|
||||
} catch {
|
||||
// Expected first-attempt failure.
|
||||
}
|
||||
return {
|
||||
result: await run("openai", "gpt-5.2"),
|
||||
provider: "openai",
|
||||
model: "gpt-5.2",
|
||||
attempts: [{ provider: "anthropic", model: "claude", error: "attempt failed" }],
|
||||
};
|
||||
});
|
||||
|
||||
runEmbeddedPiAgentMock
|
||||
.mockImplementationOnce(async (params: EmbeddedRunParams) => {
|
||||
params.onAgentEvent?.({
|
||||
stream: "compaction",
|
||||
data: { phase: "end", willRetry: true, completed: true },
|
||||
});
|
||||
throw new Error("attempt failed");
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
payloads: [{ text: "done" }],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
usage: { input: 190_000, output: 8_000, total: 198_000 },
|
||||
lastCallUsage: { input: 10_000, output: 3_000, total: 13_000 },
|
||||
compactionCount: 2,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const config = {
|
||||
agents: { defaults: { compaction: { memoryFlush: { enabled: false } } } },
|
||||
};
|
||||
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
|
||||
storePath,
|
||||
sessionEntry,
|
||||
config,
|
||||
});
|
||||
|
||||
await runReplyAgent({
|
||||
commandBody: "hello",
|
||||
followupRun,
|
||||
queueKey: "main",
|
||||
resolvedQueue,
|
||||
shouldSteer: false,
|
||||
shouldFollowup: false,
|
||||
isActive: false,
|
||||
isStreaming: false,
|
||||
typing,
|
||||
sessionCtx,
|
||||
sessionEntry,
|
||||
sessionStore: { [sessionKey]: sessionEntry },
|
||||
sessionKey,
|
||||
storePath,
|
||||
defaultModel: "anthropic/claude-opus-4-5",
|
||||
agentCfgContextTokens: 200_000,
|
||||
resolvedVerboseLevel: "off",
|
||||
isNewSession: false,
|
||||
blockStreamingEnabled: false,
|
||||
resolvedBlockStreamingBreak: "message_end",
|
||||
shouldInjectGroupIntro: false,
|
||||
typingMode: "instant",
|
||||
});
|
||||
|
||||
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
|
||||
expect(stored[sessionKey].totalTokens).toBe(10_000);
|
||||
expect(stored[sessionKey].compactionCount).toBe(3);
|
||||
});
|
||||
|
||||
it("does not count failed compaction end events from earlier fallback attempts", async () => {
|
||||
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-compact-fallback-failed-"));
|
||||
const storePath = path.join(tmp, "sessions.json");
|
||||
const sessionKey = "main";
|
||||
const sessionEntry = {
|
||||
sessionId: "session",
|
||||
updatedAt: Date.now(),
|
||||
totalTokens: 181_000,
|
||||
compactionCount: 0,
|
||||
};
|
||||
|
||||
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
|
||||
|
||||
runWithModelFallbackMock.mockImplementationOnce(async ({ run }: RunWithModelFallbackParams) => {
|
||||
try {
|
||||
await run("anthropic", "claude");
|
||||
} catch {
|
||||
// Expected first-attempt failure.
|
||||
}
|
||||
return {
|
||||
result: await run("openai", "gpt-5.2"),
|
||||
provider: "openai",
|
||||
model: "gpt-5.2",
|
||||
attempts: [{ provider: "anthropic", model: "claude", error: "attempt failed" }],
|
||||
};
|
||||
});
|
||||
|
||||
runEmbeddedPiAgentMock
|
||||
.mockImplementationOnce(async (params: EmbeddedRunParams) => {
|
||||
params.onAgentEvent?.({
|
||||
stream: "compaction",
|
||||
data: { phase: "end", willRetry: true, completed: false },
|
||||
});
|
||||
throw new Error("attempt failed");
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
payloads: [{ text: "done" }],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
usage: { input: 190_000, output: 8_000, total: 198_000 },
|
||||
lastCallUsage: { input: 10_000, output: 3_000, total: 13_000 },
|
||||
compactionCount: 2,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const config = {
|
||||
agents: { defaults: { compaction: { memoryFlush: { enabled: false } } } },
|
||||
};
|
||||
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
|
||||
storePath,
|
||||
sessionEntry,
|
||||
config,
|
||||
});
|
||||
|
||||
await runReplyAgent({
|
||||
commandBody: "hello",
|
||||
followupRun,
|
||||
queueKey: "main",
|
||||
resolvedQueue,
|
||||
shouldSteer: false,
|
||||
shouldFollowup: false,
|
||||
isActive: false,
|
||||
isStreaming: false,
|
||||
typing,
|
||||
sessionCtx,
|
||||
sessionEntry,
|
||||
sessionStore: { [sessionKey]: sessionEntry },
|
||||
sessionKey,
|
||||
storePath,
|
||||
defaultModel: "anthropic/claude-opus-4-5",
|
||||
agentCfgContextTokens: 200_000,
|
||||
resolvedVerboseLevel: "off",
|
||||
isNewSession: false,
|
||||
blockStreamingEnabled: false,
|
||||
resolvedBlockStreamingBreak: "message_end",
|
||||
shouldInjectGroupIntro: false,
|
||||
typingMode: "instant",
|
||||
});
|
||||
|
||||
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
|
||||
expect(stored[sessionKey].totalTokens).toBe(10_000);
|
||||
expect(stored[sessionKey].compactionCount).toBe(2);
|
||||
});
|
||||
it("updates totalTokens from lastCallUsage even without compaction", async () => {
|
||||
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-usage-last-"));
|
||||
const storePath = path.join(tmp, "sessions.json");
|
||||
@ -537,7 +772,10 @@ describe("runReplyAgent auto-compaction token update", () => {
|
||||
|
||||
runEmbeddedPiAgentMock.mockImplementation(async (params: EmbeddedRunParams) => {
|
||||
params.onAgentEvent?.({ stream: "compaction", data: { phase: "start" } });
|
||||
params.onAgentEvent?.({ stream: "compaction", data: { phase: "end", willRetry: false } });
|
||||
params.onAgentEvent?.({
|
||||
stream: "compaction",
|
||||
data: { phase: "end", willRetry: false, completed: true },
|
||||
});
|
||||
return {
|
||||
payloads: [{ text: "done" }],
|
||||
meta: {
|
||||
|
||||
@ -380,7 +380,7 @@ export async function runReplyAgent(params: {
|
||||
fallbackAttempts,
|
||||
directlySentBlockKeys,
|
||||
} = runOutcome;
|
||||
let { didLogHeartbeatStrip, autoCompactionCompleted } = runOutcome;
|
||||
let { didLogHeartbeatStrip, autoCompactionCount } = runOutcome;
|
||||
|
||||
if (
|
||||
shouldInjectGroupIntro &&
|
||||
@ -664,12 +664,13 @@ export async function runReplyAgent(params: {
|
||||
}
|
||||
}
|
||||
|
||||
if (autoCompactionCompleted) {
|
||||
if (autoCompactionCount > 0) {
|
||||
const count = await incrementRunCompactionCount({
|
||||
sessionEntry: activeSessionEntry,
|
||||
sessionStore: activeSessionStore,
|
||||
sessionKey,
|
||||
storePath,
|
||||
amount: autoCompactionCount,
|
||||
lastCallUsage: runResult.meta?.agentMeta?.lastCallUsage,
|
||||
contextTokensUsed,
|
||||
});
|
||||
|
||||
@ -71,7 +71,7 @@ function mockCompactionRun(params: {
|
||||
}) => {
|
||||
args.onAgentEvent?.({
|
||||
stream: "compaction",
|
||||
data: { phase: "end", willRetry: params.willRetry },
|
||||
data: { phase: "end", willRetry: params.willRetry, completed: true },
|
||||
});
|
||||
return params.result;
|
||||
},
|
||||
@ -126,6 +126,110 @@ describe("createFollowupRunner compaction", () => {
|
||||
expect(firstCall?.[0]?.text).toContain("Auto-compaction complete");
|
||||
expect(sessionStore.main.compactionCount).toBe(1);
|
||||
});
|
||||
|
||||
it("tracks auto-compaction from embedded result metadata even when no compaction event is emitted", async () => {
|
||||
const storePath = path.join(
|
||||
await fs.mkdtemp(path.join(tmpdir(), "openclaw-compaction-meta-")),
|
||||
"sessions.json",
|
||||
);
|
||||
const sessionEntry: SessionEntry = {
|
||||
sessionId: "session",
|
||||
updatedAt: Date.now(),
|
||||
};
|
||||
const sessionStore: Record<string, SessionEntry> = {
|
||||
main: sessionEntry,
|
||||
};
|
||||
const onBlockReply = vi.fn(async () => {});
|
||||
|
||||
runEmbeddedPiAgentMock.mockResolvedValueOnce({
|
||||
payloads: [{ text: "final" }],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
compactionCount: 2,
|
||||
lastCallUsage: { input: 10_000, output: 3_000, total: 13_000 },
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const runner = createFollowupRunner({
|
||||
opts: { onBlockReply },
|
||||
typing: createMockTypingController(),
|
||||
typingMode: "instant",
|
||||
sessionEntry,
|
||||
sessionStore,
|
||||
sessionKey: "main",
|
||||
storePath,
|
||||
defaultModel: "anthropic/claude-opus-4-5",
|
||||
});
|
||||
|
||||
const queued = createQueuedRun({
|
||||
run: {
|
||||
verboseLevel: "on",
|
||||
},
|
||||
});
|
||||
|
||||
await runner(queued);
|
||||
|
||||
expect(onBlockReply).toHaveBeenCalled();
|
||||
const firstCall = (onBlockReply.mock.calls as unknown as Array<Array<{ text?: string }>>)[0];
|
||||
expect(firstCall?.[0]?.text).toContain("Auto-compaction complete");
|
||||
expect(sessionStore.main.compactionCount).toBe(2);
|
||||
});
|
||||
|
||||
it("does not count failed compaction end events in followup runs", async () => {
|
||||
const storePath = path.join(
|
||||
await fs.mkdtemp(path.join(tmpdir(), "openclaw-compaction-failed-")),
|
||||
"sessions.json",
|
||||
);
|
||||
const sessionEntry: SessionEntry = {
|
||||
sessionId: "session",
|
||||
updatedAt: Date.now(),
|
||||
};
|
||||
const sessionStore: Record<string, SessionEntry> = {
|
||||
main: sessionEntry,
|
||||
};
|
||||
const onBlockReply = vi.fn(async () => {});
|
||||
|
||||
const runner = createFollowupRunner({
|
||||
opts: { onBlockReply },
|
||||
typing: createMockTypingController(),
|
||||
typingMode: "instant",
|
||||
sessionEntry,
|
||||
sessionStore,
|
||||
sessionKey: "main",
|
||||
storePath,
|
||||
defaultModel: "anthropic/claude-opus-4-5",
|
||||
});
|
||||
|
||||
const queued = createQueuedRun({
|
||||
run: {
|
||||
verboseLevel: "on",
|
||||
},
|
||||
});
|
||||
|
||||
runEmbeddedPiAgentMock.mockImplementationOnce(async (args) => {
|
||||
args.onAgentEvent?.({
|
||||
stream: "compaction",
|
||||
data: { phase: "end", willRetry: false, completed: false },
|
||||
});
|
||||
return {
|
||||
payloads: [{ text: "final" }],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
compactionCount: 0,
|
||||
lastCallUsage: { input: 10_000, output: 3_000, total: 13_000 },
|
||||
},
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
await runner(queued);
|
||||
|
||||
expect(onBlockReply).toHaveBeenCalledTimes(1);
|
||||
const firstCall = (onBlockReply.mock.calls as unknown as Array<Array<{ text?: string }>>)[0];
|
||||
expect(firstCall?.[0]?.text).toBe("final");
|
||||
expect(sessionStore.main.compactionCount).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("createFollowupRunner bootstrap warning dedupe", () => {
|
||||
|
||||
@ -145,7 +145,7 @@ export function createFollowupRunner(params: {
|
||||
isControlUiVisible: shouldSurfaceToControlUi,
|
||||
});
|
||||
}
|
||||
let autoCompactionCompleted = false;
|
||||
let autoCompactionCount = 0;
|
||||
let runResult: Awaited<ReturnType<typeof runEmbeddedPiAgent>>;
|
||||
let fallbackProvider = queued.run.provider;
|
||||
let fallbackModel = queued.run.model;
|
||||
@ -168,68 +168,81 @@ export function createFollowupRunner(params: {
|
||||
}),
|
||||
run: async (provider, model, runOptions) => {
|
||||
const authProfile = resolveRunAuthProfile(queued.run, provider);
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId: queued.run.sessionId,
|
||||
sessionKey: queued.run.sessionKey,
|
||||
agentId: queued.run.agentId,
|
||||
trigger: "user",
|
||||
messageChannel: queued.originatingChannel ?? undefined,
|
||||
messageProvider: queued.run.messageProvider,
|
||||
agentAccountId: queued.run.agentAccountId,
|
||||
messageTo: queued.originatingTo,
|
||||
messageThreadId: queued.originatingThreadId,
|
||||
currentChannelId: queued.originatingTo,
|
||||
currentThreadTs:
|
||||
queued.originatingThreadId != null ? String(queued.originatingThreadId) : undefined,
|
||||
groupId: queued.run.groupId,
|
||||
groupChannel: queued.run.groupChannel,
|
||||
groupSpace: queued.run.groupSpace,
|
||||
senderId: queued.run.senderId,
|
||||
senderName: queued.run.senderName,
|
||||
senderUsername: queued.run.senderUsername,
|
||||
senderE164: queued.run.senderE164,
|
||||
senderIsOwner: queued.run.senderIsOwner,
|
||||
sessionFile: queued.run.sessionFile,
|
||||
agentDir: queued.run.agentDir,
|
||||
workspaceDir: queued.run.workspaceDir,
|
||||
config: queued.run.config,
|
||||
skillsSnapshot: queued.run.skillsSnapshot,
|
||||
prompt: queued.prompt,
|
||||
extraSystemPrompt: queued.run.extraSystemPrompt,
|
||||
ownerNumbers: queued.run.ownerNumbers,
|
||||
enforceFinalTag: queued.run.enforceFinalTag,
|
||||
provider,
|
||||
model,
|
||||
...authProfile,
|
||||
thinkLevel: queued.run.thinkLevel,
|
||||
verboseLevel: queued.run.verboseLevel,
|
||||
reasoningLevel: queued.run.reasoningLevel,
|
||||
suppressToolErrorWarnings: opts?.suppressToolErrorWarnings,
|
||||
execOverrides: queued.run.execOverrides,
|
||||
bashElevated: queued.run.bashElevated,
|
||||
timeoutMs: queued.run.timeoutMs,
|
||||
runId,
|
||||
allowTransientCooldownProbe: runOptions?.allowTransientCooldownProbe,
|
||||
blockReplyBreak: queued.run.blockReplyBreak,
|
||||
bootstrapPromptWarningSignaturesSeen,
|
||||
bootstrapPromptWarningSignature:
|
||||
bootstrapPromptWarningSignaturesSeen[
|
||||
bootstrapPromptWarningSignaturesSeen.length - 1
|
||||
],
|
||||
onAgentEvent: (evt) => {
|
||||
if (evt.stream !== "compaction") {
|
||||
return;
|
||||
}
|
||||
const phase = typeof evt.data.phase === "string" ? evt.data.phase : "";
|
||||
if (phase === "end") {
|
||||
autoCompactionCompleted = true;
|
||||
}
|
||||
},
|
||||
});
|
||||
bootstrapPromptWarningSignaturesSeen = resolveBootstrapWarningSignaturesSeen(
|
||||
result.meta?.systemPromptReport,
|
||||
);
|
||||
return result;
|
||||
let attemptCompactionCount = 0;
|
||||
try {
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId: queued.run.sessionId,
|
||||
sessionKey: queued.run.sessionKey,
|
||||
agentId: queued.run.agentId,
|
||||
trigger: "user",
|
||||
messageChannel: queued.originatingChannel ?? undefined,
|
||||
messageProvider: queued.run.messageProvider,
|
||||
agentAccountId: queued.run.agentAccountId,
|
||||
messageTo: queued.originatingTo,
|
||||
messageThreadId: queued.originatingThreadId,
|
||||
currentChannelId: queued.originatingTo,
|
||||
currentThreadTs:
|
||||
queued.originatingThreadId != null
|
||||
? String(queued.originatingThreadId)
|
||||
: undefined,
|
||||
groupId: queued.run.groupId,
|
||||
groupChannel: queued.run.groupChannel,
|
||||
groupSpace: queued.run.groupSpace,
|
||||
senderId: queued.run.senderId,
|
||||
senderName: queued.run.senderName,
|
||||
senderUsername: queued.run.senderUsername,
|
||||
senderE164: queued.run.senderE164,
|
||||
senderIsOwner: queued.run.senderIsOwner,
|
||||
sessionFile: queued.run.sessionFile,
|
||||
agentDir: queued.run.agentDir,
|
||||
workspaceDir: queued.run.workspaceDir,
|
||||
config: queued.run.config,
|
||||
skillsSnapshot: queued.run.skillsSnapshot,
|
||||
prompt: queued.prompt,
|
||||
extraSystemPrompt: queued.run.extraSystemPrompt,
|
||||
ownerNumbers: queued.run.ownerNumbers,
|
||||
enforceFinalTag: queued.run.enforceFinalTag,
|
||||
provider,
|
||||
model,
|
||||
...authProfile,
|
||||
thinkLevel: queued.run.thinkLevel,
|
||||
verboseLevel: queued.run.verboseLevel,
|
||||
reasoningLevel: queued.run.reasoningLevel,
|
||||
suppressToolErrorWarnings: opts?.suppressToolErrorWarnings,
|
||||
execOverrides: queued.run.execOverrides,
|
||||
bashElevated: queued.run.bashElevated,
|
||||
timeoutMs: queued.run.timeoutMs,
|
||||
runId,
|
||||
allowTransientCooldownProbe: runOptions?.allowTransientCooldownProbe,
|
||||
blockReplyBreak: queued.run.blockReplyBreak,
|
||||
bootstrapPromptWarningSignaturesSeen,
|
||||
bootstrapPromptWarningSignature:
|
||||
bootstrapPromptWarningSignaturesSeen[
|
||||
bootstrapPromptWarningSignaturesSeen.length - 1
|
||||
],
|
||||
onAgentEvent: (evt) => {
|
||||
if (evt.stream !== "compaction") {
|
||||
return;
|
||||
}
|
||||
const phase = typeof evt.data.phase === "string" ? evt.data.phase : "";
|
||||
const completed = evt.data?.completed === true;
|
||||
if (phase === "end" && completed) {
|
||||
attemptCompactionCount += 1;
|
||||
}
|
||||
},
|
||||
});
|
||||
bootstrapPromptWarningSignaturesSeen = resolveBootstrapWarningSignaturesSeen(
|
||||
result.meta?.systemPromptReport,
|
||||
);
|
||||
const resultCompactionCount = Math.max(
|
||||
0,
|
||||
result.meta?.agentMeta?.compactionCount ?? 0,
|
||||
);
|
||||
attemptCompactionCount = Math.max(attemptCompactionCount, resultCompactionCount);
|
||||
return result;
|
||||
} finally {
|
||||
autoCompactionCount += attemptCompactionCount;
|
||||
}
|
||||
},
|
||||
});
|
||||
runResult = fallbackResult.result;
|
||||
@ -326,12 +339,13 @@ export function createFollowupRunner(params: {
|
||||
return;
|
||||
}
|
||||
|
||||
if (autoCompactionCompleted) {
|
||||
if (autoCompactionCount > 0) {
|
||||
const count = await incrementRunCompactionCount({
|
||||
sessionEntry,
|
||||
sessionStore,
|
||||
sessionKey,
|
||||
storePath,
|
||||
amount: autoCompactionCount,
|
||||
lastCallUsage: runResult.meta?.agentMeta?.lastCallUsage,
|
||||
contextTokensUsed,
|
||||
});
|
||||
|
||||
@ -445,6 +445,23 @@ describe("incrementCompactionCount", () => {
|
||||
expect(stored[sessionKey].outputTokens).toBeUndefined();
|
||||
});
|
||||
|
||||
it("increments compaction count by an explicit amount", async () => {
|
||||
const entry = { sessionId: "s1", updatedAt: Date.now(), compactionCount: 2 } as SessionEntry;
|
||||
const { storePath, sessionKey, sessionStore } = await createCompactionSessionFixture(entry);
|
||||
|
||||
const count = await incrementCompactionCount({
|
||||
sessionEntry: entry,
|
||||
sessionStore,
|
||||
sessionKey,
|
||||
storePath,
|
||||
amount: 2,
|
||||
});
|
||||
expect(count).toBe(4);
|
||||
|
||||
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
|
||||
expect(stored[sessionKey].compactionCount).toBe(4);
|
||||
});
|
||||
|
||||
it("does not update totalTokens when tokensAfter is not provided", async () => {
|
||||
const entry = {
|
||||
sessionId: "s1",
|
||||
|
||||
@ -8,6 +8,7 @@ type IncrementRunCompactionCountParams = Omit<
|
||||
Parameters<typeof incrementCompactionCount>[0],
|
||||
"tokensAfter"
|
||||
> & {
|
||||
amount?: number;
|
||||
lastCallUsage?: NormalizedUsage;
|
||||
contextTokensUsed?: number;
|
||||
};
|
||||
@ -30,6 +31,7 @@ export async function incrementRunCompactionCount(
|
||||
sessionStore: params.sessionStore,
|
||||
sessionKey: params.sessionKey,
|
||||
storePath: params.storePath,
|
||||
amount: params.amount,
|
||||
tokensAfter: tokensAfterCompaction,
|
||||
});
|
||||
}
|
||||
|
||||
@ -255,6 +255,7 @@ export async function incrementCompactionCount(params: {
|
||||
sessionKey?: string;
|
||||
storePath?: string;
|
||||
now?: number;
|
||||
amount?: number;
|
||||
/** Token count after compaction - if provided, updates session token counts */
|
||||
tokensAfter?: number;
|
||||
}): Promise<number | undefined> {
|
||||
@ -264,6 +265,7 @@ export async function incrementCompactionCount(params: {
|
||||
sessionKey,
|
||||
storePath,
|
||||
now = Date.now(),
|
||||
amount = 1,
|
||||
tokensAfter,
|
||||
} = params;
|
||||
if (!sessionStore || !sessionKey) {
|
||||
@ -273,7 +275,8 @@ export async function incrementCompactionCount(params: {
|
||||
if (!entry) {
|
||||
return undefined;
|
||||
}
|
||||
const nextCount = (entry.compactionCount ?? 0) + 1;
|
||||
const incrementBy = Math.max(0, amount);
|
||||
const nextCount = (entry.compactionCount ?? 0) + incrementBy;
|
||||
// Build update payload with compaction count and optionally updated token counts
|
||||
const updates: Partial<SessionEntry> = {
|
||||
compactionCount: nextCount,
|
||||
|
||||
@ -266,10 +266,6 @@ describe("browser server-context listKnownProfileNames", () => {
|
||||
]),
|
||||
};
|
||||
|
||||
expect(listKnownProfileNames(state).toSorted()).toEqual([
|
||||
"chrome",
|
||||
"openclaw",
|
||||
"stale-removed",
|
||||
]);
|
||||
expect(listKnownProfileNames(state).toSorted()).toEqual(["openclaw", "stale-removed", "user"]);
|
||||
});
|
||||
});
|
||||
|
||||
@ -193,7 +193,7 @@ async function createRealSession(profileName: string): Promise<ChromeMcpSession>
|
||||
await client.close().catch(() => {});
|
||||
throw new BrowserProfileUnavailableError(
|
||||
`Chrome MCP existing-session attach failed for profile "${profileName}". ` +
|
||||
`Make sure Chrome is running, enable chrome://inspect/#remote-debugging, and approve the connection. ` +
|
||||
`Make sure Chrome (v146+) is running. ` +
|
||||
`Details: ${String(err)}`,
|
||||
);
|
||||
}
|
||||
|
||||
@ -26,10 +26,8 @@ describe("browser config", () => {
|
||||
expect(user?.driver).toBe("existing-session");
|
||||
expect(user?.cdpPort).toBe(0);
|
||||
expect(user?.cdpUrl).toBe("");
|
||||
const chromeRelay = resolveProfile(resolved, "chrome-relay");
|
||||
expect(chromeRelay?.driver).toBe("extension");
|
||||
expect(chromeRelay?.cdpPort).toBe(18792);
|
||||
expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:18792");
|
||||
// chrome-relay is no longer auto-created
|
||||
expect(resolveProfile(resolved, "chrome-relay")).toBe(null);
|
||||
expect(resolved.remoteCdpTimeoutMs).toBe(1500);
|
||||
expect(resolved.remoteCdpHandshakeTimeoutMs).toBe(3000);
|
||||
});
|
||||
@ -38,10 +36,7 @@ describe("browser config", () => {
|
||||
withEnv({ OPENCLAW_GATEWAY_PORT: "19001" }, () => {
|
||||
const resolved = resolveBrowserConfig(undefined);
|
||||
expect(resolved.controlPort).toBe(19003);
|
||||
const chromeRelay = resolveProfile(resolved, "chrome-relay");
|
||||
expect(chromeRelay?.driver).toBe("extension");
|
||||
expect(chromeRelay?.cdpPort).toBe(19004);
|
||||
expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:19004");
|
||||
expect(resolveProfile(resolved, "chrome-relay")).toBe(null);
|
||||
|
||||
const openclaw = resolveProfile(resolved, "openclaw");
|
||||
expect(openclaw?.cdpPort).toBe(19012);
|
||||
@ -53,10 +48,7 @@ describe("browser config", () => {
|
||||
withEnv({ OPENCLAW_GATEWAY_PORT: undefined }, () => {
|
||||
const resolved = resolveBrowserConfig(undefined, { gateway: { port: 19011 } });
|
||||
expect(resolved.controlPort).toBe(19013);
|
||||
const chromeRelay = resolveProfile(resolved, "chrome-relay");
|
||||
expect(chromeRelay?.driver).toBe("extension");
|
||||
expect(chromeRelay?.cdpPort).toBe(19014);
|
||||
expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:19014");
|
||||
expect(resolveProfile(resolved, "chrome-relay")).toBe(null);
|
||||
|
||||
const openclaw = resolveProfile(resolved, "openclaw");
|
||||
expect(openclaw?.cdpPort).toBe(19022);
|
||||
@ -209,16 +201,6 @@ describe("browser config", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("does not add the built-in chrome-relay profile if the derived relay port is already used", () => {
|
||||
const resolved = resolveBrowserConfig({
|
||||
profiles: {
|
||||
openclaw: { cdpPort: 18792, color: "#FF4500" },
|
||||
},
|
||||
});
|
||||
expect(resolveProfile(resolved, "chrome-relay")).toBe(null);
|
||||
expect(resolved.defaultProfile).toBe("openclaw");
|
||||
});
|
||||
|
||||
it("defaults extraArgs to empty array when not provided", () => {
|
||||
const resolved = resolveBrowserConfig(undefined);
|
||||
expect(resolved.extraArgs).toEqual([]);
|
||||
@ -307,6 +289,7 @@ describe("browser config", () => {
|
||||
const resolved = resolveBrowserConfig({
|
||||
profiles: {
|
||||
"chrome-live": { driver: "existing-session", attachOnly: true, color: "#00AA00" },
|
||||
relay: { driver: "extension", cdpUrl: "http://127.0.0.1:18792", color: "#0066CC" },
|
||||
work: { cdpPort: 18801, color: "#0066CC" },
|
||||
},
|
||||
});
|
||||
@ -317,7 +300,7 @@ describe("browser config", () => {
|
||||
const managed = resolveProfile(resolved, "openclaw")!;
|
||||
expect(getBrowserProfileCapabilities(managed).usesChromeMcp).toBe(false);
|
||||
|
||||
const extension = resolveProfile(resolved, "chrome-relay")!;
|
||||
const extension = resolveProfile(resolved, "relay")!;
|
||||
expect(getBrowserProfileCapabilities(extension).usesChromeMcp).toBe(false);
|
||||
|
||||
const work = resolveProfile(resolved, "work")!;
|
||||
@ -358,17 +341,17 @@ describe("browser config", () => {
|
||||
it("explicit defaultProfile config overrides defaults in headless mode", () => {
|
||||
const resolved = resolveBrowserConfig({
|
||||
headless: true,
|
||||
defaultProfile: "chrome-relay",
|
||||
defaultProfile: "user",
|
||||
});
|
||||
expect(resolved.defaultProfile).toBe("chrome-relay");
|
||||
expect(resolved.defaultProfile).toBe("user");
|
||||
});
|
||||
|
||||
it("explicit defaultProfile config overrides defaults in noSandbox mode", () => {
|
||||
const resolved = resolveBrowserConfig({
|
||||
noSandbox: true,
|
||||
defaultProfile: "chrome-relay",
|
||||
defaultProfile: "user",
|
||||
});
|
||||
expect(resolved.defaultProfile).toBe("chrome-relay");
|
||||
expect(resolved.defaultProfile).toBe("user");
|
||||
});
|
||||
|
||||
it("allows custom profile as default even in headless mode", () => {
|
||||
|
||||
@ -14,7 +14,7 @@ import {
|
||||
DEFAULT_BROWSER_DEFAULT_PROFILE_NAME,
|
||||
DEFAULT_OPENCLAW_BROWSER_PROFILE_NAME,
|
||||
} from "./constants.js";
|
||||
import { CDP_PORT_RANGE_START, getUsedPorts } from "./profiles.js";
|
||||
import { CDP_PORT_RANGE_START } from "./profiles.js";
|
||||
|
||||
export type ResolvedBrowserConfig = {
|
||||
enabled: boolean;
|
||||
@ -197,36 +197,6 @@ function ensureDefaultUserBrowserProfile(
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure a built-in "chrome-relay" profile exists for the Chrome extension relay.
|
||||
*
|
||||
* Note: this is an OpenClaw browser profile (routing config), not a Chrome user profile.
|
||||
* It points at the local relay CDP endpoint (controlPort + 1).
|
||||
*/
|
||||
function ensureDefaultChromeRelayProfile(
|
||||
profiles: Record<string, BrowserProfileConfig>,
|
||||
controlPort: number,
|
||||
): Record<string, BrowserProfileConfig> {
|
||||
const result = { ...profiles };
|
||||
if (result["chrome-relay"]) {
|
||||
return result;
|
||||
}
|
||||
const relayPort = controlPort + 1;
|
||||
if (!Number.isFinite(relayPort) || relayPort <= 0 || relayPort > 65535) {
|
||||
return result;
|
||||
}
|
||||
// Avoid adding the built-in profile if the derived relay port is already used by another profile
|
||||
// (legacy single-profile configs may use controlPort+1 for openclaw/openclaw CDP).
|
||||
if (getUsedPorts(result).has(relayPort)) {
|
||||
return result;
|
||||
}
|
||||
result["chrome-relay"] = {
|
||||
driver: "extension",
|
||||
cdpUrl: `http://127.0.0.1:${relayPort}`,
|
||||
color: "#00AA00",
|
||||
};
|
||||
return result;
|
||||
}
|
||||
export function resolveBrowserConfig(
|
||||
cfg: BrowserConfig | undefined,
|
||||
rootConfig?: OpenClawConfig,
|
||||
@ -286,17 +256,14 @@ export function resolveBrowserConfig(
|
||||
const legacyCdpPort = rawCdpUrl ? cdpInfo.port : undefined;
|
||||
const isWsUrl = cdpInfo.parsed.protocol === "ws:" || cdpInfo.parsed.protocol === "wss:";
|
||||
const legacyCdpUrl = rawCdpUrl && isWsUrl ? cdpInfo.normalized : undefined;
|
||||
const profiles = ensureDefaultChromeRelayProfile(
|
||||
ensureDefaultUserBrowserProfile(
|
||||
ensureDefaultProfile(
|
||||
cfg?.profiles,
|
||||
defaultColor,
|
||||
legacyCdpPort,
|
||||
cdpPortRangeStart,
|
||||
legacyCdpUrl,
|
||||
),
|
||||
const profiles = ensureDefaultUserBrowserProfile(
|
||||
ensureDefaultProfile(
|
||||
cfg?.profiles,
|
||||
defaultColor,
|
||||
legacyCdpPort,
|
||||
cdpPortRangeStart,
|
||||
legacyCdpUrl,
|
||||
),
|
||||
controlPort,
|
||||
);
|
||||
const cdpProtocol = cdpInfo.parsed.protocol === "https:" ? "https" : "http";
|
||||
|
||||
|
||||
@ -3,10 +3,15 @@ import { resolveBrowserConfig, resolveProfile } from "../config.js";
|
||||
import { resolveSnapshotPlan } from "./agent.snapshot.plan.js";
|
||||
|
||||
describe("resolveSnapshotPlan", () => {
|
||||
it("defaults chrome-relay snapshots to aria when format is omitted", () => {
|
||||
const resolved = resolveBrowserConfig({});
|
||||
const profile = resolveProfile(resolved, "chrome-relay");
|
||||
it("defaults extension relay snapshots to aria when format is omitted", () => {
|
||||
const resolved = resolveBrowserConfig({
|
||||
profiles: {
|
||||
relay: { driver: "extension", cdpUrl: "http://127.0.0.1:18792", color: "#0066CC" },
|
||||
},
|
||||
});
|
||||
const profile = resolveProfile(resolved, "relay");
|
||||
expect(profile).toBeTruthy();
|
||||
expect(profile?.driver).toBe("extension");
|
||||
|
||||
const plan = resolveSnapshotPlan({
|
||||
profile: profile as NonNullable<typeof profile>,
|
||||
|
||||
@ -92,10 +92,10 @@ describe("browser server-context ensureTabAvailable", () => {
|
||||
getState: () => state,
|
||||
});
|
||||
|
||||
const chrome = ctx.forProfile("chrome");
|
||||
const first = await chrome.ensureTabAvailable();
|
||||
const chromeRelay = ctx.forProfile("chrome-relay");
|
||||
const first = await chromeRelay.ensureTabAvailable();
|
||||
expect(first.targetId).toBe("A");
|
||||
const second = await chrome.ensureTabAvailable();
|
||||
const second = await chromeRelay.ensureTabAvailable();
|
||||
expect(second.targetId).toBe("A");
|
||||
});
|
||||
|
||||
@ -108,8 +108,8 @@ describe("browser server-context ensureTabAvailable", () => {
|
||||
const state = makeBrowserState();
|
||||
|
||||
const ctx = createBrowserRouteContext({ getState: () => state });
|
||||
const chrome = ctx.forProfile("chrome");
|
||||
await expect(chrome.ensureTabAvailable("NOT_A_TAB")).rejects.toThrow(/tab not found/i);
|
||||
const chromeRelay = ctx.forProfile("chrome-relay");
|
||||
await expect(chromeRelay.ensureTabAvailable("NOT_A_TAB")).rejects.toThrow(/tab not found/i);
|
||||
});
|
||||
|
||||
it("returns a descriptive message when no extension tabs are attached", async () => {
|
||||
@ -118,8 +118,8 @@ describe("browser server-context ensureTabAvailable", () => {
|
||||
const state = makeBrowserState();
|
||||
|
||||
const ctx = createBrowserRouteContext({ getState: () => state });
|
||||
const chrome = ctx.forProfile("chrome");
|
||||
await expect(chrome.ensureTabAvailable()).rejects.toThrow(/no attached Chrome tabs/i);
|
||||
const chromeRelay = ctx.forProfile("chrome-relay");
|
||||
await expect(chromeRelay.ensureTabAvailable()).rejects.toThrow(/no attached Chrome tabs/i);
|
||||
});
|
||||
|
||||
it("waits briefly for extension tabs to reappear when a previous target exists", async () => {
|
||||
@ -138,11 +138,11 @@ describe("browser server-context ensureTabAvailable", () => {
|
||||
const state = makeBrowserState();
|
||||
|
||||
const ctx = createBrowserRouteContext({ getState: () => state });
|
||||
const chrome = ctx.forProfile("chrome");
|
||||
const first = await chrome.ensureTabAvailable();
|
||||
const chromeRelay = ctx.forProfile("chrome-relay");
|
||||
const first = await chromeRelay.ensureTabAvailable();
|
||||
expect(first.targetId).toBe("A");
|
||||
|
||||
const secondPromise = chrome.ensureTabAvailable();
|
||||
const secondPromise = chromeRelay.ensureTabAvailable();
|
||||
await vi.advanceTimersByTimeAsync(250);
|
||||
const second = await secondPromise;
|
||||
expect(second.targetId).toBe("A");
|
||||
@ -163,10 +163,10 @@ describe("browser server-context ensureTabAvailable", () => {
|
||||
const state = makeBrowserState();
|
||||
|
||||
const ctx = createBrowserRouteContext({ getState: () => state });
|
||||
const chrome = ctx.forProfile("chrome");
|
||||
await chrome.ensureTabAvailable();
|
||||
const chromeRelay = ctx.forProfile("chrome-relay");
|
||||
await chromeRelay.ensureTabAvailable();
|
||||
|
||||
const pending = expect(chrome.ensureTabAvailable()).rejects.toThrow(
|
||||
const pending = expect(chromeRelay.ensureTabAvailable()).rejects.toThrow(
|
||||
/no attached Chrome tabs/i,
|
||||
);
|
||||
await vi.advanceTimersByTimeAsync(3_500);
|
||||
|
||||
@ -22,6 +22,11 @@ const CANVAS_WS_OPEN_TIMEOUT_MS = 2_000;
|
||||
const CANVAS_RELOAD_TIMEOUT_MS = 4_000;
|
||||
const CANVAS_RELOAD_TEST_TIMEOUT_MS = 12_000;
|
||||
|
||||
function isLoopbackBindDenied(error: unknown) {
|
||||
const code = (error as NodeJS.ErrnoException | undefined)?.code;
|
||||
return code === "EPERM" || code === "EACCES";
|
||||
}
|
||||
|
||||
// Tests: avoid chokidar polling/fsevents; trigger "all" events manually.
|
||||
vi.mock("chokidar", () => {
|
||||
const createWatcher = () => {
|
||||
@ -102,8 +107,15 @@ describe("canvas host", () => {
|
||||
|
||||
it("creates a default index.html when missing", async () => {
|
||||
const dir = await createCaseDir();
|
||||
|
||||
const server = await startFixtureCanvasHost(dir);
|
||||
let server: Awaited<ReturnType<typeof startFixtureCanvasHost>>;
|
||||
try {
|
||||
server = await startFixtureCanvasHost(dir);
|
||||
} catch (error) {
|
||||
if (isLoopbackBindDenied(error)) {
|
||||
return;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
|
||||
try {
|
||||
const { res, html } = await fetchCanvasHtml(server.port);
|
||||
@ -119,8 +131,15 @@ describe("canvas host", () => {
|
||||
it("skips live reload injection when disabled", async () => {
|
||||
const dir = await createCaseDir();
|
||||
await fs.writeFile(path.join(dir, "index.html"), "<html><body>no-reload</body></html>", "utf8");
|
||||
|
||||
const server = await startFixtureCanvasHost(dir, { liveReload: false });
|
||||
let server: Awaited<ReturnType<typeof startFixtureCanvasHost>>;
|
||||
try {
|
||||
server = await startFixtureCanvasHost(dir, { liveReload: false });
|
||||
} catch (error) {
|
||||
if (isLoopbackBindDenied(error)) {
|
||||
return;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
|
||||
try {
|
||||
const { res, html } = await fetchCanvasHtml(server.port);
|
||||
@ -162,8 +181,27 @@ describe("canvas host", () => {
|
||||
}
|
||||
socket.destroy();
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", resolve));
|
||||
try {
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
const onError = (error: Error) => {
|
||||
server.off("listening", onListening);
|
||||
reject(error);
|
||||
};
|
||||
const onListening = () => {
|
||||
server.off("error", onError);
|
||||
resolve();
|
||||
};
|
||||
server.once("error", onError);
|
||||
server.once("listening", onListening);
|
||||
server.listen(0, "127.0.0.1");
|
||||
});
|
||||
} catch (error) {
|
||||
await handler.close();
|
||||
if (isLoopbackBindDenied(error)) {
|
||||
return;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
const port = (server.address() as AddressInfo).port;
|
||||
|
||||
try {
|
||||
@ -210,7 +248,15 @@ describe("canvas host", () => {
|
||||
await fs.writeFile(index, "<html><body>v1</body></html>", "utf8");
|
||||
|
||||
const watcherStart = chokidarMockState.watchers.length;
|
||||
const server = await startFixtureCanvasHost(dir);
|
||||
let server: Awaited<ReturnType<typeof startFixtureCanvasHost>>;
|
||||
try {
|
||||
server = await startFixtureCanvasHost(dir);
|
||||
} catch (error) {
|
||||
if (isLoopbackBindDenied(error)) {
|
||||
return;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
|
||||
try {
|
||||
const watcher = chokidarMockState.watchers[watcherStart];
|
||||
@ -267,6 +313,7 @@ describe("canvas host", () => {
|
||||
const linkPath = path.join(a2uiRoot, linkName);
|
||||
let createdBundle = false;
|
||||
let createdLink = false;
|
||||
let server: Awaited<ReturnType<typeof startFixtureCanvasHost>> | undefined;
|
||||
|
||||
try {
|
||||
await fs.stat(bundlePath);
|
||||
@ -278,9 +325,16 @@ describe("canvas host", () => {
|
||||
await fs.symlink(path.join(process.cwd(), "package.json"), linkPath);
|
||||
createdLink = true;
|
||||
|
||||
const server = await startFixtureCanvasHost(dir);
|
||||
|
||||
try {
|
||||
try {
|
||||
server = await startFixtureCanvasHost(dir);
|
||||
} catch (error) {
|
||||
if (isLoopbackBindDenied(error)) {
|
||||
return;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
|
||||
const res = await fetch(`http://127.0.0.1:${server.port}/__openclaw__/a2ui/`);
|
||||
const html = await res.text();
|
||||
expect(res.status).toBe(200);
|
||||
@ -302,7 +356,7 @@ describe("canvas host", () => {
|
||||
expect(symlinkRes.status).toBe(404);
|
||||
expect(await symlinkRes.text()).toBe("not found");
|
||||
} finally {
|
||||
await server.close();
|
||||
await server?.close();
|
||||
if (createdLink) {
|
||||
await fs.rm(linkPath, { force: true });
|
||||
}
|
||||
|
||||
@ -182,7 +182,7 @@ export async function inspectGatewayRestart(params: {
|
||||
return true;
|
||||
}
|
||||
if (runtimePid == null) {
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
return !listenerOwnedByRuntimePid({ listener, runtimePid });
|
||||
})
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { captureEnv } from "../../test-utils/env.js";
|
||||
import type { GatewayRestartSnapshot } from "./restart-health.js";
|
||||
|
||||
const callGatewayStatusProbe = vi.fn(async (_opts?: unknown) => ({ ok: true as const }));
|
||||
const loadGatewayTlsRuntime = vi.fn(async (_cfg?: unknown) => ({
|
||||
@ -18,6 +19,14 @@ const readLastGatewayErrorLine = vi.fn(async (_env?: NodeJS.ProcessEnv) => null)
|
||||
const auditGatewayServiceConfig = vi.fn(async (_opts?: unknown) => undefined);
|
||||
const serviceIsLoaded = vi.fn(async (_opts?: unknown) => true);
|
||||
const serviceReadRuntime = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({ status: "running" }));
|
||||
const inspectGatewayRestart = vi.fn<(opts?: unknown) => Promise<GatewayRestartSnapshot>>(
|
||||
async (_opts?: unknown) => ({
|
||||
runtime: { status: "running", pid: 1234 },
|
||||
portUsage: { port: 19001, status: "busy", listeners: [], hints: [] },
|
||||
healthy: true,
|
||||
staleGatewayPids: [],
|
||||
}),
|
||||
);
|
||||
const serviceReadCommand = vi.fn<
|
||||
(env?: NodeJS.ProcessEnv) => Promise<{
|
||||
programArguments: string[];
|
||||
@ -117,6 +126,10 @@ vi.mock("./probe.js", () => ({
|
||||
probeGatewayStatus: (opts: unknown) => callGatewayStatusProbe(opts),
|
||||
}));
|
||||
|
||||
vi.mock("./restart-health.js", () => ({
|
||||
inspectGatewayRestart: (opts: unknown) => inspectGatewayRestart(opts),
|
||||
}));
|
||||
|
||||
const { gatherDaemonStatus } = await import("./status.gather.js");
|
||||
|
||||
describe("gatherDaemonStatus", () => {
|
||||
@ -139,6 +152,7 @@ describe("gatherDaemonStatus", () => {
|
||||
delete process.env.DAEMON_GATEWAY_PASSWORD;
|
||||
callGatewayStatusProbe.mockClear();
|
||||
loadGatewayTlsRuntime.mockClear();
|
||||
inspectGatewayRestart.mockClear();
|
||||
daemonLoadedConfig = {
|
||||
gateway: {
|
||||
bind: "lan",
|
||||
@ -362,4 +376,34 @@ describe("gatherDaemonStatus", () => {
|
||||
expect(callGatewayStatusProbe).not.toHaveBeenCalled();
|
||||
expect(status.rpc).toBeUndefined();
|
||||
});
|
||||
|
||||
it("surfaces stale gateway listener pids from restart health inspection", async () => {
|
||||
inspectGatewayRestart.mockResolvedValueOnce({
|
||||
runtime: { status: "running", pid: 8000 },
|
||||
portUsage: {
|
||||
port: 19001,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 9000, ppid: 8999, commandLine: "openclaw-gateway" }],
|
||||
hints: [],
|
||||
},
|
||||
healthy: false,
|
||||
staleGatewayPids: [9000],
|
||||
});
|
||||
|
||||
const status = await gatherDaemonStatus({
|
||||
rpc: {},
|
||||
probe: true,
|
||||
deep: false,
|
||||
});
|
||||
|
||||
expect(inspectGatewayRestart).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
port: 19001,
|
||||
}),
|
||||
);
|
||||
expect(status.health).toEqual({
|
||||
healthy: false,
|
||||
staleGatewayPids: [9000],
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@ -29,6 +29,7 @@ import {
|
||||
import { pickPrimaryTailnetIPv4 } from "../../infra/tailnet.js";
|
||||
import { loadGatewayTlsRuntime } from "../../infra/tls/gateway.js";
|
||||
import { probeGatewayStatus } from "./probe.js";
|
||||
import { inspectGatewayRestart } from "./restart-health.js";
|
||||
import { normalizeListenerAddress, parsePortFromArgs, pickProbeHostForBind } from "./shared.js";
|
||||
import type { GatewayRpcOpts } from "./types.js";
|
||||
|
||||
@ -112,6 +113,10 @@ export type DaemonStatus = {
|
||||
error?: string;
|
||||
url?: string;
|
||||
};
|
||||
health?: {
|
||||
healthy: boolean;
|
||||
staleGatewayPids: number[];
|
||||
};
|
||||
extraServices: Array<{ label: string; detail: string; scope: string }>;
|
||||
};
|
||||
|
||||
@ -331,6 +336,14 @@ export async function gatherDaemonStatus(
|
||||
configPath: daemonConfigSummary.path,
|
||||
})
|
||||
: undefined;
|
||||
const health =
|
||||
opts.probe && loaded
|
||||
? await inspectGatewayRestart({
|
||||
service,
|
||||
port: daemonPort,
|
||||
env: serviceEnv,
|
||||
}).catch(() => undefined)
|
||||
: undefined;
|
||||
|
||||
let lastError: string | undefined;
|
||||
if (loaded && runtime?.status === "running" && portStatus && portStatus.status !== "busy") {
|
||||
@ -357,6 +370,14 @@ export async function gatherDaemonStatus(
|
||||
...(portCliStatus ? { portCli: portCliStatus } : {}),
|
||||
lastError,
|
||||
...(rpc ? { rpc: { ...rpc, url: gateway.probeUrl } } : {}),
|
||||
...(health
|
||||
? {
|
||||
health: {
|
||||
healthy: health.healthy,
|
||||
staleGatewayPids: health.staleGatewayPids,
|
||||
},
|
||||
}
|
||||
: {}),
|
||||
extraServices,
|
||||
};
|
||||
}
|
||||
|
||||
116
src/cli/daemon-cli/status.print.test.ts
Normal file
116
src/cli/daemon-cli/status.print.test.ts
Normal file
@ -0,0 +1,116 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const runtime = vi.hoisted(() => ({
|
||||
log: vi.fn<(line: string) => void>(),
|
||||
error: vi.fn<(line: string) => void>(),
|
||||
}));
|
||||
|
||||
vi.mock("../../runtime.js", () => ({
|
||||
defaultRuntime: runtime,
|
||||
}));
|
||||
|
||||
vi.mock("../../terminal/theme.js", () => ({
|
||||
colorize: (_rich: boolean, _theme: unknown, text: string) => text,
|
||||
}));
|
||||
|
||||
vi.mock("../../commands/onboard-helpers.js", () => ({
|
||||
resolveControlUiLinks: () => ({ httpUrl: "http://127.0.0.1:18789" }),
|
||||
}));
|
||||
|
||||
vi.mock("../../daemon/inspect.js", () => ({
|
||||
renderGatewayServiceCleanupHints: () => [],
|
||||
}));
|
||||
|
||||
vi.mock("../../daemon/launchd.js", () => ({
|
||||
resolveGatewayLogPaths: () => ({
|
||||
stdoutPath: "/tmp/gateway.out.log",
|
||||
stderrPath: "/tmp/gateway.err.log",
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock("../../daemon/systemd-hints.js", () => ({
|
||||
isSystemdUnavailableDetail: () => false,
|
||||
renderSystemdUnavailableHints: () => [],
|
||||
}));
|
||||
|
||||
vi.mock("../../infra/wsl.js", () => ({
|
||||
isWSLEnv: () => false,
|
||||
}));
|
||||
|
||||
vi.mock("../../logging.js", () => ({
|
||||
getResolvedLoggerSettings: () => ({ file: "/tmp/openclaw.log" }),
|
||||
}));
|
||||
|
||||
vi.mock("./shared.js", () => ({
|
||||
createCliStatusTextStyles: () => ({
|
||||
rich: false,
|
||||
label: (text: string) => text,
|
||||
accent: (text: string) => text,
|
||||
infoText: (text: string) => text,
|
||||
okText: (text: string) => text,
|
||||
warnText: (text: string) => text,
|
||||
errorText: (text: string) => text,
|
||||
}),
|
||||
filterDaemonEnv: () => ({}),
|
||||
formatRuntimeStatus: () => "running (pid 8000)",
|
||||
resolveRuntimeStatusColor: () => "",
|
||||
renderRuntimeHints: () => [],
|
||||
safeDaemonEnv: () => [],
|
||||
}));
|
||||
|
||||
vi.mock("./status.gather.js", () => ({
|
||||
renderPortDiagnosticsForCli: () => [],
|
||||
resolvePortListeningAddresses: () => ["127.0.0.1:18789"],
|
||||
}));
|
||||
|
||||
const { printDaemonStatus } = await import("./status.print.js");
|
||||
|
||||
describe("printDaemonStatus", () => {
|
||||
beforeEach(() => {
|
||||
runtime.log.mockReset();
|
||||
runtime.error.mockReset();
|
||||
});
|
||||
|
||||
it("prints stale gateway pid guidance when runtime does not own the listener", () => {
|
||||
printDaemonStatus(
|
||||
{
|
||||
service: {
|
||||
label: "LaunchAgent",
|
||||
loaded: true,
|
||||
loadedText: "loaded",
|
||||
notLoadedText: "not loaded",
|
||||
runtime: { status: "running", pid: 8000 },
|
||||
},
|
||||
gateway: {
|
||||
bindMode: "loopback",
|
||||
bindHost: "127.0.0.1",
|
||||
port: 18789,
|
||||
portSource: "env/config",
|
||||
probeUrl: "ws://127.0.0.1:18789",
|
||||
},
|
||||
port: {
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 9000, ppid: 8999, address: "127.0.0.1:18789" }],
|
||||
hints: [],
|
||||
},
|
||||
rpc: {
|
||||
ok: false,
|
||||
error: "gateway closed (1006 abnormal closure (no close frame))",
|
||||
url: "ws://127.0.0.1:18789",
|
||||
},
|
||||
health: {
|
||||
healthy: false,
|
||||
staleGatewayPids: [9000],
|
||||
},
|
||||
extraServices: [],
|
||||
},
|
||||
{ json: false },
|
||||
);
|
||||
|
||||
expect(runtime.error).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Gateway runtime PID does not own the listening port"),
|
||||
);
|
||||
expect(runtime.error).toHaveBeenCalledWith(expect.stringContaining("openclaw gateway restart"));
|
||||
});
|
||||
});
|
||||
@ -194,6 +194,25 @@ export function printDaemonStatus(status: DaemonStatus, opts: { json: boolean })
|
||||
spacer();
|
||||
}
|
||||
|
||||
if (
|
||||
status.health &&
|
||||
status.health.staleGatewayPids.length > 0 &&
|
||||
service.runtime?.status === "running" &&
|
||||
typeof service.runtime.pid === "number"
|
||||
) {
|
||||
defaultRuntime.error(
|
||||
errorText(
|
||||
`Gateway runtime PID does not own the listening port. Other gateway process(es) are listening: ${status.health.staleGatewayPids.join(", ")}`,
|
||||
),
|
||||
);
|
||||
defaultRuntime.error(
|
||||
errorText(
|
||||
`Fix: run ${formatCliCommand("openclaw gateway restart")} and re-check with ${formatCliCommand("openclaw gateway status --deep")}.`,
|
||||
),
|
||||
);
|
||||
spacer();
|
||||
}
|
||||
|
||||
const systemdUnavailable =
|
||||
process.platform === "linux" && isSystemdUnavailableDetail(service.runtime?.detail);
|
||||
if (systemdUnavailable) {
|
||||
|
||||
@ -63,6 +63,13 @@ describe("maybeRemoveDeprecatedCliAuthProfiles", () => {
|
||||
refresh: "token-r2",
|
||||
expires: Date.now() + 60_000,
|
||||
},
|
||||
"openai-codex:default": {
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
access: "token-c",
|
||||
refresh: "token-r3",
|
||||
expires: Date.now() + 60_000,
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
@ -76,10 +83,11 @@ describe("maybeRemoveDeprecatedCliAuthProfiles", () => {
|
||||
profiles: {
|
||||
"anthropic:claude-cli": { provider: "anthropic", mode: "oauth" },
|
||||
"openai-codex:codex-cli": { provider: "openai-codex", mode: "oauth" },
|
||||
"openai-codex:default": { provider: "openai-codex", mode: "oauth" },
|
||||
},
|
||||
order: {
|
||||
anthropic: ["anthropic:claude-cli"],
|
||||
"openai-codex": ["openai-codex:codex-cli"],
|
||||
"openai-codex": ["openai-codex:codex-cli", "openai-codex:default"],
|
||||
},
|
||||
},
|
||||
} as const;
|
||||
@ -94,10 +102,12 @@ describe("maybeRemoveDeprecatedCliAuthProfiles", () => {
|
||||
};
|
||||
expect(raw.profiles?.["anthropic:claude-cli"]).toBeUndefined();
|
||||
expect(raw.profiles?.["openai-codex:codex-cli"]).toBeUndefined();
|
||||
expect(raw.profiles?.["openai-codex:default"]).toBeDefined();
|
||||
|
||||
expect(next.auth?.profiles?.["anthropic:claude-cli"]).toBeUndefined();
|
||||
expect(next.auth?.profiles?.["openai-codex:codex-cli"]).toBeUndefined();
|
||||
expect(next.auth?.profiles?.["openai-codex:default"]).toBeDefined();
|
||||
expect(next.auth?.order?.anthropic).toBeUndefined();
|
||||
expect(next.auth?.order?.["openai-codex"]).toBeUndefined();
|
||||
expect(next.auth?.order?.["openai-codex"]).toEqual(["openai-codex:default"]);
|
||||
});
|
||||
});
|
||||
|
||||
@ -21,6 +21,16 @@ const mocks = vi.hoisted(() => ({
|
||||
updateConfig: vi.fn(),
|
||||
logConfigUpdated: vi.fn(),
|
||||
openUrl: vi.fn(),
|
||||
loadAuthProfileStoreForRuntime: vi.fn(),
|
||||
listProfilesForProvider: vi.fn(),
|
||||
clearAuthProfileCooldown: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles.js", () => ({
|
||||
loadAuthProfileStoreForRuntime: mocks.loadAuthProfileStoreForRuntime,
|
||||
listProfilesForProvider: mocks.listProfilesForProvider,
|
||||
clearAuthProfileCooldown: mocks.clearAuthProfileCooldown,
|
||||
upsertAuthProfile: mocks.upsertAuthProfile,
|
||||
}));
|
||||
|
||||
vi.mock("@clack/prompts", () => ({
|
||||
@ -41,10 +51,6 @@ vi.mock("../../agents/workspace.js", () => ({
|
||||
resolveDefaultAgentWorkspaceDir: mocks.resolveDefaultAgentWorkspaceDir,
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles.js", () => ({
|
||||
upsertAuthProfile: mocks.upsertAuthProfile,
|
||||
}));
|
||||
|
||||
vi.mock("../../plugins/providers.js", () => ({
|
||||
resolvePluginProviders: mocks.resolvePluginProviders,
|
||||
}));
|
||||
@ -155,6 +161,9 @@ describe("modelsAuthLoginCommand", () => {
|
||||
});
|
||||
mocks.writeOAuthCredentials.mockResolvedValue("openai-codex:user@example.com");
|
||||
mocks.resolvePluginProviders.mockReturnValue([]);
|
||||
mocks.loadAuthProfileStoreForRuntime.mockReturnValue({ profiles: {}, usageStats: {} });
|
||||
mocks.listProfilesForProvider.mockReturnValue([]);
|
||||
mocks.clearAuthProfileCooldown.mockResolvedValue(undefined);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@ -198,6 +207,60 @@ describe("modelsAuthLoginCommand", () => {
|
||||
expect(runtime.log).toHaveBeenCalledWith("Default model set to openai-codex/gpt-5.4");
|
||||
});
|
||||
|
||||
it("clears stale auth lockouts before attempting openai-codex login", async () => {
|
||||
const runtime = createRuntime();
|
||||
const fakeStore = {
|
||||
profiles: {
|
||||
"openai-codex:user@example.com": {
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
},
|
||||
},
|
||||
usageStats: {
|
||||
"openai-codex:user@example.com": {
|
||||
disabledUntil: Date.now() + 3_600_000,
|
||||
disabledReason: "auth_permanent",
|
||||
errorCount: 3,
|
||||
},
|
||||
},
|
||||
};
|
||||
mocks.loadAuthProfileStoreForRuntime.mockReturnValue(fakeStore);
|
||||
mocks.listProfilesForProvider.mockReturnValue(["openai-codex:user@example.com"]);
|
||||
|
||||
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
|
||||
|
||||
expect(mocks.clearAuthProfileCooldown).toHaveBeenCalledWith({
|
||||
store: fakeStore,
|
||||
profileId: "openai-codex:user@example.com",
|
||||
agentDir: "/tmp/openclaw/agents/main",
|
||||
});
|
||||
// Verify clearing happens before login attempt
|
||||
const clearOrder = mocks.clearAuthProfileCooldown.mock.invocationCallOrder[0];
|
||||
const loginOrder = mocks.loginOpenAICodexOAuth.mock.invocationCallOrder[0];
|
||||
expect(clearOrder).toBeLessThan(loginOrder);
|
||||
});
|
||||
|
||||
it("survives lockout clearing failure without blocking login", async () => {
|
||||
const runtime = createRuntime();
|
||||
mocks.loadAuthProfileStoreForRuntime.mockImplementation(() => {
|
||||
throw new Error("corrupt auth-profiles.json");
|
||||
});
|
||||
|
||||
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
|
||||
|
||||
expect(mocks.loginOpenAICodexOAuth).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("loads lockout state from the agent-scoped store", async () => {
|
||||
const runtime = createRuntime();
|
||||
mocks.loadAuthProfileStoreForRuntime.mockReturnValue({ profiles: {}, usageStats: {} });
|
||||
mocks.listProfilesForProvider.mockReturnValue([]);
|
||||
|
||||
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
|
||||
|
||||
expect(mocks.loadAuthProfileStoreForRuntime).toHaveBeenCalledWith("/tmp/openclaw/agents/main");
|
||||
});
|
||||
|
||||
it("keeps existing plugin error behavior for non built-in providers", async () => {
|
||||
const runtime = createRuntime();
|
||||
|
||||
|
||||
@ -10,7 +10,12 @@ import {
|
||||
resolveAgentWorkspaceDir,
|
||||
resolveDefaultAgentId,
|
||||
} from "../../agents/agent-scope.js";
|
||||
import { upsertAuthProfile } from "../../agents/auth-profiles.js";
|
||||
import {
|
||||
clearAuthProfileCooldown,
|
||||
listProfilesForProvider,
|
||||
loadAuthProfileStoreForRuntime,
|
||||
upsertAuthProfile,
|
||||
} from "../../agents/auth-profiles.js";
|
||||
import type { AuthProfileCredential } from "../../agents/auth-profiles/types.js";
|
||||
import { normalizeProviderId } from "../../agents/model-selection.js";
|
||||
import { resolveDefaultAgentWorkspaceDir } from "../../agents/workspace.js";
|
||||
@ -265,6 +270,24 @@ type LoginOptions = {
|
||||
setDefault?: boolean;
|
||||
};
|
||||
|
||||
/**
|
||||
* Clear stale cooldown/disabled state for all profiles matching a provider.
|
||||
* When a user explicitly runs `models auth login`, they intend to fix auth —
|
||||
* stale `auth_permanent` / `billing` lockouts should not persist across
|
||||
* a deliberate re-authentication attempt.
|
||||
*/
|
||||
async function clearStaleProfileLockouts(provider: string, agentDir: string): Promise<void> {
|
||||
try {
|
||||
const store = loadAuthProfileStoreForRuntime(agentDir);
|
||||
const profileIds = listProfilesForProvider(store, provider);
|
||||
for (const profileId of profileIds) {
|
||||
await clearAuthProfileCooldown({ store, profileId, agentDir });
|
||||
}
|
||||
} catch {
|
||||
// Best-effort housekeeping — never block re-authentication.
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveRequestedLoginProviderOrThrow(
|
||||
providers: ProviderPlugin[],
|
||||
rawProvider?: string,
|
||||
@ -356,6 +379,7 @@ export async function modelsAuthLoginCommand(opts: LoginOptions, runtime: Runtim
|
||||
const prompter = createClackPrompter();
|
||||
|
||||
if (requestedProviderId === "openai-codex") {
|
||||
await clearStaleProfileLockouts("openai-codex", agentDir);
|
||||
await runBuiltInOpenAICodexLogin({
|
||||
opts,
|
||||
runtime,
|
||||
@ -390,6 +414,8 @@ export async function modelsAuthLoginCommand(opts: LoginOptions, runtime: Runtim
|
||||
throw new Error("Unknown provider. Use --provider <id> to pick a provider plugin.");
|
||||
}
|
||||
|
||||
await clearStaleProfileLockouts(selectedProvider.id, agentDir);
|
||||
|
||||
const chosenMethod =
|
||||
pickAuthMethod(selectedProvider, opts.method) ??
|
||||
(selectedProvider.auth.length === 1
|
||||
|
||||
@ -111,9 +111,12 @@ describe("Nix integration (U3, U5, U9)", () => {
|
||||
});
|
||||
|
||||
it("CONFIG_PATH uses STATE_DIR when only state dir is overridden", () => {
|
||||
expect(resolveConfigPathCandidate(envWith({ OPENCLAW_STATE_DIR: "/custom/state" }))).toBe(
|
||||
path.join(path.resolve("/custom/state"), "openclaw.json"),
|
||||
);
|
||||
expect(
|
||||
resolveConfigPathCandidate(
|
||||
envWith({ OPENCLAW_STATE_DIR: "/custom/state", OPENCLAW_TEST_FAST: "1" }),
|
||||
() => path.join(path.sep, "tmp", "openclaw-config-home"),
|
||||
),
|
||||
).toBe(path.join(path.resolve("/custom/state"), "openclaw.json"));
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@ -44,7 +44,6 @@ async function writePluginFixture(params: {
|
||||
}
|
||||
|
||||
describe("config plugin validation", () => {
|
||||
const previousUmask = process.umask(0o022);
|
||||
let fixtureRoot = "";
|
||||
let suiteHome = "";
|
||||
let badPluginDir = "";
|
||||
@ -136,7 +135,6 @@ describe("config plugin validation", () => {
|
||||
afterAll(async () => {
|
||||
await fs.rm(fixtureRoot, { recursive: true, force: true });
|
||||
clearPluginManifestRegistryCache();
|
||||
process.umask(previousUmask);
|
||||
});
|
||||
|
||||
it("reports missing plugin refs across load paths, entries, and allowlist surfaces", async () => {
|
||||
|
||||
160
src/config/doc-baseline.test.ts
Normal file
160
src/config/doc-baseline.test.ts
Normal file
@ -0,0 +1,160 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
import {
|
||||
buildConfigDocBaseline,
|
||||
collectConfigDocBaselineEntries,
|
||||
dedupeConfigDocBaselineEntries,
|
||||
normalizeConfigDocBaselineHelpPath,
|
||||
renderConfigDocBaselineStatefile,
|
||||
writeConfigDocBaselineStatefile,
|
||||
} from "./doc-baseline.js";
|
||||
|
||||
describe("config doc baseline", () => {
|
||||
const tempRoots: string[] = [];
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
tempRoots.splice(0).map(async (tempRoot) => {
|
||||
await fs.rm(tempRoot, { recursive: true, force: true });
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("is deterministic across repeated runs", async () => {
|
||||
const first = await renderConfigDocBaselineStatefile();
|
||||
const second = await renderConfigDocBaselineStatefile();
|
||||
|
||||
expect(second.json).toBe(first.json);
|
||||
expect(second.jsonl).toBe(first.jsonl);
|
||||
});
|
||||
|
||||
it("normalizes array and record paths to wildcard form", async () => {
|
||||
const baseline = await buildConfigDocBaseline();
|
||||
const paths = new Set(baseline.entries.map((entry) => entry.path));
|
||||
|
||||
expect(paths.has("session.sendPolicy.rules.*.match.keyPrefix")).toBe(true);
|
||||
expect(paths.has("env.*")).toBe(true);
|
||||
expect(normalizeConfigDocBaselineHelpPath("agents.list[].skills")).toBe("agents.list.*.skills");
|
||||
});
|
||||
|
||||
it("includes core, channel, and plugin config metadata", async () => {
|
||||
const baseline = await buildConfigDocBaseline();
|
||||
const byPath = new Map(baseline.entries.map((entry) => [entry.path, entry]));
|
||||
|
||||
expect(byPath.get("gateway.auth.token")).toMatchObject({
|
||||
kind: "core",
|
||||
sensitive: true,
|
||||
});
|
||||
expect(byPath.get("channels.telegram.botToken")).toMatchObject({
|
||||
kind: "channel",
|
||||
sensitive: true,
|
||||
});
|
||||
expect(byPath.get("plugins.entries.voice-call.config.twilio.authToken")).toMatchObject({
|
||||
kind: "plugin",
|
||||
sensitive: true,
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves help text and tags from merged schema hints", async () => {
|
||||
const baseline = await buildConfigDocBaseline();
|
||||
const byPath = new Map(baseline.entries.map((entry) => [entry.path, entry]));
|
||||
const tokenEntry = byPath.get("gateway.auth.token");
|
||||
|
||||
expect(tokenEntry?.help).toContain("gateway access");
|
||||
expect(tokenEntry?.tags).toContain("auth");
|
||||
expect(tokenEntry?.tags).toContain("security");
|
||||
});
|
||||
|
||||
it("matches array help hints that still use [] notation", async () => {
|
||||
const baseline = await buildConfigDocBaseline();
|
||||
const byPath = new Map(baseline.entries.map((entry) => [entry.path, entry]));
|
||||
|
||||
expect(byPath.get("session.sendPolicy.rules.*.match.keyPrefix")).toMatchObject({
|
||||
help: expect.stringContaining("prefer rawKeyPrefix when exact full-key matching is required"),
|
||||
sensitive: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("walks union branches for nested config keys", async () => {
|
||||
const baseline = await buildConfigDocBaseline();
|
||||
const byPath = new Map(baseline.entries.map((entry) => [entry.path, entry]));
|
||||
|
||||
expect(byPath.get("bindings.*")).toMatchObject({
|
||||
hasChildren: true,
|
||||
});
|
||||
expect(byPath.get("bindings.*.type")).toBeDefined();
|
||||
expect(byPath.get("bindings.*.match.channel")).toBeDefined();
|
||||
expect(byPath.get("bindings.*.match.peer.id")).toBeDefined();
|
||||
});
|
||||
|
||||
it("merges tuple item metadata instead of dropping earlier entries", () => {
|
||||
const entries = dedupeConfigDocBaselineEntries(
|
||||
collectConfigDocBaselineEntries(
|
||||
{
|
||||
type: "array",
|
||||
items: [
|
||||
{
|
||||
type: "string",
|
||||
enum: ["alpha"],
|
||||
},
|
||||
{
|
||||
type: "number",
|
||||
enum: [42],
|
||||
},
|
||||
],
|
||||
},
|
||||
{},
|
||||
"tupleValues",
|
||||
),
|
||||
);
|
||||
const tupleEntry = new Map(entries.map((entry) => [entry.path, entry])).get("tupleValues.*");
|
||||
|
||||
expect(tupleEntry).toMatchObject({
|
||||
type: ["number", "string"],
|
||||
});
|
||||
expect(tupleEntry?.enumValues).toEqual(expect.arrayContaining([42, "alpha"]));
|
||||
expect(tupleEntry?.enumValues).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("supports check mode for stale generated artifacts", async () => {
|
||||
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-config-doc-baseline-"));
|
||||
tempRoots.push(tempRoot);
|
||||
|
||||
const initial = await writeConfigDocBaselineStatefile({
|
||||
repoRoot: tempRoot,
|
||||
jsonPath: "docs/.generated/config-baseline.json",
|
||||
statefilePath: "docs/.generated/config-baseline.jsonl",
|
||||
});
|
||||
expect(initial.wrote).toBe(true);
|
||||
|
||||
const current = await writeConfigDocBaselineStatefile({
|
||||
repoRoot: tempRoot,
|
||||
jsonPath: "docs/.generated/config-baseline.json",
|
||||
statefilePath: "docs/.generated/config-baseline.jsonl",
|
||||
check: true,
|
||||
});
|
||||
expect(current.changed).toBe(false);
|
||||
|
||||
await fs.writeFile(
|
||||
path.join(tempRoot, "docs/.generated/config-baseline.json"),
|
||||
'{"generatedBy":"broken","entries":[]}\n',
|
||||
"utf8",
|
||||
);
|
||||
await fs.writeFile(
|
||||
path.join(tempRoot, "docs/.generated/config-baseline.jsonl"),
|
||||
'{"recordType":"meta","generatedBy":"broken","totalPaths":0}\n',
|
||||
"utf8",
|
||||
);
|
||||
|
||||
const stale = await writeConfigDocBaselineStatefile({
|
||||
repoRoot: tempRoot,
|
||||
jsonPath: "docs/.generated/config-baseline.json",
|
||||
statefilePath: "docs/.generated/config-baseline.jsonl",
|
||||
check: true,
|
||||
});
|
||||
expect(stale.changed).toBe(true);
|
||||
expect(stale.wrote).toBe(false);
|
||||
});
|
||||
});
|
||||
578
src/config/doc-baseline.ts
Normal file
578
src/config/doc-baseline.ts
Normal file
@ -0,0 +1,578 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { fileURLToPath, pathToFileURL } from "node:url";
|
||||
import type { ChannelPlugin } from "../channels/plugins/index.js";
|
||||
import { resolveOpenClawPackageRootSync } from "../infra/openclaw-root.js";
|
||||
import { loadPluginManifestRegistry } from "../plugins/manifest-registry.js";
|
||||
import { FIELD_HELP } from "./schema.help.js";
|
||||
import { buildConfigSchema, type ConfigSchemaResponse } from "./schema.js";
|
||||
|
||||
type JsonValue = null | boolean | number | string | JsonValue[] | { [key: string]: JsonValue };
|
||||
|
||||
type JsonSchemaNode = Record<string, unknown>;
|
||||
|
||||
type JsonSchemaObject = JsonSchemaNode & {
|
||||
type?: string | string[];
|
||||
properties?: Record<string, JsonSchemaObject>;
|
||||
required?: string[];
|
||||
additionalProperties?: JsonSchemaObject | boolean;
|
||||
items?: JsonSchemaObject | JsonSchemaObject[];
|
||||
enum?: unknown[];
|
||||
default?: unknown;
|
||||
deprecated?: boolean;
|
||||
anyOf?: JsonSchemaObject[];
|
||||
allOf?: JsonSchemaObject[];
|
||||
oneOf?: JsonSchemaObject[];
|
||||
};
|
||||
|
||||
export type ConfigDocBaselineKind = "core" | "channel" | "plugin";
|
||||
|
||||
export type ConfigDocBaselineEntry = {
|
||||
path: string;
|
||||
kind: ConfigDocBaselineKind;
|
||||
type?: string | string[];
|
||||
required: boolean;
|
||||
enumValues?: JsonValue[];
|
||||
defaultValue?: JsonValue;
|
||||
deprecated: boolean;
|
||||
sensitive: boolean;
|
||||
tags: string[];
|
||||
label?: string;
|
||||
help?: string;
|
||||
hasChildren: boolean;
|
||||
};
|
||||
|
||||
export type ConfigDocBaseline = {
|
||||
generatedBy: "scripts/generate-config-doc-baseline.ts";
|
||||
entries: ConfigDocBaselineEntry[];
|
||||
};
|
||||
|
||||
export type ConfigDocBaselineStatefileRender = {
|
||||
json: string;
|
||||
jsonl: string;
|
||||
baseline: ConfigDocBaseline;
|
||||
};
|
||||
|
||||
export type ConfigDocBaselineStatefileWriteResult = {
|
||||
changed: boolean;
|
||||
wrote: boolean;
|
||||
jsonPath: string;
|
||||
statefilePath: string;
|
||||
};
|
||||
|
||||
const GENERATED_BY = "scripts/generate-config-doc-baseline.ts" as const;
|
||||
const DEFAULT_JSON_OUTPUT = "docs/.generated/config-baseline.json";
|
||||
const DEFAULT_STATEFILE_OUTPUT = "docs/.generated/config-baseline.jsonl";
|
||||
function resolveRepoRoot(): string {
|
||||
const fromPackage = resolveOpenClawPackageRootSync({
|
||||
cwd: path.dirname(fileURLToPath(import.meta.url)),
|
||||
moduleUrl: import.meta.url,
|
||||
});
|
||||
if (fromPackage) {
|
||||
return fromPackage;
|
||||
}
|
||||
return path.resolve(path.dirname(fileURLToPath(import.meta.url)), "../..");
|
||||
}
|
||||
|
||||
function normalizeBaselinePath(rawPath: string): string {
|
||||
return rawPath
|
||||
.trim()
|
||||
.replace(/\[\]/g, ".*")
|
||||
.replace(/\[(\*|\d+)\]/g, ".*")
|
||||
.replace(/^\.+|\.+$/g, "")
|
||||
.replace(/\.+/g, ".");
|
||||
}
|
||||
|
||||
function normalizeJsonValue(value: unknown): JsonValue | undefined {
|
||||
if (value === null) {
|
||||
return null;
|
||||
}
|
||||
if (typeof value === "string" || typeof value === "boolean") {
|
||||
return value;
|
||||
}
|
||||
if (typeof value === "number") {
|
||||
return Number.isFinite(value) ? value : undefined;
|
||||
}
|
||||
if (Array.isArray(value)) {
|
||||
const normalized = value
|
||||
.map((entry) => normalizeJsonValue(entry))
|
||||
.filter((entry): entry is JsonValue => entry !== undefined);
|
||||
return normalized;
|
||||
}
|
||||
if (!value || typeof value !== "object") {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const entries = Object.entries(value as Record<string, unknown>)
|
||||
.toSorted(([left], [right]) => left.localeCompare(right))
|
||||
.map(([key, entry]) => {
|
||||
const normalized = normalizeJsonValue(entry);
|
||||
return normalized === undefined ? null : ([key, normalized] as const);
|
||||
})
|
||||
.filter((entry): entry is readonly [string, JsonValue] => entry !== null);
|
||||
|
||||
return Object.fromEntries(entries);
|
||||
}
|
||||
|
||||
function normalizeEnumValues(values: unknown[] | undefined): JsonValue[] | undefined {
|
||||
if (!values) {
|
||||
return undefined;
|
||||
}
|
||||
const normalized = values
|
||||
.map((entry) => normalizeJsonValue(entry))
|
||||
.filter((entry): entry is JsonValue => entry !== undefined);
|
||||
return normalized.length > 0 ? normalized : undefined;
|
||||
}
|
||||
|
||||
function asSchemaObject(value: unknown): JsonSchemaObject | null {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) {
|
||||
return null;
|
||||
}
|
||||
return value as JsonSchemaObject;
|
||||
}
|
||||
|
||||
function schemaHasChildren(schema: JsonSchemaObject): boolean {
|
||||
if (schema.properties && Object.keys(schema.properties).length > 0) {
|
||||
return true;
|
||||
}
|
||||
if (schema.additionalProperties && typeof schema.additionalProperties === "object") {
|
||||
return true;
|
||||
}
|
||||
if (Array.isArray(schema.items)) {
|
||||
return schema.items.some((entry) => typeof entry === "object" && entry !== null);
|
||||
}
|
||||
for (const branch of [schema.oneOf, schema.anyOf, schema.allOf]) {
|
||||
if (branch?.some((entry) => entry && typeof entry === "object" && schemaHasChildren(entry))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return Boolean(schema.items && typeof schema.items === "object");
|
||||
}
|
||||
|
||||
function splitHintLookupPath(path: string): string[] {
|
||||
const normalized = normalizeBaselinePath(path);
|
||||
return normalized ? normalized.split(".").filter(Boolean) : [];
|
||||
}
|
||||
|
||||
function resolveUiHintMatch(
|
||||
uiHints: ConfigSchemaResponse["uiHints"],
|
||||
path: string,
|
||||
): ConfigSchemaResponse["uiHints"][string] | undefined {
|
||||
const targetParts = splitHintLookupPath(path);
|
||||
let bestMatch:
|
||||
| {
|
||||
hint: ConfigSchemaResponse["uiHints"][string];
|
||||
wildcardCount: number;
|
||||
}
|
||||
| undefined;
|
||||
|
||||
for (const [hintPath, hint] of Object.entries(uiHints)) {
|
||||
const hintParts = splitHintLookupPath(hintPath);
|
||||
if (hintParts.length !== targetParts.length) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let wildcardCount = 0;
|
||||
let matches = true;
|
||||
for (let index = 0; index < hintParts.length; index += 1) {
|
||||
const hintPart = hintParts[index];
|
||||
const targetPart = targetParts[index];
|
||||
if (hintPart === targetPart) {
|
||||
continue;
|
||||
}
|
||||
if (hintPart === "*") {
|
||||
wildcardCount += 1;
|
||||
continue;
|
||||
}
|
||||
matches = false;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!matches) {
|
||||
continue;
|
||||
}
|
||||
if (!bestMatch || wildcardCount < bestMatch.wildcardCount) {
|
||||
bestMatch = { hint, wildcardCount };
|
||||
}
|
||||
}
|
||||
|
||||
return bestMatch?.hint;
|
||||
}
|
||||
|
||||
function normalizeTypeValue(value: string | string[] | undefined): string | string[] | undefined {
|
||||
if (!value) {
|
||||
return undefined;
|
||||
}
|
||||
if (Array.isArray(value)) {
|
||||
const normalized = [...new Set(value)].toSorted((left, right) => left.localeCompare(right));
|
||||
return normalized.length === 1 ? normalized[0] : normalized;
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
function mergeTypeValues(
|
||||
left: string | string[] | undefined,
|
||||
right: string | string[] | undefined,
|
||||
): string | string[] | undefined {
|
||||
const merged = new Set<string>();
|
||||
for (const value of [left, right]) {
|
||||
if (!value) {
|
||||
continue;
|
||||
}
|
||||
if (Array.isArray(value)) {
|
||||
for (const entry of value) {
|
||||
merged.add(entry);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
merged.add(value);
|
||||
}
|
||||
return normalizeTypeValue([...merged]);
|
||||
}
|
||||
|
||||
function areJsonValuesEqual(left: JsonValue | undefined, right: JsonValue | undefined): boolean {
|
||||
return JSON.stringify(left) === JSON.stringify(right);
|
||||
}
|
||||
|
||||
function mergeJsonValueArrays(
|
||||
left: JsonValue[] | undefined,
|
||||
right: JsonValue[] | undefined,
|
||||
): JsonValue[] | undefined {
|
||||
if (!left?.length) {
|
||||
return right ? [...right] : undefined;
|
||||
}
|
||||
if (!right?.length) {
|
||||
return [...left];
|
||||
}
|
||||
|
||||
const merged = new Map<string, JsonValue>();
|
||||
for (const value of [...left, ...right]) {
|
||||
merged.set(JSON.stringify(value), value);
|
||||
}
|
||||
return [...merged.entries()]
|
||||
.toSorted(([leftKey], [rightKey]) => leftKey.localeCompare(rightKey))
|
||||
.map(([, value]) => value);
|
||||
}
|
||||
|
||||
function mergeConfigDocBaselineEntry(
|
||||
current: ConfigDocBaselineEntry,
|
||||
next: ConfigDocBaselineEntry,
|
||||
): ConfigDocBaselineEntry {
|
||||
const label = current.label === next.label ? current.label : (current.label ?? next.label);
|
||||
const help = current.help === next.help ? current.help : (current.help ?? next.help);
|
||||
const defaultValue = areJsonValuesEqual(current.defaultValue, next.defaultValue)
|
||||
? (current.defaultValue ?? next.defaultValue)
|
||||
: undefined;
|
||||
|
||||
return {
|
||||
path: current.path,
|
||||
kind: current.kind,
|
||||
type: mergeTypeValues(current.type, next.type),
|
||||
required: current.required && next.required,
|
||||
enumValues: mergeJsonValueArrays(current.enumValues, next.enumValues),
|
||||
defaultValue,
|
||||
deprecated: current.deprecated || next.deprecated,
|
||||
sensitive: current.sensitive || next.sensitive,
|
||||
tags: [...new Set([...current.tags, ...next.tags])].toSorted((left, right) =>
|
||||
left.localeCompare(right),
|
||||
),
|
||||
label,
|
||||
help,
|
||||
hasChildren: current.hasChildren || next.hasChildren,
|
||||
};
|
||||
}
|
||||
|
||||
function resolveEntryKind(configPath: string): ConfigDocBaselineKind {
|
||||
if (configPath.startsWith("channels.")) {
|
||||
return "channel";
|
||||
}
|
||||
if (configPath.startsWith("plugins.entries.")) {
|
||||
return "plugin";
|
||||
}
|
||||
return "core";
|
||||
}
|
||||
|
||||
async function resolveFirstExistingPath(candidates: string[]): Promise<string | null> {
|
||||
for (const candidate of candidates) {
|
||||
try {
|
||||
await fs.access(candidate);
|
||||
return candidate;
|
||||
} catch {
|
||||
// Keep scanning for other source file variants.
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function isChannelPlugin(value: unknown): value is ChannelPlugin {
|
||||
if (!value || typeof value !== "object") {
|
||||
return false;
|
||||
}
|
||||
const candidate = value as { id?: unknown; meta?: unknown; capabilities?: unknown };
|
||||
return typeof candidate.id === "string" && typeof candidate.meta === "object";
|
||||
}
|
||||
|
||||
async function importChannelPluginModule(rootDir: string): Promise<ChannelPlugin> {
|
||||
const modulePath = await resolveFirstExistingPath([
|
||||
path.join(rootDir, "src", "channel.ts"),
|
||||
path.join(rootDir, "src", "channel.js"),
|
||||
path.join(rootDir, "src", "plugin.ts"),
|
||||
path.join(rootDir, "src", "plugin.js"),
|
||||
path.join(rootDir, "src", "index.ts"),
|
||||
path.join(rootDir, "src", "index.js"),
|
||||
path.join(rootDir, "src", "channel.mts"),
|
||||
path.join(rootDir, "src", "channel.mjs"),
|
||||
path.join(rootDir, "src", "plugin.mts"),
|
||||
path.join(rootDir, "src", "plugin.mjs"),
|
||||
]);
|
||||
if (!modulePath) {
|
||||
throw new Error(`channel source not found under ${rootDir}`);
|
||||
}
|
||||
|
||||
const imported = (await import(pathToFileURL(modulePath).href)) as Record<string, unknown>;
|
||||
for (const value of Object.values(imported)) {
|
||||
if (isChannelPlugin(value)) {
|
||||
return value;
|
||||
}
|
||||
if (typeof value === "function" && value.length === 0) {
|
||||
const resolved = value();
|
||||
if (isChannelPlugin(resolved)) {
|
||||
return resolved;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(`channel plugin export not found in ${modulePath}`);
|
||||
}
|
||||
|
||||
async function loadBundledConfigSchemaResponse(): Promise<ConfigSchemaResponse> {
|
||||
const repoRoot = resolveRepoRoot();
|
||||
const env = {
|
||||
...process.env,
|
||||
HOME: os.tmpdir(),
|
||||
OPENCLAW_STATE_DIR: path.join(os.tmpdir(), "openclaw-config-doc-baseline-state"),
|
||||
OPENCLAW_BUNDLED_PLUGINS_DIR: path.join(repoRoot, "extensions"),
|
||||
};
|
||||
|
||||
const manifestRegistry = loadPluginManifestRegistry({
|
||||
cache: false,
|
||||
env,
|
||||
config: {},
|
||||
});
|
||||
const channelPlugins = await Promise.all(
|
||||
manifestRegistry.plugins
|
||||
.filter((plugin) => plugin.origin === "bundled" && plugin.channels.length > 0)
|
||||
.map(async (plugin) => ({
|
||||
id: plugin.id,
|
||||
channel: await importChannelPluginModule(plugin.rootDir),
|
||||
})),
|
||||
);
|
||||
|
||||
return buildConfigSchema({
|
||||
plugins: manifestRegistry.plugins
|
||||
.filter((plugin) => plugin.origin === "bundled")
|
||||
.map((plugin) => ({
|
||||
id: plugin.id,
|
||||
name: plugin.name,
|
||||
description: plugin.description,
|
||||
configUiHints: plugin.configUiHints,
|
||||
configSchema: plugin.configSchema,
|
||||
})),
|
||||
channels: channelPlugins.map((entry) => ({
|
||||
id: entry.channel.id,
|
||||
label: entry.channel.meta.label,
|
||||
description: entry.channel.meta.blurb,
|
||||
configSchema: entry.channel.configSchema?.schema,
|
||||
configUiHints: entry.channel.configSchema?.uiHints,
|
||||
})),
|
||||
});
|
||||
}
|
||||
|
||||
export function collectConfigDocBaselineEntries(
|
||||
schema: JsonSchemaObject,
|
||||
uiHints: ConfigSchemaResponse["uiHints"],
|
||||
pathPrefix = "",
|
||||
required = false,
|
||||
entries: ConfigDocBaselineEntry[] = [],
|
||||
): ConfigDocBaselineEntry[] {
|
||||
const normalizedPath = normalizeBaselinePath(pathPrefix);
|
||||
if (normalizedPath) {
|
||||
const hint = resolveUiHintMatch(uiHints, normalizedPath);
|
||||
entries.push({
|
||||
path: normalizedPath,
|
||||
kind: resolveEntryKind(normalizedPath),
|
||||
type: normalizeTypeValue(schema.type),
|
||||
required,
|
||||
enumValues: normalizeEnumValues(schema.enum),
|
||||
defaultValue: normalizeJsonValue(schema.default),
|
||||
deprecated: schema.deprecated === true,
|
||||
sensitive: hint?.sensitive === true,
|
||||
tags: [...(hint?.tags ?? [])].toSorted((left, right) => left.localeCompare(right)),
|
||||
label: hint?.label,
|
||||
help: hint?.help,
|
||||
hasChildren: schemaHasChildren(schema),
|
||||
});
|
||||
}
|
||||
|
||||
const requiredKeys = new Set(schema.required ?? []);
|
||||
for (const key of Object.keys(schema.properties ?? {}).toSorted((left, right) =>
|
||||
left.localeCompare(right),
|
||||
)) {
|
||||
const child = asSchemaObject(schema.properties?.[key]);
|
||||
if (!child) {
|
||||
continue;
|
||||
}
|
||||
const childPath = normalizedPath ? `${normalizedPath}.${key}` : key;
|
||||
collectConfigDocBaselineEntries(child, uiHints, childPath, requiredKeys.has(key), entries);
|
||||
}
|
||||
|
||||
if (schema.additionalProperties && typeof schema.additionalProperties === "object") {
|
||||
const wildcard = asSchemaObject(schema.additionalProperties);
|
||||
if (wildcard) {
|
||||
const wildcardPath = normalizedPath ? `${normalizedPath}.*` : "*";
|
||||
collectConfigDocBaselineEntries(wildcard, uiHints, wildcardPath, false, entries);
|
||||
}
|
||||
}
|
||||
|
||||
if (Array.isArray(schema.items)) {
|
||||
for (const item of schema.items) {
|
||||
const child = asSchemaObject(item);
|
||||
if (!child) {
|
||||
continue;
|
||||
}
|
||||
const itemPath = normalizedPath ? `${normalizedPath}.*` : "*";
|
||||
collectConfigDocBaselineEntries(child, uiHints, itemPath, false, entries);
|
||||
}
|
||||
} else if (schema.items && typeof schema.items === "object") {
|
||||
const itemSchema = asSchemaObject(schema.items);
|
||||
if (itemSchema) {
|
||||
const itemPath = normalizedPath ? `${normalizedPath}.*` : "*";
|
||||
collectConfigDocBaselineEntries(itemSchema, uiHints, itemPath, false, entries);
|
||||
}
|
||||
}
|
||||
|
||||
for (const branchSchema of [schema.oneOf, schema.anyOf, schema.allOf]) {
|
||||
for (const branch of branchSchema ?? []) {
|
||||
const child = asSchemaObject(branch);
|
||||
if (!child) {
|
||||
continue;
|
||||
}
|
||||
collectConfigDocBaselineEntries(child, uiHints, normalizedPath, required, entries);
|
||||
}
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
export function dedupeConfigDocBaselineEntries(
|
||||
entries: ConfigDocBaselineEntry[],
|
||||
): ConfigDocBaselineEntry[] {
|
||||
const byPath = new Map<string, ConfigDocBaselineEntry>();
|
||||
for (const entry of entries) {
|
||||
const current = byPath.get(entry.path);
|
||||
byPath.set(entry.path, current ? mergeConfigDocBaselineEntry(current, entry) : entry);
|
||||
}
|
||||
return [...byPath.values()].toSorted((left, right) => left.path.localeCompare(right.path));
|
||||
}
|
||||
|
||||
export async function buildConfigDocBaseline(): Promise<ConfigDocBaseline> {
|
||||
const response = await loadBundledConfigSchemaResponse();
|
||||
const schemaRoot = asSchemaObject(response.schema);
|
||||
if (!schemaRoot) {
|
||||
throw new Error("config schema root is not an object");
|
||||
}
|
||||
const entries = dedupeConfigDocBaselineEntries(
|
||||
collectConfigDocBaselineEntries(schemaRoot, response.uiHints),
|
||||
);
|
||||
return {
|
||||
generatedBy: GENERATED_BY,
|
||||
entries,
|
||||
};
|
||||
}
|
||||
|
||||
export async function renderConfigDocBaselineStatefile(
|
||||
baseline?: ConfigDocBaseline,
|
||||
): Promise<ConfigDocBaselineStatefileRender> {
|
||||
const resolvedBaseline = baseline ?? (await buildConfigDocBaseline());
|
||||
const json = `${JSON.stringify(resolvedBaseline, null, 2)}\n`;
|
||||
const metadataLine = JSON.stringify({
|
||||
generatedBy: GENERATED_BY,
|
||||
recordType: "meta",
|
||||
totalPaths: resolvedBaseline.entries.length,
|
||||
});
|
||||
const entryLines = resolvedBaseline.entries.map((entry) =>
|
||||
JSON.stringify({
|
||||
recordType: "path",
|
||||
...entry,
|
||||
}),
|
||||
);
|
||||
return {
|
||||
json,
|
||||
jsonl: `${[metadataLine, ...entryLines].join("\n")}\n`,
|
||||
baseline: resolvedBaseline,
|
||||
};
|
||||
}
|
||||
|
||||
async function readIfExists(filePath: string): Promise<string | null> {
|
||||
try {
|
||||
return await fs.readFile(filePath, "utf8");
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
async function writeIfChanged(filePath: string, next: string): Promise<boolean> {
|
||||
const current = await readIfExists(filePath);
|
||||
if (current === next) {
|
||||
return false;
|
||||
}
|
||||
await fs.mkdir(path.dirname(filePath), { recursive: true });
|
||||
await fs.writeFile(filePath, next, "utf8");
|
||||
return true;
|
||||
}
|
||||
|
||||
export async function writeConfigDocBaselineStatefile(params?: {
|
||||
repoRoot?: string;
|
||||
check?: boolean;
|
||||
jsonPath?: string;
|
||||
statefilePath?: string;
|
||||
}): Promise<ConfigDocBaselineStatefileWriteResult> {
|
||||
const repoRoot = params?.repoRoot ?? resolveRepoRoot();
|
||||
const jsonPath = path.resolve(repoRoot, params?.jsonPath ?? DEFAULT_JSON_OUTPUT);
|
||||
const statefilePath = path.resolve(repoRoot, params?.statefilePath ?? DEFAULT_STATEFILE_OUTPUT);
|
||||
const rendered = await renderConfigDocBaselineStatefile();
|
||||
const currentJson = await readIfExists(jsonPath);
|
||||
const currentStatefile = await readIfExists(statefilePath);
|
||||
const changed = currentJson !== rendered.json || currentStatefile !== rendered.jsonl;
|
||||
|
||||
if (params?.check) {
|
||||
return {
|
||||
changed,
|
||||
wrote: false,
|
||||
jsonPath,
|
||||
statefilePath,
|
||||
};
|
||||
}
|
||||
|
||||
const wroteJson = await writeIfChanged(jsonPath, rendered.json);
|
||||
const wroteStatefile = await writeIfChanged(statefilePath, rendered.jsonl);
|
||||
return {
|
||||
changed,
|
||||
wrote: wroteJson || wroteStatefile,
|
||||
jsonPath,
|
||||
statefilePath,
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Normalizes a config path (as used in FIELD_HELP and related docs) into the
 * canonical baseline-path form. Public wrapper that delegates to the
 * module-internal `normalizeBaselinePath`.
 */
export function normalizeConfigDocBaselineHelpPath(pathValue: string): string {
  return normalizeBaselinePath(pathValue);
}
|
||||
|
||||
export function getNormalizedFieldHelp(): Record<string, string> {
|
||||
return Object.fromEntries(
|
||||
Object.entries(FIELD_HELP)
|
||||
.map(([configPath, help]) => [normalizeBaselinePath(configPath), help] as const)
|
||||
.toSorted(([left], [right]) => left.localeCompare(right)),
|
||||
);
|
||||
}
|
||||
@ -31,6 +31,7 @@ const AGENT_HEARTBEAT_KEYS = new Set([
|
||||
"ackMaxChars",
|
||||
"suppressToolErrorWarnings",
|
||||
"lightContext",
|
||||
"isolatedSession",
|
||||
]);
|
||||
|
||||
const CHANNEL_HEARTBEAT_KEYS = new Set(["showOk", "showAlerts", "useIndicator"]);
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterAll, afterEach, describe, expect, it } from "vitest";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
import { clearPluginDiscoveryCache } from "../plugins/discovery.js";
|
||||
import {
|
||||
clearPluginManifestRegistryCache,
|
||||
@ -11,7 +11,6 @@ import { validateConfigObject } from "./config.js";
|
||||
import { applyPluginAutoEnable } from "./plugin-auto-enable.js";
|
||||
|
||||
const tempDirs: string[] = [];
|
||||
const previousUmask = process.umask(0o022);
|
||||
|
||||
function chmodSafeDir(dir: string) {
|
||||
if (process.platform === "win32") {
|
||||
@ -126,10 +125,6 @@ afterEach(() => {
|
||||
}
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
process.umask(previousUmask);
|
||||
});
|
||||
|
||||
describe("applyPluginAutoEnable", () => {
|
||||
it("auto-enables built-in channels and appends to existing allowlist", () => {
|
||||
const result = applyWithSlackConfig({ plugins: { allow: ["telegram"] } });
|
||||
|
||||
@ -2,6 +2,7 @@ import fs from "node:fs";
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { normalizeConfigDocBaselineHelpPath } from "./doc-baseline.js";
|
||||
import { FIELD_HELP } from "./schema.help.js";
|
||||
import {
|
||||
describeTalkSilenceTimeoutDefaults,
|
||||
@ -17,8 +18,18 @@ function readRepoFile(relativePath: string): string {
|
||||
describe("talk silence timeout defaults", () => {
|
||||
it("keeps help text and docs aligned with the policy", () => {
|
||||
const defaultsDescription = describeTalkSilenceTimeoutDefaults();
|
||||
const baselineLines = readRepoFile("docs/.generated/config-baseline.jsonl")
|
||||
.trim()
|
||||
.split("\n")
|
||||
.map((line) => JSON.parse(line) as { recordType: string; path?: string; help?: string });
|
||||
const talkEntry = baselineLines.find(
|
||||
(entry) =>
|
||||
entry.recordType === "path" &&
|
||||
entry.path === normalizeConfigDocBaselineHelpPath("talk.silenceTimeoutMs"),
|
||||
);
|
||||
|
||||
expect(FIELD_HELP["talk.silenceTimeoutMs"]).toContain(defaultsDescription);
|
||||
expect(talkEntry?.help).toContain(defaultsDescription);
|
||||
expect(readRepoFile("docs/gateway/configuration-reference.md")).toContain(defaultsDescription);
|
||||
expect(readRepoFile("docs/nodes/talk.md")).toContain(defaultsDescription);
|
||||
});
|
||||
|
||||
@ -253,6 +253,13 @@ export type AgentDefaultsConfig = {
|
||||
* Lightweight mode keeps only HEARTBEAT.md from workspace bootstrap files.
|
||||
*/
|
||||
lightContext?: boolean;
|
||||
/**
|
||||
* If true, run heartbeat turns in an isolated session with no prior
|
||||
* conversation history. The heartbeat only sees its bootstrap context
|
||||
* (HEARTBEAT.md when lightContext is also enabled). Dramatically reduces
|
||||
* per-heartbeat token cost by avoiding the full session transcript.
|
||||
*/
|
||||
isolatedSession?: boolean;
|
||||
/**
|
||||
* When enabled, deliver the model's reasoning payload for heartbeat runs (when available)
|
||||
* as a separate message prefixed with `Reasoning:` (same as `/reasoning on`).
|
||||
|
||||
@ -34,6 +34,7 @@ export const HeartbeatSchema = z
|
||||
ackMaxChars: z.number().int().nonnegative().optional(),
|
||||
suppressToolErrorWarnings: z.boolean().optional(),
|
||||
lightContext: z.boolean().optional(),
|
||||
isolatedSession: z.boolean().optional(),
|
||||
})
|
||||
.strict()
|
||||
.superRefine((val, ctx) => {
|
||||
|
||||
@ -29,6 +29,9 @@ const launchdRestartHandoffState = vi.hoisted(() => ({
|
||||
isCurrentProcessLaunchdServiceLabel: vi.fn<(label: string) => boolean>(() => false),
|
||||
scheduleDetachedLaunchdRestartHandoff: vi.fn((_params: unknown) => ({ ok: true, pid: 7331 })),
|
||||
}));
|
||||
const cleanStaleGatewayProcessesSync = vi.hoisted(() =>
|
||||
vi.fn<(port?: number) => number[]>(() => []),
|
||||
);
|
||||
const defaultProgramArguments = ["node", "-e", "process.exit(0)"];
|
||||
|
||||
function expectLaunchctlEnableBootstrapOrder(env: Record<string, string | undefined>) {
|
||||
@ -89,6 +92,10 @@ vi.mock("./launchd-restart-handoff.js", () => ({
|
||||
launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff(params),
|
||||
}));
|
||||
|
||||
vi.mock("../infra/restart-stale-pids.js", () => ({
|
||||
cleanStaleGatewayProcessesSync: (port?: number) => cleanStaleGatewayProcessesSync(port),
|
||||
}));
|
||||
|
||||
vi.mock("node:fs/promises", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("node:fs/promises")>();
|
||||
const wrapped = {
|
||||
@ -151,6 +158,8 @@ beforeEach(() => {
|
||||
state.dirModes.clear();
|
||||
state.files.clear();
|
||||
state.fileModes.clear();
|
||||
cleanStaleGatewayProcessesSync.mockReset();
|
||||
cleanStaleGatewayProcessesSync.mockReturnValue([]);
|
||||
launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReset();
|
||||
launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReturnValue(false);
|
||||
launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff.mockReset();
|
||||
@ -328,7 +337,10 @@ describe("launchd install", () => {
|
||||
});
|
||||
|
||||
it("restarts LaunchAgent with kickstart and no bootout", async () => {
|
||||
const env = createDefaultLaunchdEnv();
|
||||
const env = {
|
||||
...createDefaultLaunchdEnv(),
|
||||
OPENCLAW_GATEWAY_PORT: "18789",
|
||||
};
|
||||
const result = await restartLaunchAgent({
|
||||
env,
|
||||
stdout: new PassThrough(),
|
||||
@ -338,11 +350,38 @@ describe("launchd install", () => {
|
||||
const label = "ai.openclaw.gateway";
|
||||
const serviceId = `${domain}/${label}`;
|
||||
expect(result).toEqual({ outcome: "completed" });
|
||||
expect(cleanStaleGatewayProcessesSync).toHaveBeenCalledWith(18789);
|
||||
expect(state.launchctlCalls).toContainEqual(["kickstart", "-k", serviceId]);
|
||||
expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false);
|
||||
expect(state.launchctlCalls.some((call) => call[0] === "bootstrap")).toBe(false);
|
||||
});
|
||||
|
||||
it("uses the configured gateway port for stale cleanup", async () => {
|
||||
const env = {
|
||||
...createDefaultLaunchdEnv(),
|
||||
OPENCLAW_GATEWAY_PORT: "19001",
|
||||
};
|
||||
|
||||
await restartLaunchAgent({
|
||||
env,
|
||||
stdout: new PassThrough(),
|
||||
});
|
||||
|
||||
expect(cleanStaleGatewayProcessesSync).toHaveBeenCalledWith(19001);
|
||||
});
|
||||
|
||||
it("skips stale cleanup when no explicit launch agent port can be resolved", async () => {
|
||||
const env = createDefaultLaunchdEnv();
|
||||
state.files.clear();
|
||||
|
||||
await restartLaunchAgent({
|
||||
env,
|
||||
stdout: new PassThrough(),
|
||||
});
|
||||
|
||||
expect(cleanStaleGatewayProcessesSync).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("falls back to bootstrap when kickstart cannot find the service", async () => {
|
||||
const env = createDefaultLaunchdEnv();
|
||||
state.kickstartError = "Could not find service";
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { parseStrictInteger, parseStrictPositiveInteger } from "../infra/parse-finite-number.js";
|
||||
import { cleanStaleGatewayProcessesSync } from "../infra/restart-stale-pids.js";
|
||||
import {
|
||||
GATEWAY_LAUNCH_AGENT_LABEL,
|
||||
resolveGatewayServiceDescription,
|
||||
@ -113,6 +114,44 @@ async function execLaunchctl(
|
||||
return await execFileUtf8(file, fileArgs, isWindows ? { windowsHide: true } : {});
|
||||
}
|
||||
|
||||
function parseGatewayPortFromProgramArguments(
|
||||
programArguments: string[] | undefined,
|
||||
): number | null {
|
||||
if (!Array.isArray(programArguments) || programArguments.length === 0) {
|
||||
return null;
|
||||
}
|
||||
for (let index = 0; index < programArguments.length; index += 1) {
|
||||
const current = programArguments[index]?.trim();
|
||||
if (!current) {
|
||||
continue;
|
||||
}
|
||||
if (current === "--port") {
|
||||
const next = parseStrictPositiveInteger(programArguments[index + 1] ?? "");
|
||||
if (next !== undefined) {
|
||||
return next;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (current.startsWith("--port=")) {
|
||||
const value = parseStrictPositiveInteger(current.slice("--port=".length));
|
||||
if (value !== undefined) {
|
||||
return value;
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
async function resolveLaunchAgentGatewayPort(env: GatewayServiceEnv): Promise<number | null> {
|
||||
const command = await readLaunchAgentProgramArguments(env).catch(() => null);
|
||||
const fromArgs = parseGatewayPortFromProgramArguments(command?.programArguments);
|
||||
if (fromArgs !== null) {
|
||||
return fromArgs;
|
||||
}
|
||||
const fromEnv = parseStrictPositiveInteger(env.OPENCLAW_GATEWAY_PORT ?? "");
|
||||
return fromEnv ?? null;
|
||||
}
|
||||
|
||||
function resolveGuiDomain(): string {
|
||||
if (typeof process.getuid !== "function") {
|
||||
return "gui/501";
|
||||
@ -514,6 +553,11 @@ export async function restartLaunchAgent({
|
||||
return { outcome: "scheduled" };
|
||||
}
|
||||
|
||||
const cleanupPort = await resolveLaunchAgentGatewayPort(serviceEnv);
|
||||
if (cleanupPort !== null) {
|
||||
cleanStaleGatewayProcessesSync(cleanupPort);
|
||||
}
|
||||
|
||||
const start = await execLaunchctl(["kickstart", "-k", serviceTarget]);
|
||||
if (start.code === 0) {
|
||||
writeLaunchAgentActionLine(stdout, "Restarted LaunchAgent", serviceTarget);
|
||||
|
||||
@ -1,5 +1,8 @@
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterAll, beforeAll, describe, expect, test } from "vitest";
|
||||
import {
|
||||
BACKEND_GATEWAY_CLIENT,
|
||||
connectReq,
|
||||
CONTROL_UI_CLIENT,
|
||||
ConnectErrorDetailCodes,
|
||||
@ -144,6 +147,50 @@ describe("gateway auth compatibility baseline", () => {
|
||||
ws.close();
|
||||
}
|
||||
});
|
||||
|
||||
test("keeps local backend device-token reconnects out of pairing", async () => {
|
||||
const identityPath = path.join(
|
||||
os.tmpdir(),
|
||||
`openclaw-backend-device-${process.pid}-${port}.json`,
|
||||
);
|
||||
const { loadOrCreateDeviceIdentity, publicKeyRawBase64UrlFromPem } =
|
||||
await import("../infra/device-identity.js");
|
||||
const { approveDevicePairing, requestDevicePairing, rotateDeviceToken } =
|
||||
await import("../infra/device-pairing.js");
|
||||
|
||||
const identity = loadOrCreateDeviceIdentity(identityPath);
|
||||
const pending = await requestDevicePairing({
|
||||
deviceId: identity.deviceId,
|
||||
publicKey: publicKeyRawBase64UrlFromPem(identity.publicKeyPem),
|
||||
clientId: BACKEND_GATEWAY_CLIENT.id,
|
||||
clientMode: BACKEND_GATEWAY_CLIENT.mode,
|
||||
role: "operator",
|
||||
scopes: ["operator.admin"],
|
||||
});
|
||||
await approveDevicePairing(pending.request.requestId);
|
||||
|
||||
const rotated = await rotateDeviceToken({
|
||||
deviceId: identity.deviceId,
|
||||
role: "operator",
|
||||
scopes: ["operator.admin"],
|
||||
});
|
||||
expect(rotated?.token).toBeTruthy();
|
||||
|
||||
const ws = await openWs(port);
|
||||
try {
|
||||
const res = await connectReq(ws, {
|
||||
skipDefaultAuth: true,
|
||||
client: { ...BACKEND_GATEWAY_CLIENT },
|
||||
deviceIdentityPath: identityPath,
|
||||
deviceToken: String(rotated?.token ?? ""),
|
||||
scopes: ["operator.admin"],
|
||||
});
|
||||
expect(res.ok).toBe(true);
|
||||
expect((res.payload as { type?: string } | undefined)?.type).toBe("hello-ok");
|
||||
} finally {
|
||||
ws.close();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe("password mode", () => {
|
||||
|
||||
@ -89,7 +89,7 @@ describe("handshake auth helpers", () => {
|
||||
).toBe(false);
|
||||
});
|
||||
|
||||
it("skips backend self-pairing only for local shared-secret backend clients", () => {
|
||||
it("skips backend self-pairing for local trusted backend clients", () => {
|
||||
const connectParams = {
|
||||
client: {
|
||||
id: GATEWAY_CLIENT_IDS.GATEWAY_CLIENT,
|
||||
@ -106,6 +106,15 @@ describe("handshake auth helpers", () => {
|
||||
authMethod: "token",
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
shouldSkipBackendSelfPairing({
|
||||
connectParams,
|
||||
isLocalClient: true,
|
||||
hasBrowserOriginHeader: false,
|
||||
sharedAuthOk: false,
|
||||
authMethod: "device-token",
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
shouldSkipBackendSelfPairing({
|
||||
connectParams,
|
||||
|
||||
@ -74,11 +74,14 @@ export function shouldSkipBackendSelfPairing(params: {
|
||||
return false;
|
||||
}
|
||||
const usesSharedSecretAuth = params.authMethod === "token" || params.authMethod === "password";
|
||||
const usesDeviceTokenAuth = params.authMethod === "device-token";
|
||||
// `authMethod === "device-token"` only reaches this helper after the caller
|
||||
// has already accepted auth (`authOk === true`), so a separate
|
||||
// `deviceTokenAuthOk` flag would be redundant here.
|
||||
return (
|
||||
params.isLocalClient &&
|
||||
!params.hasBrowserOriginHeader &&
|
||||
params.sharedAuthOk &&
|
||||
usesSharedSecretAuth
|
||||
((params.sharedAuthOk && usesSharedSecretAuth) || usesDeviceTokenAuth)
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { loadDotEnv } from "./dotenv.js";
|
||||
|
||||
async function writeEnvFile(filePath: string, contents: string) {
|
||||
@ -11,11 +11,10 @@ async function writeEnvFile(filePath: string, contents: string) {
|
||||
|
||||
async function withIsolatedEnvAndCwd(run: () => Promise<void>) {
|
||||
const prevEnv = { ...process.env };
|
||||
const prevCwd = process.cwd();
|
||||
try {
|
||||
await run();
|
||||
} finally {
|
||||
process.chdir(prevCwd);
|
||||
vi.restoreAllMocks();
|
||||
for (const key of Object.keys(process.env)) {
|
||||
if (!(key in prevEnv)) {
|
||||
delete process.env[key];
|
||||
@ -54,7 +53,7 @@ describe("loadDotEnv", () => {
|
||||
await writeEnvFile(path.join(stateDir, ".env"), "FOO=from-global\nBAR=1\n");
|
||||
await writeEnvFile(path.join(cwdDir, ".env"), "FOO=from-cwd\n");
|
||||
|
||||
process.chdir(cwdDir);
|
||||
vi.spyOn(process, "cwd").mockReturnValue(cwdDir);
|
||||
delete process.env.FOO;
|
||||
delete process.env.BAR;
|
||||
|
||||
@ -74,7 +73,7 @@ describe("loadDotEnv", () => {
|
||||
await writeEnvFile(path.join(stateDir, ".env"), "FOO=from-global\n");
|
||||
await writeEnvFile(path.join(cwdDir, ".env"), "FOO=from-cwd\n");
|
||||
|
||||
process.chdir(cwdDir);
|
||||
vi.spyOn(process, "cwd").mockReturnValue(cwdDir);
|
||||
|
||||
loadDotEnv({ quiet: true });
|
||||
|
||||
@ -87,7 +86,7 @@ describe("loadDotEnv", () => {
|
||||
await withIsolatedEnvAndCwd(async () => {
|
||||
await withDotEnvFixture(async ({ cwdDir, stateDir }) => {
|
||||
await writeEnvFile(path.join(stateDir, ".env"), "FOO=from-global\n");
|
||||
process.chdir(cwdDir);
|
||||
vi.spyOn(process, "cwd").mockReturnValue(cwdDir);
|
||||
delete process.env.FOO;
|
||||
|
||||
loadDotEnv({ quiet: true });
|
||||
|
||||
@ -42,7 +42,6 @@ describe("git commit resolution", () => {
|
||||
const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "../..");
|
||||
|
||||
beforeEach(async () => {
|
||||
process.chdir(repoRoot);
|
||||
vi.restoreAllMocks();
|
||||
vi.doUnmock("node:fs");
|
||||
vi.doUnmock("node:module");
|
||||
@ -52,7 +51,6 @@ describe("git commit resolution", () => {
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
process.chdir(repoRoot);
|
||||
vi.restoreAllMocks();
|
||||
vi.doUnmock("node:fs");
|
||||
vi.doUnmock("node:module");
|
||||
@ -87,9 +85,9 @@ describe("git commit resolution", () => {
|
||||
.trim()
|
||||
.slice(0, 7);
|
||||
|
||||
process.chdir(otherRepo);
|
||||
const { resolveCommitHash } = await import("./git-commit.js");
|
||||
const entryModuleUrl = pathToFileURL(path.join(repoRoot, "src", "entry.ts")).href;
|
||||
vi.spyOn(process, "cwd").mockReturnValue(otherRepo);
|
||||
|
||||
expect(resolveCommitHash({ moduleUrl: entryModuleUrl })).toBe(repoHead);
|
||||
expect(resolveCommitHash({ moduleUrl: entryModuleUrl })).not.toBe(otherHead);
|
||||
|
||||
@ -65,6 +65,7 @@ describe("runHeartbeatOnce – heartbeat model override", () => {
|
||||
model?: string;
|
||||
suppressToolErrorWarnings?: boolean;
|
||||
lightContext?: boolean;
|
||||
isolatedSession?: boolean;
|
||||
}) {
|
||||
return withHeartbeatFixture(async ({ tmpDir, storePath, seedSession }) => {
|
||||
const cfg: OpenClawConfig = {
|
||||
@ -77,6 +78,7 @@ describe("runHeartbeatOnce – heartbeat model override", () => {
|
||||
model: params.model,
|
||||
suppressToolErrorWarnings: params.suppressToolErrorWarnings,
|
||||
lightContext: params.lightContext,
|
||||
isolatedSession: params.isolatedSession,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -133,6 +135,72 @@ describe("runHeartbeatOnce – heartbeat model override", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("uses isolated session key when isolatedSession is enabled", async () => {
|
||||
await withHeartbeatFixture(async ({ tmpDir, storePath, seedSession }) => {
|
||||
const cfg: OpenClawConfig = {
|
||||
agents: {
|
||||
defaults: {
|
||||
workspace: tmpDir,
|
||||
heartbeat: {
|
||||
every: "5m",
|
||||
target: "whatsapp",
|
||||
isolatedSession: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
channels: { whatsapp: { allowFrom: ["*"] } },
|
||||
session: { store: storePath },
|
||||
};
|
||||
const sessionKey = resolveMainSessionKey(cfg);
|
||||
await seedSession(sessionKey, { lastChannel: "whatsapp", lastTo: "+1555" });
|
||||
|
||||
const replySpy = vi.spyOn(replyModule, "getReplyFromConfig");
|
||||
replySpy.mockResolvedValue({ text: "HEARTBEAT_OK" });
|
||||
|
||||
await runHeartbeatOnce({
|
||||
cfg,
|
||||
deps: { getQueueSize: () => 0, nowMs: () => 0 },
|
||||
});
|
||||
|
||||
expect(replySpy).toHaveBeenCalledTimes(1);
|
||||
const ctx = replySpy.mock.calls[0]?.[0];
|
||||
// Isolated heartbeat runs use a dedicated session key with :heartbeat suffix
|
||||
expect(ctx.SessionKey).toBe(`${sessionKey}:heartbeat`);
|
||||
});
|
||||
});
|
||||
|
||||
it("uses main session key when isolatedSession is not set", async () => {
|
||||
await withHeartbeatFixture(async ({ tmpDir, storePath, seedSession }) => {
|
||||
const cfg: OpenClawConfig = {
|
||||
agents: {
|
||||
defaults: {
|
||||
workspace: tmpDir,
|
||||
heartbeat: {
|
||||
every: "5m",
|
||||
target: "whatsapp",
|
||||
},
|
||||
},
|
||||
},
|
||||
channels: { whatsapp: { allowFrom: ["*"] } },
|
||||
session: { store: storePath },
|
||||
};
|
||||
const sessionKey = resolveMainSessionKey(cfg);
|
||||
await seedSession(sessionKey, { lastChannel: "whatsapp", lastTo: "+1555" });
|
||||
|
||||
const replySpy = vi.spyOn(replyModule, "getReplyFromConfig");
|
||||
replySpy.mockResolvedValue({ text: "HEARTBEAT_OK" });
|
||||
|
||||
await runHeartbeatOnce({
|
||||
cfg,
|
||||
deps: { getQueueSize: () => 0, nowMs: () => 0 },
|
||||
});
|
||||
|
||||
expect(replySpy).toHaveBeenCalledTimes(1);
|
||||
const ctx = replySpy.mock.calls[0]?.[0];
|
||||
expect(ctx.SessionKey).toBe(sessionKey);
|
||||
});
|
||||
});
|
||||
|
||||
it("passes per-agent heartbeat model override (merged with defaults)", async () => {
|
||||
await withHeartbeatFixture(async ({ tmpDir, storePath, seedSession }) => {
|
||||
const cfg: OpenClawConfig = {
|
||||
|
||||
@ -35,6 +35,7 @@ import {
|
||||
updateSessionStore,
|
||||
} from "../config/sessions.js";
|
||||
import type { AgentDefaultsConfig } from "../config/types.agent-defaults.js";
|
||||
import { resolveCronSession } from "../cron/isolated-agent/session.js";
|
||||
import { createSubsystemLogger } from "../logging/subsystem.js";
|
||||
import { getQueueSize } from "../process/command-queue.js";
|
||||
import { CommandLane } from "../process/lanes.js";
|
||||
@ -659,6 +660,30 @@ export async function runHeartbeatOnce(opts: {
|
||||
}
|
||||
const { entry, sessionKey, storePath } = preflight.session;
|
||||
const previousUpdatedAt = entry?.updatedAt;
|
||||
|
||||
// When isolatedSession is enabled, create a fresh session via the same
|
||||
// pattern as cron sessionTarget: "isolated". This gives the heartbeat
|
||||
// a new session ID (empty transcript) each run, avoiding the cost of
|
||||
// sending the full conversation history (~100K tokens) to the LLM.
|
||||
// Delivery routing still uses the main session entry (lastChannel, lastTo).
|
||||
const useIsolatedSession = heartbeat?.isolatedSession === true;
|
||||
let runSessionKey = sessionKey;
|
||||
let runStorePath = storePath;
|
||||
if (useIsolatedSession) {
|
||||
const isolatedKey = `${sessionKey}:heartbeat`;
|
||||
const cronSession = resolveCronSession({
|
||||
cfg,
|
||||
sessionKey: isolatedKey,
|
||||
agentId,
|
||||
nowMs: startedAt,
|
||||
forceNew: true,
|
||||
});
|
||||
cronSession.store[isolatedKey] = cronSession.sessionEntry;
|
||||
await saveSessionStore(cronSession.storePath, cronSession.store);
|
||||
runSessionKey = isolatedKey;
|
||||
runStorePath = cronSession.storePath;
|
||||
}
|
||||
|
||||
const delivery = resolveHeartbeatDeliveryTarget({ cfg, entry, heartbeat });
|
||||
const heartbeatAccountId = heartbeat?.accountId?.trim();
|
||||
if (delivery.reason === "unknown-account") {
|
||||
@ -707,7 +732,7 @@ export async function runHeartbeatOnce(opts: {
|
||||
AccountId: delivery.accountId,
|
||||
MessageThreadId: delivery.threadId,
|
||||
Provider: hasExecCompletion ? "exec-event" : hasCronEvents ? "cron-event" : "heartbeat",
|
||||
SessionKey: sessionKey,
|
||||
SessionKey: runSessionKey,
|
||||
};
|
||||
if (!visibility.showAlerts && !visibility.showOk && !visibility.useIndicator) {
|
||||
emitHeartbeatEvent({
|
||||
@ -758,10 +783,11 @@ export async function runHeartbeatOnce(opts: {
|
||||
};
|
||||
|
||||
try {
|
||||
// Capture transcript state before the heartbeat run so we can prune if HEARTBEAT_OK
|
||||
// Capture transcript state before the heartbeat run so we can prune if HEARTBEAT_OK.
|
||||
// For isolated sessions, capture the isolated transcript (not the main session's).
|
||||
const transcriptState = await captureTranscriptState({
|
||||
storePath,
|
||||
sessionKey,
|
||||
storePath: runStorePath,
|
||||
sessionKey: runSessionKey,
|
||||
agentId,
|
||||
});
|
||||
|
||||
|
||||
@ -2,6 +2,7 @@ import { ReadableStream } from "node:stream/web";
|
||||
import { afterEach, beforeAll, describe, expect, it, vi } from "vitest";
|
||||
import type { VoyageBatchOutputLine, VoyageBatchRequest } from "./batch-voyage.js";
|
||||
import type { VoyageEmbeddingClient } from "./embeddings-voyage.js";
|
||||
import { mockPublicPinnedHostname } from "./test-helpers/ssrf.js";
|
||||
|
||||
// Mock internal.js if needed, but runWithConcurrency is simple enough to keep real.
|
||||
// We DO need to mock retryAsync to avoid actual delays/retries logic complicating tests
|
||||
@ -35,6 +36,7 @@ describe("runVoyageEmbeddingBatches", () => {
|
||||
it("successfully submits batch, waits, and streams results", async () => {
|
||||
const fetchMock = vi.fn();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
|
||||
// Sequence of fetch calls:
|
||||
// 1. Upload file
|
||||
@ -130,6 +132,7 @@ describe("runVoyageEmbeddingBatches", () => {
|
||||
it("handles empty lines and stream chunks correctly", async () => {
|
||||
const fetchMock = vi.fn();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
|
||||
// 1. Upload
|
||||
fetchMock.mockResolvedValueOnce({ ok: true, json: async () => ({ id: "f1" }) });
|
||||
|
||||
@ -9,6 +9,7 @@ import {
|
||||
isGeminiEmbedding2Model,
|
||||
resolveGeminiOutputDimensionality,
|
||||
} from "./embeddings-gemini.js";
|
||||
import { mockPublicPinnedHostname } from "./test-helpers/ssrf.js";
|
||||
|
||||
vi.mock("../agents/model-auth.js", async () => {
|
||||
const { createModelAuthMockModule } = await import("../test-utils/model-auth-mock.js");
|
||||
@ -67,6 +68,7 @@ async function createProviderWithFetch(
|
||||
options: Partial<Parameters<typeof createGeminiEmbeddingProvider>[0]> & { model: string },
|
||||
) {
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
mockResolvedProviderKey();
|
||||
const { provider } = await createGeminiEmbeddingProvider({
|
||||
config: {} as never,
|
||||
@ -449,6 +451,7 @@ describe("gemini model normalization", () => {
|
||||
it("handles models/ prefix for v2 model", async () => {
|
||||
const fetchMock = createGeminiFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
mockResolvedProviderKey();
|
||||
|
||||
const { provider } = await createGeminiEmbeddingProvider({
|
||||
@ -467,6 +470,7 @@ describe("gemini model normalization", () => {
|
||||
it("handles gemini/ prefix for v2 model", async () => {
|
||||
const fetchMock = createGeminiFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
mockResolvedProviderKey();
|
||||
|
||||
const { provider } = await createGeminiEmbeddingProvider({
|
||||
@ -485,6 +489,7 @@ describe("gemini model normalization", () => {
|
||||
it("handles google/ prefix for v2 model", async () => {
|
||||
const fetchMock = createGeminiFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
mockResolvedProviderKey();
|
||||
|
||||
const { provider } = await createGeminiEmbeddingProvider({
|
||||
|
||||
@ -33,6 +33,7 @@ async function createDefaultVoyageProvider(
|
||||
fetchMock: ReturnType<typeof createFetchMock>,
|
||||
) {
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
mockVoyageApiKey();
|
||||
return createVoyageEmbeddingProvider({
|
||||
config: {} as never,
|
||||
|
||||
@ -179,6 +179,7 @@ describe("embedding provider remote overrides", () => {
|
||||
it("builds Gemini embeddings requests with api key header", async () => {
|
||||
const fetchMock = createGeminiFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
mockResolvedProviderKey("provider-key");
|
||||
|
||||
const cfg = {
|
||||
@ -230,6 +231,7 @@ describe("embedding provider remote overrides", () => {
|
||||
it("uses GEMINI_API_KEY env indirection for Gemini remote apiKey", async () => {
|
||||
const fetchMock = createGeminiFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
vi.stubEnv("GEMINI_API_KEY", "env-gemini-key");
|
||||
|
||||
const result = await createEmbeddingProvider({
|
||||
@ -253,6 +255,7 @@ describe("embedding provider remote overrides", () => {
|
||||
it("builds Mistral embeddings requests with bearer auth", async () => {
|
||||
const fetchMock = createFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
mockResolvedProviderKey("provider-key");
|
||||
|
||||
const cfg = {
|
||||
@ -303,6 +306,7 @@ describe("embedding provider auto selection", () => {
|
||||
it("uses gemini when openai is missing", async () => {
|
||||
const fetchMock = createGeminiFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
vi.mocked(authModule.resolveApiKeyForProvider).mockImplementation(async ({ provider }) => {
|
||||
if (provider === "openai") {
|
||||
throw new Error('No API key found for provider "openai".');
|
||||
@ -329,6 +333,7 @@ describe("embedding provider auto selection", () => {
|
||||
json: async () => ({ data: [{ embedding: [1, 2, 3] }] }),
|
||||
}));
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
vi.mocked(authModule.resolveApiKeyForProvider).mockImplementation(async ({ provider }) => {
|
||||
if (provider === "openai") {
|
||||
return { apiKey: "openai-key", source: "env: OPENAI_API_KEY", mode: "api-key" };
|
||||
@ -357,6 +362,7 @@ describe("embedding provider auto selection", () => {
|
||||
it("uses mistral when openai/gemini/voyage are missing", async () => {
|
||||
const fetchMock = createFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
vi.mocked(authModule.resolveApiKeyForProvider).mockImplementation(async ({ provider }) => {
|
||||
if (provider === "mistral") {
|
||||
return { apiKey: "mistral-key", source: "env: MISTRAL_API_KEY", mode: "api-key" }; // pragma: allowlist secret
|
||||
|
||||
@ -6,6 +6,7 @@ import { useFastShortTimeouts } from "../../test/helpers/fast-short-timeouts.js"
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { getMemorySearchManager, type MemoryIndexManager } from "./index.js";
|
||||
import { createOpenAIEmbeddingProviderMock } from "./test-embeddings-mock.js";
|
||||
import { mockPublicPinnedHostname } from "./test-helpers/ssrf.js";
|
||||
import "./test-runtime-mocks.js";
|
||||
|
||||
const embedBatch = vi.fn(async (_texts: string[]) => [] as number[][]);
|
||||
@ -174,6 +175,7 @@ describe("memory indexing with OpenAI batches", () => {
|
||||
const { fetchMock } = createOpenAIBatchFetchMock();
|
||||
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
|
||||
try {
|
||||
if (!manager) {
|
||||
@ -216,6 +218,7 @@ describe("memory indexing with OpenAI batches", () => {
|
||||
});
|
||||
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
|
||||
try {
|
||||
if (!manager) {
|
||||
@ -255,6 +258,7 @@ describe("memory indexing with OpenAI batches", () => {
|
||||
});
|
||||
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
mockPublicPinnedHostname();
|
||||
|
||||
try {
|
||||
if (!manager) {
|
||||
|
||||
@ -174,8 +174,6 @@ export async function runNodeHost(opts: NodeHostRunOptions): Promise<void> {
|
||||
const scheme = gateway.tls ? "wss" : "ws";
|
||||
const url = `${scheme}://${host}:${port}`;
|
||||
const pathEnv = ensureNodePathEnv();
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(`node host PATH: ${pathEnv}`);
|
||||
|
||||
const client = new GatewayClient({
|
||||
url,
|
||||
|
||||
@ -126,5 +126,8 @@ describe("plugin-sdk subpath exports", () => {
|
||||
const twitch = await import("openclaw/plugin-sdk/twitch");
|
||||
expect(typeof twitch.DEFAULT_ACCOUNT_ID).toBe("string");
|
||||
expect(typeof twitch.normalizeAccountId).toBe("function");
|
||||
|
||||
const zalo = await import("openclaw/plugin-sdk/zalo");
|
||||
expect(typeof zalo.resolveClientIp).toBe("function");
|
||||
});
|
||||
});
|
||||
|
||||
@ -61,6 +61,7 @@ export { buildSecretInputSchema } from "./secret-input-schema.js";
|
||||
export { MarkdownConfigSchema } from "../config/zod-schema.core.js";
|
||||
export { waitForAbortSignal } from "../infra/abort-signal.js";
|
||||
export { createDedupeCache } from "../infra/dedupe.js";
|
||||
export { resolveClientIp } from "../gateway/net.js";
|
||||
export { emptyPluginConfigSchema } from "../plugins/config-schema.js";
|
||||
export type { PluginRuntime } from "../plugins/runtime/types.js";
|
||||
export type { OpenClawPluginApi } from "../plugins/types.js";
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user