Merge branch 'main' into codex/cortex-openclaw-integration

This commit is contained in:
Marc J Saint-jour 2026-03-12 20:45:16 -04:00 committed by GitHub
commit 8d3eee1a55
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
79 changed files with 1975 additions and 149 deletions

View File

@ -10,6 +10,8 @@ Docs: https://docs.openclaw.ai
- Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom @dzianisv @egkristi
- Control UI/dashboard-v2: refresh the gateway dashboard with modular overview, chat, config, agent, and session views, plus a command palette, mobile bottom tabs, and richer chat tools like slash commands, search, export, and pinned messages. (#41503) Thanks @BunsDev.
- Models/plugins: move Ollama, vLLM, and SGLang onto the provider-plugin architecture, with provider-owned onboarding, discovery, model-picker setup, and post-selection hooks so core provider wiring is more modular.
- OpenAI/GPT-5.4 fast mode: add configurable session-level fast toggles across `/fast`, TUI, Control UI, and ACP, with per-model config defaults and OpenAI/Codex request shaping.
- Anthropic/Claude fast mode: map the shared `/fast` toggle and `params.fastMode` to direct Anthropic API-key `service_tier` requests, with live verification for both Anthropic and OpenAI fast-mode tiers.
### Fixes
@ -33,8 +35,10 @@ Docs: https://docs.openclaw.ai
- Gateway/session discovery: discover disk-only and retired ACP session stores under custom templated `session.store` roots so ACP reconciliation, session-id/session-label targeting, and run-id fallback keep working after restart. (#44176) Thanks @gumadeiras.
- Plugins/env-scoped roots: fix plugin discovery/load caches and provenance tracking so same-process `HOME`/`OPENCLAW_HOME` changes no longer reuse stale plugin state or misreport `~/...` plugins as untracked. (#44046) Thanks @gumadeiras.
- Models/OpenRouter native ids: canonicalize native OpenRouter model keys across config writes, runtime lookups, fallback management, and `models list --plain`, and migrate legacy duplicated `openrouter/openrouter/...` config entries forward on write.
- Windows/native update: make package installs use the npm update path instead of the git path, carry portable Git into native Windows updates, and mirror the installer's Windows npm env so `openclaw update` no longer dies early on missing `git` or `node-llama-cpp` download setup.
- Sandbox/write: preserve pinned mutation-helper payload stdin so sandboxed `write` no longer reports success while creating empty files. (#43876) Thanks @glitch418x.
- Security/exec approvals: escape invisible Unicode format characters in approval prompts so zero-width command text renders as visible `\u{...}` escapes instead of spoofing the reviewed command. (`GHSA-pcqg-f7rg-xfvv`)(#43687) Thanks @EkiXu and @vincentkoc.
- Hooks/agent deliveries: dedupe repeated hook requests by optional idempotency key so webhook retries can reuse the first run instead of launching duplicate agent executions. (#44438) Thanks @vincentkoc.
- Security/exec detection: normalize compatibility Unicode and strip invisible formatting code points before obfuscation checks so zero-width and fullwidth command tricks no longer suppress heuristic detection. (`GHSA-9r3v-37xh-2cf6`)(#44091) Thanks @wooluo and @vincentkoc.
- Security/exec allowlist: preserve POSIX case sensitivity and keep `?` within a single path segment so exact-looking allowlist patterns no longer overmatch executables across case or directory boundaries. (`GHSA-f8r2-vg7x-gh8m`)(#43798) Thanks @zpbrent and @vincentkoc.
- Security/commands: require sender ownership for `/config` and `/debug` so authorized non-owner senders can no longer reach owner-only config and runtime debug surfaces. (`GHSA-r7vr-gr74-94p8`)(#44305) Thanks @tdjackey and @vincentkoc.

View File

@ -47,6 +47,7 @@ OpenClaw ships with the piai catalog. These providers require **no**
- Override per model via `agents.defaults.models["openai/<model>"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`)
- OpenAI Responses WebSocket warm-up defaults to enabled via `params.openaiWsWarmup` (`true`/`false`)
- OpenAI priority processing can be enabled via `agents.defaults.models["openai/<model>"].params.serviceTier`
- OpenAI fast mode can be enabled per model via `agents.defaults.models["<provider>/<model>"].params.fastMode`
```json5
{
@ -61,6 +62,7 @@ OpenClaw ships with the piai catalog. These providers require **no**
- Optional rotation: `ANTHROPIC_API_KEYS`, `ANTHROPIC_API_KEY_1`, `ANTHROPIC_API_KEY_2`, plus `OPENCLAW_LIVE_ANTHROPIC_KEY` (single override)
- Example model: `anthropic/claude-opus-4-6`
- CLI: `openclaw onboard --auth-choice token` (paste setup-token) or `openclaw models auth paste-token --provider anthropic`
- Direct API-key models support the shared `/fast` toggle and `params.fastMode`; OpenClaw maps that to Anthropic `service_tier` (`auto` vs `standard_only`)
- Policy note: setup-token support is technical compatibility; Anthropic has blocked some subscription usage outside Claude Code in the past. Verify current Anthropic terms and decide based on your risk tolerance.
- Recommendation: Anthropic API key auth is the safer, recommended path over subscription setup-token auth.
@ -78,6 +80,7 @@ OpenClaw ships with the piai catalog. These providers require **no**
- CLI: `openclaw onboard --auth-choice openai-codex` or `openclaw models auth login --provider openai-codex`
- Default transport is `auto` (WebSocket-first, SSE fallback)
- Override per model via `agents.defaults.models["openai-codex/<model>"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`)
- Shares the same `/fast` toggle and `params.fastMode` config as direct `openai/*`
- Policy note: OpenAI Codex OAuth is explicitly supported for external tools/workflows like OpenClaw.
```json5

View File

@ -281,7 +281,7 @@ Runtime override (owner only):
- `openclaw status` — shows store path and recent sessions.
- `openclaw sessions --json` — dumps every entry (filter with `--active <minutes>`).
- `openclaw gateway call sessions.list --params '{}'` — fetch sessions from the running gateway (use `--url`/`--token` for remote gateway access).
- Send `/status` as a standalone message in chat to see whether the agent is reachable, how much of the session context is used, current thinking/verbose toggles, and when your WhatsApp web creds were last refreshed (helps spot relink needs).
- Send `/status` as a standalone message in chat to see whether the agent is reachable, how much of the session context is used, current thinking/fast/verbose toggles, and when your WhatsApp web creds were last refreshed (helps spot relink needs).
- Send `/context list` or `/context detail` to see what's in the system prompt and injected workspace files (and the biggest context contributors).
- Send `/stop` (or standalone abort phrases like `stop`, `stop action`, `stop run`, `stop openclaw`) to abort the current run, clear queued followups for that session, and stop any sub-agent runs spawned from it (the reply includes the stopped count).
- Send `/compact` (optional instructions) as a standalone message to summarize older context and free up window space. See [/concepts/compaction](/concepts/compaction).

View File

@ -44,6 +44,34 @@ openclaw onboard --anthropic-api-key "$ANTHROPIC_API_KEY"
- [Adaptive thinking](https://platform.claude.com/docs/en/build-with-claude/adaptive-thinking)
- [Extended thinking](https://platform.claude.com/docs/en/build-with-claude/extended-thinking)
## Fast mode (Anthropic API)
OpenClaw's shared `/fast` toggle also supports direct Anthropic API-key traffic.
- `/fast on` maps to `service_tier: "auto"`
- `/fast off` maps to `service_tier: "standard_only"`
- Config default:
```json5
{
agents: {
defaults: {
models: {
"anthropic/claude-sonnet-4-5": {
params: { fastMode: true },
},
},
},
},
}
```
Important limits:
- This is **API-key only**. Anthropic setup-token / OAuth auth does not honor OpenClaw fast-mode tier injection.
- OpenClaw only injects Anthropic service tiers for direct `api.anthropic.com` requests. If you route `anthropic/*` through a proxy or gateway, `/fast` leaves `service_tier` untouched.
- Anthropic reports the effective tier on the response under `usage.service_tier`. On accounts without Priority Tier capacity, `service_tier: "auto"` may still resolve to `standard`.
## Prompt caching (Anthropic API)
OpenClaw supports Anthropic's prompt caching feature. This is **API-only**; subscription auth does not honor cache settings.

View File

@ -165,6 +165,46 @@ pass that field through on direct `openai/*` Responses requests.
Supported values are `auto`, `default`, `flex`, and `priority`.
### OpenAI fast mode
OpenClaw exposes a shared fast-mode toggle for both `openai/*` and
`openai-codex/*` sessions:
- Chat/UI: `/fast status|on|off`
- Config: `agents.defaults.models["<provider>/<model>"].params.fastMode`
When fast mode is enabled, OpenClaw applies a low-latency OpenAI profile:
- `reasoning.effort = "low"` when the payload does not already specify reasoning
- `text.verbosity = "low"` when the payload does not already specify verbosity
- `service_tier = "priority"` for direct `openai/*` Responses calls to `api.openai.com`
Example:
```json5
{
agents: {
defaults: {
models: {
"openai/gpt-5.4": {
params: {
fastMode: true,
},
},
"openai-codex/gpt-5.4": {
params: {
fastMode: true,
},
},
},
},
},
}
```
Session overrides win over config. Clearing the session override in the Sessions UI
returns the session to the configured default.
### OpenAI Responses server-side compaction
For direct OpenAI Responses models (`openai/*` using `api: "openai-responses"` with

View File

@ -14,7 +14,7 @@ The host-only bash chat command uses `! <cmd>` (with `/bash <cmd>` as an alias).
There are two related systems:
- **Commands**: standalone `/...` messages.
- **Directives**: `/think`, `/verbose`, `/reasoning`, `/elevated`, `/exec`, `/model`, `/queue`.
- **Directives**: `/think`, `/fast`, `/verbose`, `/reasoning`, `/elevated`, `/exec`, `/model`, `/queue`.
- Directives are stripped from the message before the model sees it.
- In normal chat messages (not directive-only), they are treated as “inline hints” and do **not** persist session settings.
- In directive-only messages (the message contains only directives), they persist to the session and reply with an acknowledgement.
@ -108,6 +108,7 @@ Text + native (when enabled):
- `/send on|off|inherit` (owner-only)
- `/reset` or `/new [model]` (optional model hint; remainder is passed through)
- `/think <off|minimal|low|medium|high|xhigh>` (dynamic choices by model/provider; aliases: `/thinking`, `/t`)
- `/fast status|on|off` (omitting the arg shows the current effective fast-mode state)
- `/verbose on|full|off` (alias: `/v`)
- `/reasoning on|off|stream` (alias: `/reason`; when on, sends a separate message prefixed `Reasoning:`; `stream` = Telegram draft only)
- `/elevated on|off|ask|full` (alias: `/elev`; `full` skips exec approvals)
@ -138,6 +139,7 @@ Notes:
- Discord thread-binding commands (`/focus`, `/unfocus`, `/agents`, `/session idle`, `/session max-age`) require effective thread bindings to be enabled (`session.threadBindings.enabled` and/or `channels.discord.threadBindings.enabled`).
- ACP command reference and runtime behavior: [ACP Agents](/tools/acp-agents).
- `/verbose` is meant for debugging and extra visibility; keep it **off** in normal use.
- `/fast on|off` persists a session override. Use the Sessions UI `inherit` option to clear it and fall back to config defaults.
- Tool failure summaries are still shown when relevant, but detailed failure text is only included when `/verbose` is `on` or `full`.
- `/reasoning` (and `/verbose`) are risky in group settings: they may reveal internal reasoning or tool output you did not intend to expose. Prefer leaving them off, especially in group chats.
- **Fast path:** command-only messages from allowlisted senders are handled immediately (bypass queue + model).

View File

@ -1,7 +1,7 @@
---
summary: "Directive syntax for /think + /verbose and how they affect model reasoning"
summary: "Directive syntax for /think, /fast, /verbose, and reasoning visibility"
read_when:
- Adjusting thinking or verbose directive parsing or defaults
- Adjusting thinking, fast-mode, or verbose directive parsing or defaults
title: "Thinking Levels"
---
@ -42,6 +42,21 @@ title: "Thinking Levels"
- **Embedded Pi**: the resolved level is passed to the in-process Pi agent runtime.
## Fast mode (/fast)
- Levels: `on|off`.
- Directive-only message toggles a session fast-mode override and replies `Fast mode enabled.` / `Fast mode disabled.`.
- Send `/fast` (or `/fast status`) with no mode to see the current effective fast-mode state.
- OpenClaw resolves fast mode in this order:
1. Inline/directive-only `/fast on|off`
2. Session override
3. Per-model config: `agents.defaults.models["<provider>/<model>"].params.fastMode`
4. Fallback: `off`
- For `openai/*`, fast mode applies the OpenAI fast profile: `service_tier=priority` when supported, plus low reasoning effort and low text verbosity.
- For `openai-codex/*`, fast mode applies the same low-latency profile on Codex Responses. OpenClaw keeps one shared `/fast` toggle across both auth paths.
- For direct `anthropic/*` API-key requests, fast mode maps to Anthropic service tiers: `/fast on` sets `service_tier=auto`, `/fast off` sets `service_tier=standard_only`.
- Anthropic fast mode is API-key only. OpenClaw skips Anthropic service-tier injection for Claude setup-token / OAuth auth and for non-Anthropic proxy base URLs.
## Verbose directives (/verbose or /v)
- Levels: `on` (minimal) | `full` | `off` (default).

View File

@ -75,7 +75,7 @@ The Control UI can localize itself on first load based on your browser locale, a
- Stream tool calls + live tool output cards in Chat (agent events)
- Channels: WhatsApp/Telegram/Discord/Slack + plugin channels (Mattermost, etc.) status + QR login + per-channel config (`channels.status`, `web.login.*`, `config.patch`)
- Instances: presence list + refresh (`system-presence`)
- Sessions: list + per-session thinking/verbose overrides (`sessions.list`, `sessions.patch`)
- Sessions: list + per-session thinking/fast/verbose/reasoning overrides (`sessions.list`, `sessions.patch`)
- Cron jobs: list/add/edit/run/enable/disable + run history (`cron.*`)
- Skills: status, enable/disable, install, API key updates (`skills.*`)
- Nodes: list + caps (`node.list`)

View File

@ -37,7 +37,7 @@ Use `--password` if your Gateway uses password auth.
- Header: connection URL, current agent, current session.
- Chat log: user messages, assistant replies, system notices, tool cards.
- Status line: connection/run state (connecting, running, streaming, idle, error).
- Footer: connection state + agent + session + model + think/verbose/reasoning + token counts + deliver.
- Footer: connection state + agent + session + model + think/fast/verbose/reasoning + token counts + deliver.
- Input: text editor with autocomplete.
## Mental model: agents + sessions
@ -92,6 +92,7 @@ Core:
Session controls:
- `/think <off|minimal|low|medium|high>`
- `/fast <status|on|off>`
- `/verbose <on|full|off>`
- `/reasoning <on|off|stream>`
- `/usage <off|tokens|full>`

View File

@ -1384,7 +1384,6 @@ validate_changelog_merge_hygiene() {
prepare_gates() {
local pr="$1"
local skip_test="${2:-false}"
enter_worktree "$pr" false
checkout_prep_branch "$pr"
@ -1419,9 +1418,7 @@ prepare_gates() {
run_quiet_logged "pnpm build" ".local/gates-build.log" pnpm build
run_quiet_logged "pnpm check" ".local/gates-check.log" pnpm check
if [ "$skip_test" = "true" ]; then
echo "Test skipped (--no-test). Full suite deferred to Test phase."
elif [ "$docs_only" = "true" ]; then
if [ "$docs_only" = "true" ]; then
echo "Docs-only change detected with high confidence; skipping pnpm test."
else
run_quiet_logged "pnpm test" ".local/gates-test.log" pnpm test
@ -1990,7 +1987,7 @@ main() {
prepare_validate_commit "$pr"
;;
prepare-gates)
prepare_gates "$pr" "${3:-false}"
prepare_gates "$pr"
;;
prepare-push)
prepare_push "$pr"

View File

@ -1,20 +1,13 @@
#!/usr/bin/env bash
set -euo pipefail
if [ "$#" -lt 2 ]; then
echo "Usage: scripts/pr-prepare <init|validate-commit|gates|push|run> <PR> [--no-test]"
if [ "$#" -ne 2 ]; then
echo "Usage: scripts/pr-prepare <init|validate-commit|gates|push|run> <PR>"
exit 2
fi
mode="$1"
pr="$2"
shift 2
no_test=false
for arg in "$@"; do
case "$arg" in
--no-test) no_test=true ;;
esac
done
script_dir="$(cd "$(dirname "$0")" && pwd)"
base="$script_dir/pr"
if common_git_dir=$(git -C "$script_dir" rev-parse --path-format=absolute --git-common-dir 2>/dev/null); then
@ -32,11 +25,7 @@ case "$mode" in
exec "$base" prepare-validate-commit "$pr"
;;
gates)
if [ "$no_test" = "true" ]; then
exec "$base" prepare-gates "$pr" true
else
exec "$base" prepare-gates "$pr"
fi
exec "$base" prepare-gates "$pr"
;;
push)
exec "$base" prepare-push "$pr"

View File

@ -645,6 +645,77 @@ describe("acp setSessionConfigOption bridge behavior", () => {
sessionStore.clearAllSessionsForTest();
});
// Verifies the ACP bridge round-trips the "fast_mode" config option:
// loading a session surfaces the gateway's fastMode flag, setting the option
// issues a sessions.patch carrying a boolean fastMode, and a
// config_option_update notification is pushed back to the ACP client.
it("updates fast mode ACP config options through gateway session patches", async () => {
const sessionStore = createInMemorySessionStore();
const connection = createAcpConnection();
const sessionUpdate = connection.__sessionUpdateMock;
// Fake gateway: serves one session with fastMode already on, and asserts
// the patch payload the agent sends when the option is changed.
const request = vi.fn(async (method: string, params?: unknown) => {
if (method === "sessions.list") {
return {
ts: Date.now(),
path: "/tmp/sessions.json",
count: 1,
defaults: {
modelProvider: null,
model: null,
contextTokens: null,
},
sessions: [
{
key: "fast-session",
kind: "direct",
updatedAt: Date.now(),
thinkingLevel: "minimal",
modelProvider: "openai",
model: "gpt-5.4",
fastMode: true,
},
],
};
}
if (method === "sessions.patch") {
// The "on" string value must be translated to a boolean patch.
expect(params).toEqual({
key: "fast-session",
fastMode: true,
});
}
return { ok: true };
}) as GatewayClient["request"];
const agent = new AcpGatewayAgent(connection, createAcpGateway(request), {
sessionStore,
});
await agent.loadSession(createLoadSessionRequest("fast-session"));
// Clear load-time notifications so the assertion below only sees the
// update triggered by setSessionConfigOption.
sessionUpdate.mockClear();
const result = await agent.setSessionConfigOption(
createSetSessionConfigOptionRequest("fast-session", "fast_mode", "on"),
);
// The returned option list reflects the new value…
expect(result.configOptions).toEqual(
expect.arrayContaining([
expect.objectContaining({
id: "fast_mode",
currentValue: "on",
}),
]),
);
// …and the same state is broadcast as a config_option_update.
expect(sessionUpdate).toHaveBeenCalledWith({
sessionId: "fast-session",
update: {
sessionUpdate: "config_option_update",
configOptions: expect.arrayContaining([
expect.objectContaining({
id: "fast_mode",
currentValue: "on",
}),
]),
},
});
sessionStore.clearAllSessionsForTest();
});
it("rejects non-string ACP config option values", async () => {
const sessionStore = createInMemorySessionStore();
const connection = createAcpConnection();

View File

@ -53,6 +53,7 @@ import { ACP_AGENT_INFO, type AcpServerOptions } from "./types.js";
// Maximum allowed prompt size (2MB) to prevent DoS via memory exhaustion (CWE-400, GHSA-cxpw-2g23-2vgw)
const MAX_PROMPT_BYTES = 2 * 1024 * 1024;
const ACP_THOUGHT_LEVEL_CONFIG_ID = "thought_level";
const ACP_FAST_MODE_CONFIG_ID = "fast_mode";
const ACP_VERBOSE_LEVEL_CONFIG_ID = "verbose_level";
const ACP_REASONING_LEVEL_CONFIG_ID = "reasoning_level";
const ACP_RESPONSE_USAGE_CONFIG_ID = "response_usage";
@ -88,6 +89,7 @@ type GatewaySessionPresentationRow = Pick<
| "derivedTitle"
| "updatedAt"
| "thinkingLevel"
| "fastMode"
| "modelProvider"
| "model"
| "verboseLevel"
@ -209,6 +211,13 @@ function buildSessionPresentation(params: {
currentValue: currentModeId,
values: availableLevelIds,
}),
buildSelectConfigOption({
id: ACP_FAST_MODE_CONFIG_ID,
name: "Fast mode",
description: "Controls whether OpenAI sessions use the Gateway fast-mode profile.",
currentValue: row.fastMode ? "on" : "off",
values: ["off", "on"],
}),
buildSelectConfigOption({
id: ACP_VERBOSE_LEVEL_CONFIG_ID,
name: "Tool verbosity",
@ -925,6 +934,7 @@ export class AcpGatewayAgent implements Agent {
thinkingLevel: session.thinkingLevel,
modelProvider: session.modelProvider,
model: session.model,
fastMode: session.fastMode,
verboseLevel: session.verboseLevel,
reasoningLevel: session.reasoningLevel,
responseUsage: session.responseUsage,
@ -940,7 +950,7 @@ export class AcpGatewayAgent implements Agent {
value: string | boolean,
): {
overrides: Partial<GatewaySessionPresentationRow>;
patch: Record<string, string>;
patch: Record<string, string | boolean>;
} {
if (typeof value !== "string") {
throw new Error(
@ -953,6 +963,11 @@ export class AcpGatewayAgent implements Agent {
patch: { thinkingLevel: value },
overrides: { thinkingLevel: value },
};
case ACP_FAST_MODE_CONFIG_ID:
return {
patch: { fastMode: value === "on" },
overrides: { fastMode: value === "on" },
};
case ACP_VERBOSE_LEVEL_CONFIG_ID:
return {
patch: { verboseLevel: value },

58
src/agents/fast-mode.ts Normal file
View File

@ -0,0 +1,58 @@
import { normalizeFastMode } from "../auto-reply/thinking.js";
import type { OpenClawConfig } from "../config/config.js";
import type { SessionEntry } from "../config/sessions.js";
/**
 * Effective fast-mode flag plus the layer it was resolved from
 * (session override, per-model config, or the built-in default).
 */
export type FastModeState = {
  enabled: boolean;
  source: "session" | "config" | "default";
};

/**
 * Read a fast-mode flag out of a raw extra-params bag.
 *
 * Accepts both the camelCase `fastMode` and snake_case `fast_mode` keys
 * (camelCase wins when it is non-nullish) and funnels the raw value through
 * `normalizeFastMode`. Returns `undefined` when no usable value is present.
 */
export function resolveFastModeParam(
  extraParams: Record<string, unknown> | undefined,
): boolean | undefined {
  const raw = extraParams?.fastMode ?? extraParams?.fast_mode;
  return normalizeFastMode(raw as string | boolean | null | undefined);
}
/**
 * Look up the raw (un-normalized) fast-mode value configured for a
 * `<provider>/<model>` entry under `agents.defaults.models`.
 *
 * Checks `params.fastMode` first and falls back to `params.fast_mode`;
 * yields `undefined` when the model has no config entry or neither key is set.
 */
function resolveConfiguredFastModeRaw(params: {
  cfg: OpenClawConfig | undefined;
  provider: string;
  model: string;
}): unknown {
  const entry =
    params.cfg?.agents?.defaults?.models?.[`${params.provider}/${params.model}`];
  const configParams = entry?.params;
  return configParams?.fastMode ?? configParams?.fast_mode;
}
/**
 * Resolve the per-model configured fast-mode default as a plain boolean.
 *
 * Whatever raw value the config holds is normalized; anything that does not
 * normalize to an explicit on/off collapses to `false`.
 */
export function resolveConfiguredFastMode(params: {
  cfg: OpenClawConfig | undefined;
  provider: string;
  model: string;
}): boolean {
  const raw = resolveConfiguredFastModeRaw(params) as string | boolean | null | undefined;
  return normalizeFastMode(raw) ?? false;
}
/**
 * Compute the effective fast-mode state for a session.
 *
 * Precedence (first defined value wins):
 *  1. session override (`sessionEntry.fastMode`)
 *  2. per-model config (`agents.defaults.models["<provider>/<model>"].params.fastMode`)
 *  3. built-in default: off
 *
 * The returned `source` records which layer supplied the value.
 */
export function resolveFastModeState(params: {
  cfg: OpenClawConfig | undefined;
  provider: string;
  model: string;
  sessionEntry?: Pick<SessionEntry, "fastMode"> | undefined;
}): FastModeState {
  const fromSession = normalizeFastMode(params.sessionEntry?.fastMode);
  if (fromSession !== undefined) {
    return { enabled: fromSession, source: "session" };
  }
  const fromConfig = normalizeFastMode(
    resolveConfiguredFastModeRaw(params) as string | boolean | null | undefined,
  );
  if (fromConfig !== undefined) {
    return { enabled: fromConfig, source: "config" };
  }
  return { enabled: false, source: "default" };
}

View File

@ -6,12 +6,16 @@ import { isTruthyEnvValue } from "../infra/env.js";
import { applyExtraParamsToAgent } from "./pi-embedded-runner.js";
// Provider API keys for the live suites; default to "" when unset so the
// gating below can treat a missing key as "skip".
const OPENAI_KEY = process.env.OPENAI_API_KEY ?? "";
const ANTHROPIC_KEY = process.env.ANTHROPIC_API_KEY ?? "";
const GEMINI_KEY = process.env.GEMINI_API_KEY ?? "";
// Each provider's live suite runs when its dedicated *_LIVE_TEST flag or the
// umbrella LIVE flag is truthy.
const LIVE = isTruthyEnvValue(process.env.OPENAI_LIVE_TEST) || isTruthyEnvValue(process.env.LIVE);
const ANTHROPIC_LIVE =
isTruthyEnvValue(process.env.ANTHROPIC_LIVE_TEST) || isTruthyEnvValue(process.env.LIVE);
const GEMINI_LIVE =
isTruthyEnvValue(process.env.GEMINI_LIVE_TEST) || isTruthyEnvValue(process.env.LIVE);
// Suites require both the opt-in flag AND a key; otherwise they are skipped.
const describeLive = LIVE && OPENAI_KEY ? describe : describe.skip;
const describeAnthropicLive = ANTHROPIC_LIVE && ANTHROPIC_KEY ? describe : describe.skip;
const describeGeminiLive = GEMINI_LIVE && GEMINI_KEY ? describe : describe.skip;
describeLive("pi embedded extra params (live)", () => {
@ -65,6 +69,79 @@ describeLive("pi embedded extra params (live)", () => {
// Should respect maxTokens from config (16) — allow a small buffer for provider rounding.
expect(outputTokens ?? 0).toBeLessThanOrEqual(20);
}, 30_000);
// Live probe: sends two minimal OpenAI Responses requests and checks the API
// echoes back the requested service_tier ("default" vs "priority"), i.e. the
// tiers OpenClaw fast mode injects are actually honored end to end.
it("verifies OpenAI fast-mode service_tier semantics against the live API", async () => {
const headers = {
"content-type": "application/json",
authorization: `Bearer ${OPENAI_KEY}`,
};
// Issue one Responses call with the given tier and return the parsed body.
const runProbe = async (serviceTier: "default" | "priority") => {
const res = await fetch("https://api.openai.com/v1/responses", {
method: "POST",
headers,
body: JSON.stringify({
model: "gpt-5.4",
input: "Reply with OK.",
max_output_tokens: 32,
service_tier: serviceTier,
}),
});
const json = (await res.json()) as {
error?: { message?: string };
service_tier?: string;
status?: string;
};
// Surface the API error message in the assertion failure when the call fails.
expect(res.ok, json.error?.message ?? `HTTP ${res.status}`).toBe(true);
return json;
};
const standard = await runProbe("default");
expect(standard.service_tier).toBe("default");
expect(standard.status).toBe("completed");
const fast = await runProbe("priority");
expect(fast.service_tier).toBe("priority");
expect(fast.status).toBe("completed");
}, 45_000);
});
// Live probe: verifies the Anthropic service_tier values OpenClaw fast mode
// maps to (`auto` / `standard_only`) are accepted by the Messages API and
// reported back under usage.service_tier.
describeAnthropicLive("pi embedded extra params (anthropic live)", () => {
it("verifies Anthropic fast-mode service_tier semantics against the live API", async () => {
const headers = {
"content-type": "application/json",
"x-api-key": ANTHROPIC_KEY,
"anthropic-version": "2023-06-01",
};
// Issue one Messages call with the given tier and return the parsed body.
const runProbe = async (serviceTier: "auto" | "standard_only") => {
const res = await fetch("https://api.anthropic.com/v1/messages", {
method: "POST",
headers,
body: JSON.stringify({
model: "claude-sonnet-4-5",
max_tokens: 32,
service_tier: serviceTier,
messages: [{ role: "user", content: "Reply with OK." }],
}),
});
const json = (await res.json()) as {
error?: { message?: string };
stop_reason?: string;
usage?: { service_tier?: string };
};
// Surface the API error message in the assertion failure when the call fails.
expect(res.ok, json.error?.message ?? `HTTP ${res.status}`).toBe(true);
return json;
};
// standard_only must resolve to the "standard" effective tier.
const standard = await runProbe("standard_only");
expect(standard.usage?.service_tier).toBe("standard");
expect(standard.stop_reason).toBe("end_turn");
// "auto" may resolve to either tier depending on account Priority capacity.
const fast = await runProbe("auto");
expect(["standard", "priority"]).toContain(fast.usage?.service_tier);
expect(fast.stop_reason).toBe("end_turn");
}, 45_000);
});
describeGeminiLive("pi embedded extra params (gemini live)", () => {

View File

@ -201,9 +201,11 @@ describe("applyExtraParamsToAgent", () => {
model:
| Model<"openai-responses">
| Model<"openai-codex-responses">
| Model<"openai-completions">;
| Model<"openai-completions">
| Model<"anthropic-messages">;
options?: SimpleStreamOptions;
cfg?: Record<string, unknown>;
extraParamsOverride?: Record<string, unknown>;
payload?: Record<string, unknown>;
}) {
const payload = params.payload ?? { store: false };
@ -217,6 +219,7 @@ describe("applyExtraParamsToAgent", () => {
params.cfg as Parameters<typeof applyExtraParamsToAgent>[1],
params.applyProvider,
params.applyModelId,
params.extraParamsOverride,
);
const context: Context = { messages: [] };
void agent.streamFn?.(params.model, context, params.options ?? {});
@ -1627,6 +1630,165 @@ describe("applyExtraParamsToAgent", () => {
expect(payload.service_tier).toBe("default");
});
// Config-driven fast mode injects the full OpenAI low-latency profile
// (low reasoning effort, low text verbosity, priority tier) for direct
// api.openai.com Responses models.
it("injects fast-mode payload defaults for direct OpenAI Responses", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5.4",
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-5.4": {
params: {
fastMode: true,
},
},
},
},
},
},
model: {
api: "openai-responses",
provider: "openai",
id: "gpt-5.4",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">,
payload: {
store: false,
},
});
expect(payload.reasoning).toEqual({ effort: "low" });
expect(payload.text).toEqual({ verbosity: "low" });
expect(payload.service_tier).toBe("priority");
});
// Fast mode only fills in defaults: explicit reasoning/verbosity/tier values
// supplied by the caller must pass through untouched.
it("preserves caller-provided OpenAI payload fields when fast mode is enabled", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5.4",
extraParamsOverride: { fastMode: true },
model: {
api: "openai-responses",
provider: "openai",
id: "gpt-5.4",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">,
payload: {
reasoning: { effort: "medium" },
text: { verbosity: "high" },
service_tier: "default",
},
});
expect(payload.reasoning).toEqual({ effort: "medium" });
expect(payload.text).toEqual({ verbosity: "high" });
expect(payload.service_tier).toBe("default");
});
// Anthropic mapping: fast mode ON → service_tier "auto" on direct API-key models.
it("injects service_tier=auto for Anthropic fast mode on direct API-key models", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "anthropic",
applyModelId: "claude-sonnet-4-5",
extraParamsOverride: { fastMode: true },
model: {
api: "anthropic-messages",
provider: "anthropic",
id: "claude-sonnet-4-5",
baseUrl: "https://api.anthropic.com",
} as unknown as Model<"anthropic-messages">,
payload: {},
});
expect(payload.service_tier).toBe("auto");
});
// Anthropic mapping: fast mode OFF → service_tier "standard_only".
it("injects service_tier=standard_only for Anthropic fast mode off", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "anthropic",
applyModelId: "claude-sonnet-4-5",
extraParamsOverride: { fastMode: false },
model: {
api: "anthropic-messages",
provider: "anthropic",
id: "claude-sonnet-4-5",
baseUrl: "https://api.anthropic.com",
} as unknown as Model<"anthropic-messages">,
payload: {},
});
expect(payload.service_tier).toBe("standard_only");
});
// A caller-supplied Anthropic service_tier always wins over the fast-mode value.
it("preserves caller-provided Anthropic service_tier values", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "anthropic",
applyModelId: "claude-sonnet-4-5",
extraParamsOverride: { fastMode: true },
model: {
api: "anthropic-messages",
provider: "anthropic",
id: "claude-sonnet-4-5",
baseUrl: "https://api.anthropic.com",
} as unknown as Model<"anthropic-messages">,
payload: {
service_tier: "standard_only",
},
});
expect(payload.service_tier).toBe("standard_only");
});
// OAuth-style keys (sk-ant-oat…) must not receive tier injection.
it("does not inject Anthropic fast mode service_tier for OAuth auth", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "anthropic",
applyModelId: "claude-sonnet-4-5",
extraParamsOverride: { fastMode: true },
model: {
api: "anthropic-messages",
provider: "anthropic",
id: "claude-sonnet-4-5",
baseUrl: "https://api.anthropic.com",
} as unknown as Model<"anthropic-messages">,
options: {
apiKey: "sk-ant-oat-test-token",
},
payload: {},
});
expect(payload).not.toHaveProperty("service_tier");
});
// Non-api.anthropic.com base URLs (proxies/gateways) are also excluded.
it("does not inject Anthropic fast mode service_tier for proxied base URLs", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "anthropic",
applyModelId: "claude-sonnet-4-5",
extraParamsOverride: { fastMode: true },
model: {
api: "anthropic-messages",
provider: "anthropic",
id: "claude-sonnet-4-5",
baseUrl: "https://proxy.example.com/anthropic",
} as unknown as Model<"anthropic-messages">,
payload: {},
});
expect(payload).not.toHaveProperty("service_tier");
});
// Codex Responses get the low-latency profile but NOT service_tier
// (that field is only injected for direct api.openai.com calls).
it("applies fast-mode defaults for openai-codex responses without service_tier", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai-codex",
applyModelId: "gpt-5.4",
extraParamsOverride: { fastMode: true },
model: {
api: "openai-codex-responses",
provider: "openai-codex",
id: "gpt-5.4",
baseUrl: "https://chatgpt.com/backend-api",
} as unknown as Model<"openai-codex-responses">,
payload: {
store: false,
},
});
expect(payload.reasoning).toEqual({ effort: "low" });
expect(payload.text).toEqual({ verbosity: "low" });
expect(payload).not.toHaveProperty("service_tier");
});
it("does not inject service_tier for non-openai providers", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "azure-openai-responses",

View File

@ -1,5 +1,6 @@
import type { StreamFn } from "@mariozechner/pi-agent-core";
import { streamSimple } from "@mariozechner/pi-ai";
import { resolveFastModeParam } from "../fast-mode.js";
import {
requiresOpenAiCompatibleAnthropicToolPayload,
usesOpenAiFunctionAnthropicToolSchema,
@ -18,6 +19,7 @@ const PI_AI_OAUTH_ANTHROPIC_BETAS = [
"oauth-2025-04-20",
...PI_AI_DEFAULT_ANTHROPIC_BETAS,
] as const;
type AnthropicServiceTier = "auto" | "standard_only";
type CacheRetention = "none" | "short" | "long";
@ -53,6 +55,25 @@ function isAnthropicOAuthApiKey(apiKey: unknown): boolean {
return typeof apiKey === "string" && apiKey.includes("sk-ant-oat");
}
/**
 * Whether a model's base URL targets Anthropic's public API endpoint.
 *
 * A missing or blank base URL means the default endpoint is in effect, so it
 * counts as public. Unparseable strings fall back to a substring match.
 */
function isAnthropicPublicApiBaseUrl(baseUrl: unknown): boolean {
  const blank = typeof baseUrl !== "string" || !baseUrl.trim();
  if (baseUrl == null || blank) {
    return true;
  }
  try {
    const { hostname } = new URL(baseUrl);
    return hostname.toLowerCase() === "api.anthropic.com";
  } catch {
    // Not a well-formed URL; approximate with a case-insensitive substring check.
    return baseUrl.toLowerCase().includes("api.anthropic.com");
  }
}
/**
 * Map the session fast-mode flag onto Anthropic's service_tier values:
 * enabled → "auto", disabled → "standard_only".
 */
function resolveAnthropicFastServiceTier(enabled: boolean): AnthropicServiceTier {
  if (enabled) {
    return "auto";
  }
  return "standard_only";
}
function requiresAnthropicToolPayloadCompatibilityForModel(model: {
api?: unknown;
provider?: unknown;
@ -304,6 +325,44 @@ export function createAnthropicToolPayloadCompatibilityWrapper(
};
}
/**
 * Wrap a stream function so direct Anthropic API requests carry a
 * fast-mode service_tier.
 *
 * The wrapper only rewrites payloads for `anthropic-messages` requests to the
 * `anthropic` provider on the public API host with a non-OAuth API key; all
 * other requests pass through unchanged. An existing service_tier on the
 * payload is never overwritten.
 */
export function createAnthropicFastModeWrapper(
  baseStreamFn: StreamFn | undefined,
  enabled: boolean,
): StreamFn {
  const next = baseStreamFn ?? streamSimple;
  const tier = resolveAnthropicFastServiceTier(enabled);
  return (model, context, options) => {
    const directAnthropic =
      model.api === "anthropic-messages" &&
      model.provider === "anthropic" &&
      isAnthropicPublicApiBaseUrl(model.baseUrl) &&
      !isAnthropicOAuthApiKey(options?.apiKey);
    if (!directAnthropic) {
      return next(model, context, options);
    }
    const previousOnPayload = options?.onPayload;
    return next(model, context, {
      ...options,
      onPayload: (payload) => {
        if (payload && typeof payload === "object") {
          const record = payload as Record<string, unknown>;
          // Respect a service_tier set earlier in the pipeline.
          if (record.service_tier === undefined) {
            record.service_tier = tier;
          }
        }
        return previousOnPayload?.(payload, model);
      },
    });
  };
}
/**
 * Resolve the Anthropic fast-mode flag from a model's extra params.
 *
 * Delegates to the shared `resolveFastModeParam` helper so Anthropic and
 * OpenAI read the same `fastMode` param; returns undefined when no usable
 * value is present.
 */
export function resolveAnthropicFastMode(
  extraParams: Record<string, unknown> | undefined,
): boolean | undefined {
  return resolveFastModeParam(extraParams);
}
export function createBedrockNoCacheWrapper(baseStreamFn: StreamFn | undefined): StreamFn {
const underlying = baseStreamFn ?? streamSimple;
return (model, context, options) =>

View File

@ -5,9 +5,11 @@ import type { ThinkLevel } from "../../auto-reply/thinking.js";
import type { OpenClawConfig } from "../../config/config.js";
import {
createAnthropicBetaHeadersWrapper,
createAnthropicFastModeWrapper,
createAnthropicToolPayloadCompatibilityWrapper,
createBedrockNoCacheWrapper,
isAnthropicBedrockModel,
resolveAnthropicFastMode,
resolveAnthropicBetas,
resolveCacheRetention,
} from "./anthropic-stream-wrappers.js";
@ -22,8 +24,10 @@ import {
import {
createCodexDefaultTransportWrapper,
createOpenAIDefaultTransportWrapper,
createOpenAIFastModeWrapper,
createOpenAIResponsesContextManagementWrapper,
createOpenAIServiceTierWrapper,
resolveOpenAIFastMode,
resolveOpenAIServiceTier,
} from "./openai-stream-wrappers.js";
import {
@ -437,6 +441,18 @@ export function applyExtraParamsToAgent(
// upstream model-ID heuristics for Gemini 3.1 variants.
agent.streamFn = createGoogleThinkingPayloadWrapper(agent.streamFn, thinkingLevel);
const anthropicFastMode = resolveAnthropicFastMode(merged);
if (anthropicFastMode !== undefined) {
log.debug(`applying Anthropic fast mode=${anthropicFastMode} for ${provider}/${modelId}`);
agent.streamFn = createAnthropicFastModeWrapper(agent.streamFn, anthropicFastMode);
}
const openAIFastMode = resolveOpenAIFastMode(merged);
if (openAIFastMode) {
log.debug(`applying OpenAI fast mode for ${provider}/${modelId}`);
agent.streamFn = createOpenAIFastModeWrapper(agent.streamFn);
}
const openAIServiceTier = resolveOpenAIServiceTier(merged);
if (openAIServiceTier) {
log.debug(`applying OpenAI service_tier=${openAIServiceTier} for ${provider}/${modelId}`);

View File

@ -4,6 +4,7 @@ import { streamSimple } from "@mariozechner/pi-ai";
import { log } from "./logger.js";
type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
type OpenAIReasoningEffort = "low" | "medium" | "high";
const OPENAI_RESPONSES_APIS = new Set(["openai-responses"]);
const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai", "azure-openai-responses"]);
@ -168,6 +169,89 @@ export function resolveOpenAIServiceTier(
return normalized;
}
/**
 * Normalize a raw fast-mode param to a boolean.
 *
 * Booleans pass through; strings are trimmed/lowercased and matched against
 * known on/off spellings. Anything else (or an unknown string) yields
 * undefined so callers can treat it as "not set".
 */
function normalizeOpenAIFastMode(value: unknown): boolean | undefined {
  if (typeof value === "boolean") {
    return value;
  }
  if (typeof value !== "string") {
    return undefined;
  }
  const token = value.trim().toLowerCase();
  const enablers = ["on", "true", "yes", "1", "fast"];
  if (enablers.includes(token)) {
    return true;
  }
  const disablers = ["off", "false", "no", "0", "normal"];
  if (disablers.includes(token)) {
    return false;
  }
  return undefined;
}
/**
 * Read the OpenAI fast-mode flag from extra params.
 *
 * Accepts both `fastMode` and `fast_mode` spellings. A value that is present
 * but unrecognized is logged (string values verbatim, otherwise just the
 * typeof) and treated as unset.
 */
export function resolveOpenAIFastMode(
  extraParams: Record<string, unknown> | undefined,
): boolean | undefined {
  const candidate = extraParams?.fastMode ?? extraParams?.fast_mode;
  const resolved = normalizeOpenAIFastMode(candidate);
  if (candidate !== undefined && resolved === undefined) {
    const summary = typeof candidate === "string" ? candidate : typeof candidate;
    log.warn(`ignoring invalid OpenAI fast mode param: ${summary}`);
  }
  return resolved;
}
/**
 * Reasoning effort used when fast mode is enabled and the payload does not
 * already specify one.
 *
 * Every model id — GPT-5 family or otherwise — currently resolves to "low",
 * the lowest shared non-disabled effort that current transports accept, so
 * the previous `startsWith("gpt-5")` branch was redundant (both arms returned
 * "low"). The parameter is kept so per-family tiers can be reintroduced
 * without changing callers.
 */
function resolveFastModeReasoningEffort(modelId: unknown): OpenAIReasoningEffort {
  void modelId; // reserved for future per-family tuning
  return "low";
}
/**
 * Mutate an OpenAI responses payload with fast-mode defaults.
 *
 * Fills in low reasoning effort and low text verbosity only where the payload
 * has not already chosen values, and — for the `openai` provider on the
 * public API base URL — defaults service_tier to "priority" when unset.
 */
function applyOpenAIFastModePayloadOverrides(params: {
  payloadObj: Record<string, unknown>;
  model: { provider?: unknown; id?: unknown; baseUrl?: unknown; api?: unknown };
}): void {
  const { payloadObj, model } = params;
  if (payloadObj.reasoning === undefined) {
    payloadObj.reasoning = { effort: resolveFastModeReasoningEffort(model.id) };
  }
  const text = payloadObj.text;
  if (text === undefined) {
    payloadObj.text = { verbosity: "low" };
  } else if (text && typeof text === "object" && !Array.isArray(text)) {
    const textRecord = text as Record<string, unknown>;
    if (textRecord.verbosity === undefined) {
      textRecord.verbosity = "low";
    }
  }
  const wantsPriorityTier =
    model.provider === "openai" &&
    payloadObj.service_tier === undefined &&
    isOpenAIPublicApiBaseUrl(model.baseUrl);
  if (wantsPriorityTier) {
    payloadObj.service_tier = "priority";
  }
}
export function createOpenAIResponsesContextManagementWrapper(
baseStreamFn: StreamFn | undefined,
extraParams: Record<string, unknown> | undefined,
@ -203,6 +287,31 @@ export function createOpenAIResponsesContextManagementWrapper(
};
}
/**
 * Wrap a stream function so OpenAI/Codex responses requests get fast-mode
 * payload defaults.
 *
 * Only `openai-responses`/`openai-codex-responses` requests from the
 * `openai`/`openai-codex` providers are rewritten; everything else passes
 * through untouched.
 */
export function createOpenAIFastModeWrapper(baseStreamFn: StreamFn | undefined): StreamFn {
  const next = baseStreamFn ?? streamSimple;
  return (model, context, options) => {
    const fastEligibleApi =
      model.api === "openai-responses" || model.api === "openai-codex-responses";
    const fastEligibleProvider = model.provider === "openai" || model.provider === "openai-codex";
    if (!fastEligibleApi || !fastEligibleProvider) {
      return next(model, context, options);
    }
    const previousOnPayload = options?.onPayload;
    return next(model, context, {
      ...options,
      onPayload: (payload) => {
        if (payload && typeof payload === "object") {
          applyOpenAIFastModePayloadOverrides({
            payloadObj: payload as Record<string, unknown>,
            model,
          });
        }
        return previousOnPayload?.(payload, model);
      },
    });
  };
}
export function createOpenAIServiceTierWrapper(
baseStreamFn: StreamFn | undefined,
serviceTier: OpenAIServiceTier,

View File

@ -892,6 +892,7 @@ export async function runEmbeddedPiAgent(
agentId: workspaceResolution.agentId,
legacyBeforeAgentStartResult,
thinkLevel,
fastMode: params.fastMode,
verboseLevel: params.verboseLevel,
reasoningLevel: params.reasoningLevel,
toolResultFormat: resolvedToolResultFormat,

View File

@ -1931,7 +1931,10 @@ export async function runEmbeddedAttempt(
params.config,
params.provider,
params.modelId,
params.streamParams,
{
...params.streamParams,
fastMode: params.fastMode,
},
params.thinkLevel,
sessionAgentId,
);

View File

@ -79,6 +79,7 @@ export type RunEmbeddedPiAgentParams = {
authProfileId?: string;
authProfileIdSource?: "auto" | "user";
thinkLevel?: ThinkLevel;
fastMode?: boolean;
verboseLevel?: VerboseLevel;
reasoningLevel?: ReasoningLevel;
toolResultFormat?: ToolResultFormat;

View File

@ -604,6 +604,22 @@ function buildChatCommands(): ChatCommandDefinition[] {
],
argsMenu: "auto",
}),
defineChatCommand({
key: "fast",
nativeName: "fast",
description: "Toggle fast mode.",
textAlias: "/fast",
category: "options",
args: [
{
name: "mode",
description: "status, on, or off",
type: "string",
choices: ["status", "on", "off"],
},
],
argsMenu: "auto",
}),
defineChatCommand({
key: "reasoning",
nativeName: "reasoning",

View File

@ -198,39 +198,15 @@ describe("commands registry", () => {
]);
});
it("invalidates cached command lists after plugin registry updates", () => {
const before = listChatCommands();
expect(before.find((command) => command.key === "dock:msteams")).toBeFalsy();
setActivePluginRegistry(
createTestRegistry([
{
pluginId: "test-plugin",
source: "test",
plugin: {
id: "msteams",
meta: {
id: "msteams",
label: "Microsoft Teams",
selectionLabel: "Microsoft Teams",
docsPath: "/channels/msteams",
blurb: "test stub.",
},
capabilities: {
chatTypes: ["direct"],
nativeCommands: true,
},
config: {
listAccountIds: () => ["default"],
resolveAccount: () => ({}),
},
},
},
]),
);
const after = listChatCommands();
expect(after.find((command) => command.key === "dock:msteams")).toBeTruthy();
it("registers fast mode as a first-class options command", () => {
const fast = listChatCommands().find((command) => command.key === "fast");
expect(fast).toMatchObject({
nativeName: "fast",
textAliases: ["/fast"],
category: "options",
});
const modeArg = fast?.args?.find((arg) => arg.name === "mode");
expect(modeArg?.choices).toEqual(["status", "on", "off"]);
});
it("detects known text commands", () => {

View File

@ -4,6 +4,7 @@ import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.j
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { runEmbeddedPiAgentMock } from "./reply.directive.directive-behavior.e2e-mocks.js";
export { loadModelCatalog } from "../agents/model-catalog.js";
export { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
@ -134,7 +135,7 @@ export function assertElevatedOffStatusReply(text: string | undefined) {
export function installDirectiveBehaviorE2EHooks() {
beforeEach(() => {
vi.mocked(runEmbeddedPiAgent).mockReset();
runEmbeddedPiAgentMock.mockReset();
vi.mocked(loadModelCatalog).mockResolvedValue(DEFAULT_TEST_MODEL_CATALOG);
});

View File

@ -1,5 +1,5 @@
import "./reply.directive.directive-behavior.e2e-mocks.js";
import { describe, expect, it, vi } from "vitest";
import { describe, expect, it } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import { loadSessionStore } from "../config/sessions.js";
import {
@ -10,10 +10,10 @@ import {
makeRestrictedElevatedDisabledConfig,
makeWhatsAppDirectiveConfig,
replyText,
runEmbeddedPiAgent,
sessionStorePath,
withTempHome,
} from "./reply.directive.directive-behavior.e2e-harness.js";
import { runEmbeddedPiAgentMock } from "./reply.directive.directive-behavior.e2e-mocks.js";
import { getReplyFromConfig } from "./reply.js";
const COMMAND_MESSAGE_BASE = {
@ -126,6 +126,18 @@ describe("directive behavior", () => {
it("reports current directive defaults when no arguments are provided", async () => {
await withTempHome(async (home) => {
const fastText = await runCommand(home, "/fast", {
defaults: {
models: {
"anthropic/claude-opus-4-5": {
params: { fastMode: true },
},
},
},
});
expect(fastText).toContain("Current fast mode: on (config)");
expect(fastText).toContain("Options: on, off.");
const verboseText = await runCommand(home, "/verbose", {
defaults: { verboseDefault: "on" },
});
@ -158,7 +170,28 @@ describe("directive behavior", () => {
expect(execText).toContain(
"Options: host=sandbox|gateway|node, security=deny|allowlist|full, ask=off|on-miss|always, node=<id>.",
);
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
});
it("persists fast toggles across /status and /fast", async () => {
await withTempHome(async (home) => {
const storePath = sessionStorePath(home);
const onText = await runCommand(home, "/fast on");
expect(onText).toContain("Fast mode enabled");
expect(loadSessionStore(storePath)["agent:main:main"]?.fastMode).toBe(true);
const statusText = await runCommand(home, "/status");
const optionsLine = statusText?.split("\n").find((line) => line.trim().startsWith("⚙️"));
expect(optionsLine).toContain("Fast: on");
const offText = await runCommand(home, "/fast off");
expect(offText).toContain("Fast mode disabled");
expect(loadSessionStore(storePath)["agent:main:main"]?.fastMode).toBe(false);
const fastText = await runCommand(home, "/fast");
expect(fastText).toContain("Current fast mode: off");
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
});
it("persists elevated toggles across /status and /elevated", async () => {
@ -181,7 +214,7 @@ describe("directive behavior", () => {
const store = loadSessionStore(storePath);
expect(store["agent:main:main"]?.elevatedLevel).toBe("on");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
});
it("enforces per-agent elevated restrictions and status visibility", async () => {
@ -217,7 +250,7 @@ describe("directive behavior", () => {
);
const statusText = replyText(statusRes);
expect(statusText).not.toContain("elevated");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
});
it("applies per-agent allowlist requirements before allowing elevated", async () => {
@ -245,7 +278,7 @@ describe("directive behavior", () => {
const allowedText = replyText(allowedRes);
expect(allowedText).toContain("Elevated mode set to ask");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
});
it("handles runtime warning, invalid level, and multi-directive elevated inputs", async () => {
@ -280,7 +313,7 @@ describe("directive behavior", () => {
expect(text).toContain(snippet);
}
}
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
});
it("persists queue overrides and reset behavior", async () => {
@ -317,12 +350,12 @@ describe("directive behavior", () => {
expect(entry?.queueDebounceMs).toBeUndefined();
expect(entry?.queueCap).toBeUndefined();
expect(entry?.queueDrop).toBeUndefined();
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
});
it("strips inline elevated directives from the user text (does not persist session override)", async () => {
await withTempHome(async (home) => {
vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
runEmbeddedPiAgentMock.mockResolvedValue({
payloads: [{ text: "ok" }],
meta: {
durationMs: 1,
@ -346,7 +379,7 @@ describe("directive behavior", () => {
const store = loadSessionStore(storePath);
expect(store["agent:main:main"]?.elevatedLevel).toBeUndefined();
const calls = vi.mocked(runEmbeddedPiAgent).mock.calls;
const calls = runEmbeddedPiAgentMock.mock.calls;
expect(calls.length).toBeGreaterThan(0);
const call = calls[0]?.[0];
expect(call?.prompt).toContain("hello there");

View File

@ -8,7 +8,7 @@ import {
extractThinkDirective,
extractVerboseDirective,
} from "./reply.js";
import { extractStatusDirective } from "./reply/directives.js";
import { extractFastDirective, extractStatusDirective } from "./reply/directives.js";
describe("directive parsing", () => {
it("ignores verbose directive inside URL", () => {
@ -49,6 +49,12 @@ describe("directive parsing", () => {
expect(res.reasoningLevel).toBe("stream");
});
it("matches fast directive", () => {
const res = extractFastDirective("/fast on please");
expect(res.hasDirective).toBe(true);
expect(res.fastMode).toBe(true);
});
it("matches elevated with leading space", () => {
const res = extractElevatedDirective(" please /elevated on now");
expect(res.hasDirective).toBe(true);
@ -106,6 +112,14 @@ describe("directive parsing", () => {
expect(res.cleaned).toBe("");
});
it("matches fast with no argument", () => {
const res = extractFastDirective("/fast:");
expect(res.hasDirective).toBe(true);
expect(res.fastMode).toBeUndefined();
expect(res.rawLevel).toBeUndefined();
expect(res.cleaned).toBe("");
});
it("matches reasoning with no argument", () => {
const res = extractReasoningDirective("/reasoning:");
expect(res.hasDirective).toBe(true);

View File

@ -27,6 +27,7 @@ import { handlePluginCommand } from "./commands-plugin.js";
import {
handleAbortTrigger,
handleActivationCommand,
handleFastCommand,
handleRestartCommand,
handleSessionCommand,
handleSendPolicyCommand,
@ -177,6 +178,7 @@ export async function handleCommands(params: HandleCommandsParams): Promise<Comm
handleBashCommand,
handleActivationCommand,
handleSendPolicyCommand,
handleFastCommand,
handleUsageCommand,
handleSessionCommand,
handleRestartCommand,

View File

@ -1,3 +1,4 @@
import { resolveFastModeState } from "../../agents/fast-mode.js";
import { parseDurationMs } from "../../cli/parse-duration.js";
import { isRestartEnabled } from "../../config/commands.js";
import {
@ -22,7 +23,7 @@ import {
import { formatTokenCount, formatUsd } from "../../utils/usage-format.js";
import { parseActivationCommand } from "../group-activation.js";
import { parseSendPolicyCommand } from "../send-policy.js";
import { normalizeUsageDisplay, resolveResponseUsageMode } from "../thinking.js";
import { normalizeFastMode, normalizeUsageDisplay, resolveResponseUsageMode } from "../thinking.js";
import { isDiscordSurface, isTelegramSurface, resolveChannelAccountId } from "./channel-context.js";
import { handleAbortTrigger, handleStopCommand } from "./commands-session-abort.js";
import { persistSessionEntry } from "./commands-session-store.js";
@ -291,6 +292,57 @@ export const handleUsageCommand: CommandHandler = async (params, allowTextComman
};
};
/**
 * Handle the `/fast` chat command: report or toggle per-session fast mode.
 *
 * Returns null when the message is not a `/fast` command (or text commands are
 * disabled) so the dispatcher can fall through to the next handler.
 * Unauthorized senders are logged and swallowed without a reply.
 */
export const handleFastCommand: CommandHandler = async (params, allowTextCommands) => {
  if (!allowTextCommands) {
    return null;
  }
  const normalized = params.command.commandBodyNormalized;
  // Match "/fast" exactly or "/fast <args>"; avoid matching e.g. "/faster".
  if (normalized !== "/fast" && !normalized.startsWith("/fast ")) {
    return null;
  }
  if (!params.command.isAuthorizedSender) {
    logVerbose(
      `Ignoring /fast from unauthorized sender: ${params.command.senderId || "<unknown>"}`,
    );
    return { shouldContinue: false };
  }
  const rawArgs = normalized === "/fast" ? "" : normalized.slice("/fast".length).trim();
  const rawMode = rawArgs.toLowerCase();
  // No argument (or "status"): report the effective state and where it came
  // from (session, config default, or built-in default).
  if (!rawMode || rawMode === "status") {
    const state = resolveFastModeState({
      cfg: params.cfg,
      provider: params.provider,
      model: params.model,
      sessionEntry: params.sessionEntry,
    });
    const suffix =
      state.source === "config" ? " (config)" : state.source === "default" ? " (default)" : "";
    return {
      shouldContinue: false,
      reply: { text: `⚙️ Current fast mode: ${state.enabled ? "on" : "off"}${suffix}.` },
    };
  }
  const nextMode = normalizeFastMode(rawMode);
  if (nextMode === undefined) {
    return {
      shouldContinue: false,
      reply: { text: "⚙️ Usage: /fast status|on|off" },
    };
  }
  // Persist the toggle only when a session store is available; otherwise the
  // acknowledgement is still sent but nothing is saved.
  if (params.sessionEntry && params.sessionStore && params.sessionKey) {
    params.sessionEntry.fastMode = nextMode;
    await persistSessionEntry(params);
  }
  return {
    shouldContinue: false,
    reply: { text: `⚙️ Fast mode ${nextMode ? "enabled" : "disabled"}.` },
  };
};
export const handleSessionCommand: CommandHandler = async (params, allowTextCommands) => {
if (!allowTextCommands) {
return null;

View File

@ -3,7 +3,7 @@ import {
resolveDefaultAgentId,
resolveSessionAgentId,
} from "../../agents/agent-scope.js";
import { resolveAgentCortexModeStatus, resolveCortexChannelTarget } from "../../agents/cortex.js";
import { resolveFastModeState } from "../../agents/fast-mode.js";
import { resolveModelAuthLabel } from "../../agents/model-auth-label.js";
import { listSubagentRunsForRequester } from "../../agents/subagent-registry.js";
import {
@ -43,6 +43,7 @@ export async function buildStatusReply(params: {
model: string;
contextTokens: number;
resolvedThinkLevel?: ThinkLevel;
resolvedFastMode?: boolean;
resolvedVerboseLevel: VerboseLevel;
resolvedReasoningLevel: ReasoningLevel;
resolvedElevatedLevel?: ElevatedLevel;
@ -64,6 +65,7 @@ export async function buildStatusReply(params: {
model,
contextTokens,
resolvedThinkLevel,
resolvedFastMode,
resolvedVerboseLevel,
resolvedReasoningLevel,
resolvedElevatedLevel,
@ -188,6 +190,14 @@ export async function buildStatusReply(params: {
})
: selectedModelAuth;
const agentDefaults = cfg.agents?.defaults ?? {};
const effectiveFastMode =
resolvedFastMode ??
resolveFastModeState({
cfg,
provider,
model,
sessionEntry,
}).enabled;
const statusText = buildStatusMessage({
config: cfg,
agent: {
@ -209,6 +219,7 @@ export async function buildStatusReply(params: {
sessionStorePath: storePath,
groupActivation,
resolvedThink: resolvedThinkLevel ?? (await resolveDefaultThinkingLevel()),
resolvedFast: effectiveFastMode,
resolvedVerbose: resolvedVerboseLevel,
resolvedReasoning: resolvedReasoningLevel,
resolvedElevated: resolvedElevatedLevel,

View File

@ -48,12 +48,17 @@ export async function applyInlineDirectivesFastLane(
}
const agentCfg = params.agentCfg;
const { currentThinkLevel, currentVerboseLevel, currentReasoningLevel, currentElevatedLevel } =
await resolveCurrentDirectiveLevels({
sessionEntry,
agentCfg,
resolveDefaultThinkingLevel: () => modelState.resolveDefaultThinkingLevel(),
});
const {
currentThinkLevel,
currentFastMode,
currentVerboseLevel,
currentReasoningLevel,
currentElevatedLevel,
} = await resolveCurrentDirectiveLevels({
sessionEntry,
agentCfg,
resolveDefaultThinkingLevel: () => modelState.resolveDefaultThinkingLevel(),
});
const directiveAck = await handleDirectiveOnly({
cfg,
@ -77,6 +82,7 @@ export async function applyInlineDirectivesFastLane(
initialModelLabel: params.initialModelLabel,
formatModelSwitchEvent,
currentThinkLevel,
currentFastMode,
currentVerboseLevel,
currentReasoningLevel,
currentElevatedLevel,

View File

@ -3,6 +3,7 @@ import {
resolveAgentDir,
resolveSessionAgentId,
} from "../../agents/agent-scope.js";
import { resolveFastModeState } from "../../agents/fast-mode.js";
import { resolveSandboxRuntimeStatus } from "../../agents/sandbox.js";
import type { OpenClawConfig } from "../../config/config.js";
import { type SessionEntry, updateSessionStore } from "../../config/sessions.js";
@ -78,6 +79,7 @@ export async function handleDirectiveOnly(
initialModelLabel,
formatModelSwitchEvent,
currentThinkLevel,
currentFastMode,
currentVerboseLevel,
currentReasoningLevel,
currentElevatedLevel,
@ -131,6 +133,15 @@ export async function handleDirectiveOnly(
const resolvedProvider = modelSelection?.provider ?? provider;
const resolvedModel = modelSelection?.model ?? model;
const fastModeState = resolveFastModeState({
cfg: params.cfg,
provider: resolvedProvider,
model: resolvedModel,
sessionEntry,
});
const effectiveFastMode = directives.fastMode ?? currentFastMode ?? fastModeState.enabled;
const effectiveFastModeSource =
directives.fastMode !== undefined ? "session" : fastModeState.source;
if (directives.hasThinkDirective && !directives.thinkLevel) {
// If no argument was provided, show the current level
@ -158,6 +169,25 @@ export async function handleDirectiveOnly(
text: `Unrecognized verbose level "${directives.rawVerboseLevel}". Valid levels: off, on, full.`,
};
}
if (directives.hasFastDirective && directives.fastMode === undefined) {
if (!directives.rawFastMode) {
const sourceSuffix =
effectiveFastModeSource === "config"
? " (config)"
: effectiveFastModeSource === "default"
? " (default)"
: "";
return {
text: withOptions(
`Current fast mode: ${effectiveFastMode ? "on" : "off"}${sourceSuffix}.`,
"on, off",
),
};
}
return {
text: `Unrecognized fast mode "${directives.rawFastMode}". Valid levels: on, off.`,
};
}
if (directives.hasReasoningDirective && !directives.reasoningLevel) {
if (!directives.rawReasoningLevel) {
const level = currentReasoningLevel ?? "off";
@ -279,11 +309,18 @@ export async function handleDirectiveOnly(
directives.elevatedLevel !== undefined &&
elevatedEnabled &&
elevatedAllowed;
const fastModeChanged =
directives.hasFastDirective &&
directives.fastMode !== undefined &&
directives.fastMode !== currentFastMode;
let reasoningChanged =
directives.hasReasoningDirective && directives.reasoningLevel !== undefined;
if (directives.hasThinkDirective && directives.thinkLevel) {
sessionEntry.thinkingLevel = directives.thinkLevel;
}
if (directives.hasFastDirective && directives.fastMode !== undefined) {
sessionEntry.fastMode = directives.fastMode;
}
if (shouldDowngradeXHigh) {
sessionEntry.thinkingLevel = "high";
}
@ -380,6 +417,13 @@ export async function handleDirectiveOnly(
: `Thinking level set to ${directives.thinkLevel}.`,
);
}
if (directives.hasFastDirective && directives.fastMode !== undefined) {
parts.push(
directives.fastMode
? formatDirectiveAck("Fast mode enabled.")
: formatDirectiveAck("Fast mode disabled."),
);
}
if (directives.hasVerboseDirective && directives.verboseLevel) {
parts.push(
directives.verboseLevel === "off"
@ -459,6 +503,12 @@ export async function handleDirectiveOnly(
if (directives.hasQueueDirective && directives.dropPolicy) {
parts.push(formatDirectiveAck(`Queue drop set to ${directives.dropPolicy}.`));
}
if (fastModeChanged) {
enqueueSystemEvent(`Fast mode ${sessionEntry.fastMode ? "enabled" : "disabled"}.`, {
sessionKey,
contextKey: `fast:${sessionEntry.fastMode ? "on" : "off"}`,
});
}
const ack = parts.join(" ").trim();
if (!ack && directives.hasStatusDirective) {
return undefined;

View File

@ -3,6 +3,7 @@ import type { ElevatedLevel, ReasoningLevel, ThinkLevel, VerboseLevel } from "..
export async function resolveCurrentDirectiveLevels(params: {
sessionEntry?: {
thinkingLevel?: unknown;
fastMode?: unknown;
verboseLevel?: unknown;
reasoningLevel?: unknown;
elevatedLevel?: unknown;
@ -15,6 +16,7 @@ export async function resolveCurrentDirectiveLevels(params: {
resolveDefaultThinkingLevel: () => Promise<ThinkLevel | undefined>;
}): Promise<{
currentThinkLevel: ThinkLevel | undefined;
currentFastMode: boolean | undefined;
currentVerboseLevel: VerboseLevel | undefined;
currentReasoningLevel: ReasoningLevel;
currentElevatedLevel: ElevatedLevel | undefined;
@ -24,6 +26,8 @@ export async function resolveCurrentDirectiveLevels(params: {
(await params.resolveDefaultThinkingLevel()) ??
(params.agentCfg?.thinkingDefault as ThinkLevel | undefined);
const currentThinkLevel = resolvedDefaultThinkLevel;
const currentFastMode =
typeof params.sessionEntry?.fastMode === "boolean" ? params.sessionEntry.fastMode : undefined;
const currentVerboseLevel =
(params.sessionEntry?.verboseLevel as VerboseLevel | undefined) ??
(params.agentCfg?.verboseDefault as VerboseLevel | undefined);
@ -34,6 +38,7 @@ export async function resolveCurrentDirectiveLevels(params: {
(params.agentCfg?.elevatedDefault as ElevatedLevel | undefined);
return {
currentThinkLevel,
currentFastMode,
currentVerboseLevel,
currentReasoningLevel,
currentElevatedLevel,

View File

@ -32,6 +32,7 @@ export type HandleDirectiveOnlyCoreParams = {
export type HandleDirectiveOnlyParams = HandleDirectiveOnlyCoreParams & {
currentThinkLevel?: ThinkLevel;
currentFastMode?: boolean;
currentVerboseLevel?: VerboseLevel;
currentReasoningLevel?: ReasoningLevel;
currentElevatedLevel?: ElevatedLevel;

View File

@ -6,6 +6,7 @@ import type { ElevatedLevel, ReasoningLevel, ThinkLevel, VerboseLevel } from "./
import {
extractElevatedDirective,
extractExecDirective,
extractFastDirective,
extractReasoningDirective,
extractStatusDirective,
extractThinkDirective,
@ -23,6 +24,9 @@ export type InlineDirectives = {
hasVerboseDirective: boolean;
verboseLevel?: VerboseLevel;
rawVerboseLevel?: string;
hasFastDirective: boolean;
fastMode?: boolean;
rawFastMode?: string;
hasReasoningDirective: boolean;
reasoningLevel?: ReasoningLevel;
rawReasoningLevel?: string;
@ -80,12 +84,18 @@ export function parseInlineDirectives(
rawLevel: rawVerboseLevel,
hasDirective: hasVerboseDirective,
} = extractVerboseDirective(thinkCleaned);
const {
cleaned: fastCleaned,
fastMode,
rawLevel: rawFastMode,
hasDirective: hasFastDirective,
} = extractFastDirective(verboseCleaned);
const {
cleaned: reasoningCleaned,
reasoningLevel,
rawLevel: rawReasoningLevel,
hasDirective: hasReasoningDirective,
} = extractReasoningDirective(verboseCleaned);
} = extractReasoningDirective(fastCleaned);
const {
cleaned: elevatedCleaned,
elevatedLevel,
@ -151,6 +161,9 @@ export function parseInlineDirectives(
hasVerboseDirective,
verboseLevel,
rawVerboseLevel,
hasFastDirective,
fastMode,
rawFastMode,
hasReasoningDirective,
reasoningLevel,
rawReasoningLevel,
@ -201,6 +214,7 @@ export function isDirectiveOnly(params: {
if (
!directives.hasThinkDirective &&
!directives.hasVerboseDirective &&
!directives.hasFastDirective &&
!directives.hasReasoningDirective &&
!directives.hasElevatedDirective &&
!directives.hasExecDirective &&

View File

@ -2,6 +2,7 @@ import { escapeRegExp } from "../../utils.js";
import type { NoticeLevel, ReasoningLevel } from "../thinking.js";
import {
type ElevatedLevel,
normalizeFastMode,
normalizeElevatedLevel,
normalizeNoticeLevel,
normalizeReasoningLevel,
@ -124,6 +125,24 @@ export function extractVerboseDirective(body?: string): {
};
}
/**
 * Extract an inline `/fast` directive from a message body.
 *
 * Returns the body with the directive removed, the normalized boolean (when
 * the argument was a recognized on/off spelling), the raw argument text, and
 * whether a directive was present at all. An empty body yields no directive.
 */
export function extractFastDirective(body?: string): {
  cleaned: string;
  fastMode?: boolean;
  rawLevel?: string;
  hasDirective: boolean;
} {
  if (!body) {
    return { cleaned: "", hasDirective: false };
  }
  const { cleaned, level, rawLevel, hasDirective } = extractLevelDirective(
    body,
    ["fast"],
    normalizeFastMode,
  );
  return { cleaned, fastMode: level, rawLevel, hasDirective };
}
export function extractNoticeDirective(body?: string): {
cleaned: string;
noticeLevel?: NoticeLevel;

View File

@ -150,6 +150,7 @@ export async function applyInlineDirectiveOverrides(params: {
}
const {
currentThinkLevel: resolvedDefaultThinkLevel,
currentFastMode,
currentVerboseLevel,
currentReasoningLevel,
currentElevatedLevel,
@ -162,6 +163,7 @@ export async function applyInlineDirectiveOverrides(params: {
const directiveReply = await handleDirectiveOnly({
...createDirectiveHandlingBase(),
currentThinkLevel,
currentFastMode,
currentVerboseLevel,
currentReasoningLevel,
currentElevatedLevel,
@ -201,6 +203,7 @@ export async function applyInlineDirectiveOverrides(params: {
const hasAnyDirective =
directives.hasThinkDirective ||
directives.hasFastDirective ||
directives.hasVerboseDirective ||
directives.hasReasoningDirective ||
directives.hasElevatedDirective ||

View File

@ -26,6 +26,9 @@ export function clearInlineDirectives(cleaned: string): InlineDirectives {
hasVerboseDirective: false,
verboseLevel: undefined,
rawVerboseLevel: undefined,
hasFastDirective: false,
fastMode: undefined,
rawFastMode: undefined,
hasReasoningDirective: false,
reasoningLevel: undefined,
rawReasoningLevel: undefined,

View File

@ -1,4 +1,5 @@
import type { ExecToolDefaults } from "../../agents/bash-tools.js";
import { resolveFastModeState } from "../../agents/fast-mode.js";
import type { ModelAliasIndex } from "../../agents/model-selection.js";
import { resolveSandboxRuntimeStatus } from "../../agents/sandbox.js";
import type { SkillCommandSpec } from "../../agents/skills.js";
@ -37,6 +38,7 @@ export type ReplyDirectiveContinuation = {
elevatedFailures: Array<{ gate: string; key: string }>;
defaultActivation: ReturnType<typeof defaultGroupActivation>;
resolvedThinkLevel: ThinkLevel | undefined;
resolvedFastMode: boolean;
resolvedVerboseLevel: VerboseLevel | undefined;
resolvedReasoningLevel: ReasoningLevel;
resolvedElevatedLevel: ElevatedLevel;
@ -228,6 +230,7 @@ export async function resolveReplyDirectives(params: {
const hasInlineDirective =
parsedDirectives.hasThinkDirective ||
parsedDirectives.hasVerboseDirective ||
parsedDirectives.hasFastDirective ||
parsedDirectives.hasReasoningDirective ||
parsedDirectives.hasElevatedDirective ||
parsedDirectives.hasExecDirective ||
@ -260,6 +263,7 @@ export async function resolveReplyDirectives(params: {
...parsedDirectives,
hasThinkDirective: false,
hasVerboseDirective: false,
hasFastDirective: false,
hasReasoningDirective: false,
hasStatusDirective: false,
hasModelDirective: false,
@ -340,6 +344,14 @@ export async function resolveReplyDirectives(params: {
const defaultActivation = defaultGroupActivation(requireMention);
const resolvedThinkLevel =
directives.thinkLevel ?? (sessionEntry?.thinkingLevel as ThinkLevel | undefined);
const resolvedFastMode =
directives.fastMode ??
resolveFastModeState({
cfg,
provider,
model,
sessionEntry,
}).enabled;
const resolvedVerboseLevel =
directives.verboseLevel ??
@ -479,6 +491,7 @@ export async function resolveReplyDirectives(params: {
elevatedFailures,
defaultActivation,
resolvedThinkLevel: resolvedThinkLevelWithDefault,
resolvedFastMode,
resolvedVerboseLevel,
resolvedReasoningLevel,
resolvedElevatedLevel,

View File

@ -1,6 +1,7 @@
import crypto from "node:crypto";
import { resolveSessionAuthProfileOverride } from "../../agents/auth-profiles/session-override.js";
import type { ExecToolDefaults } from "../../agents/bash-tools.js";
import { resolveFastModeState } from "../../agents/fast-mode.js";
import {
abortEmbeddedPiRun,
isEmbeddedPiRunActive,
@ -509,6 +510,12 @@ export async function runPreparedReply(
authProfileId,
authProfileIdSource,
thinkLevel: resolvedThinkLevel,
fastMode: resolveFastModeState({
cfg,
provider,
model,
sessionEntry,
}).enabled,
verboseLevel: resolvedVerboseLevel,
reasoningLevel: resolvedReasoningLevel,
elevatedLevel: resolvedElevatedLevel,

View File

@ -128,6 +128,21 @@ describe("buildStatusMessage", () => {
});
expect(normalizeTestText(text)).toContain("Cortex: minimal (session override)");
it("shows fast mode when enabled", () => {
const text = buildStatusMessage({
agent: {
model: "openai/gpt-5.4",
},
sessionEntry: {
sessionId: "fast",
updatedAt: 0,
fastMode: true,
},
sessionKey: "agent:main:main",
queue: { mode: "collect", depth: 0 },
});
expect(normalizeTestText(text)).toContain("Fast: on");
});
it("notes channel model overrides in status output", () => {
@ -735,6 +750,10 @@ describe("buildHelpMessage", () => {
expect(text).not.toContain("/config");
expect(text).not.toContain("/debug");
});
it("includes /fast in help output", () => {
expect(buildHelpMessage()).toContain("/fast on|off");
});
});
describe("buildCommandsMessagePaginated", () => {

View File

@ -77,6 +77,7 @@ type StatusArgs = {
sessionStorePath?: string;
groupActivation?: "mention" | "always";
resolvedThink?: ThinkLevel;
resolvedFast?: boolean;
resolvedVerbose?: VerboseLevel;
resolvedReasoning?: ReasoningLevel;
resolvedElevated?: ElevatedLevel;
@ -511,6 +512,7 @@ export function buildStatusMessage(args: StatusArgs): string {
args.resolvedThink ?? args.sessionEntry?.thinkingLevel ?? args.agent?.thinkingDefault ?? "off";
const verboseLevel =
args.resolvedVerbose ?? args.sessionEntry?.verboseLevel ?? args.agent?.verboseDefault ?? "off";
const fastMode = args.resolvedFast ?? args.sessionEntry?.fastMode ?? false;
const reasoningLevel = args.resolvedReasoning ?? args.sessionEntry?.reasoningLevel ?? "off";
const elevatedLevel =
args.resolvedElevated ??
@ -557,6 +559,7 @@ export function buildStatusMessage(args: StatusArgs): string {
const optionParts = [
`Runtime: ${runtime.label}`,
`Think: ${thinkLevel}`,
fastMode ? "Fast: on" : null,
verboseLabel,
reasoningLevel !== "off" ? `Reasoning: ${reasoningLevel}` : null,
elevatedLabel,
@ -730,7 +733,7 @@ export function buildHelpMessage(cfg?: OpenClawConfig): string {
lines.push(" /new | /reset | /compact [instructions] | /stop");
lines.push("");
const optionParts = ["/think <level>", "/model <id>", "/verbose on|off"];
const optionParts = ["/think <level>", "/model <id>", "/fast on|off", "/verbose on|off"];
if (isCommandFlagEnabled(cfg, "config")) {
optionParts.push("/config");
}

View File

@ -218,6 +218,24 @@ export function resolveResponseUsageMode(raw?: string | null): UsageDisplayLevel
return normalizeUsageDisplay(raw) ?? "off";
}
/**
 * Normalize fast-mode flags used to toggle low-latency model behavior.
 *
 * Accepts a boolean (returned as-is) or a case-insensitive keyword string.
 * Returns `undefined` for nullish/empty input or an unrecognized keyword so
 * callers can distinguish "no/invalid value" from an explicit on/off choice.
 */
export function normalizeFastMode(raw?: string | boolean | null): boolean | undefined {
  if (typeof raw === "boolean") {
    return raw;
  }
  if (!raw) {
    // null, undefined, and "" all mean "no value supplied".
    return undefined;
  }
  // Map keyword -> enabled state; Map.get yields undefined for unknown words
  // (and is immune to Object.prototype key collisions like "constructor").
  const keywordStates = new Map<string, boolean>([
    ["off", false],
    ["false", false],
    ["no", false],
    ["0", false],
    ["disable", false],
    ["disabled", false],
    ["normal", false],
    ["on", true],
    ["true", true],
    ["yes", true],
    ["1", true],
    ["enable", true],
    ["enabled", true],
    ["fast", true],
  ]);
  return keywordStates.get(raw.toLowerCase());
}
// Normalize elevated flags used to toggle elevated bash permissions.
export function normalizeElevatedLevel(raw?: string | null): ElevatedLevel | undefined {
if (!raw) {

View File

@ -1,3 +1,4 @@
import fs from "node:fs/promises";
import path from "node:path";
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig, ConfigFileSnapshot } from "../config/types.openclaw.js";
@ -390,14 +391,13 @@ describe("update-cli", () => {
},
{
name: "defaults to stable channel for package installs when unset",
mode: "npm" as const,
options: { yes: true },
prepare: async () => {
const tempDir = createCaseDir("openclaw-update");
mockPackageInstallStatus(tempDir);
},
expectedChannel: "stable" as const,
expectedTag: "latest",
expectedChannel: undefined as "stable" | undefined,
expectedTag: undefined as string | undefined,
},
{
name: "uses stored beta channel when configured",
@ -414,14 +414,25 @@ describe("update-cli", () => {
},
])("$name", async ({ mode, options, prepare, expectedChannel, expectedTag }) => {
await prepare();
vi.mocked(runGatewayUpdate).mockResolvedValue(makeOkUpdateResult({ mode }));
if (mode) {
vi.mocked(runGatewayUpdate).mockResolvedValue(makeOkUpdateResult({ mode }));
}
await updateCommand(options);
const call = expectUpdateCallChannel(expectedChannel);
if (expectedTag !== undefined) {
expect(call?.tag).toBe(expectedTag);
if (expectedChannel !== undefined) {
const call = expectUpdateCallChannel(expectedChannel);
if (expectedTag !== undefined) {
expect(call?.tag).toBe(expectedTag);
}
return;
}
expect(runGatewayUpdate).not.toHaveBeenCalled();
expect(runCommandWithTimeout).toHaveBeenCalledWith(
["npm", "i", "-g", "openclaw@latest", "--no-fund", "--no-audit", "--loglevel=error"],
expect.any(Object),
);
});
it("falls back to latest when beta tag is older than release", async () => {
@ -436,32 +447,104 @@ describe("update-cli", () => {
tag: "latest",
version: "1.2.3-1",
});
vi.mocked(runGatewayUpdate).mockResolvedValue(
makeOkUpdateResult({
mode: "npm",
}),
);
await updateCommand({});
const call = expectUpdateCallChannel("beta");
expect(call?.tag).toBe("latest");
expect(runGatewayUpdate).not.toHaveBeenCalled();
expect(runCommandWithTimeout).toHaveBeenCalledWith(
["npm", "i", "-g", "openclaw@latest", "--no-fund", "--no-audit", "--loglevel=error"],
expect.any(Object),
);
});
it("honors --tag override", async () => {
const tempDir = createCaseDir("openclaw-update");
vi.mocked(resolveOpenClawPackageRoot).mockResolvedValue(tempDir);
vi.mocked(runGatewayUpdate).mockResolvedValue(
makeOkUpdateResult({
mode: "npm",
}),
);
mockPackageInstallStatus(tempDir);
await updateCommand({ tag: "next" });
const call = vi.mocked(runGatewayUpdate).mock.calls[0]?.[0];
expect(call?.tag).toBe("next");
expect(runGatewayUpdate).not.toHaveBeenCalled();
expect(runCommandWithTimeout).toHaveBeenCalledWith(
["npm", "i", "-g", "openclaw@next", "--no-fund", "--no-audit", "--loglevel=error"],
expect.any(Object),
);
});
it("prepends portable Git PATH for package updates on Windows", async () => {
const platformSpy = vi.spyOn(process, "platform", "get").mockReturnValue("win32");
const tempDir = createCaseDir("openclaw-update");
const localAppData = createCaseDir("openclaw-localappdata");
const portableGitMingw = path.join(
localAppData,
"OpenClaw",
"deps",
"portable-git",
"mingw64",
"bin",
);
const portableGitUsr = path.join(
localAppData,
"OpenClaw",
"deps",
"portable-git",
"usr",
"bin",
);
await fs.mkdir(portableGitMingw, { recursive: true });
await fs.mkdir(portableGitUsr, { recursive: true });
mockPackageInstallStatus(tempDir);
pathExists.mockImplementation(
async (candidate: string) => candidate === portableGitMingw || candidate === portableGitUsr,
);
await withEnvAsync({ LOCALAPPDATA: localAppData }, async () => {
await updateCommand({ yes: true });
});
platformSpy.mockRestore();
const updateCall = vi
.mocked(runCommandWithTimeout)
.mock.calls.find(
(call) =>
Array.isArray(call[0]) &&
call[0][0] === "npm" &&
call[0][1] === "i" &&
call[0][2] === "-g",
);
const mergedPath = updateCall?.[1]?.env?.Path ?? updateCall?.[1]?.env?.PATH ?? "";
expect(mergedPath.split(path.delimiter).slice(0, 2)).toEqual([
portableGitMingw,
portableGitUsr,
]);
expect(updateCall?.[1]?.env?.NPM_CONFIG_SCRIPT_SHELL).toBe("cmd.exe");
expect(updateCall?.[1]?.env?.NODE_LLAMA_CPP_SKIP_DOWNLOAD).toBe("1");
});
it("uses OPENCLAW_UPDATE_PACKAGE_SPEC for package updates", async () => {
const tempDir = createCaseDir("openclaw-update");
mockPackageInstallStatus(tempDir);
await withEnvAsync(
{ OPENCLAW_UPDATE_PACKAGE_SPEC: "http://10.211.55.2:8138/openclaw-next.tgz" },
async () => {
await updateCommand({ yes: true, tag: "latest" });
},
);
expect(runGatewayUpdate).not.toHaveBeenCalled();
expect(runCommandWithTimeout).toHaveBeenCalledWith(
[
"npm",
"i",
"-g",
"http://10.211.55.2:8138/openclaw-next.tgz",
"--no-fund",
"--no-audit",
"--loglevel=error",
],
expect.any(Object),
);
});
it("updateCommand outputs JSON when --json is set", async () => {
@ -648,15 +731,15 @@ describe("update-cli", () => {
name: "requires confirmation without --yes",
options: {},
shouldExit: true,
shouldRunUpdate: false,
shouldRunPackageUpdate: false,
},
{
name: "allows downgrade with --yes",
options: { yes: true },
shouldExit: false,
shouldRunUpdate: true,
shouldRunPackageUpdate: true,
},
])("$name in non-interactive mode", async ({ options, shouldExit, shouldRunUpdate }) => {
])("$name in non-interactive mode", async ({ options, shouldExit, shouldRunPackageUpdate }) => {
await setupNonInteractiveDowngrade();
await updateCommand(options);
@ -667,7 +750,12 @@ describe("update-cli", () => {
expect(vi.mocked(defaultRuntime.exit).mock.calls.some((call) => call[0] === 1)).toBe(
shouldExit,
);
expect(vi.mocked(runGatewayUpdate).mock.calls.length > 0).toBe(shouldRunUpdate);
expect(vi.mocked(runGatewayUpdate).mock.calls.length > 0).toBe(false);
expect(
vi
.mocked(runCommandWithTimeout)
.mock.calls.some((call) => Array.isArray(call[0]) && call[0][0] === "npm"),
).toBe(shouldRunPackageUpdate);
});
it("dry-run bypasses downgrade confirmation checks in non-interactive mode", async () => {

View File

@ -144,6 +144,7 @@ export async function runUpdateStep(params: {
cwd?: string;
timeoutMs: number;
progress?: UpdateStepProgress;
env?: NodeJS.ProcessEnv;
}): Promise<UpdateStepResult> {
const command = params.argv.join(" ");
params.progress?.onStepStart?.({
@ -156,6 +157,7 @@ export async function runUpdateStep(params: {
const started = Date.now();
const res = await runCommandWithTimeout(params.argv, {
cwd: params.cwd,
env: params.env,
timeoutMs: params.timeoutMs,
});
const durationMs = Date.now() - started;

View File

@ -24,8 +24,10 @@ import {
checkUpdateStatus,
} from "../../infra/update-check.js";
import {
createGlobalInstallEnv,
cleanupGlobalRenameDirs,
globalInstallArgs,
resolveGlobalInstallSpec,
resolveGlobalPackageRoot,
} from "../../infra/update-global.js";
import { runGatewayUpdate, type UpdateRunResult } from "../../infra/update-runner.js";
@ -269,12 +271,18 @@ async function runPackageInstallUpdate(params: {
installKind: params.installKind,
timeoutMs: params.timeoutMs,
});
const installEnv = await createGlobalInstallEnv();
const runCommand = createGlobalCommandRunner();
const pkgRoot = await resolveGlobalPackageRoot(manager, runCommand, params.timeoutMs);
const packageName =
(pkgRoot ? await readPackageName(pkgRoot) : await readPackageName(params.root)) ??
DEFAULT_PACKAGE_NAME;
const installSpec = resolveGlobalInstallSpec({
packageName,
tag: params.tag,
env: installEnv,
});
const beforeVersion = pkgRoot ? await readPackageVersion(pkgRoot) : null;
if (pkgRoot) {
@ -286,7 +294,8 @@ async function runPackageInstallUpdate(params: {
const updateStep = await runUpdateStep({
name: "global update",
argv: globalInstallArgs(manager, `${packageName}@${params.tag}`),
argv: globalInstallArgs(manager, installSpec),
env: installEnv,
timeoutMs: params.timeoutMs,
progress: params.progress,
});
@ -380,6 +389,7 @@ async function runGitUpdate(params: {
name: "global install",
argv: globalInstallArgs(manager, updateRoot),
cwd: updateRoot,
env: await createGlobalInstallEnv(),
timeoutMs: effectiveTimeout,
progress: params.progress,
});
@ -835,28 +845,29 @@ export async function updateCommand(opts: UpdateCommandOptions): Promise<void> {
}
}
const result = switchToPackage
? await runPackageInstallUpdate({
root,
installKind,
tag,
timeoutMs: timeoutMs ?? 20 * 60_000,
startedAt,
progress,
})
: await runGitUpdate({
root,
switchToGit,
installKind,
timeoutMs,
startedAt,
progress,
channel,
tag,
showProgress,
opts,
stop,
});
const result =
updateInstallKind === "package"
? await runPackageInstallUpdate({
root,
installKind,
tag,
timeoutMs: timeoutMs ?? 20 * 60_000,
startedAt,
progress,
})
: await runGitUpdate({
root,
switchToGit,
installKind,
timeoutMs,
startedAt,
progress,
channel,
tag,
showProgress,
opts,
stop,
});
stop();
printResult(result, { ...opts, hideSteps: showProgress });

View File

@ -15,6 +15,8 @@ export type AgentStreamParams = {
/** Provider stream params override (best-effort). */
temperature?: number;
maxTokens?: number;
/** Provider fast-mode override (best-effort). */
fastMode?: boolean;
};
export type AgentRunContext = {

View File

@ -36,6 +36,9 @@ const buildFlags = (entry?: SessionEntry): string[] => {
if (typeof verbose === "string" && verbose.length > 0) {
flags.push(`verbose:${verbose}`);
}
if (typeof entry?.fastMode === "boolean") {
flags.push(entry.fastMode ? "fast" : "fast:off");
}
const reasoning = entry?.reasoningLevel;
if (typeof reasoning === "string" && reasoning.length > 0) {
flags.push(`reasoning:${reasoning}`);
@ -170,6 +173,7 @@ export async function getStatusSummary(
updatedAt,
age,
thinkingLevel: entry?.thinkingLevel,
fastMode: entry?.fastMode,
verboseLevel: entry?.verboseLevel,
reasoningLevel: entry?.reasoningLevel,
elevatedLevel: entry?.elevatedLevel,

View File

@ -8,6 +8,7 @@ export type SessionStatus = {
updatedAt: number | null;
age: number | null;
thinkingLevel?: string;
fastMode?: boolean;
verboseLevel?: string;
reasoningLevel?: string;
elevatedLevel?: string;

View File

@ -100,6 +100,7 @@ export type SessionEntry = {
abortCutoffTimestamp?: number;
chatType?: SessionChatType;
thinkingLevel?: string;
fastMode?: boolean;
verboseLevel?: string;
reasoningLevel?: string;
elevatedLevel?: string;

View File

@ -11,6 +11,7 @@ import { type HookMappingResolved, resolveHookMappings } from "./hooks-mapping.j
const DEFAULT_HOOKS_PATH = "/hooks";
const DEFAULT_HOOKS_MAX_BODY_BYTES = 256 * 1024;
const MAX_HOOK_IDEMPOTENCY_KEY_LENGTH = 256;
export type HooksConfigResolved = {
basePath: string;
@ -223,6 +224,7 @@ export type HookAgentPayload = {
message: string;
name: string;
agentId?: string;
idempotencyKey?: string;
wakeMode: "now" | "next-heartbeat";
sessionKey?: string;
deliver: boolean;
@ -263,6 +265,28 @@ export function resolveHookDeliver(raw: unknown): boolean {
return raw !== false;
}
/**
 * Validate one candidate idempotency key. A usable key is a string whose
 * trimmed form is non-empty and no longer than
 * MAX_HOOK_IDEMPOTENCY_KEY_LENGTH; anything else yields undefined so the
 * caller falls through to the next candidate.
 */
function resolveOptionalHookIdempotencyKey(raw: unknown): string | undefined {
  if (typeof raw !== "string") {
    return undefined;
  }
  const candidate = raw.trim();
  const usable = candidate.length > 0 && candidate.length <= MAX_HOOK_IDEMPOTENCY_KEY_LENGTH;
  return usable ? candidate : undefined;
}
/**
 * Resolve the effective idempotency key for a hook request.
 *
 * Precedence: the `Idempotency-Key` header, then the
 * `X-OpenClaw-Idempotency-Key` header, then the `idempotencyKey` payload
 * field. Each candidate is trimmed/validated; the first usable one wins.
 */
export function resolveHookIdempotencyKey(params: {
  payload: Record<string, unknown>;
  headers?: Record<string, string>;
}): string | undefined {
  const candidates: unknown[] = [
    params.headers?.["idempotency-key"],
    params.headers?.["x-openclaw-idempotency-key"],
    params.payload.idempotencyKey,
  ];
  for (const candidate of candidates) {
    const key = resolveOptionalHookIdempotencyKey(candidate);
    if (key !== undefined) {
      return key;
    }
  }
  return undefined;
}
export function resolveHookTargetAgentId(
hooksConfig: HooksConfigResolved,
agentId: string | undefined,
@ -366,6 +390,7 @@ export function normalizeAgentPayload(payload: Record<string, unknown>):
const agentIdRaw = payload.agentId;
const agentId =
typeof agentIdRaw === "string" && agentIdRaw.trim() ? agentIdRaw.trim() : undefined;
const idempotencyKey = resolveOptionalHookIdempotencyKey(payload.idempotencyKey);
const wakeMode = payload.wakeMode === "next-heartbeat" ? "next-heartbeat" : "now";
const sessionKeyRaw = payload.sessionKey;
const sessionKey =
@ -396,6 +421,7 @@ export function normalizeAgentPayload(payload: Record<string, unknown>):
message,
name,
agentId,
idempotencyKey,
wakeMode,
sessionKey,
deliver,

View File

@ -52,6 +52,7 @@ export const SessionsPatchParamsSchema = Type.Object(
key: NonEmptyString,
label: Type.Optional(Type.Union([SessionLabelString, Type.Null()])),
thinkingLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
fastMode: Type.Optional(Type.Union([Type.Boolean(), Type.Null()])),
verboseLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
reasoningLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
responseUsage: Type.Optional(

View File

@ -1,3 +1,4 @@
import { createHash } from "node:crypto";
import {
createServer as createHttpServer,
type Server as HttpServer,
@ -42,6 +43,7 @@ import {
isHookAgentAllowed,
normalizeAgentPayload,
normalizeHookHeaders,
resolveHookIdempotencyKey,
normalizeWakePayload,
readJsonBody,
normalizeHookDispatchSessionKey,
@ -55,6 +57,7 @@ import { getBearerToken } from "./http-utils.js";
import { resolveRequestClientIp } from "./net.js";
import { handleOpenAiHttpRequest } from "./openai-http.js";
import { handleOpenResponsesHttpRequest } from "./openresponses-http.js";
import { DEDUPE_MAX, DEDUPE_TTL_MS } from "./server-constants.js";
import {
authorizeCanvasRequest,
enforcePluginRouteGatewayAuth,
@ -85,6 +88,18 @@ export type HookClientIpConfig = Readonly<{
allowRealIpFallback?: boolean;
}>;
// One remembered hook delivery: when it was first recorded (ms epoch) and the
// runId that was returned to the caller, replayed verbatim on retries.
type HookReplayEntry = {
  ts: number;
  runId: string;
};
// Inputs that scope a replay-cache key: which hook path was hit, the bearer
// token presented, the caller-supplied idempotency key, and the normalized
// dispatch fields — so the same key reused with a different payload/route
// does NOT dedupe.
type HookReplayScope = {
  pathKey: string;
  token: string | undefined;
  idempotencyKey?: string;
  dispatchScope: Record<string, unknown>;
};
function sendJson(res: ServerResponse, status: number, body: unknown) {
res.statusCode = status;
res.setHeader("Content-Type", "application/json; charset=utf-8");
@ -361,6 +376,7 @@ export function createHooksRequestHandler(
} & HookDispatchers,
): HooksRequestHandler {
const { getHooksConfig, logHooks, dispatchAgentHook, dispatchWakeHook, getClientIpConfig } = opts;
const hookReplayCache = new Map<string, HookReplayEntry>();
const hookAuthLimiter = createAuthRateLimiter({
maxAttempts: HOOK_AUTH_FAILURE_LIMIT,
windowMs: HOOK_AUTH_FAILURE_WINDOW_MS,
@ -381,6 +397,66 @@ export function createHooksRequestHandler(
return normalizeRateLimitClientIp(clientIp);
};
// Evict expired and excess entries from the hook replay cache.
// Pass 1 drops entries older than DEDUPE_TTL_MS — the TTL is anchored to the
// first delivery, since `ts` is only written by rememberHookRunId on insert.
// Pass 2 caps the cache at DEDUPE_MAX by evicting from the front of the Map:
// Map preserves insertion order and resolveCachedHookRunId re-inserts on hit,
// so the front entry is the least recently used.
const pruneHookReplayCache = (now: number) => {
  const cutoff = now - DEDUPE_TTL_MS;
  for (const [key, entry] of hookReplayCache) {
    if (entry.ts < cutoff) {
      hookReplayCache.delete(key);
    }
  }
  while (hookReplayCache.size > DEDUPE_MAX) {
    const oldestKey = hookReplayCache.keys().next().value;
    if (!oldestKey) {
      // Defensive: an empty iterator yields undefined; stop rather than loop.
      break;
    }
    hookReplayCache.delete(oldestKey);
  }
};
// Build the replay-cache key for a hook request, or undefined when the caller
// supplied no usable idempotency key (no key => no dedupe). Token, scope, and
// idempotency key are each SHA-256 hashed separately — so raw secrets never
// appear in cache keys — and joined as `token:scope:idempotency`.
const buildHookReplayCacheKey = (params: HookReplayScope): string | undefined => {
  const idem = params.idempotencyKey?.trim();
  if (!idem) {
    return undefined;
  }
  const sha256Hex = (input: string): string =>
    createHash("sha256").update(input, "utf8").digest("hex");
  const scopePayload = JSON.stringify({
    pathKey: params.pathKey,
    dispatchScope: params.dispatchScope,
  });
  return [sha256Hex(params.token ?? ""), sha256Hex(scopePayload), sha256Hex(idem)].join(":");
};
// Return the runId previously recorded for this replay key, if any, and
// refresh the entry's LRU position (delete + re-set moves it to the back of
// the Map). Returns undefined when no key was supplied or no live entry
// exists. Note the re-insert keeps the original `ts`, so the TTL window
// stays anchored to the first delivery rather than sliding on each retry.
const resolveCachedHookRunId = (key: string | undefined, now: number): string | undefined => {
  if (!key) {
    return undefined;
  }
  // Prune first so an expired entry cannot be returned as a hit.
  pruneHookReplayCache(now);
  const cached = hookReplayCache.get(key);
  if (!cached) {
    return undefined;
  }
  hookReplayCache.delete(key);
  hookReplayCache.set(key, cached);
  return cached.runId;
};
// Record the runId produced for an idempotency key so later retries of the
// same request can be answered with the same runId. Delete-then-set places
// the entry at the back of the Map (most recently used) with a fresh
// timestamp; a trailing prune keeps the cache within size/TTL bounds.
const rememberHookRunId = (key: string | undefined, runId: string, now: number) => {
  if (!key) {
    return;
  }
  hookReplayCache.delete(key);
  hookReplayCache.set(key, { ts: now, runId });
  pruneHookReplayCache(now);
};
return async (req, res) => {
const hooksConfig = getHooksConfig();
if (!hooksConfig) {
@ -454,6 +530,11 @@ export function createHooksRequestHandler(
const payload = typeof body.value === "object" && body.value !== null ? body.value : {};
const headers = normalizeHookHeaders(req);
const idempotencyKey = resolveHookIdempotencyKey({
payload: payload as Record<string, unknown>,
headers,
});
const now = Date.now();
if (subPath === "wake") {
const normalized = normalizeWakePayload(payload as Record<string, unknown>);
@ -486,14 +567,41 @@ export function createHooksRequestHandler(
return true;
}
const targetAgentId = resolveHookTargetAgentId(hooksConfig, normalized.value.agentId);
const replayKey = buildHookReplayCacheKey({
pathKey: "agent",
token,
idempotencyKey,
dispatchScope: {
agentId: targetAgentId ?? null,
sessionKey:
normalized.value.sessionKey ?? hooksConfig.sessionPolicy.defaultSessionKey ?? null,
message: normalized.value.message,
name: normalized.value.name,
wakeMode: normalized.value.wakeMode,
deliver: normalized.value.deliver,
channel: normalized.value.channel,
to: normalized.value.to ?? null,
model: normalized.value.model ?? null,
thinking: normalized.value.thinking ?? null,
timeoutSeconds: normalized.value.timeoutSeconds ?? null,
},
});
const cachedRunId = resolveCachedHookRunId(replayKey, now);
if (cachedRunId) {
sendJson(res, 200, { ok: true, runId: cachedRunId });
return true;
}
const normalizedDispatchSessionKey = normalizeHookDispatchSessionKey({
sessionKey: sessionKey.value,
targetAgentId,
});
const runId = dispatchAgentHook({
...normalized.value,
sessionKey: normalizeHookDispatchSessionKey({
sessionKey: sessionKey.value,
targetAgentId,
}),
idempotencyKey,
sessionKey: normalizedDispatchSessionKey,
agentId: targetAgentId,
});
rememberHookRunId(replayKey, runId, now);
sendJson(res, 200, { ok: true, runId });
return true;
}
@ -543,15 +651,41 @@ export function createHooksRequestHandler(
return true;
}
const targetAgentId = resolveHookTargetAgentId(hooksConfig, mapped.action.agentId);
const normalizedDispatchSessionKey = normalizeHookDispatchSessionKey({
sessionKey: sessionKey.value,
targetAgentId,
});
const replayKey = buildHookReplayCacheKey({
pathKey: subPath || "mapping",
token,
idempotencyKey,
dispatchScope: {
agentId: targetAgentId ?? null,
sessionKey:
mapped.action.sessionKey ?? hooksConfig.sessionPolicy.defaultSessionKey ?? null,
message: mapped.action.message,
name: mapped.action.name ?? "Hook",
wakeMode: mapped.action.wakeMode,
deliver: resolveHookDeliver(mapped.action.deliver),
channel,
to: mapped.action.to ?? null,
model: mapped.action.model ?? null,
thinking: mapped.action.thinking ?? null,
timeoutSeconds: mapped.action.timeoutSeconds ?? null,
},
});
const cachedRunId = resolveCachedHookRunId(replayKey, now);
if (cachedRunId) {
sendJson(res, 200, { ok: true, runId: cachedRunId });
return true;
}
const runId = dispatchAgentHook({
message: mapped.action.message,
name: mapped.action.name ?? "Hook",
idempotencyKey,
agentId: targetAgentId,
wakeMode: mapped.action.wakeMode,
sessionKey: normalizeHookDispatchSessionKey({
sessionKey: sessionKey.value,
targetAgentId,
}),
sessionKey: normalizedDispatchSessionKey,
deliver: resolveHookDeliver(mapped.action.deliver),
channel,
to: mapped.action.to,
@ -560,6 +694,7 @@ export function createHooksRequestHandler(
timeoutSeconds: mapped.action.timeoutSeconds,
allowUnsafeExternalContent: mapped.action.allowUnsafeExternalContent,
});
rememberHookRunId(replayKey, runId, now);
sendJson(res, 200, { ok: true, runId });
return true;
}

View File

@ -379,6 +379,7 @@ export const agentHandlers: GatewayRequestHandlers = {
sessionId,
updatedAt: now,
thinkingLevel: entry?.thinkingLevel,
fastMode: entry?.fastMode,
verboseLevel: entry?.verboseLevel,
reasoningLevel: entry?.reasoningLevel,
systemSent: entry?.systemSent,

View File

@ -980,6 +980,7 @@ export const chatHandlers: GatewayRequestHandlers = {
sessionId,
messages: bounded.messages,
thinkingLevel,
fastMode: entry?.fastMode,
verboseLevel,
});
},

View File

@ -166,6 +166,7 @@ async function touchSessionStore(params: {
sessionId: params.sessionId,
updatedAt: params.now,
thinkingLevel: params.entry?.thinkingLevel,
fastMode: params.entry?.fastMode,
verboseLevel: params.entry?.verboseLevel,
reasoningLevel: params.entry?.reasoningLevel,
systemSent: params.entry?.systemSent,

View File

@ -1,6 +1,8 @@
import { describe, expect, test } from "vitest";
import fs from "node:fs/promises";
import { afterEach, describe, expect, test, vi } from "vitest";
import { resolveMainSessionKeyFromConfig } from "../config/sessions.js";
import { drainSystemEvents, peekSystemEvents } from "../infra/system-events.js";
import { DEDUPE_TTL_MS } from "./server-constants.js";
import {
cronIsolatedRun,
installGatewayTestHooks,
@ -14,6 +16,10 @@ installGatewayTestHooks({ scope: "suite" });
const resolveMainKey = () => resolveMainSessionKeyFromConfig();
const HOOK_TOKEN = "hook-secret";
afterEach(() => {
vi.restoreAllMocks();
});
function buildHookJsonHeaders(options?: {
token?: string | null;
headers?: Record<string, string>;
@ -279,6 +285,165 @@ describe("gateway server hooks", () => {
});
});
test("dedupes repeated /hooks/agent deliveries by idempotency key", async () => {
testState.hooksConfig = { enabled: true, token: HOOK_TOKEN };
await withGatewayServer(async ({ port }) => {
cronIsolatedRun.mockClear();
cronIsolatedRun.mockResolvedValue({ status: "ok", summary: "done" });
const first = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{ headers: { "Idempotency-Key": "hook-idem-1" } },
);
expect(first.status).toBe(200);
const firstBody = (await first.json()) as { runId?: string };
expect(firstBody.runId).toBeTruthy();
await waitForSystemEvent();
expect(cronIsolatedRun).toHaveBeenCalledTimes(1);
drainSystemEvents(resolveMainKey());
const second = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{ headers: { "Idempotency-Key": "hook-idem-1" } },
);
expect(second.status).toBe(200);
const secondBody = (await second.json()) as { runId?: string };
expect(secondBody.runId).toBe(firstBody.runId);
expect(cronIsolatedRun).toHaveBeenCalledTimes(1);
expect(peekSystemEvents(resolveMainKey())).toHaveLength(0);
});
});
test("dedupes hook retries even when trusted-proxy client IP changes", async () => {
testState.hooksConfig = { enabled: true, token: HOOK_TOKEN };
const configPath = process.env.OPENCLAW_CONFIG_PATH;
expect(configPath).toBeTruthy();
await fs.writeFile(
configPath!,
JSON.stringify({ gateway: { trustedProxies: ["127.0.0.1"] } }, null, 2),
"utf-8",
);
await withGatewayServer(async ({ port }) => {
cronIsolatedRun.mockClear();
cronIsolatedRun.mockResolvedValue({ status: "ok", summary: "done" });
const first = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{
headers: {
"Idempotency-Key": "hook-idem-forwarded",
"X-Forwarded-For": "198.51.100.10",
},
},
);
expect(first.status).toBe(200);
const firstBody = (await first.json()) as { runId?: string };
await waitForSystemEvent();
drainSystemEvents(resolveMainKey());
const second = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{
headers: {
"Idempotency-Key": "hook-idem-forwarded",
"X-Forwarded-For": "203.0.113.25",
},
},
);
expect(second.status).toBe(200);
const secondBody = (await second.json()) as { runId?: string };
expect(secondBody.runId).toBe(firstBody.runId);
expect(cronIsolatedRun).toHaveBeenCalledTimes(1);
});
});
test("does not retain oversized idempotency keys for replay dedupe", async () => {
testState.hooksConfig = { enabled: true, token: HOOK_TOKEN };
const oversizedKey = "x".repeat(257);
await withGatewayServer(async ({ port }) => {
cronIsolatedRun.mockClear();
cronIsolatedRun.mockResolvedValue({ status: "ok", summary: "done" });
const first = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{ headers: { "Idempotency-Key": oversizedKey } },
);
expect(first.status).toBe(200);
await waitForSystemEvent();
drainSystemEvents(resolveMainKey());
const second = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{ headers: { "Idempotency-Key": oversizedKey } },
);
expect(second.status).toBe(200);
await waitForSystemEvent();
expect(cronIsolatedRun).toHaveBeenCalledTimes(2);
});
});
test("expires hook idempotency entries from first delivery time", async () => {
testState.hooksConfig = { enabled: true, token: HOOK_TOKEN };
const nowSpy = vi.spyOn(Date, "now");
nowSpy.mockReturnValue(1_000_000);
await withGatewayServer(async ({ port }) => {
cronIsolatedRun.mockClear();
cronIsolatedRun.mockResolvedValue({ status: "ok", summary: "done" });
const first = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{ headers: { "Idempotency-Key": "fixed-window-idem" } },
);
expect(first.status).toBe(200);
const firstBody = (await first.json()) as { runId?: string };
await waitForSystemEvent();
drainSystemEvents(resolveMainKey());
nowSpy.mockReturnValue(1_000_000 + DEDUPE_TTL_MS - 1);
const second = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{ headers: { "Idempotency-Key": "fixed-window-idem" } },
);
expect(second.status).toBe(200);
const secondBody = (await second.json()) as { runId?: string };
expect(secondBody.runId).toBe(firstBody.runId);
expect(cronIsolatedRun).toHaveBeenCalledTimes(1);
nowSpy.mockReturnValue(1_000_000 + DEDUPE_TTL_MS + 1);
const third = await postHook(
port,
"/hooks/agent",
{ message: "Do it", name: "Email" },
{ headers: { "Idempotency-Key": "fixed-window-idem" } },
);
expect(third.status).toBe(200);
const thirdBody = (await third.json()) as { runId?: string };
expect(thirdBody.runId).toBeTruthy();
expect(thirdBody.runId).not.toBe(firstBody.runId);
expect(cronIsolatedRun).toHaveBeenCalledTimes(2);
});
});
test("enforces hooks.allowedAgentIds for explicit agent routing", async () => {
testState.hooksConfig = {
enabled: true,

View File

@ -326,6 +326,7 @@ export async function performGatewaySessionReset(params: {
systemSent: false,
abortedLastRun: false,
thinkingLevel: currentEntry?.thinkingLevel,
fastMode: currentEntry?.fastMode,
verboseLevel: currentEntry?.verboseLevel,
reasoningLevel: currentEntry?.reasoningLevel,
responseUsage: currentEntry?.responseUsage,

View File

@ -929,6 +929,7 @@ export function listSessionsFromStore(params: {
systemSent: entry?.systemSent,
abortedLastRun: entry?.abortedLastRun,
thinkingLevel: entry?.thinkingLevel,
fastMode: entry?.fastMode,
verboseLevel: entry?.verboseLevel,
reasoningLevel: entry?.reasoningLevel,
elevatedLevel: entry?.elevatedLevel,

View File

@ -32,6 +32,7 @@ export type GatewaySessionRow = {
systemSent?: boolean;
abortedLastRun?: boolean;
thinkingLevel?: string;
fastMode?: boolean;
verboseLevel?: string;
reasoningLevel?: string;
elevatedLevel?: string;

View File

@ -149,6 +149,37 @@ describe("gateway sessions patch", () => {
expect(entry.reasoningLevel).toBeUndefined();
});
// fastMode=false is a real stored value, not a "clear" sentinel: an explicit
// boolean in the patch must be persisted rather than dropped.
test("persists fastMode=false (does not clear)", async () => {
  const entry = expectPatchOk(
    await runPatch({
      patch: { key: MAIN_SESSION_KEY, fastMode: false },
    }),
  );
  expect(entry.fastMode).toBe(false);
});
test("persists fastMode=true", async () => {
  const entry = expectPatchOk(
    await runPatch({
      patch: { key: MAIN_SESSION_KEY, fastMode: true },
    }),
  );
  expect(entry.fastMode).toBe(true);
});
// null (as opposed to false) is the clear sentinel: patching null removes the
// stored fastMode field entirely, so the session falls back to defaults.
test("clears fastMode when patch sets null", async () => {
  // Seed a store where fastMode is already set so the clear is observable.
  const store: Record<string, SessionEntry> = {
    [MAIN_SESSION_KEY]: { fastMode: true } as SessionEntry,
  };
  const entry = expectPatchOk(
    await runPatch({
      store,
      patch: { key: MAIN_SESSION_KEY, fastMode: null },
    }),
  );
  expect(entry.fastMode).toBeUndefined();
});
test("persists elevatedLevel=off (does not clear)", async () => {
const entry = expectPatchOk(
await runPatch({

View File

@ -11,6 +11,7 @@ import {
formatThinkingLevels,
formatXHighModelHint,
normalizeElevatedLevel,
normalizeFastMode,
normalizeReasoningLevel,
normalizeThinkLevel,
normalizeUsageDisplay,
@ -252,6 +253,19 @@ export async function applySessionsPatchToStore(params: {
}
}
if ("fastMode" in patch) {
const raw = patch.fastMode;
if (raw === null) {
delete next.fastMode;
} else if (raw !== undefined) {
const normalized = normalizeFastMode(raw);
if (normalized === undefined) {
return invalid("invalid fastMode (use true or false)");
}
next.fastMode = normalized;
}
}
if ("verboseLevel" in patch) {
const raw = patch.verboseLevel;
const parsed = parseVerboseOverride(raw);

View File

@ -2,6 +2,7 @@ import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { pathExists } from "../utils.js";
import { applyPathPrepend } from "./path-prepend.js";
export type GlobalInstallManager = "npm" | "pnpm" | "bun";
@ -19,6 +20,74 @@ const NPM_GLOBAL_INSTALL_OMIT_OPTIONAL_FLAGS = [
...NPM_GLOBAL_INSTALL_QUIET_FLAGS,
] as const;
/**
 * Resolve the portable Git `bin` directories that should be prepended to PATH
 * on Windows so global package installs can find `git` even when it is not on
 * the user's PATH.
 *
 * @param env Optional environment to read LOCALAPPDATA from; falls back to
 *   the process environment when absent or blank.
 * @returns The candidate directories that actually exist on disk, in priority
 *   order. Empty on non-Windows platforms or when LOCALAPPDATA is unset.
 */
async function resolvePortableGitPathPrepend(
  env: NodeJS.ProcessEnv | undefined,
): Promise<string[]> {
  if (process.platform !== "win32") {
    return [];
  }
  // Prefer the caller-supplied env; fall back to the live process env.
  const localAppData = env?.LOCALAPPDATA?.trim() || process.env.LOCALAPPDATA?.trim();
  if (!localAppData) {
    return [];
  }
  const portableGitRoot = path.join(localAppData, "OpenClaw", "deps", "portable-git");
  const candidates = [
    path.join(portableGitRoot, "mingw64", "bin"),
    path.join(portableGitRoot, "usr", "bin"),
    path.join(portableGitRoot, "cmd"),
    path.join(portableGitRoot, "bin"),
  ];
  // The existence checks are independent, so run them in parallel;
  // Promise.all preserves candidate order, keeping the priority stable.
  const checked = await Promise.all(
    candidates.map(async (candidate) => ((await pathExists(candidate)) ? candidate : null)),
  );
  return checked.filter((candidate): candidate is string => candidate !== null);
}
/**
 * Mutate `env` in place with the npm/node settings needed for quiet,
 * well-behaved global installs on Windows. No-op on other platforms.
 */
function applyWindowsPackageInstallEnv(env: Record<string, string>) {
  if (process.platform !== "win32") {
    return;
  }
  // Silence npm chatter and pin the script shell so postinstall scripts run
  // under cmd.exe; skip node-llama-cpp's binary download during install.
  const windowsDefaults: Record<string, string> = {
    NPM_CONFIG_UPDATE_NOTIFIER: "false",
    NPM_CONFIG_FUND: "false",
    NPM_CONFIG_AUDIT: "false",
    NPM_CONFIG_SCRIPT_SHELL: "cmd.exe",
    NODE_LLAMA_CPP_SKIP_DOWNLOAD: "1",
  };
  for (const [key, value] of Object.entries(windowsDefaults)) {
    env[key] = value;
  }
}
/**
 * Compute the package spec (`name@tag`) used for global updates.
 *
 * A non-empty OPENCLAW_UPDATE_PACKAGE_SPEC — read first from `params.env`,
 * then from the process environment — fully replaces the computed spec
 * (useful for pointing updates at a tarball or alternate registry URL).
 */
export function resolveGlobalInstallSpec(params: {
  packageName: string;
  tag: string;
  env?: NodeJS.ProcessEnv;
}): string {
  const fromParams = params.env?.OPENCLAW_UPDATE_PACKAGE_SPEC?.trim();
  const fromProcess = process.env.OPENCLAW_UPDATE_PACKAGE_SPEC?.trim();
  const override = fromParams || fromProcess;
  return override ? override : `${params.packageName}@${params.tag}`;
}
/**
 * Build the environment for global install commands.
 *
 * On Windows (or whenever portable Git directories need prepending) this
 * returns a fresh string-valued copy of the env with the portable Git PATH
 * prepend and Windows npm install tweaks applied. Otherwise the input env is
 * returned untouched.
 */
export async function createGlobalInstallEnv(
  env?: NodeJS.ProcessEnv,
): Promise<NodeJS.ProcessEnv | undefined> {
  const pathPrepend = await resolvePortableGitPathPrepend(env);
  // Non-Windows hosts with nothing to prepend need no rewrite at all.
  const needsRewrite = pathPrepend.length > 0 || process.platform === "win32";
  if (!needsRewrite) {
    return env;
  }
  // Copy to a plain Record<string, string>, dropping null/undefined entries,
  // so downstream helpers can mutate it safely.
  const source = env ?? process.env;
  const merged: Record<string, string> = {};
  for (const [key, value] of Object.entries(source)) {
    if (value != null) {
      merged[key] = String(value);
    }
  }
  applyPathPrepend(merged, pathPrepend);
  applyWindowsPackageInstallEnv(merged);
  return merged;
}
async function tryRealpath(targetPath: string): Promise<string> {
try {
return await fs.realpath(targetPath);

View File

@ -156,12 +156,15 @@ describe("runGatewayUpdate", () => {
}
async function runWithCommand(
runCommand: (argv: string[]) => Promise<CommandResult>,
runCommand: (
argv: string[],
options?: { env?: NodeJS.ProcessEnv; cwd?: string; timeoutMs?: number },
) => Promise<CommandResult>,
options?: { channel?: "stable" | "beta"; tag?: string; cwd?: string },
) {
return runGatewayUpdate({
cwd: options?.cwd ?? tempDir,
runCommand: async (argv, _runOptions) => runCommand(argv),
runCommand: async (argv, runOptions) => runCommand(argv, runOptions),
timeoutMs: 5000,
...(options?.channel ? { channel: options.channel } : {}),
...(options?.tag ? { tag: options.tag } : {}),
@ -419,6 +422,41 @@ describe("runGatewayUpdate", () => {
expect(calls.some((call) => call === expectedInstallCommand)).toBe(true);
});
// When `git rev-parse` cannot even spawn (ENOENT: git missing from PATH), the
// updater should treat the install as non-git and fall back to a global npm
// update instead of failing hard.
it("falls back to global npm update when git is missing from PATH", async () => {
  const nodeModules = path.join(tempDir, "node_modules");
  const pkgRoot = path.join(nodeModules, "openclaw");
  await seedGlobalPackageRoot(pkgRoot);
  const calls: string[] = [];
  // Fake command runner: records every invocation and simulates a host where
  // git is absent, npm owns the global root, and pnpm does not.
  const runCommand = async (argv: string[]): Promise<CommandResult> => {
    const key = argv.join(" ");
    calls.push(key);
    if (key === `git -C ${pkgRoot} rev-parse --show-toplevel`) {
      // Spawn failure (missing binary), not a git error exit code.
      throw Object.assign(new Error("spawn git ENOENT"), { code: "ENOENT" });
    }
    if (key === "npm root -g") {
      return { stdout: nodeModules, stderr: "", code: 0 };
    }
    if (key === "pnpm root -g") {
      return { stdout: "", stderr: "", code: 1 };
    }
    if (key === "npm i -g openclaw@latest --no-fund --no-audit --loglevel=error") {
      // Simulate a successful install by bumping the installed version.
      await fs.writeFile(
        path.join(pkgRoot, "package.json"),
        JSON.stringify({ name: "openclaw", version: "2.0.0" }),
        "utf-8",
      );
    }
    return { stdout: "ok", stderr: "", code: 0 };
  };
  const result = await runWithCommand(runCommand, { cwd: pkgRoot });
  expect(result.status).toBe("ok");
  expect(result.mode).toBe("npm");
  expect(calls).toContain("npm i -g openclaw@latest --no-fund --no-audit --loglevel=error");
});
it("cleans stale npm rename dirs before global update", async () => {
const nodeModules = path.join(tempDir, "node_modules");
const pkgRoot = path.join(nodeModules, "openclaw");
@ -477,6 +515,118 @@ describe("runGatewayUpdate", () => {
]);
});
// On Windows, the global npm update must run with the portable Git
// directories prepended to PATH and the Windows npm install env applied.
it("prepends portable Git PATH for global Windows npm updates", async () => {
  // Force the win32 code path regardless of the host OS running the tests.
  const platformSpy = vi.spyOn(process, "platform", "get").mockReturnValue("win32");
  const localAppData = path.join(tempDir, "local-app-data");
  const portableGitMingw = path.join(
    localAppData,
    "OpenClaw",
    "deps",
    "portable-git",
    "mingw64",
    "bin",
  );
  const portableGitUsr = path.join(
    localAppData,
    "OpenClaw",
    "deps",
    "portable-git",
    "usr",
    "bin",
  );
  // Only directories that actually exist on disk should end up in PATH, so
  // create just these two of the four candidate locations.
  await fs.mkdir(portableGitMingw, { recursive: true });
  await fs.mkdir(portableGitUsr, { recursive: true });
  const nodeModules = path.join(tempDir, "node_modules");
  const pkgRoot = path.join(nodeModules, "openclaw");
  await seedGlobalPackageRoot(pkgRoot);
  let installEnv: NodeJS.ProcessEnv | undefined;
  const runCommand = async (
    argv: string[],
    options?: { env?: NodeJS.ProcessEnv },
  ): Promise<CommandResult> => {
    const key = argv.join(" ");
    if (key === `git -C ${pkgRoot} rev-parse --show-toplevel`) {
      return { stdout: "", stderr: "not a git repository", code: 128 };
    }
    if (key === "npm root -g") {
      return { stdout: nodeModules, stderr: "", code: 0 };
    }
    if (key === "pnpm root -g") {
      return { stdout: "", stderr: "", code: 1 };
    }
    if (key === "npm i -g openclaw@latest --no-fund --no-audit --loglevel=error") {
      // Capture the env the install step actually receives for the asserts.
      installEnv = options?.env;
      await fs.writeFile(
        path.join(pkgRoot, "package.json"),
        JSON.stringify({ name: "openclaw", version: "2.0.0" }),
        "utf-8",
      );
    }
    return { stdout: "ok", stderr: "", code: 0 };
  };
  await withEnvAsync({ LOCALAPPDATA: localAppData }, async () => {
    const result = await runWithCommand(runCommand, { cwd: pkgRoot });
    expect(result.status).toBe("ok");
  });
  platformSpy.mockRestore();
  // The PATH variable's casing differs between env objects; accept either.
  const mergedPath = installEnv?.Path ?? installEnv?.PATH ?? "";
  expect(mergedPath.split(path.delimiter).slice(0, 2)).toEqual([
    portableGitMingw,
    portableGitUsr,
  ]);
  expect(installEnv?.NPM_CONFIG_SCRIPT_SHELL).toBe("cmd.exe");
  expect(installEnv?.NODE_LLAMA_CPP_SKIP_DOWNLOAD).toBe("1");
});
// OPENCLAW_UPDATE_PACKAGE_SPEC should fully replace the computed
// `<package>@<tag>` spec on the global install command line (e.g. to install
// from a tarball URL during development).
it("uses OPENCLAW_UPDATE_PACKAGE_SPEC for global package updates", async () => {
  const nodeModules = path.join(tempDir, "node_modules");
  const pkgRoot = path.join(nodeModules, "openclaw");
  await seedGlobalPackageRoot(pkgRoot);
  const calls: string[] = [];
  // Fake command runner: not a git checkout, npm owns the global root.
  const runCommand = async (argv: string[]): Promise<CommandResult> => {
    const key = argv.join(" ");
    calls.push(key);
    if (key === `git -C ${pkgRoot} rev-parse --show-toplevel`) {
      return { stdout: "", stderr: "not a git repository", code: 128 };
    }
    if (key === "npm root -g") {
      return { stdout: nodeModules, stderr: "", code: 0 };
    }
    if (key === "pnpm root -g") {
      return { stdout: "", stderr: "", code: 1 };
    }
    if (
      key ===
      "npm i -g http://10.211.55.2:8138/openclaw-next.tgz --no-fund --no-audit --loglevel=error"
    ) {
      // Simulate a successful install from the overridden spec.
      await fs.writeFile(
        path.join(pkgRoot, "package.json"),
        JSON.stringify({ name: "openclaw", version: "2.0.0" }),
        "utf-8",
      );
    }
    return { stdout: "ok", stderr: "", code: 0 };
  };
  await withEnvAsync(
    { OPENCLAW_UPDATE_PACKAGE_SPEC: "http://10.211.55.2:8138/openclaw-next.tgz" },
    async () => {
      const result = await runWithCommand(runCommand, { cwd: pkgRoot });
      expect(result.status).toBe("ok");
    },
  );
  expect(calls).toContain(
    "npm i -g http://10.211.55.2:8138/openclaw-next.tgz --no-fund --no-audit --loglevel=error",
  );
});
it("updates global bun installs when detected", async () => {
const bunInstall = path.join(tempDir, "bun-install");
await withEnvAsync({ BUN_INSTALL: bunInstall }, async () => {

View File

@ -22,9 +22,11 @@ import {
import { compareSemverStrings } from "./update-check.js";
import {
cleanupGlobalRenameDirs,
createGlobalInstallEnv,
detectGlobalInstallManagerForRoot,
globalInstallArgs,
globalInstallFallbackArgs,
resolveGlobalInstallSpec,
} from "./update-global.js";
export type UpdateStepResult = {
@ -201,7 +203,10 @@ async function resolveGitRoot(
for (const dir of candidates) {
const res = await runCommand(["git", "-C", dir, "rev-parse", "--show-toplevel"], {
timeoutMs,
});
}).catch(() => null);
if (!res) {
continue;
}
if (res.code === 0) {
const root = res.stdout.trim();
if (root) {
@ -868,14 +873,20 @@ export async function runGatewayUpdate(opts: UpdateRunnerOptions = {}): Promise<
});
const channel = opts.channel ?? DEFAULT_PACKAGE_CHANNEL;
const tag = normalizeTag(opts.tag ?? channelToNpmTag(channel));
const spec = `${packageName}@${tag}`;
const steps: UpdateStepResult[] = [];
const globalInstallEnv = await createGlobalInstallEnv();
const spec = resolveGlobalInstallSpec({
packageName,
tag,
env: globalInstallEnv,
});
const updateStep = await runStep({
runCommand,
name: "global update",
argv: globalInstallArgs(globalManager, spec),
cwd: pkgRoot,
timeoutMs,
env: globalInstallEnv,
progress,
stepIndex: 0,
totalSteps: 1,
@ -892,6 +903,7 @@ export async function runGatewayUpdate(opts: UpdateRunnerOptions = {}): Promise<
argv: fallbackArgv,
cwd: pkgRoot,
timeoutMs,
env: globalInstallEnv,
progress,
stepIndex: 0,
totalSteps: 1,

View File

@ -4,6 +4,7 @@ import { formatThinkingLevels, listThinkingLevelLabels } from "../auto-reply/thi
import type { OpenClawConfig } from "../config/types.js";
const VERBOSE_LEVELS = ["on", "off"];
const FAST_LEVELS = ["status", "on", "off"];
const REASONING_LEVELS = ["on", "off"];
const ELEVATED_LEVELS = ["on", "off", "ask", "full"];
const ACTIVATION_LEVELS = ["mention", "always"];
@ -52,6 +53,7 @@ export function parseCommand(input: string): ParsedCommand {
export function getSlashCommands(options: SlashCommandOptions = {}): SlashCommand[] {
const thinkLevels = listThinkingLevelLabels(options.provider, options.model);
const verboseCompletions = createLevelCompletion(VERBOSE_LEVELS);
const fastCompletions = createLevelCompletion(FAST_LEVELS);
const reasoningCompletions = createLevelCompletion(REASONING_LEVELS);
const usageCompletions = createLevelCompletion(USAGE_FOOTER_LEVELS);
const elevatedCompletions = createLevelCompletion(ELEVATED_LEVELS);
@ -76,6 +78,11 @@ export function getSlashCommands(options: SlashCommandOptions = {}): SlashComman
.filter((v) => v.startsWith(prefix.toLowerCase()))
.map((value) => ({ value, label: value })),
},
{
name: "fast",
description: "Set fast mode on/off",
getArgumentCompletions: fastCompletions,
},
{
name: "verbose",
description: "Set verbose on/off",
@ -142,6 +149,7 @@ export function helpText(options: SlashCommandOptions = {}): string {
"/session <key> (or /sessions)",
"/model <provider/model> (or /models)",
`/think <${thinkLevels}>`,
"/fast <status|on|off>",
"/verbose <on|off>",
"/reasoning <on|off>",
"/usage <off|tokens|full>",

View File

@ -79,6 +79,7 @@ export type GatewaySessionList = {
Pick<
SessionInfo,
| "thinkingLevel"
| "fastMode"
| "verboseLevel"
| "reasoningLevel"
| "model"
@ -92,6 +93,7 @@ export type GatewaySessionList = {
key: string;
sessionId?: string;
updatedAt?: number | null;
fastMode?: boolean;
sendPolicy?: string;
responseUsage?: ResponseUsageMode;
label?: string;

View File

@ -345,6 +345,27 @@ export function createCommandHandlers(context: CommandHandlerContext) {
chatLog.addSystem(`verbose failed: ${String(err)}`);
}
break;
case "fast":
if (!args || args === "status") {
chatLog.addSystem(`fast mode: ${state.sessionInfo.fastMode ? "on" : "off"}`);
break;
}
if (args !== "on" && args !== "off") {
chatLog.addSystem("usage: /fast <status|on|off>");
break;
}
try {
const result = await client.patchSession({
key: state.currentSessionKey,
fastMode: args === "on",
});
chatLog.addSystem(`fast mode ${args === "on" ? "enabled" : "disabled"}`);
applySessionInfoFromPatch(result);
await refreshSessionInfo();
} catch (err) {
chatLog.addSystem(`fast failed: ${String(err)}`);
}
break;
case "reasoning":
if (!args) {
chatLog.addSystem("usage: /reasoning <on|off>");

View File

@ -165,6 +165,9 @@ export function createSessionActions(context: SessionActionContext) {
if (entry?.thinkingLevel !== undefined) {
next.thinkingLevel = entry.thinkingLevel;
}
if (entry?.fastMode !== undefined) {
next.fastMode = entry.fastMode;
}
if (entry?.verboseLevel !== undefined) {
next.verboseLevel = entry.verboseLevel;
}
@ -286,10 +289,12 @@ export function createSessionActions(context: SessionActionContext) {
messages?: unknown[];
sessionId?: string;
thinkingLevel?: string;
fastMode?: boolean;
verboseLevel?: string;
};
state.currentSessionId = typeof record.sessionId === "string" ? record.sessionId : null;
state.sessionInfo.thinkingLevel = record.thinkingLevel ?? state.sessionInfo.thinkingLevel;
state.sessionInfo.fastMode = record.fastMode ?? state.sessionInfo.fastMode;
state.sessionInfo.verboseLevel = record.verboseLevel ?? state.sessionInfo.verboseLevel;
const showTools = (state.sessionInfo.verboseLevel ?? "off") !== "off";
chatLog.clearAll();

View File

@ -28,6 +28,7 @@ export type ResponseUsageMode = "on" | "off" | "tokens" | "full";
export type SessionInfo = {
thinkingLevel?: string;
fastMode?: boolean;
verboseLevel?: string;
reasoningLevel?: string;
model?: string;

View File

@ -752,6 +752,7 @@ export async function runTui(opts: TuiOptions) {
: "unknown";
const tokens = formatTokens(sessionInfo.totalTokens ?? null, sessionInfo.contextTokens ?? null);
const think = sessionInfo.thinkingLevel ?? "off";
const fast = sessionInfo.fastMode === true;
const verbose = sessionInfo.verboseLevel ?? "off";
const reasoning = sessionInfo.reasoningLevel ?? "off";
const reasoningLabel =
@ -761,6 +762,7 @@ export async function runTui(opts: TuiOptions) {
`session ${sessionLabel}`,
modelLabel,
think !== "off" ? `think ${think}` : null,
fast ? "fast" : null,
verbose !== "off" ? `verbose ${verbose}` : null,
reasoningLabel,
tokens,

View File

@ -378,4 +378,42 @@ describe("executeSlashCommand directives", () => {
expect(result.content).toBe("Current verbose level: full.\nOptions: on, full, off.");
expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {});
});
// Bare `/fast` (empty args) should only read state: it lists sessions and
// reports the current fast mode without issuing any patch.
it("reports the current fast mode for bare /fast", async () => {
  const request = vi.fn(async (method: string, _payload?: unknown) => {
    if (method === "sessions.list") {
      return {
        sessions: [row("agent:main:main", { fastMode: true })],
      };
    }
    throw new Error(`unexpected method: ${method}`);
  });
  const result = await executeSlashCommand(
    { request } as unknown as GatewayBrowserClient,
    "agent:main:main",
    "fast",
    "",
  );
  expect(result.content).toBe("Current fast mode: on.\nOptions: status, on, off.");
  expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {});
});
// `/fast on` should translate into a sessions.patch carrying fastMode: true
// for the active session key.
it("patches fast mode for /fast on", async () => {
  const request = vi.fn().mockResolvedValue({ ok: true });
  const result = await executeSlashCommand(
    { request } as unknown as GatewayBrowserClient,
    "agent:main:main",
    "fast",
    "on",
  );
  expect(result.content).toBe("Fast mode enabled.");
  expect(request).toHaveBeenCalledWith("sessions.patch", {
    key: "agent:main:main",
    fastMode: true,
  });
});
});

View File

@ -63,6 +63,8 @@ export async function executeSlashCommand(
return await executeModel(client, sessionKey, args);
case "think":
return await executeThink(client, sessionKey, args);
case "fast":
return await executeFast(client, sessionKey, args);
case "verbose":
return await executeVerbose(client, sessionKey, args);
case "export":
@ -252,6 +254,44 @@ async function executeVerbose(
}
}
/**
 * Handle the `/fast` slash command.
 *
 * Bare `/fast` or `/fast status` reports the session's current fast-mode
 * state; `/fast on` / `/fast off` patch the session's `fastMode` flag via
 * `sessions.patch` and request a UI refresh. Anything else is rejected with
 * a usage message.
 */
async function executeFast(
  client: GatewayBrowserClient,
  sessionKey: string,
  args: string,
): Promise<SlashCommandResult> {
  const mode = args.trim().toLowerCase();

  // Read-only path: report the current state without mutating anything.
  if (mode === "" || mode === "status") {
    try {
      const session = await loadCurrentSession(client, sessionKey);
      const current = resolveCurrentFastMode(session);
      return {
        content: formatDirectiveOptions(`Current fast mode: ${current}.`, "status, on, off"),
      };
    } catch (err) {
      return { content: `Failed to get fast mode: ${String(err)}` };
    }
  }

  const enable = mode === "on";
  if (!enable && mode !== "off") {
    return {
      content: `Unrecognized fast mode "${args.trim()}". Valid levels: status, on, off.`,
    };
  }

  try {
    await client.request("sessions.patch", { key: sessionKey, fastMode: enable });
    return {
      content: `Fast mode ${enable ? "enabled" : "disabled"}.`,
      action: "refresh",
    };
  } catch (err) {
    return { content: `Failed to set fast mode: ${String(err)}` };
  }
}
async function executeUsage(
client: GatewayBrowserClient,
sessionKey: string,
@ -534,6 +574,10 @@ function resolveCurrentThinkingLevel(
});
}
/** Map a session row's fastMode flag to the user-facing "on"/"off" label. */
function resolveCurrentFastMode(session: GatewaySessionRow | undefined): "on" | "off" {
  // Only a strict boolean true counts as "on"; missing rows, missing flags,
  // and false all render as "off".
  if (session != null && session.fastMode === true) {
    return "on";
  }
  return "off";
}
function fmtTokens(n: number): string {
if (n >= 1_000_000) {
return `${(n / 1_000_000).toFixed(1).replace(/\.0$/, "")}M`;

View File

@ -23,4 +23,11 @@ describe("parseSlashCommand", () => {
args: "full",
});
});
// Colon syntax (`/fast:on`) must parse into the `fast` command with the
// remainder after the colon delivered as args.
it("parses fast commands", () => {
  expect(parseSlashCommand("/fast:on")).toMatchObject({
    command: { name: "fast" },
    args: "on",
  });
});
});

View File

@ -88,6 +88,15 @@ export const SLASH_COMMANDS: SlashCommandDef[] = [
executeLocal: true,
argOptions: ["on", "off", "full"],
},
{
name: "fast",
description: "Toggle fast mode",
args: "<status|on|off>",
icon: "zap",
category: "model",
executeLocal: true,
argOptions: ["status", "on", "off"],
},
// ── Tools ──
{

View File

@ -63,6 +63,7 @@ export async function patchSession(
patch: {
label?: string | null;
thinkingLevel?: string | null;
fastMode?: boolean | null;
verboseLevel?: string | null;
reasoningLevel?: string | null;
},
@ -77,6 +78,9 @@ export async function patchSession(
if ("thinkingLevel" in patch) {
params.thinkingLevel = patch.thinkingLevel;
}
if ("fastMode" in patch) {
params.fastMode = patch.fastMode;
}
if ("verboseLevel" in patch) {
params.verboseLevel = patch.verboseLevel;
}

View File

@ -379,6 +379,7 @@ export type GatewaySessionRow = {
systemSent?: boolean;
abortedLastRun?: boolean;
thinkingLevel?: string;
fastMode?: boolean;
verboseLevel?: string;
reasoningLevel?: string;
elevatedLevel?: string;
@ -396,6 +397,7 @@ export type SessionsPatchResult = SessionsPatchResultBase<{
sessionId: string;
updatedAt?: number;
thinkingLevel?: string;
fastMode?: boolean;
verboseLevel?: string;
reasoningLevel?: string;
elevatedLevel?: string;

View File

@ -60,7 +60,7 @@ describe("sessions view", () => {
await Promise.resolve();
const selects = container.querySelectorAll("select");
const verbose = selects[1] as HTMLSelectElement | undefined;
const verbose = selects[2] as HTMLSelectElement | undefined;
expect(verbose?.value).toBe("full");
expect(Array.from(verbose?.options ?? []).some((option) => option.value === "full")).toBe(true);
});
@ -83,10 +83,32 @@ describe("sessions view", () => {
await Promise.resolve();
const selects = container.querySelectorAll("select");
const reasoning = selects[2] as HTMLSelectElement | undefined;
const reasoning = selects[3] as HTMLSelectElement | undefined;
expect(reasoning?.value).toBe("custom-mode");
expect(
Array.from(reasoning?.options ?? []).some((option) => option.value === "custom-mode"),
).toBe(true);
});
it("renders explicit fast mode without falling back to inherit", async () => {
const container = document.createElement("div");
render(
renderSessions(
buildProps(
buildResult({
key: "agent:main:main",
kind: "direct",
updatedAt: Date.now(),
fastMode: true,
}),
),
),
container,
);
await Promise.resolve();
const selects = container.querySelectorAll("select");
const fast = selects[1] as HTMLSelectElement | undefined;
expect(fast?.value).toBe("on");
});
});

View File

@ -37,6 +37,7 @@ export type SessionsProps = {
patch: {
label?: string | null;
thinkingLevel?: string | null;
fastMode?: boolean | null;
verboseLevel?: string | null;
reasoningLevel?: string | null;
},
@ -52,6 +53,11 @@ const VERBOSE_LEVELS = [
{ value: "on", label: "on" },
{ value: "full", label: "full" },
] as const;
const FAST_LEVELS = [
{ value: "", label: "inherit" },
{ value: "on", label: "on" },
{ value: "off", label: "off" },
] as const;
const REASONING_LEVELS = ["", "off", "on", "stream"] as const;
const PAGE_SIZES = [10, 25, 50, 100] as const;
@ -306,6 +312,7 @@ export function renderSessions(props: SessionsProps) {
${sortHeader("updated", "Updated")}
${sortHeader("tokens", "Tokens")}
<th>Thinking</th>
<th>Fast</th>
<th>Verbose</th>
<th>Reasoning</th>
<th style="width: 60px;"></th>
@ -316,7 +323,7 @@ export function renderSessions(props: SessionsProps) {
paginated.length === 0
? html`
<tr>
<td colspan="9" style="text-align: center; padding: 48px 16px; color: var(--muted)">
<td colspan="10" style="text-align: center; padding: 48px 16px; color: var(--muted)">
No sessions found.
</td>
</tr>
@ -390,6 +397,8 @@ function renderRow(
const isBinaryThinking = isBinaryThinkingProvider(row.modelProvider);
const thinking = resolveThinkLevelDisplay(rawThinking, isBinaryThinking);
const thinkLevels = withCurrentOption(resolveThinkLevelOptions(row.modelProvider), thinking);
const fastMode = row.fastMode === true ? "on" : row.fastMode === false ? "off" : "";
const fastLevels = withCurrentLabeledOption(FAST_LEVELS, fastMode);
const verbose = row.verboseLevel ?? "";
const verboseLevels = withCurrentLabeledOption(VERBOSE_LEVELS, verbose);
const reasoning = row.reasoningLevel ?? "";
@ -465,6 +474,23 @@ function renderRow(
)}
</select>
</td>
<td>
<select
?disabled=${disabled}
style="padding: 6px 10px; font-size: 13px; border: 1px solid var(--border); border-radius: var(--radius-sm); min-width: 90px;"
@change=${(e: Event) => {
const value = (e.target as HTMLSelectElement).value;
onPatch(row.key, { fastMode: value === "" ? null : value === "on" });
}}
>
${fastLevels.map(
(level) =>
html`<option value=${level.value} ?selected=${fastMode === level.value}>
${level.label}
</option>`,
)}
</select>
</td>
<td>
<select
?disabled=${disabled}