diff --git a/.agent/workflows/update_clawdbot.md b/.agent/workflows/update_clawdbot.md
index 04a079aab41..0543e7c2a68 100644
--- a/.agent/workflows/update_clawdbot.md
+++ b/.agent/workflows/update_clawdbot.md
@@ -1,8 +1,8 @@
---
-description: Update Clawdbot from upstream when branch has diverged (ahead/behind)
+description: Update OpenClaw from upstream when branch has diverged (ahead/behind)
---
-# Clawdbot Upstream Sync Workflow
+# OpenClaw Upstream Sync Workflow
Use this workflow when your fork has diverged from upstream (e.g., "18 commits ahead, 29 commits behind").
@@ -132,16 +132,16 @@ pnpm mac:package
```bash
# Kill running app
-pkill -x "Clawdbot" || true
+pkill -x "OpenClaw" || true
# Move old version
-mv /Applications/Clawdbot.app /tmp/Clawdbot-backup.app
+mv /Applications/OpenClaw.app /tmp/OpenClaw-backup.app
# Install new build
-cp -R dist/Clawdbot.app /Applications/
+cp -R dist/OpenClaw.app /Applications/
# Launch
-open /Applications/Clawdbot.app
+open /Applications/OpenClaw.app
```
---
@@ -235,7 +235,7 @@ If upstream introduced new model configurations:
# Check for OpenRouter API key requirements
grep -r "openrouter\|OPENROUTER" src/ --include="*.ts" --include="*.js"
-# Update clawdbot.json with fallback chains
+# Update openclaw.json with fallback chains
# Add model fallback configurations as needed
```
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dce61691d04..09a1af818b4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,9 +22,16 @@ Docs: https://docs.openclaw.ai
- Control UI/dashboard: preserve structured gateway shutdown reasons across restart disconnects so config-triggered restarts no longer fall back to `disconnected (1006): no reason`. (#46532) Thanks @vincentkoc.
- Android/chat: theme the thinking dropdown and TLS trust dialogs explicitly so popup surfaces match the active app theme instead of falling back to mismatched Material defaults.
- Z.AI/onboarding: detect a working default model even for explicit `zai-coding-*` endpoint choices, so Coding Plan setup can keep the selected endpoint while defaulting to `glm-5` when available or `glm-4.7` as fallback. (#45969)
+- Models/OpenRouter runtime capabilities: fetch uncatalogued OpenRouter model metadata on first use so newly added vision models keep image input instead of silently degrading to text-only, with top-level capability field fallbacks for `/api/v1/models`. (#45824) Thanks @DJjjjhao.
- Z.AI/onboarding: add `glm-5-turbo` to the default Z.AI provider catalog so onboarding-generated configs expose the new model alongside the existing GLM defaults. (#46670) Thanks @tomsun28.
- Zalo Personal/group gating: stop reapplying `dmPolicy.allowFrom` as a sender gate for already-allowlisted groups when `groupAllowFrom` is unset, so any member of an allowed group can trigger replies while DMs stay restricted. (#40146)
- Plugins/install precedence: keep bundled plugins ahead of auto-discovered globals by default, but let an explicitly installed plugin record win its own duplicate-id tie so installed channel plugins load from `~/.openclaw/extensions` after `openclaw plugins install`.
+- macOS/canvas actions: keep unattended local agent actions on trusted in-app canvas surfaces only, and stop exposing the deep-link fallback key to arbitrary page scripts. Thanks @vincentkoc.
+- Agents/compaction: extend the enclosing run deadline once while compaction is actively in flight, and abort the underlying SDK compaction on timeout/cancel so large-session compactions stop freezing mid-run. (#46889) Thanks @asyncjason.
+
+### Fixes
+
+- Slack/interactive replies: preserve `channelData.slack.blocks` through live DM delivery and preview-finalized edits so Block Kit button and select directives render instead of falling back to raw text. Thanks @vincentkoc.
- Zalo/plugin runtime: export `resolveClientIp` from `openclaw/plugin-sdk/zalo` so installed builds no longer crash on startup when the webhook monitor loads from the packaged extension instead of the monorepo source tree. (#46549) Thanks @No898.
- CI/channel test routing: move the built-in channel suites into `test:channels` and keep them out of `test:extensions`, so extension CI no longer fails after the channel migration while targeted test routing still sends Slack, Signal, and iMessage suites to the right lane. (#46066) Thanks @scoootscooob.
- Browser/profiles: drop the auto-created `chrome-relay` browser profile; users who need the Chrome extension relay must now create their own profile via `openclaw browser create-profile`. (#45777) Thanks @odysseus0.
diff --git a/README.md b/README.md
index 767f4bc2141..d5a22313f27 100644
--- a/README.md
+++ b/README.md
@@ -2,8 +2,8 @@
-
-
+
+
diff --git a/apps/macos/Sources/OpenClaw/CanvasA2UIActionMessageHandler.swift b/apps/macos/Sources/OpenClaw/CanvasA2UIActionMessageHandler.swift
index 4f47ea835df..c81d4b59705 100644
--- a/apps/macos/Sources/OpenClaw/CanvasA2UIActionMessageHandler.swift
+++ b/apps/macos/Sources/OpenClaw/CanvasA2UIActionMessageHandler.swift
@@ -18,13 +18,10 @@ final class CanvasA2UIActionMessageHandler: NSObject, WKScriptMessageHandler {
func userContentController(_: WKUserContentController, didReceive message: WKScriptMessage) {
guard Self.allMessageNames.contains(message.name) else { return }
- // Only accept actions from local Canvas content (not arbitrary web pages).
+ // Only accept actions from the in-app canvas scheme. Local-network HTTP
+ // pages are regular web content and must not get direct agent dispatch.
guard let webView = message.webView, let url = webView.url else { return }
- if let scheme = url.scheme, CanvasScheme.allSchemes.contains(scheme) {
- // ok
- } else if Self.isLocalNetworkCanvasURL(url) {
- // ok
- } else {
+ guard let scheme = url.scheme, CanvasScheme.allSchemes.contains(scheme) else {
return
}
@@ -107,10 +104,5 @@ final class CanvasA2UIActionMessageHandler: NSObject, WKScriptMessageHandler {
}
}
}
-
- static func isLocalNetworkCanvasURL(_ url: URL) -> Bool {
- LocalNetworkURLSupport.isLocalNetworkHTTPURL(url)
- }
-
// Formatting helpers live in OpenClawKit (`OpenClawCanvasA2UIAction`).
}
diff --git a/apps/macos/Sources/OpenClaw/CanvasWindowController.swift b/apps/macos/Sources/OpenClaw/CanvasWindowController.swift
index 8017304087e..0032bfff0fa 100644
--- a/apps/macos/Sources/OpenClaw/CanvasWindowController.swift
+++ b/apps/macos/Sources/OpenClaw/CanvasWindowController.swift
@@ -50,21 +50,24 @@ final class CanvasWindowController: NSWindowController, WKNavigationDelegate, NS
// Bridge A2UI "a2uiaction" DOM events back into the native agent loop.
//
- // Prefer WKScriptMessageHandler when WebKit exposes it, otherwise fall back to an unattended deep link
- // (includes the app-generated key so it won't prompt).
+ // Keep the bridge on the trusted in-app canvas scheme only, and do not
+ // expose unattended deep-link credentials to page JavaScript.
canvasWindowLogger.debug("CanvasWindowController init building A2UI bridge script")
- let deepLinkKey = DeepLinkHandler.currentCanvasKey()
let injectedSessionKey = sessionKey.trimmingCharacters(in: .whitespacesAndNewlines).nonEmpty ?? "main"
+ let allowedSchemesJSON = (
+ try? String(
+ data: JSONSerialization.data(withJSONObject: CanvasScheme.allSchemes),
+ encoding: .utf8)
+ ) ?? "[]"
let bridgeScript = """
(() => {
try {
- const allowedSchemes = \(String(describing: CanvasScheme.allSchemes));
+ const allowedSchemes = \(allowedSchemesJSON);
const protocol = location.protocol.replace(':', '');
if (!allowedSchemes.includes(protocol)) return;
if (globalThis.__openclawA2UIBridgeInstalled) return;
globalThis.__openclawA2UIBridgeInstalled = true;
- const deepLinkKey = \(Self.jsStringLiteral(deepLinkKey));
const sessionKey = \(Self.jsStringLiteral(injectedSessionKey));
const machineName = \(Self.jsStringLiteral(InstanceIdentity.displayName));
const instanceId = \(Self.jsStringLiteral(InstanceIdentity.instanceId));
@@ -104,24 +107,8 @@ final class CanvasWindowController: NSWindowController, WKNavigationDelegate, NS
return;
}
- const ctx = userAction.context ? (' ctx=' + JSON.stringify(userAction.context)) : '';
- const message =
- 'CANVAS_A2UI action=' + userAction.name +
- ' session=' + sessionKey +
- ' surface=' + userAction.surfaceId +
- ' component=' + (userAction.sourceComponentId || '-') +
- ' host=' + machineName.replace(/\\s+/g, '_') +
- ' instance=' + instanceId +
- ctx +
- ' default=update_canvas';
- const params = new URLSearchParams();
- params.set('message', message);
- params.set('sessionKey', sessionKey);
- params.set('thinking', 'low');
- params.set('deliver', 'false');
- params.set('channel', 'last');
- params.set('key', deepLinkKey);
- location.href = 'openclaw://agent?' + params.toString();
+ // Without the native handler, fail closed instead of exposing an
+ // unattended deep-link credential to page JavaScript.
} catch {}
}, true);
} catch {}
diff --git a/docs/assets/openclaw-logo-text-dark.svg b/docs/assets/openclaw-logo-text-dark.svg
new file mode 100644
index 00000000000..317a203c8a4
--- /dev/null
+++ b/docs/assets/openclaw-logo-text-dark.svg
@@ -0,0 +1,418 @@
+
+
+
+
diff --git a/docs/assets/openclaw-logo-text.svg b/docs/assets/openclaw-logo-text.svg
new file mode 100644
index 00000000000..34038af7b3e
--- /dev/null
+++ b/docs/assets/openclaw-logo-text.svg
@@ -0,0 +1,418 @@
+
+
+
+
diff --git a/extensions/tlon/src/monitor/index.ts b/extensions/tlon/src/monitor/index.ts
index a9291878101..1ea42902aaf 100644
--- a/extensions/tlon/src/monitor/index.ts
+++ b/extensions/tlon/src/monitor/index.ts
@@ -301,7 +301,7 @@ export async function monitorTlonProvider(opts: MonitorTlonOpts = {}): Promise 0 ? newSettings.dmAllowlist : account.dmAllowlist;
+ effectiveDmAllowlist = newSettings.dmAllowlist;
runtime.log?.(`[tlon] Settings: dmAllowlist updated to ${effectiveDmAllowlist.join(", ")}`);
}
@@ -1551,10 +1551,7 @@ export async function monitorTlonProvider(opts: MonitorTlonOpts = {}): Promise 0
- ? newSettings.groupInviteAllowlist
- : account.groupInviteAllowlist;
+ effectiveGroupInviteAllowlist = newSettings.groupInviteAllowlist;
runtime.log?.(
`[tlon] Settings: groupInviteAllowlist updated to ${effectiveGroupInviteAllowlist.join(", ")}`,
);
diff --git a/skills/apple-reminders/SKILL.md b/skills/apple-reminders/SKILL.md
index 2f4d48cd424..fc743a7714e 100644
--- a/skills/apple-reminders/SKILL.md
+++ b/skills/apple-reminders/SKILL.md
@@ -40,11 +40,11 @@ Use `remindctl` to manage Apple Reminders directly from the terminal.
❌ **DON'T use this skill when:**
-- Scheduling Clawdbot tasks or alerts → use `cron` tool with systemEvent instead
+- Scheduling OpenClaw tasks or alerts → use `cron` tool with systemEvent instead
- Calendar events or appointments → use Apple Calendar
- Project/work task management → use Notion, GitHub Issues, or task queue
- One-time notifications → use `cron` tool for timed alerts
-- User says "remind me" but means a Clawdbot alert → clarify first
+- User says "remind me" but means an OpenClaw alert → clarify first
## Setup
@@ -112,7 +112,7 @@ Accepted by `--due` and date filters:
User: "Remind me to check on the deploy in 2 hours"
-**Ask:** "Do you want this in Apple Reminders (syncs to your phone) or as a Clawdbot alert (I'll message you here)?"
+**Ask:** "Do you want this in Apple Reminders (syncs to your phone) or as an OpenClaw alert (I'll message you here)?"
- Apple Reminders → use this skill
-- Clawdbot alert → use `cron` tool with systemEvent
+- OpenClaw alert → use `cron` tool with systemEvent
diff --git a/skills/imsg/SKILL.md b/skills/imsg/SKILL.md
index 21c41ad1c36..fdd17999dcf 100644
--- a/skills/imsg/SKILL.md
+++ b/skills/imsg/SKILL.md
@@ -47,7 +47,7 @@ Use `imsg` to read and send iMessage/SMS via macOS Messages.app.
- Slack messages → use `slack` skill
- Group chat management (adding/removing members) → not supported
- Bulk/mass messaging → always confirm with user first
-- Replying in current conversation → just reply normally (Clawdbot routes automatically)
+- Replying in current conversation → just reply normally (OpenClaw routes automatically)
## Requirements
diff --git a/src/agents/context.lookup.test.ts b/src/agents/context.lookup.test.ts
index a395f0b3089..e5025b36c76 100644
--- a/src/agents/context.lookup.test.ts
+++ b/src/agents/context.lookup.test.ts
@@ -90,6 +90,20 @@ describe("lookupContextTokens", () => {
}
});
+ it("skips eager warmup for logs commands that do not need model metadata at startup", async () => {
+ const loadConfigMock = vi.fn(() => ({ models: {} }));
+ mockContextModuleDeps(loadConfigMock);
+
+ const argvSnapshot = process.argv;
+ process.argv = ["node", "openclaw", "logs", "--limit", "5"];
+ try {
+ await import("./context.js");
+ expect(loadConfigMock).not.toHaveBeenCalled();
+ } finally {
+ process.argv = argvSnapshot;
+ }
+ });
+
it("retries config loading after backoff when an initial load fails", async () => {
vi.useFakeTimers();
const loadConfigMock = vi
diff --git a/src/agents/context.ts b/src/agents/context.ts
index c18d9534689..5550f67e3b7 100644
--- a/src/agents/context.ts
+++ b/src/agents/context.ts
@@ -108,9 +108,24 @@ function getCommandPathFromArgv(argv: string[]): string[] {
return tokens;
}
+const SKIP_EAGER_WARMUP_PRIMARY_COMMANDS = new Set([
+ "backup",
+ "completion",
+ "config",
+ "directory",
+ "doctor",
+ "health",
+ "hooks",
+ "logs",
+ "plugins",
+ "secrets",
+ "update",
+ "webhooks",
+]);
+
function shouldSkipEagerContextWindowWarmup(argv: string[] = process.argv): boolean {
- const [primary, secondary] = getCommandPathFromArgv(argv);
- return primary === "config" && secondary === "validate";
+ const [primary] = getCommandPathFromArgv(argv);
+ return primary ? SKIP_EAGER_WARMUP_PRIMARY_COMMANDS.has(primary) : false;
}
function primeConfiguredContextWindows(): OpenClawConfig | undefined {
diff --git a/src/agents/pi-embedded-runner.compaction-safety-timeout.test.ts b/src/agents/pi-embedded-runner.compaction-safety-timeout.test.ts
index 31906dd733e..1898373cfc9 100644
--- a/src/agents/pi-embedded-runner.compaction-safety-timeout.test.ts
+++ b/src/agents/pi-embedded-runner.compaction-safety-timeout.test.ts
@@ -2,6 +2,7 @@ import { afterEach, describe, expect, it, vi } from "vitest";
import {
compactWithSafetyTimeout,
EMBEDDED_COMPACTION_TIMEOUT_MS,
+ resolveCompactionTimeoutMs,
} from "./pi-embedded-runner/compaction-safety-timeout.js";
describe("compactWithSafetyTimeout", () => {
@@ -42,4 +43,113 @@ describe("compactWithSafetyTimeout", () => {
).rejects.toBe(error);
expect(vi.getTimerCount()).toBe(0);
});
+
+ it("calls onCancel when compaction times out", async () => {
+ vi.useFakeTimers();
+ const onCancel = vi.fn();
+
+ const compactPromise = compactWithSafetyTimeout(() => new Promise(() => {}), 30, {
+ onCancel,
+ });
+ const timeoutAssertion = expect(compactPromise).rejects.toThrow("Compaction timed out");
+
+ await vi.advanceTimersByTimeAsync(30);
+ await timeoutAssertion;
+ expect(onCancel).toHaveBeenCalledTimes(1);
+ expect(vi.getTimerCount()).toBe(0);
+ });
+
+ it("aborts early on external abort signal and calls onCancel once", async () => {
+ vi.useFakeTimers();
+ const controller = new AbortController();
+ const onCancel = vi.fn();
+ const reason = new Error("request timed out");
+
+ const compactPromise = compactWithSafetyTimeout(() => new Promise(() => {}), 100, {
+ abortSignal: controller.signal,
+ onCancel,
+ });
+ const abortAssertion = expect(compactPromise).rejects.toBe(reason);
+
+ controller.abort(reason);
+ await abortAssertion;
+ expect(onCancel).toHaveBeenCalledTimes(1);
+ expect(vi.getTimerCount()).toBe(0);
+ });
+
+ it("ignores onCancel errors and still rejects with the timeout", async () => {
+ vi.useFakeTimers();
+ const compactPromise = compactWithSafetyTimeout(() => new Promise(() => {}), 30, {
+ onCancel: () => {
+ throw new Error("abortCompaction failed");
+ },
+ });
+ const timeoutAssertion = expect(compactPromise).rejects.toThrow("Compaction timed out");
+
+ await vi.advanceTimersByTimeAsync(30);
+ await timeoutAssertion;
+ expect(vi.getTimerCount()).toBe(0);
+ });
+});
+
+describe("resolveCompactionTimeoutMs", () => {
+ it("returns default when config is undefined", () => {
+ expect(resolveCompactionTimeoutMs(undefined)).toBe(EMBEDDED_COMPACTION_TIMEOUT_MS);
+ });
+
+ it("returns default when compaction config is missing", () => {
+ expect(resolveCompactionTimeoutMs({ agents: { defaults: {} } })).toBe(
+ EMBEDDED_COMPACTION_TIMEOUT_MS,
+ );
+ });
+
+ it("returns default when timeoutSeconds is not set", () => {
+ expect(
+ resolveCompactionTimeoutMs({ agents: { defaults: { compaction: { mode: "safeguard" } } } }),
+ ).toBe(EMBEDDED_COMPACTION_TIMEOUT_MS);
+ });
+
+ it("converts timeoutSeconds to milliseconds", () => {
+ expect(
+ resolveCompactionTimeoutMs({
+ agents: { defaults: { compaction: { timeoutSeconds: 1800 } } },
+ }),
+ ).toBe(1_800_000);
+ });
+
+ it("floors fractional seconds", () => {
+ expect(
+ resolveCompactionTimeoutMs({
+ agents: { defaults: { compaction: { timeoutSeconds: 120.7 } } },
+ }),
+ ).toBe(120_000);
+ });
+
+ it("returns default for zero", () => {
+ expect(
+ resolveCompactionTimeoutMs({ agents: { defaults: { compaction: { timeoutSeconds: 0 } } } }),
+ ).toBe(EMBEDDED_COMPACTION_TIMEOUT_MS);
+ });
+
+ it("returns default for negative values", () => {
+ expect(
+ resolveCompactionTimeoutMs({ agents: { defaults: { compaction: { timeoutSeconds: -5 } } } }),
+ ).toBe(EMBEDDED_COMPACTION_TIMEOUT_MS);
+ });
+
+ it("returns default for NaN", () => {
+ expect(
+ resolveCompactionTimeoutMs({
+ agents: { defaults: { compaction: { timeoutSeconds: NaN } } },
+ }),
+ ).toBe(EMBEDDED_COMPACTION_TIMEOUT_MS);
+ });
+
+ it("returns default for Infinity", () => {
+ expect(
+ resolveCompactionTimeoutMs({
+ agents: { defaults: { compaction: { timeoutSeconds: Infinity } } },
+ }),
+ ).toBe(EMBEDDED_COMPACTION_TIMEOUT_MS);
+ });
});
diff --git a/src/agents/pi-embedded-runner/compact.hooks.test.ts b/src/agents/pi-embedded-runner/compact.hooks.test.ts
index af7cfd7e1bf..0a864236b81 100644
--- a/src/agents/pi-embedded-runner/compact.hooks.test.ts
+++ b/src/agents/pi-embedded-runner/compact.hooks.test.ts
@@ -14,6 +14,7 @@ const {
resolveMemorySearchConfigMock,
resolveSessionAgentIdMock,
estimateTokensMock,
+ sessionAbortCompactionMock,
} = vi.hoisted(() => {
const contextEngineCompactMock = vi.fn(async () => ({
ok: true as boolean,
@@ -65,6 +66,7 @@ const {
})),
resolveSessionAgentIdMock: vi.fn(() => "main"),
estimateTokensMock: vi.fn((_message?: unknown) => 10),
+ sessionAbortCompactionMock: vi.fn(),
};
});
@@ -121,6 +123,7 @@ vi.mock("@mariozechner/pi-coding-agent", () => {
session.messages.splice(1);
return await sessionCompactImpl();
}),
+ abortCompaction: sessionAbortCompactionMock,
dispose: vi.fn(),
};
return { session };
@@ -151,6 +154,7 @@ vi.mock("../models-config.js", () => ({
}));
vi.mock("../model-auth.js", () => ({
+ applyLocalNoAuthHeaderOverride: vi.fn((model: unknown) => model),
getApiKeyForModel: vi.fn(async () => ({ apiKey: "test", mode: "env" })),
resolveModelAuthMode: vi.fn(() => "env"),
}));
@@ -420,6 +424,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
resolveSessionAgentIdMock.mockReturnValue("main");
estimateTokensMock.mockReset();
estimateTokensMock.mockReturnValue(10);
+ sessionAbortCompactionMock.mockReset();
unregisterApiProviders(getCustomApiRegistrySourceId("ollama"));
});
@@ -772,6 +777,24 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
expect(result.ok).toBe(true);
});
+
+ it("aborts in-flight compaction when the caller abort signal fires", async () => {
+ const controller = new AbortController();
+ sessionCompactImpl.mockImplementationOnce(() => new Promise(() => {}));
+
+ const resultPromise = compactEmbeddedPiSessionDirect(
+ directCompactionArgs({
+ abortSignal: controller.signal,
+ }),
+ );
+
+ controller.abort(new Error("request timed out"));
+ const result = await resultPromise;
+
+ expect(result.ok).toBe(false);
+ expect(result.reason).toContain("request timed out");
+ expect(sessionAbortCompactionMock).toHaveBeenCalledTimes(1);
+ });
});
describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => {
diff --git a/src/agents/pi-embedded-runner/compact.ts b/src/agents/pi-embedded-runner/compact.ts
index 63678333bed..89f3d4a066a 100644
--- a/src/agents/pi-embedded-runner/compact.ts
+++ b/src/agents/pi-embedded-runner/compact.ts
@@ -76,7 +76,7 @@ import {
import { resolveTranscriptPolicy } from "../transcript-policy.js";
import {
compactWithSafetyTimeout,
- EMBEDDED_COMPACTION_TIMEOUT_MS,
+ resolveCompactionTimeoutMs,
} from "./compaction-safety-timeout.js";
import { buildEmbeddedExtensionFactories } from "./extensions.js";
import {
@@ -87,7 +87,7 @@ import {
import { getDmHistoryLimitFromSessionKey, limitHistoryTurns } from "./history.js";
import { resolveGlobalLane, resolveSessionLane } from "./lanes.js";
import { log } from "./logger.js";
-import { buildModelAliasLines, resolveModel } from "./model.js";
+import { buildModelAliasLines, resolveModelAsync } from "./model.js";
import { buildEmbeddedSandboxInfo } from "./sandbox-info.js";
import { prewarmSessionFile, trackSessionManagerAccess } from "./session-manager-cache.js";
import { resolveEmbeddedRunSkillEntries } from "./skills-runtime.js";
@@ -143,6 +143,7 @@ export type CompactEmbeddedPiSessionParams = {
enqueue?: typeof enqueueCommand;
extraSystemPrompt?: string;
ownerNumbers?: string[];
+ abortSignal?: AbortSignal;
};
type CompactionMessageMetrics = {
@@ -423,7 +424,7 @@ export async function compactEmbeddedPiSessionDirect(
};
const agentDir = params.agentDir ?? resolveOpenClawAgentDir();
await ensureOpenClawModelsJson(params.config, agentDir);
- const { model, error, authStorage, modelRegistry } = resolveModel(
+ const { model, error, authStorage, modelRegistry } = await resolveModelAsync(
provider,
modelId,
agentDir,
@@ -687,10 +688,11 @@ export async function compactEmbeddedPiSessionDirect(
});
const systemPromptOverride = createSystemPromptOverride(appendPrompt);
+ const compactionTimeoutMs = resolveCompactionTimeoutMs(params.config);
const sessionLock = await acquireSessionWriteLock({
sessionFile: params.sessionFile,
maxHoldMs: resolveSessionLockMaxHoldFromTimeout({
- timeoutMs: EMBEDDED_COMPACTION_TIMEOUT_MS,
+ timeoutMs: compactionTimeoutMs,
}),
});
try {
@@ -915,8 +917,15 @@ export async function compactEmbeddedPiSessionDirect(
// If token estimation throws on a malformed message, fall back to 0 so
// the sanity check below becomes a no-op instead of crashing compaction.
}
- const result = await compactWithSafetyTimeout(() =>
- session.compact(params.customInstructions),
+ const result = await compactWithSafetyTimeout(
+ () => session.compact(params.customInstructions),
+ compactionTimeoutMs,
+ {
+ abortSignal: params.abortSignal,
+ onCancel: () => {
+ session.abortCompaction();
+ },
+ },
);
await runPostCompactionSideEffects({
config: params.config,
@@ -1064,7 +1073,12 @@ export async function compactEmbeddedPiSession(
const ceProvider = (params.provider ?? DEFAULT_PROVIDER).trim() || DEFAULT_PROVIDER;
const ceModelId = (params.model ?? DEFAULT_MODEL).trim() || DEFAULT_MODEL;
const agentDir = params.agentDir ?? resolveOpenClawAgentDir();
- const { model: ceModel } = resolveModel(ceProvider, ceModelId, agentDir, params.config);
+ const { model: ceModel } = await resolveModelAsync(
+ ceProvider,
+ ceModelId,
+ agentDir,
+ params.config,
+ );
const ceCtxInfo = resolveContextWindowInfo({
cfg: params.config,
provider: ceProvider,
diff --git a/src/agents/pi-embedded-runner/compaction-safety-timeout.ts b/src/agents/pi-embedded-runner/compaction-safety-timeout.ts
index 689aa9a931f..bd15368ee2a 100644
--- a/src/agents/pi-embedded-runner/compaction-safety-timeout.ts
+++ b/src/agents/pi-embedded-runner/compaction-safety-timeout.ts
@@ -1,10 +1,93 @@
+import type { OpenClawConfig } from "../../config/config.js";
import { withTimeout } from "../../node-host/with-timeout.js";
-export const EMBEDDED_COMPACTION_TIMEOUT_MS = 300_000;
+export const EMBEDDED_COMPACTION_TIMEOUT_MS = 900_000;
+
+const MAX_SAFE_TIMEOUT_MS = 2_147_000_000;
+
+function createAbortError(signal: AbortSignal): Error {
+ const reason = "reason" in signal ? signal.reason : undefined;
+ if (reason instanceof Error) {
+ return reason;
+ }
+ const err = reason ? new Error("aborted", { cause: reason }) : new Error("aborted");
+ err.name = "AbortError";
+ return err;
+}
+
+export function resolveCompactionTimeoutMs(cfg?: OpenClawConfig): number {
+ const raw = cfg?.agents?.defaults?.compaction?.timeoutSeconds;
+ if (typeof raw === "number" && Number.isFinite(raw) && raw > 0) {
+ return Math.min(Math.floor(raw) * 1000, MAX_SAFE_TIMEOUT_MS);
+ }
+ return EMBEDDED_COMPACTION_TIMEOUT_MS;
+}
export async function compactWithSafetyTimeout<T>(
  compact: () => Promise<T>,
  timeoutMs: number = EMBEDDED_COMPACTION_TIMEOUT_MS,
+  opts?: {
+    abortSignal?: AbortSignal;
+    onCancel?: () => void;
+  },
): Promise<T> {
- return await withTimeout(() => compact(), timeoutMs, "Compaction");
+ let canceled = false;
+ const cancel = () => {
+ if (canceled) {
+ return;
+ }
+ canceled = true;
+ try {
+ opts?.onCancel?.();
+ } catch {
+ // Best-effort cancellation hook. Keep the timeout/abort path intact even
+ // if the underlying compaction cancel operation throws.
+ }
+ };
+
+ return await withTimeout(
+ async (timeoutSignal) => {
+ let timeoutListener: (() => void) | undefined;
+ let externalAbortListener: (() => void) | undefined;
+      let externalAbortPromise: Promise<never> | undefined;
+ const abortSignal = opts?.abortSignal;
+
+ if (timeoutSignal) {
+ timeoutListener = () => {
+ cancel();
+ };
+ timeoutSignal.addEventListener("abort", timeoutListener, { once: true });
+ }
+
+ if (abortSignal) {
+ if (abortSignal.aborted) {
+ cancel();
+ throw createAbortError(abortSignal);
+ }
+        externalAbortPromise = new Promise<never>((_, reject) => {
+ externalAbortListener = () => {
+ cancel();
+ reject(createAbortError(abortSignal));
+ };
+ abortSignal.addEventListener("abort", externalAbortListener, { once: true });
+ });
+ }
+
+ try {
+ if (externalAbortPromise) {
+ return await Promise.race([compact(), externalAbortPromise]);
+ }
+ return await compact();
+ } finally {
+ if (timeoutListener) {
+ timeoutSignal?.removeEventListener("abort", timeoutListener);
+ }
+ if (externalAbortListener) {
+ abortSignal?.removeEventListener("abort", externalAbortListener);
+ }
+ }
+ },
+ timeoutMs,
+ "Compaction",
+ );
}
diff --git a/src/agents/pi-embedded-runner/model.test.ts b/src/agents/pi-embedded-runner/model.test.ts
index c56064967e1..47da838cc6a 100644
--- a/src/agents/pi-embedded-runner/model.test.ts
+++ b/src/agents/pi-embedded-runner/model.test.ts
@@ -5,8 +5,22 @@ vi.mock("../pi-model-discovery.js", () => ({
discoverModels: vi.fn(() => ({ find: vi.fn(() => null) })),
}));
+import type { OpenRouterModelCapabilities } from "./openrouter-model-capabilities.js";
+
+const mockGetOpenRouterModelCapabilities = vi.fn<
+ (modelId: string) => OpenRouterModelCapabilities | undefined
+>(() => undefined);
+const mockLoadOpenRouterModelCapabilities = vi.fn<(modelId: string) => Promise<void>>(
+ async () => {},
+);
+vi.mock("./openrouter-model-capabilities.js", () => ({
+ getOpenRouterModelCapabilities: (modelId: string) => mockGetOpenRouterModelCapabilities(modelId),
+ loadOpenRouterModelCapabilities: (modelId: string) =>
+ mockLoadOpenRouterModelCapabilities(modelId),
+}));
+
import type { OpenClawConfig } from "../../config/config.js";
-import { buildInlineProviderModels, resolveModel } from "./model.js";
+import { buildInlineProviderModels, resolveModel, resolveModelAsync } from "./model.js";
import {
buildOpenAICodexForwardCompatExpectation,
makeModel,
@@ -17,6 +31,10 @@ import {
beforeEach(() => {
resetMockDiscoverModels();
+ mockGetOpenRouterModelCapabilities.mockReset();
+ mockGetOpenRouterModelCapabilities.mockReturnValue(undefined);
+ mockLoadOpenRouterModelCapabilities.mockReset();
+ mockLoadOpenRouterModelCapabilities.mockResolvedValue();
});
function buildForwardCompatTemplate(params: {
@@ -416,6 +434,107 @@ describe("resolveModel", () => {
});
});
+ it("uses OpenRouter API capabilities for unknown models when cache is populated", () => {
+ mockGetOpenRouterModelCapabilities.mockReturnValue({
+ name: "Healer Alpha",
+ input: ["text", "image"],
+ reasoning: true,
+ contextWindow: 262144,
+ maxTokens: 65536,
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+ });
+
+ const result = resolveModel("openrouter", "openrouter/healer-alpha", "/tmp/agent");
+
+ expect(result.error).toBeUndefined();
+ expect(result.model).toMatchObject({
+ provider: "openrouter",
+ id: "openrouter/healer-alpha",
+ name: "Healer Alpha",
+ reasoning: true,
+ input: ["text", "image"],
+ contextWindow: 262144,
+ maxTokens: 65536,
+ });
+ });
+
+ it("falls back to text-only when OpenRouter API cache is empty", () => {
+ mockGetOpenRouterModelCapabilities.mockReturnValue(undefined);
+
+ const result = resolveModel("openrouter", "openrouter/healer-alpha", "/tmp/agent");
+
+ expect(result.error).toBeUndefined();
+ expect(result.model).toMatchObject({
+ provider: "openrouter",
+ id: "openrouter/healer-alpha",
+ reasoning: false,
+ input: ["text"],
+ });
+ });
+
+ it("preloads OpenRouter capabilities before first async resolve of an unknown model", async () => {
+ mockLoadOpenRouterModelCapabilities.mockImplementation(async (modelId) => {
+ if (modelId === "google/gemini-3.1-flash-image-preview") {
+ mockGetOpenRouterModelCapabilities.mockReturnValue({
+ name: "Google: Nano Banana 2 (Gemini 3.1 Flash Image Preview)",
+ input: ["text", "image"],
+ reasoning: true,
+ contextWindow: 65536,
+ maxTokens: 65536,
+ cost: { input: 0.5, output: 3, cacheRead: 0, cacheWrite: 0 },
+ });
+ }
+ });
+
+ const result = await resolveModelAsync(
+ "openrouter",
+ "google/gemini-3.1-flash-image-preview",
+ "/tmp/agent",
+ );
+
+ expect(mockLoadOpenRouterModelCapabilities).toHaveBeenCalledWith(
+ "google/gemini-3.1-flash-image-preview",
+ );
+ expect(result.error).toBeUndefined();
+ expect(result.model).toMatchObject({
+ provider: "openrouter",
+ id: "google/gemini-3.1-flash-image-preview",
+ reasoning: true,
+ input: ["text", "image"],
+ contextWindow: 65536,
+ maxTokens: 65536,
+ });
+ });
+
+ it("skips OpenRouter preload for models already present in the registry", async () => {
+ mockDiscoveredModel({
+ provider: "openrouter",
+ modelId: "openrouter/healer-alpha",
+ templateModel: {
+ id: "openrouter/healer-alpha",
+ name: "Healer Alpha",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+ contextWindow: 262144,
+ maxTokens: 65536,
+ },
+ });
+
+ const result = await resolveModelAsync("openrouter", "openrouter/healer-alpha", "/tmp/agent");
+
+ expect(mockLoadOpenRouterModelCapabilities).not.toHaveBeenCalled();
+ expect(result.error).toBeUndefined();
+ expect(result.model).toMatchObject({
+ provider: "openrouter",
+ id: "openrouter/healer-alpha",
+ input: ["text", "image"],
+ });
+ });
+
it("prefers configured provider api metadata over discovered registry model", () => {
mockDiscoveredModel({
provider: "onehub",
@@ -788,6 +907,27 @@ describe("resolveModel", () => {
);
});
+ it("keeps suppressed openai gpt-5.3-codex-spark from falling through provider fallback", () => {
+ const cfg = {
+ models: {
+ providers: {
+ openai: {
+ baseUrl: "https://api.openai.com/v1",
+ api: "openai-responses",
+ models: [{ ...makeModel("gpt-4.1"), api: "openai-responses" }],
+ },
+ },
+ },
+ } as OpenClawConfig;
+
+ const result = resolveModel("openai", "gpt-5.3-codex-spark", "/tmp/agent", cfg);
+
+ expect(result.model).toBeUndefined();
+ expect(result.error).toBe(
+ "Unknown model: openai/gpt-5.3-codex-spark. gpt-5.3-codex-spark is only supported via openai-codex OAuth. Use openai-codex/gpt-5.3-codex-spark.",
+ );
+ });
+
it("rejects azure openai gpt-5.3-codex-spark with a codex-only hint", () => {
const result = resolveModel("azure-openai-responses", "gpt-5.3-codex-spark", "/tmp/agent");
diff --git a/src/agents/pi-embedded-runner/model.ts b/src/agents/pi-embedded-runner/model.ts
index 751d22e4843..2ead43e96e0 100644
--- a/src/agents/pi-embedded-runner/model.ts
+++ b/src/agents/pi-embedded-runner/model.ts
@@ -14,6 +14,10 @@ import {
} from "../model-suppression.js";
import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js";
import { normalizeResolvedProviderModel } from "./model.provider-normalization.js";
+import {
+ getOpenRouterModelCapabilities,
+ loadOpenRouterModelCapabilities,
+} from "./openrouter-model-capabilities.js";
type InlineModelEntry = ModelDefinitionConfig & {
provider: string;
@@ -156,28 +160,31 @@ export function buildInlineProviderModels(
});
}
-export function resolveModelWithRegistry(params: {
+function resolveExplicitModelWithRegistry(params: {
provider: string;
modelId: string;
modelRegistry: ModelRegistry;
cfg?: OpenClawConfig;
-}): Model | undefined {
+}): { kind: "resolved"; model: Model } | { kind: "suppressed" } | undefined {
const { provider, modelId, modelRegistry, cfg } = params;
if (shouldSuppressBuiltInModel({ provider, id: modelId })) {
- return undefined;
+ return { kind: "suppressed" };
}
const providerConfig = resolveConfiguredProviderConfig(cfg, provider);
const model = modelRegistry.find(provider, modelId) as Model | null;
if (model) {
- return normalizeResolvedModel({
- provider,
- model: applyConfiguredProviderOverrides({
- discoveredModel: model,
- providerConfig,
- modelId,
+ return {
+ kind: "resolved",
+ model: normalizeResolvedModel({
+ provider,
+ model: applyConfiguredProviderOverrides({
+ discoveredModel: model,
+ providerConfig,
+ modelId,
+ }),
}),
- });
+ };
}
const providers = cfg?.models?.providers ?? {};
@@ -187,40 +194,70 @@ export function resolveModelWithRegistry(params: {
(entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
);
if (inlineMatch?.api) {
- return normalizeResolvedModel({ provider, model: inlineMatch as Model });
+ return {
+ kind: "resolved",
+ model: normalizeResolvedModel({ provider, model: inlineMatch as Model }),
+ };
}
// Forward-compat fallbacks must be checked BEFORE the generic providerCfg fallback.
// Otherwise, configured providers can default to a generic API and break specific transports.
const forwardCompat = resolveForwardCompatModel(provider, modelId, modelRegistry);
if (forwardCompat) {
- return normalizeResolvedModel({
- provider,
- model: applyConfiguredProviderOverrides({
- discoveredModel: forwardCompat,
- providerConfig,
- modelId,
+ return {
+ kind: "resolved",
+ model: normalizeResolvedModel({
+ provider,
+ model: applyConfiguredProviderOverrides({
+ discoveredModel: forwardCompat,
+ providerConfig,
+ modelId,
+ }),
}),
- });
+ };
}
+ return undefined;
+}
+
+export function resolveModelWithRegistry(params: {
+ provider: string;
+ modelId: string;
+ modelRegistry: ModelRegistry;
+ cfg?: OpenClawConfig;
+}): Model | undefined {
+ const explicitModel = resolveExplicitModelWithRegistry(params);
+ if (explicitModel?.kind === "suppressed") {
+ return undefined;
+ }
+ if (explicitModel?.kind === "resolved") {
+ return explicitModel.model;
+ }
+
+ const { provider, modelId, cfg } = params;
+ const normalizedProvider = normalizeProviderId(provider);
+ const providerConfig = resolveConfiguredProviderConfig(cfg, provider);
+
// OpenRouter is a pass-through proxy - any model ID available on OpenRouter
// should work without being pre-registered in the local catalog.
+ // Try to fetch actual capabilities from the OpenRouter API so that new models
+ // (not yet in the static pi-ai snapshot) get correct image/reasoning support.
if (normalizedProvider === "openrouter") {
+ const capabilities = getOpenRouterModelCapabilities(modelId);
return normalizeResolvedModel({
provider,
model: {
id: modelId,
- name: modelId,
+ name: capabilities?.name ?? modelId,
api: "openai-completions",
provider,
baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
- contextWindow: DEFAULT_CONTEXT_TOKENS,
+ reasoning: capabilities?.reasoning ?? false,
+ input: capabilities?.input ?? ["text"],
+ cost: capabilities?.cost ?? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+ contextWindow: capabilities?.contextWindow ?? DEFAULT_CONTEXT_TOKENS,
// Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
- maxTokens: 8192,
+ maxTokens: capabilities?.maxTokens ?? 8192,
} as Model,
});
}
@@ -287,6 +324,46 @@ export function resolveModel(
};
}
+export async function resolveModelAsync(
+ provider: string,
+ modelId: string,
+ agentDir?: string,
+ cfg?: OpenClawConfig,
+): Promise<{
+ model?: Model;
+ error?: string;
+ authStorage: AuthStorage;
+ modelRegistry: ModelRegistry;
+}> {
+ const resolvedAgentDir = agentDir ?? resolveOpenClawAgentDir();
+ const authStorage = discoverAuthStorage(resolvedAgentDir);
+ const modelRegistry = discoverModels(authStorage, resolvedAgentDir);
+ const explicitModel = resolveExplicitModelWithRegistry({ provider, modelId, modelRegistry, cfg });
+ if (explicitModel?.kind === "suppressed") {
+ return {
+ error: buildUnknownModelError(provider, modelId),
+ authStorage,
+ modelRegistry,
+ };
+ }
+ if (!explicitModel && normalizeProviderId(provider) === "openrouter") {
+ await loadOpenRouterModelCapabilities(modelId);
+ }
+ const model =
+ explicitModel?.kind === "resolved"
+ ? explicitModel.model
+ : resolveModelWithRegistry({ provider, modelId, modelRegistry, cfg });
+ if (model) {
+ return { model, authStorage, modelRegistry };
+ }
+
+ return {
+ error: buildUnknownModelError(provider, modelId),
+ authStorage,
+ modelRegistry,
+ };
+}
+
/**
* Build a more helpful error when the model is not found.
*
diff --git a/src/agents/pi-embedded-runner/openrouter-model-capabilities.test.ts b/src/agents/pi-embedded-runner/openrouter-model-capabilities.test.ts
new file mode 100644
index 00000000000..aa830c13d4d
--- /dev/null
+++ b/src/agents/pi-embedded-runner/openrouter-model-capabilities.test.ts
@@ -0,0 +1,111 @@
+import { mkdtempSync, rmSync } from "node:fs";
+import { tmpdir } from "node:os";
+import { join } from "node:path";
+import { afterEach, describe, expect, it, vi } from "vitest";
+
+describe("openrouter-model-capabilities", () => {
+ afterEach(() => {
+ vi.resetModules();
+ vi.unstubAllGlobals();
+ delete process.env.OPENCLAW_STATE_DIR;
+ });
+
+ it("uses top-level OpenRouter max token fields when top_provider is absent", async () => {
+ const stateDir = mkdtempSync(join(tmpdir(), "openclaw-openrouter-capabilities-"));
+ process.env.OPENCLAW_STATE_DIR = stateDir;
+
+ vi.stubGlobal(
+ "fetch",
+ vi.fn(
+ async () =>
+ new Response(
+ JSON.stringify({
+ data: [
+ {
+ id: "acme/top-level-max-completion",
+ name: "Top Level Max Completion",
+ architecture: { modality: "text+image->text" },
+ supported_parameters: ["reasoning"],
+ context_length: 65432,
+ max_completion_tokens: 12345,
+ pricing: { prompt: "0.000001", completion: "0.000002" },
+ },
+ {
+ id: "acme/top-level-max-output",
+ name: "Top Level Max Output",
+ modality: "text+image->text",
+ context_length: 54321,
+ max_output_tokens: 23456,
+ pricing: { prompt: "0.000003", completion: "0.000004" },
+ },
+ ],
+ }),
+ {
+ status: 200,
+ headers: { "content-type": "application/json" },
+ },
+ ),
+ ),
+ );
+
+ const module = await import("./openrouter-model-capabilities.js");
+
+ try {
+ await module.loadOpenRouterModelCapabilities("acme/top-level-max-completion");
+
+ expect(module.getOpenRouterModelCapabilities("acme/top-level-max-completion")).toMatchObject({
+ input: ["text", "image"],
+ reasoning: true,
+ contextWindow: 65432,
+ maxTokens: 12345,
+ });
+ expect(module.getOpenRouterModelCapabilities("acme/top-level-max-output")).toMatchObject({
+ input: ["text", "image"],
+ reasoning: false,
+ contextWindow: 54321,
+ maxTokens: 23456,
+ });
+ } finally {
+ rmSync(stateDir, { recursive: true, force: true });
+ }
+ });
+
+ it("does not refetch immediately after an awaited miss for the same model id", async () => {
+ const stateDir = mkdtempSync(join(tmpdir(), "openclaw-openrouter-capabilities-"));
+ process.env.OPENCLAW_STATE_DIR = stateDir;
+
+ const fetchSpy = vi.fn(
+ async () =>
+ new Response(
+ JSON.stringify({
+ data: [
+ {
+ id: "acme/known-model",
+ name: "Known Model",
+ architecture: { modality: "text->text" },
+ context_length: 1234,
+ },
+ ],
+ }),
+ {
+ status: 200,
+ headers: { "content-type": "application/json" },
+ },
+ ),
+ );
+ vi.stubGlobal("fetch", fetchSpy);
+
+ const module = await import("./openrouter-model-capabilities.js");
+
+ try {
+ await module.loadOpenRouterModelCapabilities("acme/missing-model");
+ expect(module.getOpenRouterModelCapabilities("acme/missing-model")).toBeUndefined();
+ expect(fetchSpy).toHaveBeenCalledTimes(1);
+
+ expect(module.getOpenRouterModelCapabilities("acme/missing-model")).toBeUndefined();
+ expect(fetchSpy).toHaveBeenCalledTimes(2);
+ } finally {
+ rmSync(stateDir, { recursive: true, force: true });
+ }
+ });
+});
diff --git a/src/agents/pi-embedded-runner/openrouter-model-capabilities.ts b/src/agents/pi-embedded-runner/openrouter-model-capabilities.ts
new file mode 100644
index 00000000000..931826ef033
--- /dev/null
+++ b/src/agents/pi-embedded-runner/openrouter-model-capabilities.ts
@@ -0,0 +1,301 @@
+/**
+ * Runtime OpenRouter model capability detection.
+ *
+ * When an OpenRouter model is not in the built-in static list, we look up its
+ * actual capabilities from a cached copy of the OpenRouter model catalog.
+ *
+ * Cache layers (checked in order):
+ * 1. In-memory Map (instant, cleared on process restart)
+ * 2. On-disk JSON file (<state-dir>/cache/openrouter-models.json)
+ * 3. OpenRouter API fetch (populates both layers)
+ *
+ * Model capabilities are assumed stable — the cache has no TTL expiry.
+ * A background refresh is triggered only when a model is not found in
+ * the cache (i.e. a newly added model on OpenRouter).
+ *
+ * Sync callers can read whatever is already cached. Async callers can await a
+ * one-time fetch so the first unknown-model lookup resolves with real
+ * capabilities instead of the text-only fallback.
+ */
+
+import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
+import { join } from "node:path";
+import { resolveStateDir } from "../../config/paths.js";
+import { resolveProxyFetchFromEnv } from "../../infra/net/proxy-fetch.js";
+import { createSubsystemLogger } from "../../logging/subsystem.js";
+
+const log = createSubsystemLogger("openrouter-model-capabilities");
+
+const OPENROUTER_MODELS_URL = "https://openrouter.ai/api/v1/models";
+const FETCH_TIMEOUT_MS = 10_000;
+const DISK_CACHE_FILENAME = "openrouter-models.json";
+
+// ---------------------------------------------------------------------------
+// Types
+// ---------------------------------------------------------------------------
+
+interface OpenRouterApiModel {
+ id: string;
+ name?: string;
+ modality?: string;
+ architecture?: {
+ modality?: string;
+ };
+ supported_parameters?: string[];
+ context_length?: number;
+ max_completion_tokens?: number;
+ max_output_tokens?: number;
+ top_provider?: {
+ max_completion_tokens?: number;
+ };
+ pricing?: {
+ prompt?: string;
+ completion?: string;
+ input_cache_read?: string;
+ input_cache_write?: string;
+ };
+}
+
+export interface OpenRouterModelCapabilities {
+ name: string;
+ input: Array<"text" | "image">;
+ reasoning: boolean;
+ contextWindow: number;
+ maxTokens: number;
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+}
+
+interface DiskCachePayload {
+  models: Record<string, OpenRouterModelCapabilities>;
+}
+
+// ---------------------------------------------------------------------------
+// Disk cache
+// ---------------------------------------------------------------------------
+
+function resolveDiskCacheDir(): string {
+ return join(resolveStateDir(), "cache");
+}
+
+function resolveDiskCachePath(): string {
+ return join(resolveDiskCacheDir(), DISK_CACHE_FILENAME);
+}
+
+function writeDiskCache(map: Map<string, OpenRouterModelCapabilities>): void {
+ try {
+ const cacheDir = resolveDiskCacheDir();
+ if (!existsSync(cacheDir)) {
+ mkdirSync(cacheDir, { recursive: true });
+ }
+ const payload: DiskCachePayload = {
+ models: Object.fromEntries(map),
+ };
+ writeFileSync(resolveDiskCachePath(), JSON.stringify(payload), "utf-8");
+ } catch (err: unknown) {
+ const message = err instanceof Error ? err.message : String(err);
+ log.debug(`Failed to write OpenRouter disk cache: ${message}`);
+ }
+}
+
+function isValidCapabilities(value: unknown): value is OpenRouterModelCapabilities {
+ if (!value || typeof value !== "object") {
+ return false;
+ }
+  const record = value as Record<string, unknown>;
+ return (
+ typeof record.name === "string" &&
+ Array.isArray(record.input) &&
+ typeof record.reasoning === "boolean" &&
+ typeof record.contextWindow === "number" &&
+ typeof record.maxTokens === "number"
+ );
+}
+
+function readDiskCache(): Map<string, OpenRouterModelCapabilities> | undefined {
+ try {
+ const cachePath = resolveDiskCachePath();
+ if (!existsSync(cachePath)) {
+ return undefined;
+ }
+ const raw = readFileSync(cachePath, "utf-8");
+ const payload = JSON.parse(raw) as unknown;
+ if (!payload || typeof payload !== "object") {
+ return undefined;
+ }
+ const models = (payload as DiskCachePayload).models;
+ if (!models || typeof models !== "object") {
+ return undefined;
+ }
+    const map = new Map<string, OpenRouterModelCapabilities>();
+ for (const [id, caps] of Object.entries(models)) {
+ if (isValidCapabilities(caps)) {
+ map.set(id, caps);
+ }
+ }
+ return map.size > 0 ? map : undefined;
+ } catch {
+ return undefined;
+ }
+}
+
+// ---------------------------------------------------------------------------
+// In-memory cache state
+// ---------------------------------------------------------------------------
+
+let cache: Map<string, OpenRouterModelCapabilities> | undefined;
+let fetchInFlight: Promise<void> | undefined;
+const skipNextMissRefresh = new Set<string>();
+
+function parseModel(model: OpenRouterApiModel): OpenRouterModelCapabilities {
+ const input: Array<"text" | "image"> = ["text"];
+ const modality = model.architecture?.modality ?? model.modality ?? "";
+ const inputModalities = modality.split("->")[0] ?? "";
+ if (inputModalities.includes("image")) {
+ input.push("image");
+ }
+
+ return {
+ name: model.name || model.id,
+ input,
+ reasoning: model.supported_parameters?.includes("reasoning") ?? false,
+ contextWindow: model.context_length || 128_000,
+ maxTokens:
+ model.top_provider?.max_completion_tokens ??
+ model.max_completion_tokens ??
+ model.max_output_tokens ??
+ 8192,
+ cost: {
+ input: parseFloat(model.pricing?.prompt || "0") * 1_000_000,
+ output: parseFloat(model.pricing?.completion || "0") * 1_000_000,
+ cacheRead: parseFloat(model.pricing?.input_cache_read || "0") * 1_000_000,
+ cacheWrite: parseFloat(model.pricing?.input_cache_write || "0") * 1_000_000,
+ },
+ };
+}
+
+// ---------------------------------------------------------------------------
+// API fetch
+// ---------------------------------------------------------------------------
+
+async function doFetch(): Promise<void> {
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);
+ try {
+ const fetchFn = resolveProxyFetchFromEnv() ?? globalThis.fetch;
+
+ const response = await fetchFn(OPENROUTER_MODELS_URL, {
+ signal: controller.signal,
+ });
+
+ if (!response.ok) {
+ log.warn(`OpenRouter models API returned ${response.status}`);
+ return;
+ }
+
+ const data = (await response.json()) as { data?: OpenRouterApiModel[] };
+ const models = data.data ?? [];
+    const map = new Map<string, OpenRouterModelCapabilities>();
+
+ for (const model of models) {
+ if (!model.id) {
+ continue;
+ }
+ map.set(model.id, parseModel(model));
+ }
+
+ cache = map;
+ writeDiskCache(map);
+ log.debug(`Cached ${map.size} OpenRouter models from API`);
+ } catch (err: unknown) {
+ const message = err instanceof Error ? err.message : String(err);
+ log.warn(`Failed to fetch OpenRouter models: ${message}`);
+ } finally {
+ clearTimeout(timeout);
+ }
+}
+
+function triggerFetch(): void {
+ if (fetchInFlight) {
+ return;
+ }
+ fetchInFlight = doFetch().finally(() => {
+ fetchInFlight = undefined;
+ });
+}
+
+// ---------------------------------------------------------------------------
+// Public API
+// ---------------------------------------------------------------------------
+
+/**
+ * Ensure the cache is populated. Checks in-memory first, then disk, then
+ * triggers a background API fetch as a last resort.
+ * Does not block — returns immediately.
+ */
+export function ensureOpenRouterModelCache(): void {
+ if (cache) {
+ return;
+ }
+
+ // Try loading from disk before hitting the network.
+ const disk = readDiskCache();
+ if (disk) {
+ cache = disk;
+ log.debug(`Loaded ${disk.size} OpenRouter models from disk cache`);
+ return;
+ }
+
+ triggerFetch();
+}
+
+/**
+ * Ensure capabilities for a specific model are available before first use.
+ *
+ * Known cached entries return immediately. Unknown entries wait for at most
+ * one catalog fetch, then leave sync resolution to read from the populated
+ * cache on the same request.
+ */
+export async function loadOpenRouterModelCapabilities(modelId: string): Promise<void> {
+ ensureOpenRouterModelCache();
+ if (cache?.has(modelId)) {
+ return;
+ }
+ let fetchPromise = fetchInFlight;
+ if (!fetchPromise) {
+ triggerFetch();
+ fetchPromise = fetchInFlight;
+ }
+ await fetchPromise;
+ if (!cache?.has(modelId)) {
+ skipNextMissRefresh.add(modelId);
+ }
+}
+
+/**
+ * Synchronously look up model capabilities from the cache.
+ *
+ * If a model is not found but the cache exists, a background refresh is
+ * triggered in case it's a newly added model not yet in the cache.
+ */
+export function getOpenRouterModelCapabilities(
+ modelId: string,
+): OpenRouterModelCapabilities | undefined {
+ ensureOpenRouterModelCache();
+ const result = cache?.get(modelId);
+
+ // Model not found but cache exists — may be a newly added model.
+ // Trigger a refresh so the next call picks it up.
+ if (!result && skipNextMissRefresh.delete(modelId)) {
+ return undefined;
+ }
+ if (!result && cache && !fetchInFlight) {
+ triggerFetch();
+ }
+
+ return result;
+}
diff --git a/src/agents/pi-embedded-runner/run.ts b/src/agents/pi-embedded-runner/run.ts
index 4ca6c0ea226..65d87712ca8 100644
--- a/src/agents/pi-embedded-runner/run.ts
+++ b/src/agents/pi-embedded-runner/run.ts
@@ -66,7 +66,7 @@ import { derivePromptTokens, normalizeUsage, type UsageLike } from "../usage.js"
import { redactRunIdentifier, resolveRunWorkspaceDir } from "../workspace-run.js";
import { resolveGlobalLane, resolveSessionLane } from "./lanes.js";
import { log } from "./logger.js";
-import { resolveModel } from "./model.js";
+import { resolveModelAsync } from "./model.js";
import { runEmbeddedAttempt } from "./run/attempt.js";
import { createFailoverDecisionLogger } from "./run/failover-observation.js";
import type { RunEmbeddedPiAgentParams } from "./run/params.js";
@@ -367,7 +367,7 @@ export async function runEmbeddedPiAgent(
log.info(`[hooks] model overridden to ${modelId}`);
}
- const { model, error, authStorage, modelRegistry } = resolveModel(
+ const { model, error, authStorage, modelRegistry } = await resolveModelAsync(
provider,
modelId,
agentDir,
diff --git a/src/agents/pi-embedded-runner/run/attempt.ts b/src/agents/pi-embedded-runner/run/attempt.ts
index a4cf2d75260..ef5a63cdcd1 100644
--- a/src/agents/pi-embedded-runner/run/attempt.ts
+++ b/src/agents/pi-embedded-runner/run/attempt.ts
@@ -97,6 +97,7 @@ import { DEFAULT_BOOTSTRAP_FILENAME } from "../../workspace.js";
import { isRunnerAbortError } from "../abort.js";
import { appendCacheTtlTimestamp, isCacheTtlEligibleProvider } from "../cache-ttl.js";
import type { CompactEmbeddedPiSessionParams } from "../compact.js";
+import { resolveCompactionTimeoutMs } from "../compaction-safety-timeout.js";
import { buildEmbeddedExtensionFactories } from "../extensions.js";
import { applyExtraParamsToAgent } from "../extra-params.js";
import {
@@ -130,6 +131,8 @@ import { describeUnknownError, mapThinkingLevel } from "../utils.js";
import { flushPendingToolResultsAfterIdle } from "../wait-for-idle-before-flush.js";
import { waitForCompactionRetryWithAggregateTimeout } from "./compaction-retry-aggregate-timeout.js";
import {
+ resolveRunTimeoutDuringCompaction,
+ resolveRunTimeoutWithCompactionGraceMs,
selectCompactionTimeoutSnapshot,
shouldFlagCompactionTimeout,
} from "./compaction-timeout.js";
@@ -1706,7 +1709,10 @@ export async function runEmbeddedAttempt(
const sessionLock = await acquireSessionWriteLock({
sessionFile: params.sessionFile,
maxHoldMs: resolveSessionLockMaxHoldFromTimeout({
- timeoutMs: params.timeoutMs,
+ timeoutMs: resolveRunTimeoutWithCompactionGraceMs({
+ runTimeoutMs: params.timeoutMs,
+ compactionTimeoutMs: resolveCompactionTimeoutMs(params.config),
+ }),
}),
});
@@ -2150,6 +2156,20 @@ export async function runEmbeddedAttempt(
err.name = "AbortError";
return err;
};
+ const abortCompaction = () => {
+ if (!activeSession.isCompacting) {
+ return;
+ }
+ try {
+ activeSession.abortCompaction();
+ } catch (err) {
+ if (!isProbeSession) {
+ log.warn(
+ `embedded run abortCompaction failed: runId=${params.runId} sessionId=${params.sessionId} err=${String(err)}`,
+ );
+ }
+ }
+ };
const abortRun = (isTimeout = false, reason?: unknown) => {
aborted = true;
if (isTimeout) {
@@ -2160,6 +2180,7 @@ export async function runEmbeddedAttempt(
} else {
runAbortController.abort(reason);
}
+ abortCompaction();
void activeSession.abort();
};
const abortable = <T>(promise: Promise<T>): Promise<T> => {
@@ -2240,38 +2261,63 @@ export async function runEmbeddedAttempt(
let abortWarnTimer: NodeJS.Timeout | undefined;
const isProbeSession = params.sessionId?.startsWith("probe-") ?? false;
- const abortTimer = setTimeout(
- () => {
- if (!isProbeSession) {
- log.warn(
- `embedded run timeout: runId=${params.runId} sessionId=${params.sessionId} timeoutMs=${params.timeoutMs}`,
- );
- }
- if (
- shouldFlagCompactionTimeout({
- isTimeout: true,
+ const compactionTimeoutMs = resolveCompactionTimeoutMs(params.config);
+ let abortTimer: NodeJS.Timeout | undefined;
+ let compactionGraceUsed = false;
+ const scheduleAbortTimer = (delayMs: number, reason: "initial" | "compaction-grace") => {
+ abortTimer = setTimeout(
+ () => {
+ const timeoutAction = resolveRunTimeoutDuringCompaction({
isCompactionPendingOrRetrying: subscription.isCompacting(),
isCompactionInFlight: activeSession.isCompacting,
- })
- ) {
- timedOutDuringCompaction = true;
- }
- abortRun(true);
- if (!abortWarnTimer) {
- abortWarnTimer = setTimeout(() => {
- if (!activeSession.isStreaming) {
- return;
- }
+ graceAlreadyUsed: compactionGraceUsed,
+ });
+ if (timeoutAction === "extend") {
+ compactionGraceUsed = true;
if (!isProbeSession) {
log.warn(
- `embedded run abort still streaming: runId=${params.runId} sessionId=${params.sessionId}`,
+ `embedded run timeout reached during compaction; extending deadline: ` +
+ `runId=${params.runId} sessionId=${params.sessionId} extraMs=${compactionTimeoutMs}`,
);
}
- }, 10_000);
- }
- },
- Math.max(1, params.timeoutMs),
- );
+ scheduleAbortTimer(compactionTimeoutMs, "compaction-grace");
+ return;
+ }
+
+ if (!isProbeSession) {
+ log.warn(
+ reason === "compaction-grace"
+ ? `embedded run timeout after compaction grace: runId=${params.runId} sessionId=${params.sessionId} timeoutMs=${params.timeoutMs} compactionGraceMs=${compactionTimeoutMs}`
+ : `embedded run timeout: runId=${params.runId} sessionId=${params.sessionId} timeoutMs=${params.timeoutMs}`,
+ );
+ }
+ if (
+ shouldFlagCompactionTimeout({
+ isTimeout: true,
+ isCompactionPendingOrRetrying: subscription.isCompacting(),
+ isCompactionInFlight: activeSession.isCompacting,
+ })
+ ) {
+ timedOutDuringCompaction = true;
+ }
+ abortRun(true);
+ if (!abortWarnTimer) {
+ abortWarnTimer = setTimeout(() => {
+ if (!activeSession.isStreaming) {
+ return;
+ }
+ if (!isProbeSession) {
+ log.warn(
+ `embedded run abort still streaming: runId=${params.runId} sessionId=${params.sessionId}`,
+ );
+ }
+ }, 10_000);
+ }
+ },
+ Math.max(1, delayMs),
+ );
+ };
+ scheduleAbortTimer(params.timeoutMs, "initial");
let messagesSnapshot: AgentMessage[] = [];
let sessionIdUsed = activeSession.sessionId;
diff --git a/src/agents/pi-embedded-runner/run/compaction-timeout.test.ts b/src/agents/pi-embedded-runner/run/compaction-timeout.test.ts
index 24785c0792d..3853e0ebd25 100644
--- a/src/agents/pi-embedded-runner/run/compaction-timeout.test.ts
+++ b/src/agents/pi-embedded-runner/run/compaction-timeout.test.ts
@@ -1,6 +1,8 @@
import { describe, expect, it } from "vitest";
import { castAgentMessage } from "../../test-helpers/agent-message-fixtures.js";
import {
+ resolveRunTimeoutDuringCompaction,
+ resolveRunTimeoutWithCompactionGraceMs,
selectCompactionTimeoutSnapshot,
shouldFlagCompactionTimeout,
} from "./compaction-timeout.js";
@@ -31,6 +33,45 @@ describe("compaction-timeout helpers", () => {
).toBe(false);
});
+ it("extends the first run timeout reached during compaction", () => {
+ expect(
+ resolveRunTimeoutDuringCompaction({
+ isCompactionPendingOrRetrying: false,
+ isCompactionInFlight: true,
+ graceAlreadyUsed: false,
+ }),
+ ).toBe("extend");
+ });
+
+ it("aborts after compaction grace has already been used", () => {
+ expect(
+ resolveRunTimeoutDuringCompaction({
+ isCompactionPendingOrRetrying: true,
+ isCompactionInFlight: false,
+ graceAlreadyUsed: true,
+ }),
+ ).toBe("abort");
+ });
+
+ it("aborts immediately when no compaction is active", () => {
+ expect(
+ resolveRunTimeoutDuringCompaction({
+ isCompactionPendingOrRetrying: false,
+ isCompactionInFlight: false,
+ graceAlreadyUsed: false,
+ }),
+ ).toBe("abort");
+ });
+
+ it("adds one compaction grace window to the run timeout budget", () => {
+ expect(
+ resolveRunTimeoutWithCompactionGraceMs({
+ runTimeoutMs: 600_000,
+ compactionTimeoutMs: 900_000,
+ }),
+ ).toBe(1_500_000);
+ });
+
it("uses pre-compaction snapshot when compaction timeout occurs", () => {
const pre = [castAgentMessage({ role: "assistant", content: "pre" })] as const;
const current = [castAgentMessage({ role: "assistant", content: "current" })] as const;
diff --git a/src/agents/pi-embedded-runner/run/compaction-timeout.ts b/src/agents/pi-embedded-runner/run/compaction-timeout.ts
index 45a945257f6..97e1dfff4e3 100644
--- a/src/agents/pi-embedded-runner/run/compaction-timeout.ts
+++ b/src/agents/pi-embedded-runner/run/compaction-timeout.ts
@@ -13,6 +13,24 @@ export function shouldFlagCompactionTimeout(signal: CompactionTimeoutSignal): bo
return signal.isCompactionPendingOrRetrying || signal.isCompactionInFlight;
}
+export function resolveRunTimeoutDuringCompaction(params: {
+ isCompactionPendingOrRetrying: boolean;
+ isCompactionInFlight: boolean;
+ graceAlreadyUsed: boolean;
+}): "extend" | "abort" {
+ if (!params.isCompactionPendingOrRetrying && !params.isCompactionInFlight) {
+ return "abort";
+ }
+ return params.graceAlreadyUsed ? "abort" : "extend";
+}
+
+export function resolveRunTimeoutWithCompactionGraceMs(params: {
+ runTimeoutMs: number;
+ compactionTimeoutMs: number;
+}): number {
+ return params.runTimeoutMs + params.compactionTimeoutMs;
+}
+
export type SnapshotSelectionParams = {
timedOutDuringCompaction: boolean;
preCompactionSnapshot: AgentMessage[] | null;
diff --git a/src/config/schema.help.quality.test.ts b/src/config/schema.help.quality.test.ts
index f74728e360b..7de4e592b23 100644
--- a/src/config/schema.help.quality.test.ts
+++ b/src/config/schema.help.quality.test.ts
@@ -384,6 +384,7 @@ const TARGET_KEYS = [
"agents.defaults.compaction.qualityGuard.enabled",
"agents.defaults.compaction.qualityGuard.maxRetries",
"agents.defaults.compaction.postCompactionSections",
+ "agents.defaults.compaction.timeoutSeconds",
"agents.defaults.compaction.model",
"agents.defaults.compaction.memoryFlush",
"agents.defaults.compaction.memoryFlush.enabled",
diff --git a/src/config/schema.help.ts b/src/config/schema.help.ts
index 7fbfdec76d8..a4e2e125528 100644
--- a/src/config/schema.help.ts
+++ b/src/config/schema.help.ts
@@ -1045,6 +1045,8 @@ export const FIELD_HELP: Record<string, string> = {
'Controls post-compaction session memory reindex mode: "off", "async", or "await" (default: "async"). Use "await" for strongest freshness, "async" for lower compaction latency, and "off" only when session-memory sync is handled elsewhere.',
"agents.defaults.compaction.postCompactionSections":
'AGENTS.md H2/H3 section names re-injected after compaction so the agent reruns critical startup guidance. Leave unset to use "Session Startup"/"Red Lines" with legacy fallback to "Every Session"/"Safety"; set to [] to disable reinjection entirely.',
+ "agents.defaults.compaction.timeoutSeconds":
+ "Maximum time in seconds allowed for a single compaction operation before it is aborted (default: 900). Increase this for very large sessions that need more time to summarize, or decrease it to fail faster on unresponsive models.",
"agents.defaults.compaction.model":
"Optional provider/model override used only for compaction summarization. Set this when you want compaction to run on a different model than the session default, and leave it unset to keep using the primary agent model.",
"agents.defaults.compaction.memoryFlush":
diff --git a/src/config/schema.labels.ts b/src/config/schema.labels.ts
index e700f2329b4..dc5195fb766 100644
--- a/src/config/schema.labels.ts
+++ b/src/config/schema.labels.ts
@@ -474,6 +474,7 @@ export const FIELD_LABELS: Record<string, string> = {
"agents.defaults.compaction.qualityGuard.maxRetries": "Compaction Quality Guard Max Retries",
"agents.defaults.compaction.postIndexSync": "Compaction Post-Index Sync",
"agents.defaults.compaction.postCompactionSections": "Post-Compaction Context Sections",
+ "agents.defaults.compaction.timeoutSeconds": "Compaction Timeout (Seconds)",
"agents.defaults.compaction.model": "Compaction Model Override",
"agents.defaults.compaction.memoryFlush": "Compaction Memory Flush",
"agents.defaults.compaction.memoryFlush.enabled": "Compaction Memory Flush Enabled",
diff --git a/src/config/types.agent-defaults.ts b/src/config/types.agent-defaults.ts
index d2bdbb096ff..e5613c7649d 100644
--- a/src/config/types.agent-defaults.ts
+++ b/src/config/types.agent-defaults.ts
@@ -338,6 +338,8 @@ export type AgentCompactionConfig = {
* When set, compaction uses this model instead of the agent's primary model.
* Falls back to the primary model when unset. */
model?: string;
+ /** Maximum time in seconds for a single compaction operation (default: 900). */
+ timeoutSeconds?: number;
};
export type AgentCompactionMemoryFlushConfig = {
diff --git a/src/config/zod-schema.agent-defaults.ts b/src/config/zod-schema.agent-defaults.ts
index dfa7e23e1c1..b2cc5603c90 100644
--- a/src/config/zod-schema.agent-defaults.ts
+++ b/src/config/zod-schema.agent-defaults.ts
@@ -107,6 +107,7 @@ export const AgentDefaultsSchema = z
postIndexSync: z.enum(["off", "async", "await"]).optional(),
postCompactionSections: z.array(z.string()).optional(),
model: z.string().optional(),
+ timeoutSeconds: z.number().int().positive().optional(),
memoryFlush: z
.object({
enabled: z.boolean().optional(),
diff --git a/src/tts/tts-core.ts b/src/tts/tts-core.ts
index 93325c8fb06..5d3000d7ad3 100644
--- a/src/tts/tts-core.ts
+++ b/src/tts/tts-core.ts
@@ -10,7 +10,7 @@ import {
type ModelRef,
} from "../agents/model-selection.js";
import { createConfiguredOllamaStreamFn } from "../agents/ollama-stream.js";
-import { resolveModel } from "../agents/pi-embedded-runner/model.js";
+import { resolveModelAsync } from "../agents/pi-embedded-runner/model.js";
import type { OpenClawConfig } from "../config/config.js";
import type {
ResolvedTtsConfig,
@@ -456,7 +456,7 @@ export async function summarizeText(params: {
const startTime = Date.now();
const { ref } = resolveSummaryModelRef(cfg, config);
- const resolved = resolveModel(ref.provider, ref.model, undefined, cfg);
+ const resolved = await resolveModelAsync(ref.provider, ref.model, undefined, cfg);
if (!resolved.model) {
throw new Error(resolved.error ?? `Unknown summary model: ${ref.provider}/${ref.model}`);
}