From d9907fb15ede84e8093d053807f5adddc5073a31 Mon Sep 17 00:00:00 2001 From: Ryan Date: Fri, 20 Mar 2026 23:43:57 -0500 Subject: [PATCH] fix(core): preserve totalTokens on zero usage reports When an LLM provider (like vLLM) reports 0 tokens used, OpenClaw previously cleared the session's totalTokens by setting it to undefined. This caused the TUI to display 'tokens ?' during active sessions. By recording the last reported count in a new totalTokensEstimate field and flagging stale counts via totalTokensFresh (which resolveTotalTokens and resolveFreshSessionTotalTokens now honor), the last known token count is preserved, which is especially important when using context engines like lossless-claw that estimate tokens independently. --- src/agents/command/session-store.ts | 11 +++++++++++ src/agents/subagent-announce.ts | 3 ++- src/auto-reply/reply/commands-context-report.ts | 9 +++++++-- src/auto-reply/reply/session.ts | 3 ++- src/auto-reply/status.ts | 3 ++- src/config/sessions/types.ts | 5 +++++ src/cron/isolated-agent/run.ts | 13 +++++++++++++ src/shared/subagents-format.ts | 7 ++++++- src/tui/tui-types.ts | 2 ++ 9 files changed, 50 insertions(+), 6 deletions(-) diff --git a/src/agents/command/session-store.ts b/src/agents/command/session-store.ts index 0df9d66dc72..3922bf61996 100644 --- a/src/agents/command/session-store.ts +++ b/src/agents/command/session-store.ts @@ -68,10 +68,17 @@ export async function updateSessionStoreAfterAgentRun(params: { updatedAt: Date.now(), contextTokens, }; + const modelChanged = + (entry.model !== undefined && entry.model !== modelUsed) || + (entry.modelProvider !== undefined && entry.modelProvider !== providerUsed); setSessionRuntimeModel(next, { provider: providerUsed, model: modelUsed, }); + if (modelChanged) { + next.totalTokens = undefined; + next.totalTokensFresh = false; + } if (isCliProvider(providerUsed, cfg)) { const cliSessionId = result.meta.agentMeta?.sessionId?.trim(); if (cliSessionId) { @@ -105,9 +112,13 @@ export async function updateSessionStoreAfterAgentRun(params: { if (typeof totalTokens === "number" && Number.isFinite(totalTokens) && totalTokens > 0) { next.totalTokens
= totalTokens; next.totalTokensFresh = true; + next.totalTokensEstimate = totalTokens; } else { next.totalTokens = undefined; next.totalTokensFresh = false; + if (typeof totalTokens === "number" && Number.isFinite(totalTokens)) { + next.totalTokensEstimate = totalTokens; + } } next.cacheRead = usage.cacheRead ?? 0; next.cacheWrite = usage.cacheWrite ?? 0; diff --git a/src/agents/subagent-announce.ts b/src/agents/subagent-announce.ts index ab2fbb1140e..570fadc8dc1 100644 --- a/src/agents/subagent-announce.ts +++ b/src/agents/subagent-announce.ts @@ -1,3 +1,4 @@ +import { resolveTotalTokens } from "../shared/subagents-format.js"; import { resolveQueueSettings } from "../auto-reply/reply/queue.js"; import { isSilentReplyText, SILENT_REPLY_TOKEN } from "../auto-reply/tokens.js"; import { DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH } from "../config/agent-limits.js"; @@ -550,7 +551,7 @@ async function buildCompactAnnounceStatsLine(params: { const input = typeof entry?.inputTokens === "number" ? entry.inputTokens : 0; const output = typeof entry?.outputTokens === "number" ? entry.outputTokens : 0; const ioTotal = input + output; - const promptCache = typeof entry?.totalTokens === "number" ? entry.totalTokens : undefined; + const promptCache = resolveTotalTokens(entry as any); const runtimeMs = typeof params.startedAt === "number" && typeof params.endedAt === "number" ? 
Math.max(0, params.endedAt - params.startedAt) diff --git a/src/auto-reply/reply/commands-context-report.ts b/src/auto-reply/reply/commands-context-report.ts index cbf190c4c88..2c2c4f531fe 100644 --- a/src/auto-reply/reply/commands-context-report.ts +++ b/src/auto-reply/reply/commands-context-report.ts @@ -4,7 +4,10 @@ import { resolveBootstrapTotalMaxChars, } from "../../agents/pi-embedded-helpers.js"; import { buildSystemPromptReport } from "../../agents/system-prompt-report.js"; -import type { SessionSystemPromptReport } from "../../config/sessions/types.js"; +import { + resolveFreshSessionTotalTokens, + type SessionSystemPromptReport, +} from "../../config/sessions/types.js"; import type { ReplyPayload } from "../types.js"; import { resolveCommandsSystemPromptBundle } from "./commands-system-prompt.js"; import type { HandleCommandsParams } from "./commands-types.js"; @@ -96,8 +99,10 @@ export async function buildContextReply(params: HandleCommandsParams): Promise 0 && parentTokens > parentForkMaxTokens) { // Parent context is too large — forking would create a thread session // that immediately overflows the model's context window. Start fresh diff --git a/src/auto-reply/status.ts b/src/auto-reply/status.ts index 1b7aa2a87ec..2b0736e3458 100644 --- a/src/auto-reply/status.ts +++ b/src/auto-reply/status.ts @@ -14,6 +14,7 @@ import { resolveChannelModelOverride } from "../channels/model-overrides.js"; import { isCommandFlagEnabled } from "../config/commands.js"; import type { OpenClawConfig } from "../config/config.js"; import { + resolveFreshSessionTotalTokens, resolveMainSessionKey, resolveSessionFilePath, resolveSessionFilePathOptions, @@ -460,7 +461,7 @@ export function buildStatusMessage(args: StatusArgs): string { let outputTokens = entry?.outputTokens; let cacheRead = entry?.cacheRead; let cacheWrite = entry?.cacheWrite; - let totalTokens = entry?.totalTokens ?? (entry?.inputTokens ?? 0) + (entry?.outputTokens ?? 
0); + let totalTokens = resolveFreshSessionTotalTokens(entry) ?? (entry?.inputTokens ?? 0) + (entry?.outputTokens ?? 0); // Prefer prompt-size tokens from the session transcript when it looks larger // (cached prompt tokens are often missing from agent meta/store). diff --git a/src/config/sessions/types.ts b/src/config/sessions/types.ts index 6513fc81b37..d613d5bbf4c 100644 --- a/src/config/sessions/types.ts +++ b/src/config/sessions/types.ts @@ -142,6 +142,11 @@ export type SessionEntry = { inputTokens?: number; outputTokens?: number; totalTokens?: number; + /** + * Last known total tokens (including summaries), used for display when + * a fresh model-reported count is unavailable. + */ + totalTokensEstimate?: number; /** * Whether totalTokens reflects a fresh context snapshot for the latest run. * Undefined means legacy/unknown freshness; false forces consumers to treat diff --git a/src/cron/isolated-agent/run.ts b/src/cron/isolated-agent/run.ts index 1a122f56864..24800586aae 100644 --- a/src/cron/isolated-agent/run.ts +++ b/src/cron/isolated-agent/run.ts @@ -752,10 +752,19 @@ export async function runCronIsolatedAgentTurn(params: { lookupContextTokens(modelUsed, { allowAsyncLoad: false }) ?? 
DEFAULT_CONTEXT_TOKENS; + const modelChanged = + (cronSession.sessionEntry.model !== undefined && + cronSession.sessionEntry.model !== modelUsed) || + (cronSession.sessionEntry.modelProvider !== undefined && + cronSession.sessionEntry.modelProvider !== providerUsed); setSessionRuntimeModel(cronSession.sessionEntry, { provider: providerUsed, model: modelUsed, }); + if (modelChanged) { + cronSession.sessionEntry.totalTokens = undefined; + cronSession.sessionEntry.totalTokensFresh = false; + } cronSession.sessionEntry.contextTokens = contextTokens; if (isCliProvider(providerUsed, cfgWithAgentDefaults)) { const cliSessionId = finalRunResult.meta?.agentMeta?.sessionId?.trim(); @@ -790,10 +799,14 @@ export async function runCronIsolatedAgentTurn(params: { if (typeof totalTokens === "number" && Number.isFinite(totalTokens) && totalTokens > 0) { cronSession.sessionEntry.totalTokens = totalTokens; cronSession.sessionEntry.totalTokensFresh = true; + cronSession.sessionEntry.totalTokensEstimate = totalTokens; telemetryUsage.total_tokens = totalTokens; } else { cronSession.sessionEntry.totalTokens = undefined; cronSession.sessionEntry.totalTokensFresh = false; + if (typeof totalTokens === "number" && Number.isFinite(totalTokens)) { + cronSession.sessionEntry.totalTokensEstimate = totalTokens; + } } cronSession.sessionEntry.cacheRead = usage.cacheRead ?? 0; cronSession.sessionEntry.cacheWrite = usage.cacheWrite ?? 
0; diff --git a/src/shared/subagents-format.ts b/src/shared/subagents-format.ts index 643c4b58ca5..b18f1ff8a2d 100644 --- a/src/shared/subagents-format.ts +++ b/src/shared/subagents-format.ts @@ -42,6 +42,7 @@ export function truncateLine(value: string, maxLength: number) { export type TokenUsageLike = { totalTokens?: unknown; + totalTokensFresh?: unknown; inputTokens?: unknown; outputTokens?: unknown; }; @@ -50,7 +51,11 @@ export function resolveTotalTokens(entry?: TokenUsageLike) { if (!entry || typeof entry !== "object") { return undefined; } - if (typeof entry.totalTokens === "number" && Number.isFinite(entry.totalTokens)) { + if ( + entry.totalTokensFresh !== false && + typeof entry.totalTokens === "number" && + Number.isFinite(entry.totalTokens) + ) { return entry.totalTokens; } const input = typeof entry.inputTokens === "number" ? entry.inputTokens : 0; diff --git a/src/tui/tui-types.ts b/src/tui/tui-types.ts index eeda9693ebf..ce73e9d90c9 100644 --- a/src/tui/tui-types.ts +++ b/src/tui/tui-types.ts @@ -48,6 +48,7 @@ export type SessionInfo = { inputTokens?: number | null; outputTokens?: number | null; totalTokens?: number | null; + totalTokensEstimate?: number | null; responseUsage?: ResponseUsageMode; updatedAt?: number | null; displayName?: string; @@ -91,6 +92,7 @@ export type GatewayStatusSummary = { age?: number | null; model?: string | null; totalTokens?: number | null; + totalTokensEstimate?: number | null; contextTokens?: number | null; remainingTokens?: number | null; percentUsed?: number | null;