Compare commits

...

7 Commits

Author SHA1 Message Date
Peter Steinberger
3f0603df54 fix: resolve responsePrefix templates for routed replies (#928) (thanks @sebslight) 2026-01-15 05:26:42 +00:00
Sebastian
f32fc1d5d0 Remove debug logging for responsePrefix template resolution 2026-01-15 05:21:33 +00:00
Sebastian
d34a1f1d25 debug: use console.log instead of logVerbose for always-visible logging 2026-01-15 05:21:33 +00:00
Sebastian
31aa46dea9 debug: add responsePrefix template logging 2026-01-15 05:21:33 +00:00
Sebastian
b3a1024f65 fix: mutate prefixContext object instead of reassigning for closure correctness 2026-01-15 05:21:33 +00:00
Sebastian
7fbd2ab07b debug: add prefix template resolution logging 2026-01-15 05:21:33 +00:00
Sebastian
3908ea25bc feat: add dynamic template variables to messages.responsePrefix (#923)
Adds support for template variables in `messages.responsePrefix` that
resolve dynamically at runtime with the actual model used (including
after fallback).

Supported variables (case-insensitive):
- {model} - short model name (e.g., "claude-opus-4-5", "gpt-4o")
- {modelFull} - full model identifier (e.g., "anthropic/claude-opus-4-5")
- {provider} - provider name (e.g., "anthropic", "openai")
- {thinkingLevel} or {think} - thinking level ("high", "low", "off")
- {identity.name} or {identityName} - agent identity name

Example: "[{model} | think:{thinkingLevel}]" → "[claude-opus-4-5 | think:high]"

Variables show the actual model used after fallback, not the intended
model. Unresolved variables remain as literal text.

Implementation:
- New module: src/auto-reply/reply/response-prefix-template.ts
- Template interpolation in normalize-reply.ts via context provider
- onModelSelected callback in agent-runner-execution.ts
- Updated all 6 provider message handlers (web, signal, discord,
  telegram, slack, imessage)
- 27 unit tests covering all variables and edge cases
- Documentation in docs/gateway/configuration.md and JSDoc

Fixes #923
2026-01-15 05:21:33 +00:00
19 changed files with 525 additions and 10 deletions

View File

@ -6,6 +6,7 @@
- Docs: clarify per-agent auth stores, sandboxed skill binaries, and elevated semantics.
- Docs: add FAQ entries for missing provider auth after adding agents and Gemini thinking signature errors.
- Agents: add optional auth-profile copy prompt on `agents add` and improve auth error messaging.
- Messages: add responsePrefix template variables (model/provider/identity/think), including routed replies. (#928) — thanks @sebslight.
- Security: add `clawdbot security audit` (`--deep`, `--fix`) and surface it in `status --all` and `doctor`.
- Security: add `clawdbot security audit` (`--deep`, `--fix`) and surface it in `status --all` and `doctor` (includes browser control exposure checks).
- Plugins: add Zalo channel plugin with gateway HTTP hooks and onboarding install prompt. (#854) — thanks @longmaba.

View File

@ -1209,6 +1209,31 @@ streaming, final replies) across channels unless already present.
If `messages.responsePrefix` is unset, no prefix is applied by default.
Set it to `"auto"` to derive `[{identity.name}]` for the routed agent (when set).
#### Template variables
The `responsePrefix` string can include template variables that resolve dynamically:
| Variable | Description | Example |
|----------|-------------|---------|
| `{model}` | Short model name | `claude-opus-4-5`, `gpt-4o` |
| `{modelFull}` | Full model identifier | `anthropic/claude-opus-4-5` |
| `{provider}` | Provider name | `anthropic`, `openai` |
| `{thinkingLevel}` | Current thinking level | `high`, `low`, `off` |
| `{identity.name}` | Agent identity name | (same as `"auto"` mode) |
Variables are case-insensitive (`{MODEL}` = `{model}`). `{think}` is an alias for `{thinkingLevel}`.
Unresolved variables remain as literal text.
```json5
{
messages: {
responsePrefix: "[{model} | think:{thinkingLevel}]"
}
}
```
Example output: `[claude-opus-4-5 | think:high] Here's my response...`
WhatsApp inbound prefix is configured via `channels.whatsapp.messagePrefix` (deprecated:
`messages.messagePrefix`). Default stays **unchanged**: `"[clawdbot]"` when
`channels.whatsapp.allowFrom` is empty, otherwise `""` (no prefix). When using

View File

@ -26,6 +26,11 @@ export function resolveIdentityNamePrefix(
return `[${name}]`;
}
/** Returns just the identity name (without brackets) for template context. */
export function resolveIdentityName(cfg: ClawdbotConfig, agentId: string): string | undefined {
  const identity = resolveAgentIdentity(cfg, agentId);
  const trimmed = identity?.name?.trim();
  // Empty/whitespace-only names collapse to undefined so callers can treat "no name" uniformly.
  return trimmed ? trimmed : undefined;
}
export function resolveMessagePrefix(
cfg: ClawdbotConfig,
agentId: string,

View File

@ -125,6 +125,14 @@ export async function runAgentTurnWithFallback(params: {
resolveAgentIdFromSessionKey(params.followupRun.run.sessionKey),
),
run: (provider, model) => {
// Notify that model selection is complete (including after fallback).
// This allows responsePrefix template interpolation with the actual model.
params.opts?.onModelSelected?.({
provider,
model,
thinkLevel: params.followupRun.run.thinkLevel,
});
if (isCliProvider(provider, params.followupRun.run.config)) {
const startedAt = Date.now();
emitAgentEvent({

View File

@ -1,12 +1,18 @@
import { resolveSessionAgentId } from "../../agents/agent-scope.js";
import { resolveIdentityName } from "../../agents/identity.js";
import type { ClawdbotConfig } from "../../config/config.js";
import { logVerbose } from "../../globals.js";
import { getReplyFromConfig } from "../reply.js";
import type { MsgContext } from "../templating.js";
import type { GetReplyOptions, ReplyPayload } from "../types.js";
import type { GetReplyOptions, ModelSelectedContext, ReplyPayload } from "../types.js";
import { tryFastAbortFromMessage } from "./abort.js";
import { shouldSkipDuplicateInbound } from "./inbound-dedupe.js";
import type { ReplyDispatcher, ReplyDispatchKind } from "./reply-dispatcher.js";
import { isRoutableChannel, routeReply } from "./route-reply.js";
import {
applyModelSelectionToResponsePrefixContext,
createResponsePrefixContext,
} from "./response-prefix-template.js";
export type DispatchFromConfigResult = {
queuedFinal: boolean;
@ -39,6 +45,23 @@ export async function dispatchReplyFromConfig(params: {
const shouldRouteToOriginating =
isRoutableChannel(originatingChannel) && originatingTo && originatingChannel !== currentSurface;
const sessionAgentId = resolveSessionAgentId({
sessionKey: ctx.SessionKey,
config: cfg,
});
const responsePrefixContext = shouldRouteToOriginating
? createResponsePrefixContext(resolveIdentityName(cfg, sessionAgentId))
: undefined;
const onModelSelected =
responsePrefixContext || params.replyOptions?.onModelSelected
? (selection: ModelSelectedContext) => {
if (responsePrefixContext) {
applyModelSelectionToResponsePrefixContext(responsePrefixContext, selection);
}
params.replyOptions?.onModelSelected?.(selection);
}
: undefined;
/**
* Helper to send a payload via route-reply (async).
* Only used when actually routing to a different provider.
@ -61,6 +84,7 @@ export async function dispatchReplyFromConfig(params: {
accountId: ctx.AccountId,
threadId: ctx.MessageThreadId,
cfg,
responsePrefixContext,
abortSignal,
});
if (!result.ok) {
@ -82,6 +106,7 @@ export async function dispatchReplyFromConfig(params: {
accountId: ctx.AccountId,
threadId: ctx.MessageThreadId,
cfg,
responsePrefixContext,
});
queuedFinal = result.ok;
if (result.ok) routedFinalCount += 1;
@ -103,6 +128,7 @@ export async function dispatchReplyFromConfig(params: {
ctx,
{
...params.replyOptions,
onModelSelected,
onToolResult: (payload: ReplyPayload) => {
if (shouldRouteToOriginating) {
// Fire-and-forget for streaming tool results when routing.
@ -140,6 +166,7 @@ export async function dispatchReplyFromConfig(params: {
accountId: ctx.AccountId,
threadId: ctx.MessageThreadId,
cfg,
responsePrefixContext,
});
if (!result.ok) {
logVerbose(

View File

@ -1,9 +1,15 @@
import { stripHeartbeatToken } from "../heartbeat.js";
import { HEARTBEAT_TOKEN, isSilentReplyText, SILENT_REPLY_TOKEN } from "../tokens.js";
import type { ReplyPayload } from "../types.js";
import {
resolveResponsePrefixTemplate,
type ResponsePrefixContext,
} from "./response-prefix-template.js";
export type NormalizeReplyOptions = {
responsePrefix?: string;
/** Context for template variable interpolation in responsePrefix */
responsePrefixContext?: ResponsePrefixContext;
onHeartbeatStrip?: () => void;
stripHeartbeat?: boolean;
silentToken?: string;
@ -36,13 +42,18 @@ export function normalizeReplyPayload(
text = stripped.text;
}
// Resolve template variables in responsePrefix if context is provided
const effectivePrefix = opts.responsePrefixContext
? resolveResponsePrefixTemplate(opts.responsePrefix, opts.responsePrefixContext)
: opts.responsePrefix;
if (
opts.responsePrefix &&
effectivePrefix &&
text &&
text.trim() !== HEARTBEAT_TOKEN &&
!text.startsWith(opts.responsePrefix)
!text.startsWith(effectivePrefix)
) {
text = `${opts.responsePrefix} ${text}`;
text = `${effectivePrefix} ${text}`;
}
return { ...payload, text };

View File

@ -1,6 +1,7 @@
import type { HumanDelayConfig } from "../../config/types.js";
import type { GetReplyOptions, ReplyPayload } from "../types.js";
import { normalizeReplyPayload } from "./normalize-reply.js";
import type { ResponsePrefixContext } from "./response-prefix-template.js";
import type { TypingController } from "./typing.js";
export type ReplyDispatchKind = "tool" | "block" | "final";
@ -33,6 +34,11 @@ const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
export type ReplyDispatcherOptions = {
deliver: ReplyDispatchDeliverer;
responsePrefix?: string;
/** Static context for response prefix template interpolation. */
responsePrefixContext?: ResponsePrefixContext;
/** Dynamic context provider for response prefix template interpolation.
* Called at normalization time, after model selection is complete. */
responsePrefixContextProvider?: () => ResponsePrefixContext;
onHeartbeatStrip?: () => void;
onIdle?: () => void;
onError?: ReplyDispatchErrorHandler;
@ -61,10 +67,17 @@ export type ReplyDispatcher = {
function normalizeReplyPayloadInternal(
payload: ReplyPayload,
opts: Pick<ReplyDispatcherOptions, "responsePrefix" | "onHeartbeatStrip">,
opts: Pick<
ReplyDispatcherOptions,
"responsePrefix" | "responsePrefixContext" | "responsePrefixContextProvider" | "onHeartbeatStrip"
>,
): ReplyPayload | null {
// Prefer dynamic context provider over static context
const prefixContext = opts.responsePrefixContextProvider?.() ?? opts.responsePrefixContext;
return normalizeReplyPayload(payload, {
responsePrefix: opts.responsePrefix,
responsePrefixContext: prefixContext,
onHeartbeatStrip: opts.onHeartbeatStrip,
});
}

View File

@ -0,0 +1,181 @@
import { describe, expect, it } from "vitest";
import {
extractShortModelName,
hasTemplateVariables,
resolveResponsePrefixTemplate,
} from "./response-prefix-template.js";
// Unit tests for resolveResponsePrefixTemplate: per-variable substitution,
// aliases, case-insensitivity, and verbatim pass-through of unresolved
// or unrecognized placeholders.
describe("resolveResponsePrefixTemplate", () => {
  it("returns undefined for undefined template", () => {
    expect(resolveResponsePrefixTemplate(undefined, {})).toBeUndefined();
  });
  it("returns template as-is when no variables present", () => {
    expect(resolveResponsePrefixTemplate("[Claude]", {})).toBe("[Claude]");
  });
  it("resolves {model} variable", () => {
    const result = resolveResponsePrefixTemplate("[{model}]", {
      model: "gpt-5.2",
    });
    expect(result).toBe("[gpt-5.2]");
  });
  it("resolves {modelFull} variable", () => {
    const result = resolveResponsePrefixTemplate("[{modelFull}]", {
      modelFull: "openai-codex/gpt-5.2",
    });
    expect(result).toBe("[openai-codex/gpt-5.2]");
  });
  it("resolves {provider} variable", () => {
    const result = resolveResponsePrefixTemplate("[{provider}]", {
      provider: "anthropic",
    });
    expect(result).toBe("[anthropic]");
  });
  it("resolves {thinkingLevel} variable", () => {
    const result = resolveResponsePrefixTemplate("think:{thinkingLevel}", {
      thinkingLevel: "high",
    });
    expect(result).toBe("think:high");
  });
  // Note: the context key is always `thinkingLevel`; only the template-side
  // spelling `{think}` is aliased.
  it("resolves {think} as alias for thinkingLevel", () => {
    const result = resolveResponsePrefixTemplate("think:{think}", {
      thinkingLevel: "low",
    });
    expect(result).toBe("think:low");
  });
  it("resolves {identity.name} variable", () => {
    const result = resolveResponsePrefixTemplate("[{identity.name}]", {
      identityName: "Clawdbot",
    });
    expect(result).toBe("[Clawdbot]");
  });
  it("resolves {identityName} as alias", () => {
    const result = resolveResponsePrefixTemplate("[{identityName}]", {
      identityName: "Clawdbot",
    });
    expect(result).toBe("[Clawdbot]");
  });
  it("resolves multiple variables", () => {
    const result = resolveResponsePrefixTemplate("[{model} | think:{thinkingLevel}]", {
      model: "claude-opus-4-5",
      thinkingLevel: "high",
    });
    expect(result).toBe("[claude-opus-4-5 | think:high]");
  });
  // Known variable with no context value: keep the literal placeholder.
  it("leaves unresolved variables as-is", () => {
    const result = resolveResponsePrefixTemplate("[{model}]", {});
    expect(result).toBe("[{model}]");
  });
  // Variable name not in the supported set: also kept verbatim.
  it("leaves unrecognized variables as-is", () => {
    const result = resolveResponsePrefixTemplate("[{unknownVar}]", {
      model: "gpt-5.2",
    });
    expect(result).toBe("[{unknownVar}]");
  });
  it("handles case insensitivity", () => {
    const result = resolveResponsePrefixTemplate("[{MODEL} | {ThinkingLevel}]", {
      model: "gpt-5.2",
      thinkingLevel: "low",
    });
    expect(result).toBe("[gpt-5.2 | low]");
  });
  it("handles mixed resolved and unresolved variables", () => {
    const result = resolveResponsePrefixTemplate("[{model} | {provider}]", {
      model: "gpt-5.2",
      // provider not provided
    });
    expect(result).toBe("[gpt-5.2 | {provider}]");
  });
  it("handles complex template with all variables", () => {
    const result = resolveResponsePrefixTemplate(
      "[{identity.name}] {provider}/{model} (think:{thinkingLevel})",
      {
        identityName: "Clawdbot",
        provider: "anthropic",
        model: "claude-opus-4-5",
        thinkingLevel: "high",
      },
    );
    expect(result).toBe("[Clawdbot] anthropic/claude-opus-4-5 (think:high)");
  });
});
describe("extractShortModelName", () => {
it("strips provider prefix", () => {
expect(extractShortModelName("openai/gpt-5.2")).toBe("gpt-5.2");
expect(extractShortModelName("anthropic/claude-opus-4-5")).toBe("claude-opus-4-5");
expect(extractShortModelName("openai-codex/gpt-5.2-codex")).toBe("gpt-5.2-codex");
});
it("strips date suffix", () => {
expect(extractShortModelName("claude-opus-4-5-20251101")).toBe("claude-opus-4-5");
expect(extractShortModelName("gpt-5.2-20250115")).toBe("gpt-5.2");
});
it("strips -latest suffix", () => {
expect(extractShortModelName("gpt-5.2-latest")).toBe("gpt-5.2");
expect(extractShortModelName("claude-sonnet-latest")).toBe("claude-sonnet");
});
it("handles model without provider", () => {
expect(extractShortModelName("gpt-5.2")).toBe("gpt-5.2");
expect(extractShortModelName("claude-opus-4-5")).toBe("claude-opus-4-5");
});
it("handles full path with provider and date suffix", () => {
expect(extractShortModelName("anthropic/claude-opus-4-5-20251101")).toBe("claude-opus-4-5");
});
it("preserves version numbers that look like dates but are not", () => {
// Date suffix must be exactly 8 digits at the end
expect(extractShortModelName("model-v1234567")).toBe("model-v1234567");
expect(extractShortModelName("model-123456789")).toBe("model-123456789");
});
});
// Unit tests for hasTemplateVariables: nullish/empty inputs, static strings,
// and repeated calls (guarding against shared global-regex lastIndex state).
describe("hasTemplateVariables", () => {
  it("returns false for undefined", () => {
    expect(hasTemplateVariables(undefined)).toBe(false);
  });
  it("returns false for empty string", () => {
    expect(hasTemplateVariables("")).toBe(false);
  });
  it("returns false for static prefix", () => {
    expect(hasTemplateVariables("[Claude]")).toBe(false);
  });
  it("returns true when template variables present", () => {
    const positives = ["[{model}]", "{provider}", "prefix {thinkingLevel} suffix"];
    for (const candidate of positives) {
      expect(hasTemplateVariables(candidate)).toBe(true);
    }
  });
  it("returns true for multiple variables", () => {
    expect(hasTemplateVariables("[{model} | {provider}]")).toBe(true);
  });
  it("handles consecutive calls correctly (regex lastIndex reset)", () => {
    // Asking twice about the same input must not flip the answer.
    expect(hasTemplateVariables("[{model}]")).toBe(true);
    expect(hasTemplateVariables("[{model}]")).toBe(true);
    expect(hasTemplateVariables("[Claude]")).toBe(false);
  });
});

View File

@ -0,0 +1,117 @@
/**
* Template interpolation for response prefix.
*
* Supports variables like `{model}`, `{provider}`, `{thinkingLevel}`, etc.
* Variables are case-insensitive and unresolved ones remain as literal text.
*/
/** Snapshot of runtime values available for `{variable}` interpolation in responsePrefix. */
export type ResponsePrefixContext = {
  /** Short model name (e.g., "gpt-5.2", "claude-opus-4-5") */
  model?: string;
  /** Full model ID including provider (e.g., "openai-codex/gpt-5.2") */
  modelFull?: string;
  /** Provider name (e.g., "openai-codex", "anthropic") */
  provider?: string;
  /** Current thinking level (e.g., "high", "low", "off") */
  thinkingLevel?: string;
  /** Agent identity name */
  identityName?: string;
};

/** Model selection as reported by the runner, before short-name normalization. */
export type ModelSelectionInfo = {
  /** Provider id (e.g., "anthropic"). */
  provider: string;
  /** Raw model id; may still carry a date suffix (it is shortened on apply). */
  model: string;
  /** Thinking level; an absent value is treated as "off" when applied. */
  thinkLevel?: string;
};
/** Build the initial prefix context, seeded with the agent identity name when known. */
export function createResponsePrefixContext(identityName?: string): ResponsePrefixContext {
  const context: ResponsePrefixContext = {};
  if (identityName) {
    context.identityName = identityName;
  }
  return context;
}
// Matches template variables of the form {variableName} or {variable.name}.
const TEMPLATE_VAR_PATTERN = /\{([a-zA-Z][a-zA-Z0-9.]*)\}/g;

/**
 * Interpolate template variables in a response prefix string.
 *
 * Variable names are case-insensitive; unknown or unresolved variables are
 * left in the output verbatim.
 *
 * @param template - The template string with `{variable}` placeholders
 * @param context - Context object with values for interpolation
 * @returns The interpolated string, or undefined if template is undefined
 *
 * @example
 * resolveResponsePrefixTemplate("[{model} | think:{thinkingLevel}]", {
 *   model: "gpt-5.2",
 *   thinkingLevel: "high"
 * })
 * // Returns: "[gpt-5.2 | think:high]"
 */
export function resolveResponsePrefixTemplate(
  template: string | undefined,
  context: ResponsePrefixContext,
): string | undefined {
  if (!template) return undefined;
  // Normalized (lowercased) variable name -> context value. A Map avoids any
  // accidental hits on Object.prototype keys (e.g. "{constructor}").
  const lookup = new Map<string, string | undefined>([
    ["model", context.model],
    ["modelfull", context.modelFull],
    ["provider", context.provider],
    ["thinkinglevel", context.thinkingLevel],
    ["think", context.thinkingLevel],
    ["identity.name", context.identityName],
    ["identityname", context.identityName],
  ]);
  return template.replace(TEMPLATE_VAR_PATTERN, (match, varName: string) => {
    const value = lookup.get(varName.toLowerCase());
    // Missing context value (or unrecognized variable) keeps the literal text.
    return value ?? match;
  });
}
/**
 * Extract short model name from a full model string.
 *
 * Strips:
 * - Provider prefix (e.g., "openai/" from "openai/gpt-5.2")
 * - Date suffixes (e.g., "-20251101" from "claude-opus-4-5-20251101")
 * - Common version suffixes (e.g., "-latest")
 *
 * @example
 * extractShortModelName("openai-codex/gpt-5.2") // "gpt-5.2"
 * extractShortModelName("claude-opus-4-5-20251101") // "claude-opus-4-5"
 * extractShortModelName("gpt-5.2-latest") // "gpt-5.2"
 */
export function extractShortModelName(fullModel: string): string {
  // Keep only the segment after the final "/" (drops the provider prefix).
  const segments = fullModel.split("/");
  let shortName = segments[segments.length - 1] ?? fullModel;
  // Drop a trailing YYYYMMDD date stamp (exactly 8 digits), then a "-latest" tag.
  shortName = shortName.replace(/-\d{8}$/, "");
  shortName = shortName.replace(/-latest$/, "");
  return shortName;
}
/**
 * Copy a completed model selection into the prefix context.
 *
 * Mutates the context in place (rather than replacing it) so closures that
 * captured the object observe the update.
 */
export function applyModelSelectionToResponsePrefixContext(
  context: ResponsePrefixContext,
  selection: ModelSelectionInfo,
): void {
  const { provider, model, thinkLevel } = selection;
  context.provider = provider;
  context.model = extractShortModelName(model);
  context.modelFull = `${provider}/${model}`;
  // Absent thinking level is surfaced as "off" for template output.
  context.thinkingLevel = thinkLevel ?? "off";
}
/**
 * Check if a template string contains any template variables.
 */
export function hasTemplateVariables(template: string | undefined): boolean {
  if (!template) return false;
  // A fresh non-global regex carries no shared lastIndex state, so repeated
  // calls are always consistent (same pattern as TEMPLATE_VAR_PATTERN).
  return /\{[a-zA-Z][a-zA-Z0-9.]*\}/.test(template);
}

View File

@ -111,6 +111,28 @@ describe("routeReply", () => {
);
});
it("resolves responsePrefix template variables when context is provided", async () => {
mocks.sendMessageSlack.mockClear();
const cfg = {
messages: { responsePrefix: "[{model} | {identity.name}]" },
} as unknown as ClawdbotConfig;
await routeReply({
payload: { text: "hi" },
channel: "slack",
to: "channel:C123",
cfg,
responsePrefixContext: {
model: "gpt-5.2",
identityName: "Clawdbot",
},
});
expect(mocks.sendMessageSlack).toHaveBeenCalledWith(
"channel:C123",
"[gpt-5.2 | Clawdbot] hi",
expect.any(Object),
);
});
it("does not derive responsePrefix from agent identity when routing", async () => {
mocks.sendMessageSlack.mockClear();
const cfg = {

View File

@ -15,6 +15,7 @@ import { INTERNAL_MESSAGE_CHANNEL } from "../../utils/message-channel.js";
import type { OriginatingChannelType } from "../templating.js";
import type { ReplyPayload } from "../types.js";
import { normalizeReplyPayload } from "./normalize-reply.js";
import type { ResponsePrefixContext } from "./response-prefix-template.js";
export type RouteReplyParams = {
/** The reply payload to send. */
@ -31,6 +32,8 @@ export type RouteReplyParams = {
threadId?: number;
/** Config for provider-specific settings. */
cfg: ClawdbotConfig;
/** Optional response prefix template context (model/provider/identity). */
responsePrefixContext?: ResponsePrefixContext;
/** Optional abort signal for cooperative cancellation. */
abortSignal?: AbortSignal;
};
@ -69,6 +72,7 @@ export async function routeReply(params: RouteReplyParams): Promise<RouteReplyRe
: cfg.messages?.responsePrefix;
const normalized = normalizeReplyPayload(payload, {
responsePrefix,
responsePrefixContext: params.responsePrefixContext,
});
if (!normalized) return { ok: true };

View File

@ -5,6 +5,13 @@ export type BlockReplyContext = {
timeoutMs?: number;
};
/** Context passed to onModelSelected callback with actual model used. */
export type ModelSelectedContext = {
provider: string;
model: string;
thinkLevel: string | undefined;
};
export type GetReplyOptions = {
onReplyStart?: () => Promise<void> | void;
onTypingController?: (typing: TypingController) => void;
@ -13,6 +20,9 @@ export type GetReplyOptions = {
onReasoningStream?: (payload: ReplyPayload) => Promise<void> | void;
onBlockReply?: (payload: ReplyPayload, context?: BlockReplyContext) => Promise<void> | void;
onToolResult?: (payload: ReplyPayload) => Promise<void> | void;
/** Called when the actual model is selected (including after fallback).
* Use this to get model/provider/thinkLevel for responsePrefix template interpolation. */
onModelSelected?: (ctx: ModelSelectedContext) => void;
disableBlockStreaming?: boolean;
/** Timeout for block reply delivery (ms). */
blockReplyTimeoutMs?: number;

View File

@ -44,8 +44,21 @@ export type MessagesConfig = {
messagePrefix?: string;
/**
* Prefix auto-added to all outbound replies.
* - string: explicit prefix
*
* - string: explicit prefix (may include template variables)
* - special value: `"auto"` derives `[{agents.list[].identity.name}]` for the routed agent (when set)
*
* Supported template variables (case-insensitive):
* - `{model}` - short model name (e.g., `claude-opus-4-5`, `gpt-4o`)
* - `{modelFull}` - full model identifier (e.g., `anthropic/claude-opus-4-5`)
* - `{provider}` - provider name (e.g., `anthropic`, `openai`)
* - `{thinkingLevel}` or `{think}` - current thinking level (`high`, `low`, `off`)
* - `{identity.name}` or `{identityName}` - agent identity name
*
* Example: `"[{model} | think:{thinkingLevel}]"` → `"[claude-opus-4-5 | think:high]"`
*
* Unresolved variables remain as literal text (e.g., `{model}` if context unavailable).
*
* Default: none
*/
responsePrefix?: string;

View File

@ -2,7 +2,12 @@ import {
resolveAckReaction,
resolveEffectiveMessagesConfig,
resolveHumanDelayConfig,
resolveIdentityName,
} from "../../agents/identity.js";
import {
applyModelSelectionToResponsePrefixContext,
createResponsePrefixContext,
} from "../../auto-reply/reply/response-prefix-template.js";
import { formatAgentEnvelope, formatThreadStarterEnvelope } from "../../auto-reply/envelope.js";
import { dispatchReplyFromConfig } from "../../auto-reply/reply/dispatch-from-config.js";
import { buildHistoryContextFromMap, clearHistoryEntries } from "../../auto-reply/reply/history.js";
@ -280,8 +285,12 @@ export async function processDiscordMessage(ctx: DiscordMessagePreflightContext)
const typingChannelId = deliverTarget.startsWith("channel:")
? deliverTarget.slice("channel:".length)
: message.channelId;
const prefixContext = createResponsePrefixContext(resolveIdentityName(cfg, route.agentId));
const { dispatcher, replyOptions, markDispatchIdle } = createReplyDispatcherWithTyping({
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
deliver: async (payload: ReplyPayload) => {
const replyToId = replyReference.use();
@ -316,6 +325,9 @@ export async function processDiscordMessage(ctx: DiscordMessagePreflightContext)
typeof discordConfig?.blockStreaming === "boolean"
? !discordConfig.blockStreaming
: undefined,
onModelSelected: (ctx) => {
applyModelSelectionToResponsePrefixContext(prefixContext, ctx);
},
},
});
markDispatchIdle();

View File

@ -1,4 +1,12 @@
import { resolveEffectiveMessagesConfig, resolveHumanDelayConfig } from "../../agents/identity.js";
import {
resolveEffectiveMessagesConfig,
resolveHumanDelayConfig,
resolveIdentityName,
} from "../../agents/identity.js";
import {
applyModelSelectionToResponsePrefixContext,
createResponsePrefixContext,
} from "../../auto-reply/reply/response-prefix-template.js";
import { resolveTextChunkLimit } from "../../auto-reply/chunk.js";
import { hasControlCommand } from "../../auto-reply/command-detection.js";
import { formatAgentEnvelope } from "../../auto-reply/envelope.js";
@ -341,8 +349,12 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P
}
let didSendReply = false;
const prefixContext = createResponsePrefixContext(resolveIdentityName(cfg, route.agentId));
const dispatcher = createReplyDispatcher({
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
deliver: async (payload) => {
await deliverReplies({
@ -370,6 +382,9 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P
typeof accountInfo.config.blockStreaming === "boolean"
? !accountInfo.config.blockStreaming
: undefined,
onModelSelected: (ctx) => {
applyModelSelectionToResponsePrefixContext(prefixContext, ctx);
},
},
});
if (!queuedFinal) {

View File

@ -1,4 +1,12 @@
import { resolveEffectiveMessagesConfig, resolveHumanDelayConfig } from "../../agents/identity.js";
import {
resolveEffectiveMessagesConfig,
resolveHumanDelayConfig,
resolveIdentityName,
} from "../../agents/identity.js";
import {
applyModelSelectionToResponsePrefixContext,
createResponsePrefixContext,
} from "../../auto-reply/reply/response-prefix-template.js";
import { formatAgentEnvelope } from "../../auto-reply/envelope.js";
import { dispatchReplyFromConfig } from "../../auto-reply/reply/dispatch-from-config.js";
import { buildHistoryContextFromMap, clearHistoryEntries } from "../../auto-reply/reply/history.js";
@ -310,8 +318,14 @@ export function createSignalEventHandler(deps: SignalEventHandlerDeps) {
}
let didSendReply = false;
const prefixContext = createResponsePrefixContext(
resolveIdentityName(deps.cfg, route.agentId),
);
const dispatcher = createReplyDispatcher({
responsePrefix: resolveEffectiveMessagesConfig(deps.cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
humanDelay: resolveHumanDelayConfig(deps.cfg, route.agentId),
deliver: async (payload) => {
await deps.deliverReplies({
@ -338,6 +352,9 @@ export function createSignalEventHandler(deps: SignalEventHandlerDeps) {
replyOptions: {
disableBlockStreaming:
typeof deps.blockStreaming === "boolean" ? !deps.blockStreaming : undefined,
onModelSelected: (ctx) => {
applyModelSelectionToResponsePrefixContext(prefixContext, ctx);
},
},
});
if (!queuedFinal) {

View File

@ -1,7 +1,12 @@
import {
resolveEffectiveMessagesConfig,
resolveHumanDelayConfig,
resolveIdentityName,
} from "../../../agents/identity.js";
import {
applyModelSelectionToResponsePrefixContext,
createResponsePrefixContext,
} from "../../../auto-reply/reply/response-prefix-template.js";
import { dispatchReplyFromConfig } from "../../../auto-reply/reply/dispatch-from-config.js";
import { clearHistoryEntries } from "../../../auto-reply/reply/history.js";
import { createReplyDispatcherWithTyping } from "../../../auto-reply/reply/reply-dispatcher.js";
@ -62,8 +67,12 @@ export async function dispatchPreparedSlackMessage(prepared: PreparedSlackMessag
};
let didSendReply = false;
const prefixContext = createResponsePrefixContext(resolveIdentityName(cfg, route.agentId));
const { dispatcher, replyOptions, markDispatchIdle } = createReplyDispatcherWithTyping({
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
deliver: async (payload) => {
const replyThreadTs = replyPlan.nextThreadTs();
@ -104,6 +113,9 @@ export async function dispatchPreparedSlackMessage(prepared: PreparedSlackMessag
typeof account.config.blockStreaming === "boolean"
? !account.config.blockStreaming
: undefined,
onModelSelected: (ctx) => {
applyModelSelectionToResponsePrefixContext(prefixContext, ctx);
},
},
});
markDispatchIdle();

View File

@ -1,5 +1,9 @@
// @ts-nocheck
import { resolveEffectiveMessagesConfig } from "../agents/identity.js";
import { resolveEffectiveMessagesConfig, resolveIdentityName } from "../agents/identity.js";
import {
applyModelSelectionToResponsePrefixContext,
createResponsePrefixContext,
} from "../auto-reply/reply/response-prefix-template.js";
import { EmbeddedBlockChunker } from "../agents/pi-embedded-block-chunker.js";
import { clearHistoryEntries } from "../auto-reply/reply/history.js";
import { dispatchReplyWithBufferedBlockDispatcher } from "../auto-reply/reply/provider-dispatcher.js";
@ -114,12 +118,15 @@ export const dispatchTelegramMessage = async ({
Boolean(draftStream) ||
(typeof telegramCfg.blockStreaming === "boolean" ? !telegramCfg.blockStreaming : undefined);
const prefixContext = createResponsePrefixContext(resolveIdentityName(cfg, route.agentId));
let didSendReply = false;
const { queuedFinal } = await dispatchReplyWithBufferedBlockDispatcher({
ctx: ctxPayload,
cfg,
dispatcherOptions: {
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
deliver: async (payload, info) => {
if (info.kind === "final") {
await flushDraft();
@ -151,6 +158,9 @@ export const dispatchTelegramMessage = async ({
}
: undefined,
disableBlockStreaming,
onModelSelected: (ctx) => {
applyModelSelectionToResponsePrefixContext(prefixContext, ctx);
},
},
});
draftStream?.stop();

View File

@ -1,4 +1,8 @@
import { resolveEffectiveMessagesConfig } from "../../../agents/identity.js";
import { resolveEffectiveMessagesConfig, resolveIdentityName } from "../../../agents/identity.js";
import {
applyModelSelectionToResponsePrefixContext,
createResponsePrefixContext,
} from "../../../auto-reply/reply/response-prefix-template.js";
import { resolveTextChunkLimit } from "../../../auto-reply/chunk.js";
import { formatAgentEnvelope } from "../../../auto-reply/envelope.js";
import { buildHistoryContext } from "../../../auto-reply/reply/history.js";
@ -173,6 +177,10 @@ export async function processMessage(params: {
params.route.agentId,
).responsePrefix;
const prefixContext = createResponsePrefixContext(
resolveIdentityName(params.cfg, params.route.agentId),
);
const { queuedFinal } = await dispatchReplyWithBufferedBlockDispatcher({
ctx: {
Body: combinedBody,
@ -210,6 +218,7 @@ export async function processMessage(params: {
replyResolver: params.replyResolver,
dispatcherOptions: {
responsePrefix,
responsePrefixContextProvider: () => prefixContext,
onHeartbeatStrip: () => {
if (!didLogHeartbeatStrip) {
didLogHeartbeatStrip = true;
@ -267,6 +276,9 @@ export async function processMessage(params: {
typeof params.cfg.channels?.whatsapp?.blockStreaming === "boolean"
? !params.cfg.channels.whatsapp.blockStreaming
: undefined,
onModelSelected: (ctx) => {
applyModelSelectionToResponsePrefixContext(prefixContext, ctx);
},
},
});