* feat(bluebubbles): auto-strip markdown from outbound messages (#7402) * fix(security): add timeout to webhook body reading (#6762) Adds 30-second timeout to readBody() in voice-call, bluebubbles, and nostr webhook handlers. Prevents Slow-Loris DoS (CWE-400, CVSS 7.5). Merged with existing maxBytes protection in voice-call. * fix(security): unify Error objects and lint fixes in webhook timeouts (#6762) * fix: prevent plugins from auto-enabling without user consent (#3961) Changes default plugin enabled state from true to false in enablePluginEntry(). Preserves existing enabled:true values. Fixes #3932. * fix: apply hierarchical mediaMaxMb config to all channels (#8749) Generalizes resolveAttachmentMaxBytes() to use account → channel → global config resolution for all channels, not just BlueBubbles. Fixes #7847. * fix(bluebubbles): sanitize attachment filenames against header injection (#10333) Strip ", \r, \n, and \\ from filenames after path.basename() to prevent multipart Content-Disposition header injection (CWE-93, CVSS 5.4). Also adds sanitization to setGroupIconBlueBubbles which had zero filename sanitization. * fix(lint): exclude extensions/ from Oxlint preflight check (#9313) Extensions use PluginRuntime|null patterns that trigger no-redundant-type-constituents because PluginRuntime resolves to any. Excluding extensions/ from Oxlint unblocks user upgrades. Re-applies the approach from closed PR #10087. * fix(bluebubbles): add tempGuid to createNewChatWithMessage payload (#7745) Non-Private-API mode (AppleScript) requires tempGuid in send payloads. The main sendMessageBlueBubbles already had it, but createNewChatWithMessage was missing it, causing 400 errors for new chat creation without Private API. * fix: send stop-typing signal when run ends with NO_REPLY (#8785) Adds onCleanup callback to the typing controller that fires when the controller is cleaned up while typing was active (e.g., after NO_REPLY). 
Channels using createTypingCallbacks automatically get stop-typing on cleanup. This prevents the typing indicator from lingering in group chats when the agent decides not to reply. * fix(telegram): deduplicate skill commands in multi-agent setup (#5717) Two fixes: 1. Skip duplicate workspace dirs when listing skill commands across agents. Multiple agents sharing the same workspace would produce duplicate commands with _2, _3 suffixes. 2. Clear stale commands via deleteMyCommands before registering new ones. Commands from deleted skills now get cleaned up on restart. * fix: add size limits to unbounded in-memory caches (#4948) Adds max-size caps with oldest-entry eviction to prevent OOM in long-running deployments: - BlueBubbles serverInfoCache: 64 entries (already has TTL) - Google Chat authCache: 32 entries - Matrix directRoomCache: 1024 entries - Discord presenceCache: 5000 entries per account * fix: address review concerns (#11093) - Chain deleteMyCommands → setMyCommands to prevent race condition (#5717) - Rename enablePluginEntry to registerPluginEntry (now sets enabled: false) - Add Slow-Loris timeout test for readJsonBody (#6023)
194 lines
6.4 KiB
TypeScript
194 lines
6.4 KiB
TypeScript
import type { HumanDelayConfig } from "../../config/types.js";
|
|
import type { GetReplyOptions, ReplyPayload } from "../types.js";
|
|
import type { ResponsePrefixContext } from "./response-prefix-template.js";
|
|
import type { TypingController } from "./typing.js";
|
|
import { sleep } from "../../utils.js";
|
|
import { normalizeReplyPayload, type NormalizeReplySkipReason } from "./normalize-reply.js";
|
|
|
|
/** Category of outbound reply: tool result, streamed block, or final message. */
export type ReplyDispatchKind = "tool" | "block" | "final";

/** Receives errors thrown by the deliverer, tagged with the reply kind. */
type ReplyDispatchErrorHandler = (err: unknown, info: { kind: ReplyDispatchKind }) => void;

/** Receives payloads dropped during normalization, with kind and skip reason. */
type ReplyDispatchSkipHandler = (
  payload: ReplyPayload,
  info: { kind: ReplyDispatchKind; reason: NormalizeReplySkipReason },
) => void;

/** Performs the channel-specific send of one normalized payload. */
type ReplyDispatchDeliverer = (
  payload: ReplyPayload,
  info: { kind: ReplyDispatchKind },
) => Promise<void>;
|
|
|
|
const DEFAULT_HUMAN_DELAY_MIN_MS = 800;
|
|
const DEFAULT_HUMAN_DELAY_MAX_MS = 2500;
|
|
|
|
/** Generate a random delay within the configured range. */
|
|
function getHumanDelay(config: HumanDelayConfig | undefined): number {
|
|
const mode = config?.mode ?? "off";
|
|
if (mode === "off") {
|
|
return 0;
|
|
}
|
|
const min =
|
|
mode === "custom" ? (config?.minMs ?? DEFAULT_HUMAN_DELAY_MIN_MS) : DEFAULT_HUMAN_DELAY_MIN_MS;
|
|
const max =
|
|
mode === "custom" ? (config?.maxMs ?? DEFAULT_HUMAN_DELAY_MAX_MS) : DEFAULT_HUMAN_DELAY_MAX_MS;
|
|
if (max <= min) {
|
|
return min;
|
|
}
|
|
return Math.floor(Math.random() * (max - min + 1)) + min;
|
|
}
|
|
|
|
/** Configuration for createReplyDispatcher. */
export type ReplyDispatcherOptions = {
  /** Performs the actual channel send for each normalized payload. */
  deliver: ReplyDispatchDeliverer;
  /** Optional prefix template applied during payload normalization. */
  responsePrefix?: string;
  /** Static context for response prefix template interpolation. */
  responsePrefixContext?: ResponsePrefixContext;
  /** Dynamic context provider for response prefix template interpolation.
   * Called at normalization time, after model selection is complete. */
  responsePrefixContextProvider?: () => ResponsePrefixContext;
  /** Forwarded to normalization; fires when a heartbeat marker is stripped. */
  onHeartbeatStrip?: () => void;
  /** Fires when the last in-flight delivery settles (pending count hits 0). */
  onIdle?: () => void;
  /** Receives delivery errors; failures never break the send chain. */
  onError?: ReplyDispatchErrorHandler;
  // AIDEV-NOTE: onSkip lets channels detect silent/empty drops (e.g. Telegram empty-response fallback).
  onSkip?: ReplyDispatchSkipHandler;
  /** Human-like delay between block replies for natural rhythm. */
  humanDelay?: HumanDelayConfig;
};
|
|
|
|
/** Options for createReplyDispatcherWithTyping. onIdle is re-declared here
 * because the typing wrapper intercepts it to also mark the typing
 * controller idle before invoking the caller's handler. */
export type ReplyDispatcherWithTypingOptions = Omit<ReplyDispatcherOptions, "onIdle"> & {
  /** Forwarded to GetReplyOptions.onReplyStart. */
  onReplyStart?: () => Promise<void> | void;
  /** Fires after the typing controller is marked dispatch-idle. */
  onIdle?: () => void;
  /** Called when the typing controller is cleaned up (e.g., on NO_REPLY). */
  onCleanup?: () => void;
};
|
|
|
|
/** Return shape of createReplyDispatcherWithTyping. */
type ReplyDispatcherWithTypingResult = {
  dispatcher: ReplyDispatcher;
  /** Callbacks to forward into the reply pipeline so the typing lifecycle is wired up. */
  replyOptions: Pick<GetReplyOptions, "onReplyStart" | "onTypingController" | "onTypingCleanup">;
  /** Manually signal idle: marks the typing controller dispatch-idle and fires onIdle. */
  markDispatchIdle: () => void;
};
|
|
|
|
/** Public dispatcher interface. Send methods return false when the payload
 * was dropped during normalization (nothing was queued). */
export type ReplyDispatcher = {
  sendToolResult: (payload: ReplyPayload) => boolean;
  sendBlockReply: (payload: ReplyPayload) => boolean;
  sendFinalReply: (payload: ReplyPayload) => boolean;
  /** Resolves when every delivery queued so far has settled. */
  waitForIdle: () => Promise<void>;
  /** Snapshot of how many payloads of each kind were accepted. */
  getQueuedCounts: () => Record<ReplyDispatchKind, number>;
};
|
|
|
|
/** Subset of dispatcher options consumed by normalizeReplyPayloadInternal,
 * with onSkip narrowed to just the skip reason (the dispatch kind is
 * re-attached by the caller). */
type NormalizeReplyPayloadInternalOptions = Pick<
  ReplyDispatcherOptions,
  "responsePrefix" | "responsePrefixContext" | "responsePrefixContextProvider" | "onHeartbeatStrip"
> & {
  onSkip?: (reason: NormalizeReplySkipReason) => void;
};
|
|
|
|
function normalizeReplyPayloadInternal(
|
|
payload: ReplyPayload,
|
|
opts: NormalizeReplyPayloadInternalOptions,
|
|
): ReplyPayload | null {
|
|
// Prefer dynamic context provider over static context
|
|
const prefixContext = opts.responsePrefixContextProvider?.() ?? opts.responsePrefixContext;
|
|
|
|
return normalizeReplyPayload(payload, {
|
|
responsePrefix: opts.responsePrefix,
|
|
responsePrefixContext: prefixContext,
|
|
onHeartbeatStrip: opts.onHeartbeatStrip,
|
|
onSkip: opts.onSkip,
|
|
});
|
|
}
|
|
|
|
/**
 * Create a dispatcher that serializes outbound replies (tool results,
 * streamed blocks, final message) onto a single promise chain so they are
 * delivered in order, with optional human-like pacing between block replies.
 */
export function createReplyDispatcher(options: ReplyDispatcherOptions): ReplyDispatcher {
  let sendChain: Promise<void> = Promise.resolve();
  // Track in-flight deliveries so we can emit a reliable "idle" signal.
  let pending = 0;
  // Track whether we've sent a block reply (for human delay - skip delay on first block).
  let sentFirstBlock = false;
  // Serialize outbound replies to preserve tool/block/final order.
  const queuedCounts: Record<ReplyDispatchKind, number> = {
    tool: 0,
    block: 0,
    final: 0,
  };

  // Normalize, count, and append one delivery to the serial send chain.
  // Returns false when normalization drops the payload (nothing queued).
  const enqueue = (kind: ReplyDispatchKind, payload: ReplyPayload) => {
    const normalized = normalizeReplyPayloadInternal(payload, {
      responsePrefix: options.responsePrefix,
      responsePrefixContext: options.responsePrefixContext,
      responsePrefixContextProvider: options.responsePrefixContextProvider,
      onHeartbeatStrip: options.onHeartbeatStrip,
      // Re-attach the dispatch kind before surfacing the skip to the channel.
      onSkip: (reason) => options.onSkip?.(payload, { kind, reason }),
    });
    if (!normalized) {
      return false;
    }
    queuedCounts[kind] += 1;
    pending += 1;

    // Determine if we should add human-like delay (only for block replies after the first).
    // Captured before flipping sentFirstBlock so the first block is never delayed.
    const shouldDelay = kind === "block" && sentFirstBlock;
    if (kind === "block") {
      sentFirstBlock = true;
    }

    sendChain = sendChain
      .then(async () => {
        // Add human-like delay between block replies for natural rhythm.
        if (shouldDelay) {
          const delayMs = getHumanDelay(options.humanDelay);
          if (delayMs > 0) {
            await sleep(delayMs);
          }
        }
        await options.deliver(normalized, { kind });
      })
      .catch((err) => {
        // Report the failure but keep the chain alive for later deliveries.
        options.onError?.(err, { kind });
      })
      .finally(() => {
        pending -= 1;
        // Fire onIdle only when the last in-flight delivery settles.
        if (pending === 0) {
          options.onIdle?.();
        }
      });
    return true;
  };

  return {
    sendToolResult: (payload) => enqueue("tool", payload),
    sendBlockReply: (payload) => enqueue("block", payload),
    sendFinalReply: (payload) => enqueue("final", payload),
    // Resolves once the chain as captured at call time has fully settled.
    waitForIdle: () => sendChain,
    // Return a copy so callers cannot mutate the internal counters.
    getQueuedCounts: () => ({ ...queuedCounts }),
  };
}
|
|
|
|
export function createReplyDispatcherWithTyping(
|
|
options: ReplyDispatcherWithTypingOptions,
|
|
): ReplyDispatcherWithTypingResult {
|
|
const { onReplyStart, onIdle, onCleanup, ...dispatcherOptions } = options;
|
|
let typingController: TypingController | undefined;
|
|
const dispatcher = createReplyDispatcher({
|
|
...dispatcherOptions,
|
|
onIdle: () => {
|
|
typingController?.markDispatchIdle();
|
|
onIdle?.();
|
|
},
|
|
});
|
|
|
|
return {
|
|
dispatcher,
|
|
replyOptions: {
|
|
onReplyStart,
|
|
onTypingCleanup: onCleanup,
|
|
onTypingController: (typing) => {
|
|
typingController = typing;
|
|
},
|
|
},
|
|
markDispatchIdle: () => {
|
|
typingController?.markDispatchIdle();
|
|
onIdle?.();
|
|
},
|
|
};
|
|
}
|