Merge 32720e044ff80329f23ef54ed7e47d157df9c81e into 5e417b44e1540f528d2ae63e3e20229a902d1db2

This commit is contained in:
Chandika Jayasundara 2026-03-21 14:00:25 +11:00 committed by GitHub
commit 7cd9ddbead
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 162 additions and 23 deletions

View File

@@ -2552,7 +2552,7 @@ export async function runEmbeddedAttempt(
});
const {
assistantTexts,
assistantTexts: rawAssistantTexts,
toolMetas,
unsubscribe,
waitForCompactionRetry,
@@ -2566,6 +2566,7 @@ export async function runEmbeddedAttempt(
getUsageTotals,
getCompactionCount,
} = subscription;
let assistantTexts = rawAssistantTexts;
const queueHandle: EmbeddedPiQueueHandle = {
queueMessage: async (text: string) => {
@@ -2800,9 +2801,10 @@ export async function runEmbeddedAttempt(
);
}
// Run llm_input hook — plugins may modify the user prompt
if (hookRunner?.hasHooks("llm_input")) {
hookRunner
.runLlmInput(
try {
const llmInputResult = await hookRunner.runLlmInput(
{
runId: params.runId,
sessionId: params.sessionId,
@@ -2822,10 +2824,14 @@ export async function runEmbeddedAttempt(
trigger: params.trigger,
channelId: params.messageChannel ?? params.messageProvider ?? undefined,
},
)
.catch((err) => {
log.warn(`llm_input hook failed: ${String(err)}`);
});
);
// Apply any modifications from the hook
if (llmInputResult?.prompt) {
effectivePrompt = llmInputResult.prompt;
}
} catch (err) {
log.warn(`llm_input hook failed: ${String(err)}`);
}
}
const btwSnapshotMessages = activeSession.messages.slice(-MAX_BTW_SNAPSHOT_MESSAGES);
@@ -3129,9 +3135,10 @@ export async function runEmbeddedAttempt(
)
.map((entry) => ({ toolName: entry.toolName, meta: entry.meta }));
// Run llm_output hook — plugins may modify assistantTexts
if (hookRunner?.hasHooks("llm_output")) {
hookRunner
.runLlmOutput(
try {
const llmOutputResult = await hookRunner.runLlmOutput(
{
runId: params.runId,
sessionId: params.sessionId,
@@ -3150,10 +3157,14 @@ export async function runEmbeddedAttempt(
trigger: params.trigger,
channelId: params.messageChannel ?? params.messageProvider ?? undefined,
},
)
.catch((err) => {
log.warn(`llm_output hook failed: ${String(err)}`);
});
);
// Apply any modifications from the hook
if (llmOutputResult?.assistantTexts) {
assistantTexts = llmOutputResult.assistantTexts;
}
} catch (err) {
log.warn(`llm_output hook failed: ${String(err)}`);
}
}
return {

View File

@@ -23,7 +23,9 @@ import type {
PluginHookInboundClaimEvent,
PluginHookInboundClaimResult,
PluginHookLlmInputEvent,
PluginHookLlmInputResult,
PluginHookLlmOutputEvent,
PluginHookLlmOutputResult,
PluginHookBeforeResetEvent,
PluginHookBeforeToolCallEvent,
PluginHookBeforeToolCallResult,
@@ -65,7 +67,9 @@ export type {
PluginHookBeforePromptBuildEvent,
PluginHookBeforePromptBuildResult,
PluginHookLlmInputEvent,
PluginHookLlmInputResult,
PluginHookLlmOutputEvent,
PluginHookLlmOutputResult,
PluginHookAgentEndEvent,
PluginHookBeforeCompactionEvent,
PluginHookBeforeResetEvent,
@@ -487,20 +491,42 @@ export function createHookRunner(registry: PluginRegistry, options: HookRunnerOp
/**
* Run llm_input hook.
* Allows plugins to observe the exact input payload sent to the LLM.
* Runs in parallel (fire-and-forget).
* Allows plugins to observe or modify the input payload sent to the LLM.
* Plugins can return `{ prompt }` to transform the input,
* or return void/undefined for observation-only (backward compatible).
*/
async function runLlmInput(event: PluginHookLlmInputEvent, ctx: PluginHookAgentContext) {
return runVoidHook("llm_input", event, ctx);
async function runLlmInput(
  event: PluginHookLlmInputEvent,
  ctx: PluginHookAgentContext,
): Promise<PluginHookLlmInputResult | undefined> {
  // Fold successive hook results: a later hook's prompt wins, while a hook
  // that returns nothing for the field leaves the accumulated value intact
  // (keeps observation-only handlers backward compatible).
  const mergeResults = (
    acc: PluginHookLlmInputResult | undefined,
    next: PluginHookLlmInputResult,
  ): PluginHookLlmInputResult => ({ prompt: next.prompt ?? acc?.prompt });
  return runModifyingHook<"llm_input", PluginHookLlmInputResult>("llm_input", event, ctx, mergeResults);
}
/**
* Run llm_output hook.
* Allows plugins to observe the exact output payload returned by the LLM.
* Runs in parallel (fire-and-forget).
* Allows plugins to observe or modify the output payload returned by the LLM.
* Plugins can return `{ assistantTexts }` to transform the output,
* or return void/undefined for observation-only (backward compatible).
*/
async function runLlmOutput(event: PluginHookLlmOutputEvent, ctx: PluginHookAgentContext) {
return runVoidHook("llm_output", event, ctx);
async function runLlmOutput(
  event: PluginHookLlmOutputEvent,
  ctx: PluginHookAgentContext,
): Promise<PluginHookLlmOutputResult | undefined> {
  // Fold successive hook results: a later hook's assistantTexts wins, while a
  // hook that returns nothing for the field leaves the accumulated value
  // intact (keeps observation-only handlers backward compatible).
  const mergeResults = (
    acc: PluginHookLlmOutputResult | undefined,
    next: PluginHookLlmOutputResult,
  ): PluginHookLlmOutputResult => ({ assistantTexts: next.assistantTexts ?? acc?.assistantTexts });
  return runModifyingHook<"llm_output", PluginHookLlmOutputResult>("llm_output", event, ctx, mergeResults);
}
/**

View File

@@ -1550,6 +1550,15 @@ export type PluginHookLlmInputEvent = {
imagesCount: number;
};
// llm_input hook result (when used as a modifying hook)
/**
 * Result returned by an `llm_input` hook handler.
 * All fields are optional; a handler may also return void/undefined to stay
 * observation-only (see the `llm_input` entry in PluginHookHandlerMap).
 */
export type PluginHookLlmInputResult = {
  /** Modified prompt text. If set, replaces the original prompt. */
  prompt?: string;
  // Note: systemPrompt modification is not yet supported — the system prompt
  // is finalised earlier in the pipeline. Will be added when late-stage
  // system prompt overrides are plumbed through.
};
// llm_output hook
export type PluginHookLlmOutputEvent = {
runId: string;
@@ -1567,6 +1576,12 @@ export type PluginHookLlmOutputEvent = {
};
};
// llm_output hook result (when used as a modifying hook)
/**
 * Result returned by an `llm_output` hook handler.
 * The field is optional; a handler may also return void/undefined to stay
 * observation-only (see the `llm_output` entry in PluginHookHandlerMap).
 */
export type PluginHookLlmOutputResult = {
  /** Modified assistant response texts. If set, replaces the originals. */
  assistantTexts?: string[];
};
// agent_end hook
export type PluginHookAgentEndEvent = {
messages: unknown[];
@@ -1882,11 +1897,14 @@ export type PluginHookHandlerMap = {
event: PluginHookBeforeAgentStartEvent,
ctx: PluginHookAgentContext,
) => Promise<PluginHookBeforeAgentStartResult | void> | PluginHookBeforeAgentStartResult | void;
llm_input: (event: PluginHookLlmInputEvent, ctx: PluginHookAgentContext) => Promise<void> | void;
llm_input: (
event: PluginHookLlmInputEvent,
ctx: PluginHookAgentContext,
) => Promise<PluginHookLlmInputResult | void> | PluginHookLlmInputResult | void;
llm_output: (
event: PluginHookLlmOutputEvent,
ctx: PluginHookAgentContext,
) => Promise<void> | void;
) => Promise<PluginHookLlmOutputResult | void> | PluginHookLlmOutputResult | void;
agent_end: (event: PluginHookAgentEndEvent, ctx: PluginHookAgentContext) => Promise<void> | void;
before_compaction: (
event: PluginHookBeforeCompactionEvent,

View File

@@ -69,4 +69,88 @@ describe("llm hook runner methods", () => {
expect(runner.hasHooks("llm_input")).toBe(true);
expect(runner.hasHooks("llm_output")).toBe(false);
});
it("runLlmInput returns modified prompt from hook", async () => {
  // A hook that resolves to a modifying result — its prompt should come back.
  const hook = vi.fn().mockResolvedValue({ prompt: "redacted prompt" });
  const runner = createHookRunner(
    createMockPluginRegistry([{ hookName: "llm_input", handler: hook }]),
  );
  const event = {
    runId: "run-1",
    sessionId: "session-1",
    provider: "openai",
    model: "gpt-5",
    systemPrompt: "be helpful",
    prompt: "original prompt",
    historyMessages: [],
    imagesCount: 0,
  };
  const ctx = { agentId: "main", sessionId: "session-1" };
  const result = await runner.runLlmInput(event, ctx);
  expect(result).toEqual(expect.objectContaining({ prompt: "redacted prompt" }));
});
it("runLlmInput returns undefined when hook returns void (backward compat)", async () => {
  // An observation-only hook resolves to undefined — no modification expected.
  const observerOnly = vi.fn();
  const registry = createMockPluginRegistry([
    { hookName: "llm_input", handler: observerOnly },
  ]);
  const event = {
    runId: "run-1",
    sessionId: "session-1",
    provider: "openai",
    model: "gpt-5",
    prompt: "hello",
    historyMessages: [],
    imagesCount: 0,
  };
  const result = await createHookRunner(registry).runLlmInput(event, {
    agentId: "main",
    sessionId: "session-1",
  });
  expect(result).toBeUndefined();
});
it("runLlmOutput returns modified assistantTexts from hook", async () => {
  // A hook that resolves to replacement texts — they should come back.
  const hook = vi.fn().mockResolvedValue({ assistantTexts: ["rehydrated response"] });
  const registry = createMockPluginRegistry([{ hookName: "llm_output", handler: hook }]);
  const event = {
    runId: "run-1",
    sessionId: "session-1",
    provider: "openai",
    model: "gpt-5",
    assistantTexts: ["raw «PERSON_001» response"],
    lastAssistant: { role: "assistant", content: "raw" },
    usage: { input: 10, output: 20, total: 30 },
  };
  const result = await createHookRunner(registry).runLlmOutput(event, {
    agentId: "main",
    sessionId: "session-1",
  });
  expect(result).toEqual(expect.objectContaining({ assistantTexts: ["rehydrated response"] }));
});
it("runLlmOutput returns undefined when hook returns void (backward compat)", async () => {
  // An observation-only hook resolves to undefined — no modification expected.
  const observerOnly = vi.fn();
  const registry = createMockPluginRegistry([
    { hookName: "llm_output", handler: observerOnly },
  ]);
  const event = {
    runId: "run-1",
    sessionId: "session-1",
    provider: "openai",
    model: "gpt-5",
    assistantTexts: ["hi"],
    usage: { input: 10, output: 20, total: 30 },
  };
  const result = await createHookRunner(registry).runLlmOutput(event, {
    agentId: "main",
    sessionId: "session-1",
  });
  expect(result).toBeUndefined();
});
});