Compare commits

...

2 Commits

Author SHA1 Message Date
Altay
b391f8e4bb
test(agents): cover lifecycle consoleMessage logging 2026-03-10 20:08:31 +03:00
jiarung
0ed550d09a fix(logging): include model and provider in overload/error log
When an embedded agent run ends with an error (e.g. overloaded_error),
the warn log now includes the model and provider that triggered the error.

Before:
  embedded run agent end: runId=xxx isError=true error=The AI service is temporarily overloaded...

After:
  embedded run agent end: runId=xxx isError=true model=claude-sonnet-4-6 provider=anthropic error=The AI service is temporarily overloaded...

This makes it easy to correlate which model/provider is failing
without needing to cross-reference token-usage.json.
2026-03-10 11:13:35 +00:00
2 changed files with 31 additions and 3 deletions

View File

@ -70,7 +70,7 @@ describe("handleAgentEnd", () => {
});
});
it("attaches raw provider error metadata without changing the console message", () => {
it("attaches raw provider error metadata and includes model/provider in console output", () => {
const ctx = createContext({
role: "assistant",
stopReason: "error",
@ -91,9 +91,35 @@ describe("handleAgentEnd", () => {
error: "The AI service is temporarily overloaded. Please try again in a moment.",
failoverReason: "overloaded",
providerErrorType: "overloaded_error",
consoleMessage:
"embedded run agent end: runId=run-1 isError=true model=claude-test provider=anthropic error=The AI service is temporarily overloaded. Please try again in a moment.",
});
});
it("sanitizes model and provider before writing consoleMessage", () => {
  // Provider carries an OSC-8 hyperlink escape sequence; model carries
  // tab/newline whitespace — both must be neutralized before logging.
  const context = createContext({
    role: "assistant",
    stopReason: "error",
    provider: "anthropic\u001b]8;;https://evil.test\u0007",
    model: "claude\tsonnet\n4",
    errorMessage: "connection refused",
    content: [{ type: "text", text: "" }],
  });
  handleAgentEnd(context);
  const warnMock = vi.mocked(context.log.warn);
  const loggedMeta = warnMock.mock.calls[0]?.[1];
  // Expected: tab/newline collapse to single spaces; ESC and BEL bytes
  // are stripped outright from the provider string.
  expect(loggedMeta).toMatchObject({
    consoleMessage:
      "embedded run agent end: runId=run-1 isError=true model=claude sonnet 4 provider=anthropic]8;;https://evil.test error=connection refused",
  });
  for (const forbidden of ["\n", "\r", "\t", "\u001b"]) {
    expect(loggedMeta?.consoleMessage).not.toContain(forbidden);
  }
});
it("redacts logged error text before emitting lifecycle events", () => {
const onAgentEvent = vi.fn();
const ctx = createContext(

View File

@ -48,6 +48,8 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
const safeErrorText =
buildTextObservationFields(errorText).textPreview ?? "LLM request failed.";
const safeRunId = sanitizeForConsole(ctx.params.runId) ?? "-";
const safeModel = sanitizeForConsole(lastAssistant.model) ?? "unknown";
const safeProvider = sanitizeForConsole(lastAssistant.provider) ?? "unknown";
ctx.log.warn("embedded run agent end", {
event: "embedded_run_agent_end",
tags: ["error_handling", "lifecycle", "agent_end", "assistant_error"],
@ -55,10 +57,10 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
isError: true,
error: safeErrorText,
failoverReason,
provider: lastAssistant.provider,
model: lastAssistant.model,
provider: lastAssistant.provider,
...observedError,
consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true error=${safeErrorText}`,
consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true model=${safeModel} provider=${safeProvider} error=${safeErrorText}`,
});
emitAgentEvent({
runId: ctx.params.runId,