chore: bump version to 2.0.16 and enhance feedback API message handling

Update package versions for denchclaw and dench to 2.0.16. Refactor feedback API to convert chat lines into PostHog-compatible messages, preserving chronological order and including tool calls and results. Improve test cases to reflect changes in message handling.
This commit is contained in:
kumarabhirup 2026-03-05 21:38:55 -08:00
parent 38b062a71e
commit ec73141a01
No known key found for this signature in database
GPG Key ID: DB7CA2289CAB0167
5 changed files with 111 additions and 45 deletions

View File

@ -12,14 +12,69 @@ type ChatLine = {
parts?: Array<Record<string, unknown>>;
};
function extractTextContent(line: ChatLine): string {
if (line.parts) {
return line.parts
.filter((p) => p.type === "text" && typeof p.text === "string")
.map((p) => p.text as string)
.join("");
/**
* Convert a persisted chat line into a PostHog-compatible message,
* preserving tool calls, tool results, and reasoning blocks.
*/
function toPostHogMessage(line: ChatLine): Record<string, unknown> {
const msg: Record<string, unknown> = { role: line.role };
if (!line.parts || line.parts.length === 0) {
msg.content = line.content;
return msg;
}
return line.content;
const contentBlocks: unknown[] = [];
const toolCalls: unknown[] = [];
for (const part of line.parts) {
switch (part.type) {
case "text":
if (typeof part.text === "string" && part.text) {
contentBlocks.push({ type: "text", text: part.text });
}
break;
case "tool-invocation":
toolCalls.push({
type: "function",
id: part.toolCallId,
function: {
name: part.toolName,
arguments:
typeof part.args === "string"
? part.args
: JSON.stringify(part.args ?? {}),
},
});
if (part.result && typeof part.result === "object") {
contentBlocks.push({
type: "tool_result",
tool_call_id: part.toolCallId,
content: (part.result as Record<string, unknown>).text ?? "",
});
}
break;
case "reasoning":
if (typeof part.text === "string" && part.text) {
contentBlocks.push({ type: "thinking", text: part.text });
}
break;
}
}
if (contentBlocks.length === 1 && toolCalls.length === 0 && (contentBlocks[0] as any)?.type === "text") {
msg.content = (contentBlocks[0] as any).text;
} else if (contentBlocks.length > 0) {
msg.content = contentBlocks;
} else {
msg.content = line.content || null;
}
if (toolCalls.length > 0) {
msg.tool_calls = toolCalls;
}
return msg;
}
/**
@ -54,7 +109,6 @@ export async function POST(req: Request) {
})
.filter((m): m is ChatLine => m !== null);
// Include all messages up to (and including) the feedback target.
let cutoff = lines.length;
if (messageId) {
const idx = lines.findIndex((m) => m.id === messageId);
@ -62,14 +116,9 @@ export async function POST(req: Request) {
}
const conversation = lines.slice(0, cutoff);
const chronological = conversation.map((m) => ({
role: m.role as "user" | "assistant",
content: extractTextContent(m),
}));
const allMessages = conversation.map(toPostHogMessage);
const lastAssistant = [...conversation]
.reverse()
.find((m) => m.role === "assistant");
const lastAssistantIdx = conversation.findLastIndex((m) => m.role === "assistant");
trackServer(
"$ai_trace",
@ -77,9 +126,9 @@ export async function POST(req: Request) {
$ai_trace_id: sessionId,
$ai_session_id: sessionId,
$ai_span_name: "chat_session",
$ai_input_state: chronological.length > 0 ? chronological : undefined,
$ai_output_state: lastAssistant
? [{ role: "assistant" as const, content: extractTextContent(lastAssistant) }]
$ai_input_state: allMessages.length > 0 ? allMessages : undefined,
$ai_output_state: lastAssistantIdx >= 0
? [allMessages[lastAssistantIdx]]
: undefined,
},
distinctId,

View File

@ -122,8 +122,8 @@ export function normalizeOutputForPostHog(messages: unknown): unknown[] | undefi
/**
* Build full conversation state for the $ai_trace event.
* Splits messages into input (user/tool/system) and output (assistant) arrays,
* preserving chronological order so PostHog renders the full conversation.
 * Preserves chronological message order (user → assistant → tool → assistant)
* so PostHog renders the conversation turn-by-turn with tool calls inline.
*/
export function buildTraceState(
messages: unknown,
@ -131,8 +131,8 @@ export function buildTraceState(
): { inputState: unknown; outputState: unknown } {
if (!Array.isArray(messages)) return { inputState: undefined, outputState: undefined };
const inputMessages: unknown[] = [];
const outputMessages: unknown[] = [];
const chronological: unknown[] = [];
let lastAssistantEntry: Record<string, unknown> | undefined;
for (const msg of messages) {
if (!msg || typeof msg !== "object") continue;
@ -160,19 +160,20 @@ export function buildTraceState(
}));
}
outputMessages.push(entry);
chronological.push(entry);
lastAssistantEntry = entry;
} else if (m.role === "user" || m.role === "tool" || m.role === "toolResult" || m.role === "system") {
const content = privacyMode ? "[REDACTED]" : extractText();
const entry: Record<string, unknown> = { role: m.role, content };
if (m.name) entry.name = m.name;
if (m.toolName) entry.toolName = m.toolName;
inputMessages.push(entry);
chronological.push(entry);
}
}
return {
inputState: inputMessages.length > 0 ? inputMessages : undefined,
outputState: outputMessages.length > 0 ? outputMessages : undefined,
inputState: chronological.length > 0 ? chronological : undefined,
outputState: lastAssistantEntry ? [lastAssistantEntry] : undefined,
};
}
@ -260,7 +261,7 @@ export function emitGeneration(
}
properties.$ai_input = sanitizeMessages(
extractInputMessages(event.messages) ?? trace.input,
event.messages ?? trace.input,
privacyMode,
);

View File

@ -1,6 +1,6 @@
{
"name": "denchclaw",
"version": "2.0.16",
"description": "Fully Managed OpenClaw Framework for managing your CRM, Sales Automation and Outreach agents. The only local productivity tool you need.",
"keywords": [],
"homepage": "https://github.com/DenchHQ/DenchClaw#readme",

View File

@ -1,6 +1,6 @@
{
"name": "dench",
"version": "2.0.16",
"description": "Shorthand alias for denchclaw — AI-powered CRM platform CLI",
"license": "MIT",
"repository": {
@ -16,7 +16,7 @@
],
"type": "module",
"dependencies": {
"denchclaw": "^2.0.16"
},
"engines": {
"node": ">=22.12.0"

View File

@ -313,7 +313,7 @@ describe("emitGeneration", () => {
expect(ph.capture.mock.calls[0][0].properties.$ai_latency).toBe(5);
});
it("includes user messages from event.messages in $ai_input even when trace.input is empty", () => {
it("includes full chronological conversation in $ai_input from event.messages", () => {
traceCtx.startTrace("s", "r");
const messages = [
@ -323,7 +323,10 @@ describe("emitGeneration", () => {
emitGeneration(ph, traceCtx, "s", { messages }, false);
const input = ph.capture.mock.calls[0][0].properties.$ai_input;
expect(input).toEqual([{ role: "user", content: "what is this" }]);
expect(input).toEqual([
{ role: "user", content: "what is this" },
{ role: "assistant", content: "It's a config file." },
]);
});
it("prefers event.messages over trace.input for $ai_input", () => {
@ -337,10 +340,13 @@ describe("emitGeneration", () => {
emitGeneration(ph, traceCtx, "s", { messages }, false);
const input = ph.capture.mock.calls[0][0].properties.$ai_input;
expect(input).toEqual([{ role: "user", content: "hello" }]);
expect(input).toEqual([
{ role: "user", content: "hello" },
{ role: "assistant", content: "hi" },
]);
});
it("falls back to trace.input when event.messages has no input messages", () => {
it("falls back to trace.input when event.messages is absent", () => {
traceCtx.startTrace("s", "r");
traceCtx.setInput("s", [{ role: "user", content: "from trace" }], false);
@ -411,7 +417,7 @@ describe("emitToolSpan", () => {
});
describe("buildTraceState", () => {
it("includes all user messages in inputState and all assistant messages in outputState (full conversation)", () => {
it("preserves chronological order with all messages in inputState", () => {
const messages = [
{ role: "user", content: "Question 1" },
{ role: "assistant", content: "Answer 1" },
@ -421,15 +427,16 @@ describe("buildTraceState", () => {
const { inputState, outputState } = buildTraceState(messages, false);
expect(inputState).toEqual([
{ role: "user", content: "Question 1" },
{ role: "assistant", content: "Answer 1" },
{ role: "user", content: "Question 2" },
{ role: "assistant", content: "Answer 2" },
]);
expect(outputState).toEqual([
{ role: "assistant", content: "Answer 1" },
{ role: "assistant", content: "Answer 2" },
]);
});
it("includes tool result messages in inputState (user sees tool activity)", () => {
it("interleaves tool calls and results chronologically", () => {
const messages = [
{ role: "user", content: "run ls" },
{ role: "assistant", content: "Running..." },
@ -437,10 +444,13 @@ describe("buildTraceState", () => {
{ role: "assistant", content: "Done!" },
];
const { inputState, outputState } = buildTraceState(messages, false);
expect(inputState).toHaveLength(2);
expect((inputState as any[])[1].role).toBe("tool");
expect((inputState as any[])[1].name).toBe("exec");
expect(outputState).toHaveLength(2);
expect(inputState).toHaveLength(4);
expect((inputState as any[])[0].role).toBe("user");
expect((inputState as any[])[1].role).toBe("assistant");
expect((inputState as any[])[2].role).toBe("tool");
expect((inputState as any[])[2].name).toBe("exec");
expect((inputState as any[])[3].role).toBe("assistant");
expect(outputState).toEqual([{ role: "assistant", content: "Done!" }]);
});
it("redacts content in privacy mode but keeps role and tool metadata", () => {
@ -450,7 +460,9 @@ describe("buildTraceState", () => {
];
const { inputState, outputState } = buildTraceState(messages, true);
expect((inputState as any[])[0]).toEqual({ role: "user", content: "[REDACTED]" });
expect((outputState as any[])[0].content).toBe("[REDACTED]");
expect((inputState as any[])[1].content).toBe("[REDACTED]");
expect((inputState as any[])[1].tool_calls).toEqual([{ type: "function", function: { name: "exec" } }]);
expect(outputState).toHaveLength(1);
expect((outputState as any[])[0].tool_calls).toEqual([{ type: "function", function: { name: "exec" } }]);
});
@ -462,7 +474,8 @@ describe("buildTraceState", () => {
tool_calls: [{ function: { name: "web_search" } }],
},
];
const { outputState } = buildTraceState(messages, true);
const { inputState, outputState } = buildTraceState(messages, true);
expect((inputState as any[])[0].tool_calls).toEqual([{ type: "function", function: { name: "web_search" } }]);
expect((outputState as any[])[0].tool_calls).toEqual([{ type: "function", function: { name: "web_search" } }]);
});
@ -473,7 +486,7 @@ describe("buildTraceState", () => {
});
describe("emitTrace", () => {
it("emits $ai_trace with full conversation state from buildTraceState", () => {
it("emits $ai_trace with chronological conversation in inputState", () => {
const ph = createMockPostHog();
const traceCtx = new TraceContextManager();
traceCtx.startTrace("sess-1", "r");
@ -487,7 +500,10 @@ describe("emitTrace", () => {
const props = ph.capture.mock.calls[0][0].properties;
expect(props.$ai_trace_id).toBe(traceCtx.getTrace("sess-1")!.traceId);
expect(props.$ai_input_state).toEqual([{ role: "user", content: "Hello" }]);
expect(props.$ai_input_state).toEqual([
{ role: "user", content: "Hello" },
{ role: "assistant", content: "Hi there!" },
]);
expect(props.$ai_output_state).toEqual([{ role: "assistant", content: "Hi there!" }]);
});