fix: expand openrouter thinking-off regression coverage (#24863) (thanks @DevSecTim)

This commit is contained in:
Peter Steinberger 2026-02-24 03:54:15 +00:00
parent b96d32c1c2
commit de0e01259a
2 changed files with 50 additions and 0 deletions

View File

@ -10,6 +10,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Providers/OpenRouter: when thinking is explicitly off, avoid injecting `reasoning.effort` so reasoning-required models can use provider defaults instead of failing request validation. (#24863) Thanks @DevSecTim.
- Status/Pairing recovery: show explicit pairing-approval command hints (including requestId when safe) when gateway probe failures report pairing-required closures. (#24771) Thanks @markmusson.
- Discord/Threading: recover missing thread parent IDs by refetching thread metadata before resolving parent channel context. (#24897) Thanks @z-x-yang.
- Web UI/i18n: load and hydrate saved locale translations during startup so non-English sessions apply immediately without manual toggling. (#24795) Thanks @chilu18.

View File

@ -261,6 +261,55 @@ describe("applyExtraParamsToAgent", () => {
expect(payloads[0]?.reasoning).toEqual({ effort: "low" });
});
it("removes legacy reasoning_effort and keeps reasoning unset when thinkingLevel is off", () => {
  // Record every payload observed by the wrapped stream function.
  const captured: Record<string, unknown>[] = [];
  // Stream stub that starts with a legacy top-level reasoning_effort field,
  // runs the onPayload hook (where the provider rewrite happens), and keeps
  // a reference so assertions can inspect the mutated payload afterwards.
  const recordingStream: StreamFn = (_model, _context, options) => {
    const body: Record<string, unknown> = { reasoning_effort: "high" };
    options?.onPayload?.(body);
    captured.push(body);
    return {} as ReturnType<StreamFn>;
  };
  const agent = { streamFn: recordingStream };
  // thinkingLevel "off": the OpenRouter adapter must strip the legacy key
  // and must NOT inject a replacement `reasoning` object.
  applyExtraParamsToAgent(agent, undefined, "openrouter", "openrouter/auto", undefined, "off");
  const model = {
    api: "openai-completions",
    provider: "openrouter",
    id: "openrouter/auto",
  } as Model<"openai-completions">;
  const context: Context = { messages: [] };
  void agent.streamFn?.(model, context, {});
  expect(captured).toHaveLength(1);
  expect(captured[0]).not.toHaveProperty("reasoning_effort");
  expect(captured[0]).not.toHaveProperty("reasoning");
});
it("does not inject effort when payload already has reasoning.max_tokens", () => {
  // Collect each payload seen by the stub so the final shape can be asserted.
  const seenPayloads: Record<string, unknown>[] = [];
  // Stream stub whose payload already carries a reasoning token budget; the
  // adapter must leave it untouched rather than layering an effort value on top.
  const stubStream: StreamFn = (_model, _context, options) => {
    const request: Record<string, unknown> = { reasoning: { max_tokens: 256 } };
    options?.onPayload?.(request);
    seenPayloads.push(request);
    return {} as ReturnType<StreamFn>;
  };
  const agent = { streamFn: stubStream };
  // Even with thinkingLevel "low", an existing reasoning.max_tokens wins.
  applyExtraParamsToAgent(agent, undefined, "openrouter", "openrouter/auto", undefined, "low");
  const model = {
    api: "openai-completions",
    provider: "openrouter",
    id: "openrouter/auto",
  } as Model<"openai-completions">;
  const context: Context = { messages: [] };
  void agent.streamFn?.(model, context, {});
  expect(seenPayloads).toHaveLength(1);
  expect(seenPayloads[0]).toEqual({ reasoning: { max_tokens: 256 } });
});
it("adds OpenRouter attribution headers to stream options", () => {
const { calls, agent } = createOptionsCaptureAgent();