Merge remote-tracking branch 'origin/main' into fix/codex-transport-path
This commit is contained in:
commit
7f5d0b6dde
@ -6,6 +6,7 @@ Docs: https://docs.openclaw.ai
|
||||
|
||||
### Changes
|
||||
|
||||
- Models/Anthropic Vertex: add core `anthropic-vertex` provider support for Claude via Google Vertex AI, including GCP auth/discovery and main run-path routing. (#43356) Thanks @sallyom and @yossiovadia.
|
||||
- Commands/btw: add `/btw` side questions for quick tool-less answers about the current session without changing future session context, with dismissible in-session TUI answers and explicit BTW replies on external channels. (#45444) Thanks @ngutman.
|
||||
- Gateway/docs: clarify that empty URL input allowlists are treated as unset, document `allowUrl: false` as the deny-all switch, and add regression coverage for the normalization path.
|
||||
- Sandbox/runtime: add pluggable sandbox backends, ship an OpenShell backend with `mirror` and `remote` workspace modes, and make sandbox list/recreate/prune backend-aware instead of Docker-only.
|
||||
@ -52,6 +53,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Docs/plugins: add the community DingTalk plugin listing to the docs catalog. (#29913) Thanks @sliverp.
|
||||
- Docs/plugins: add the community QQbot plugin listing to the docs catalog. (#29898) Thanks @sliverp.
|
||||
- Plugins/context engines: pass the embedded runner `modelId` into context-engine `assemble()` so plugins can adapt context formatting per model. (#47437) Thanks @jscianna.
|
||||
- Plugins/context engines: add transcript maintenance rewrites for context engines, preserve active-branch transcript metadata during rewrites, and harden overflow-recovery truncation to rewrite sessions under the normal session write lock. (#51191) Thanks @jalehman.
|
||||
|
||||
### Fixes
|
||||
|
||||
@ -188,6 +190,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Telegram/routing: fail loud when `message send` targets an unknown non-default Telegram `accountId`, instead of silently falling back to the channel-level bot token and sending through the wrong bot. (#50853) Thanks @hclsys.
|
||||
- Web search: align onboarding, configure, and finalize with plugin-owned provider contracts, including disabled-provider recovery, config-aware credential hooks, and runtime-visible summaries. (#50935) Thanks @gumadeiras.
|
||||
- Agents/replay: sanitize malformed assistant tool-call replay blocks before provider replay so follow-up Anthropic requests do not inherit the downstream `replace` crash. (#50005) Thanks @jalehman.
|
||||
- Plugins/context engines: retry strict legacy `assemble()` calls without the new `prompt` field when older engines reject it, preserving prompt-aware retrieval compatibility for pre-prompt plugins. (#50848) Thanks @danhdoan.
|
||||
|
||||
### Breaking
|
||||
|
||||
|
||||
65
extensions/anthropic-vertex/provider-catalog.ts
Normal file
65
extensions/anthropic-vertex/provider-catalog.ts
Normal file
@ -0,0 +1,65 @@
|
||||
import type {
|
||||
ModelDefinitionConfig,
|
||||
ModelProviderConfig,
|
||||
} from "openclaw/plugin-sdk/provider-models";
|
||||
import { resolveAnthropicVertexRegion } from "openclaw/plugin-sdk/provider-models";
|
||||
export const ANTHROPIC_VERTEX_DEFAULT_MODEL_ID = "claude-sonnet-4-6";
|
||||
const ANTHROPIC_VERTEX_DEFAULT_CONTEXT_WINDOW = 1_000_000;
|
||||
const GCP_VERTEX_CREDENTIALS_MARKER = "gcp-vertex-credentials";
|
||||
|
||||
function buildAnthropicVertexModel(params: {
|
||||
id: string;
|
||||
name: string;
|
||||
reasoning: boolean;
|
||||
input: ModelDefinitionConfig["input"];
|
||||
cost: ModelDefinitionConfig["cost"];
|
||||
maxTokens: number;
|
||||
}): ModelDefinitionConfig {
|
||||
return {
|
||||
id: params.id,
|
||||
name: params.name,
|
||||
reasoning: params.reasoning,
|
||||
input: params.input,
|
||||
cost: params.cost,
|
||||
contextWindow: ANTHROPIC_VERTEX_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: params.maxTokens,
|
||||
};
|
||||
}
|
||||
|
||||
function buildAnthropicVertexCatalog(): ModelDefinitionConfig[] {
|
||||
return [
|
||||
buildAnthropicVertexModel({
|
||||
id: "claude-opus-4-6",
|
||||
name: "Claude Opus 4.6",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
|
||||
maxTokens: 128000,
|
||||
}),
|
||||
buildAnthropicVertexModel({
|
||||
id: ANTHROPIC_VERTEX_DEFAULT_MODEL_ID,
|
||||
name: "Claude Sonnet 4.6",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
|
||||
maxTokens: 128000,
|
||||
}),
|
||||
];
|
||||
}
|
||||
|
||||
export function buildAnthropicVertexProvider(params?: {
|
||||
env?: NodeJS.ProcessEnv;
|
||||
}): ModelProviderConfig {
|
||||
const region = resolveAnthropicVertexRegion(params?.env);
|
||||
const baseUrl =
|
||||
region.toLowerCase() === "global"
|
||||
? "https://aiplatform.googleapis.com"
|
||||
: `https://${region}-aiplatform.googleapis.com`;
|
||||
|
||||
return {
|
||||
baseUrl,
|
||||
api: "anthropic-messages",
|
||||
apiKey: GCP_VERTEX_CREDENTIALS_MARKER,
|
||||
models: buildAnthropicVertexCatalog(),
|
||||
};
|
||||
}
|
||||
@ -1,6 +1,6 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js";
|
||||
import "./test-mocks.js";
|
||||
import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js";
|
||||
import type { PluginRuntime } from "./runtime-api.js";
|
||||
import { clearBlueBubblesRuntime, setBlueBubblesRuntime } from "./runtime.js";
|
||||
import { sendMessageBlueBubbles, resolveChatGuidForTarget, createChatForHandle } from "./send.js";
|
||||
|
||||
@ -62,14 +62,16 @@ export function createBlueBubblesProbeMockModule(): BlueBubblesProbeMockModule {
|
||||
export function installBlueBubblesFetchTestHooks(params: {
|
||||
mockFetch: ReturnType<typeof vi.fn>;
|
||||
privateApiStatusMock: {
|
||||
mockReset: () => unknown;
|
||||
mockReset?: () => unknown;
|
||||
mockClear?: () => unknown;
|
||||
mockReturnValue: (value: boolean | null) => unknown;
|
||||
};
|
||||
}) {
|
||||
beforeEach(() => {
|
||||
vi.stubGlobal("fetch", params.mockFetch);
|
||||
params.mockFetch.mockReset();
|
||||
params.privateApiStatusMock.mockReset();
|
||||
params.privateApiStatusMock.mockReset?.();
|
||||
params.privateApiStatusMock.mockClear?.();
|
||||
params.privateApiStatusMock.mockReturnValue(BLUE_BUBBLES_PRIVATE_API_STATUS.unknown);
|
||||
});
|
||||
|
||||
|
||||
@ -11,7 +11,7 @@ export {
|
||||
ssrfPolicyFromAllowPrivateNetwork,
|
||||
type LookupFn,
|
||||
type SsrFPolicy,
|
||||
} from "openclaw/plugin-sdk/infra-runtime";
|
||||
} from "openclaw/plugin-sdk/ssrf-runtime";
|
||||
export {
|
||||
setMatrixThreadBindingIdleTimeoutBySessionKey,
|
||||
setMatrixThreadBindingMaxAgeBySessionKey,
|
||||
|
||||
@ -53,11 +53,19 @@ function createHandlerHarness() {
|
||||
dispatcher: {},
|
||||
replyOptions: {},
|
||||
markDispatchIdle: vi.fn(),
|
||||
markRunComplete: vi.fn(),
|
||||
}),
|
||||
resolveHumanDelayConfig: vi.fn().mockReturnValue(undefined),
|
||||
dispatchReplyFromConfig: vi
|
||||
.fn()
|
||||
.mockResolvedValue({ queuedFinal: false, counts: { final: 0, block: 0, tool: 0 } }),
|
||||
withReplyDispatcher: vi.fn().mockImplementation(async ({ run, onSettled }) => {
|
||||
try {
|
||||
return await run();
|
||||
} finally {
|
||||
await onSettled?.();
|
||||
}
|
||||
}),
|
||||
},
|
||||
commands: {
|
||||
shouldHandleTextCommands: vi.fn().mockReturnValue(true),
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
import type {
|
||||
BindingTargetKind,
|
||||
SessionBindingRecord,
|
||||
} from "openclaw/plugin-sdk/conversation-runtime";
|
||||
import { resolveThreadBindingLifecycle } from "openclaw/plugin-sdk/conversation-runtime";
|
||||
} from "openclaw/plugin-sdk/thread-bindings-runtime";
|
||||
import { resolveThreadBindingLifecycle } from "openclaw/plugin-sdk/thread-bindings-runtime";
|
||||
|
||||
export type MatrixThreadBindingTargetKind = "subagent" | "acp";
|
||||
|
||||
|
||||
@ -16,30 +16,14 @@ import {
|
||||
setMatrixThreadBindingMaxAgeBySessionKey,
|
||||
} from "./thread-bindings.js";
|
||||
|
||||
const pluginSdkActual = vi.hoisted(() => ({
|
||||
writeJsonFileAtomically: null as null | ((filePath: string, value: unknown) => Promise<void>),
|
||||
}));
|
||||
|
||||
const sendMessageMatrixMock = vi.hoisted(() =>
|
||||
vi.fn(async (_to: string, _message: string, opts?: { threadId?: string }) => ({
|
||||
messageId: opts?.threadId ? "$reply" : "$root",
|
||||
roomId: "!room:example",
|
||||
})),
|
||||
);
|
||||
const writeJsonFileAtomicallyMock = vi.hoisted(() =>
|
||||
vi.fn<(filePath: string, value: unknown) => Promise<void>>(),
|
||||
);
|
||||
|
||||
vi.mock("../../runtime-api.js", async () => {
|
||||
const actual =
|
||||
await vi.importActual<typeof import("../../runtime-api.js")>("../../runtime-api.js");
|
||||
pluginSdkActual.writeJsonFileAtomically = actual.writeJsonFileAtomically;
|
||||
return {
|
||||
...actual,
|
||||
writeJsonFileAtomically: (filePath: string, value: unknown) =>
|
||||
writeJsonFileAtomicallyMock(filePath, value),
|
||||
};
|
||||
});
|
||||
const actualRename = fs.rename.bind(fs);
|
||||
const renameMock = vi.spyOn(fs, "rename");
|
||||
|
||||
vi.mock("./send.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("./send.js")>("./send.js");
|
||||
@ -82,10 +66,8 @@ describe("matrix thread bindings", () => {
|
||||
__testing.resetSessionBindingAdaptersForTests();
|
||||
resetMatrixThreadBindingsForTests();
|
||||
sendMessageMatrixMock.mockClear();
|
||||
writeJsonFileAtomicallyMock.mockReset();
|
||||
writeJsonFileAtomicallyMock.mockImplementation(async (filePath: string, value: unknown) => {
|
||||
await pluginSdkActual.writeJsonFileAtomically?.(filePath, value);
|
||||
});
|
||||
renameMock.mockReset();
|
||||
renameMock.mockImplementation(actualRename);
|
||||
setMatrixRuntime({
|
||||
state: {
|
||||
resolveStateDir: () => stateDir,
|
||||
@ -216,7 +198,7 @@ describe("matrix thread bindings", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("persists a batch of expired bindings once per sweep", async () => {
|
||||
it("persists expired bindings after a sweep", async () => {
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(new Date("2026-03-08T12:00:00.000Z"));
|
||||
try {
|
||||
@ -251,12 +233,8 @@ describe("matrix thread bindings", () => {
|
||||
placement: "current",
|
||||
});
|
||||
|
||||
writeJsonFileAtomicallyMock.mockClear();
|
||||
await vi.advanceTimersByTimeAsync(61_000);
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(writeJsonFileAtomicallyMock).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
await Promise.resolve();
|
||||
|
||||
await vi.waitFor(async () => {
|
||||
const persistedRaw = await fs.readFile(resolveBindingsFilePath(), "utf-8");
|
||||
@ -296,13 +274,23 @@ describe("matrix thread bindings", () => {
|
||||
placement: "current",
|
||||
});
|
||||
|
||||
writeJsonFileAtomicallyMock.mockClear();
|
||||
writeJsonFileAtomicallyMock.mockRejectedValueOnce(new Error("disk full"));
|
||||
renameMock.mockRejectedValueOnce(new Error("disk full"));
|
||||
await vi.advanceTimersByTimeAsync(61_000);
|
||||
await Promise.resolve();
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(
|
||||
logVerboseMessage.mock.calls.some(
|
||||
([message]) =>
|
||||
typeof message === "string" &&
|
||||
message.includes("failed auto-unbinding expired bindings"),
|
||||
),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(logVerboseMessage).toHaveBeenCalledWith(
|
||||
expect.stringContaining("failed auto-unbinding expired bindings"),
|
||||
expect.stringContaining("matrix: auto-unbinding $thread due to idle-expired"),
|
||||
);
|
||||
});
|
||||
|
||||
|
||||
@ -8,6 +8,12 @@ export {
|
||||
type LookupFn,
|
||||
type SsrFPolicy,
|
||||
} from "openclaw/plugin-sdk/infra-runtime";
|
||||
export {
|
||||
dispatchReplyFromConfigWithSettledDispatcher,
|
||||
ensureConfiguredAcpBindingReady,
|
||||
maybeCreateMatrixMigrationSnapshot,
|
||||
resolveConfiguredAcpBindingRecord,
|
||||
} from "openclaw/plugin-sdk/matrix-runtime-heavy";
|
||||
// Keep auth-precedence available internally without re-exporting helper-api
|
||||
// twice through both plugin-sdk/matrix and ../runtime-api.js.
|
||||
export * from "./auth-precedence.js";
|
||||
|
||||
397
extensions/openai/index.test.ts
Normal file
397
extensions/openai/index.test.ts
Normal file
@ -0,0 +1,397 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import OpenAI from "openai";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../../src/config/config.js";
|
||||
import { loadConfig } from "../../src/config/config.js";
|
||||
import { encodePngRgba, fillPixel } from "../../src/media/png-encode.js";
|
||||
import type { ResolvedTtsConfig } from "../../src/tts/tts.js";
|
||||
import { createTestPluginApi } from "../../test/helpers/extensions/plugin-api.js";
|
||||
import plugin from "./index.js";
|
||||
|
||||
// Credentials and model pins for the optional live test suite.
const OPENAI_API_KEY = process.env.OPENAI_API_KEY ?? "";
// Chat model exercised by the live provider round-trip (env-overridable).
const LIVE_MODEL_ID = process.env.OPENCLAW_LIVE_OPENAI_PLUGIN_MODEL?.trim() || "gpt-5.4-nano";
// Image-generation model for the live image test.
const LIVE_IMAGE_MODEL = process.env.OPENCLAW_LIVE_OPENAI_IMAGE_MODEL?.trim() || "gpt-image-1";
// Vision model for the live describe-image test.
const LIVE_VISION_MODEL = process.env.OPENCLAW_LIVE_OPENAI_VISION_MODEL?.trim() || "gpt-4.1-mini";
// Live tests require BOTH a non-empty key and an explicit opt-in flag.
const liveEnabled = OPENAI_API_KEY.trim().length > 0 && process.env.OPENCLAW_LIVE_TEST === "1";
const describeLive = liveEnabled ? describe : describe.skip;
// Minimal auth store: version stamp, no configured profiles.
const EMPTY_AUTH_STORE = { version: 1, profiles: {} } as const;
|
||||
|
||||
function createTemplateModel(modelId: string) {
|
||||
switch (modelId) {
|
||||
case "gpt-5.4":
|
||||
return {
|
||||
id: "gpt-5.2",
|
||||
name: "GPT-5.2",
|
||||
provider: "openai",
|
||||
api: "openai-completions",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
};
|
||||
case "gpt-5.4-mini":
|
||||
return {
|
||||
id: "gpt-5-mini",
|
||||
name: "GPT-5 mini",
|
||||
provider: "openai",
|
||||
api: "openai-completions",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
};
|
||||
case "gpt-5.4-nano":
|
||||
return {
|
||||
id: "gpt-5-nano",
|
||||
name: "GPT-5 nano",
|
||||
provider: "openai",
|
||||
api: "openai-completions",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0.5, output: 1, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 200_000,
|
||||
maxTokens: 64_000,
|
||||
};
|
||||
default:
|
||||
throw new Error(`Unsupported live OpenAI plugin model: ${modelId}`);
|
||||
}
|
||||
}
|
||||
|
||||
function registerOpenAIPlugin() {
|
||||
const providers: unknown[] = [];
|
||||
const speechProviders: unknown[] = [];
|
||||
const mediaProviders: unknown[] = [];
|
||||
const imageProviders: unknown[] = [];
|
||||
|
||||
plugin.register(
|
||||
createTestPluginApi({
|
||||
id: "openai",
|
||||
name: "OpenAI Provider",
|
||||
source: "test",
|
||||
config: {},
|
||||
runtime: {} as never,
|
||||
registerProvider: (provider) => {
|
||||
providers.push(provider);
|
||||
},
|
||||
registerSpeechProvider: (provider) => {
|
||||
speechProviders.push(provider);
|
||||
},
|
||||
registerMediaUnderstandingProvider: (provider) => {
|
||||
mediaProviders.push(provider);
|
||||
},
|
||||
registerImageGenerationProvider: (provider) => {
|
||||
imageProviders.push(provider);
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
return { providers, speechProviders, mediaProviders, imageProviders };
|
||||
}
|
||||
|
||||
function createReferencePng(): Buffer {
|
||||
const width = 96;
|
||||
const height = 96;
|
||||
const buf = Buffer.alloc(width * height * 4, 255);
|
||||
|
||||
for (let y = 0; y < height; y += 1) {
|
||||
for (let x = 0; x < width; x += 1) {
|
||||
fillPixel(buf, x, y, width, 225, 242, 255, 255);
|
||||
}
|
||||
}
|
||||
|
||||
for (let y = 24; y < 72; y += 1) {
|
||||
for (let x = 24; x < 72; x += 1) {
|
||||
fillPixel(buf, x, y, width, 255, 153, 51, 255);
|
||||
}
|
||||
}
|
||||
|
||||
return encodePngRgba(buf, width, height);
|
||||
}
|
||||
|
||||
function createLiveConfig(): OpenClawConfig {
|
||||
const cfg = loadConfig();
|
||||
return {
|
||||
...cfg,
|
||||
models: {
|
||||
...cfg.models,
|
||||
providers: {
|
||||
...cfg.models?.providers,
|
||||
openai: {
|
||||
...cfg.models?.providers?.openai,
|
||||
apiKey: OPENAI_API_KEY,
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig;
|
||||
}
|
||||
|
||||
function createLiveTtsConfig(): ResolvedTtsConfig {
|
||||
return {
|
||||
auto: "off",
|
||||
mode: "final",
|
||||
provider: "openai",
|
||||
providerSource: "config",
|
||||
modelOverrides: {
|
||||
enabled: true,
|
||||
allowText: true,
|
||||
allowProvider: true,
|
||||
allowVoice: true,
|
||||
allowModelId: true,
|
||||
allowVoiceSettings: true,
|
||||
allowNormalization: true,
|
||||
allowSeed: true,
|
||||
},
|
||||
elevenlabs: {
|
||||
baseUrl: "https://api.elevenlabs.io",
|
||||
voiceId: "",
|
||||
modelId: "eleven_multilingual_v2",
|
||||
voiceSettings: {
|
||||
stability: 0.5,
|
||||
similarityBoost: 0.75,
|
||||
style: 0,
|
||||
useSpeakerBoost: true,
|
||||
speed: 1,
|
||||
},
|
||||
},
|
||||
openai: {
|
||||
apiKey: OPENAI_API_KEY,
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
model: "gpt-4o-mini-tts",
|
||||
voice: "alloy",
|
||||
},
|
||||
edge: {
|
||||
enabled: false,
|
||||
voice: "en-US-AriaNeural",
|
||||
lang: "en-US",
|
||||
outputFormat: "audio-24khz-48kbitrate-mono-mp3",
|
||||
outputFormatConfigured: false,
|
||||
saveSubtitles: false,
|
||||
},
|
||||
maxTextLength: 4_000,
|
||||
timeoutMs: 30_000,
|
||||
};
|
||||
}
|
||||
|
||||
async function createTempAgentDir(): Promise<string> {
|
||||
return await fs.mkdtemp(path.join(os.tmpdir(), "openai-plugin-live-"));
|
||||
}
|
||||
|
||||
// Offline smoke test: registration alone must expose exactly the expected
// provider surfaces, in a stable order, without touching the network.
describe("openai plugin", () => {
  it("registers the expected provider surfaces", () => {
    const { providers, speechProviders, mediaProviders, imageProviders } = registerOpenAIPlugin();

    // Two model providers: plain API-key "openai" plus the "openai-codex" variant.
    expect(providers).toHaveLength(2);
    expect(
      providers.map(
        (provider) =>
          // oxlint-disable-next-line typescript/no-explicit-any
          (provider as any).id,
      ),
    ).toEqual(["openai", "openai-codex"]);
    // Exactly one each of speech, media-understanding, and image generation.
    expect(speechProviders).toHaveLength(1);
    expect(mediaProviders).toHaveLength(1);
    expect(imageProviders).toHaveLength(1);
  });
});
|
||||
|
||||
// Live integration suite — runs only when OPENAI_API_KEY is set and
// OPENCLAW_LIVE_TEST=1 (see describeLive above). Each test carries a
// generous timeout because it hits the real OpenAI API.
describeLive("openai plugin live", () => {
  it("registers an OpenAI provider that can complete a live request", async () => {
    const { providers } = registerOpenAIPlugin();
    const provider =
      // oxlint-disable-next-line typescript/no-explicit-any
      providers.find((entry) => (entry as any).id === "openai");

    expect(provider).toBeDefined();

    // Resolve the live model id through a stub registry that only knows the
    // single template entry createTemplateModel() maps it to.
    // oxlint-disable-next-line typescript/no-explicit-any
    const resolved = (provider as any).resolveDynamicModel?.({
      provider: "openai",
      modelId: LIVE_MODEL_ID,
      modelRegistry: {
        find(providerId: string, id: string) {
          if (providerId !== "openai") {
            return null;
          }
          const template = createTemplateModel(LIVE_MODEL_ID);
          return id === template.id ? template : null;
        },
      },
    });

    expect(resolved).toBeDefined();

    // oxlint-disable-next-line typescript/no-explicit-any
    const normalized = (provider as any).normalizeResolvedModel?.({
      provider: "openai",
      modelId: resolved.id,
      model: resolved,
    });

    // Normalization should surface the live id on the responses API.
    expect(normalized).toMatchObject({
      provider: "openai",
      id: LIVE_MODEL_ID,
      api: "openai-responses",
      baseUrl: "https://api.openai.com/v1",
    });

    // Round-trip the normalized model through the real responses endpoint.
    const client = new OpenAI({
      apiKey: OPENAI_API_KEY,
      baseURL: normalized?.baseUrl,
    });
    const response = await client.responses.create({
      model: normalized?.id ?? LIVE_MODEL_ID,
      input: "Reply with exactly OK.",
      max_output_tokens: 16,
    });

    expect(response.output_text.trim()).toMatch(/^OK[.!]?$/);
  }, 30_000);

  it("lists voices and synthesizes audio through the registered speech provider", async () => {
    const { speechProviders } = registerOpenAIPlugin();
    const speechProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      speechProviders.find((entry) => (entry as any).id === "openai");

    expect(speechProvider).toBeDefined();

    // Voice listing must include the default "alloy" voice.
    // oxlint-disable-next-line typescript/no-explicit-any
    const voices = await (speechProvider as any).listVoices?.({});
    expect(Array.isArray(voices)).toBe(true);
    expect(voices.map((voice: { id: string }) => voice.id)).toContain("alloy");

    const cfg = createLiveConfig();
    const ttsConfig = createLiveTtsConfig();

    // File-target synthesis should yield a non-trivial MP3 payload.
    // oxlint-disable-next-line typescript/no-explicit-any
    const audioFile = await (speechProvider as any).synthesize({
      text: "OpenClaw integration test OK.",
      cfg,
      config: ttsConfig,
      target: "audio-file",
    });
    expect(audioFile.outputFormat).toBe("mp3");
    expect(audioFile.fileExtension).toBe(".mp3");
    expect(audioFile.audioBuffer.byteLength).toBeGreaterThan(512);

    // Telephony synthesis should yield 24kHz PCM.
    // oxlint-disable-next-line typescript/no-explicit-any
    const telephony = await (speechProvider as any).synthesizeTelephony?.({
      text: "Telephony check OK.",
      cfg,
      config: ttsConfig,
    });
    expect(telephony?.outputFormat).toBe("pcm");
    expect(telephony?.sampleRate).toBe(24_000);
    expect(telephony?.audioBuffer.byteLength).toBeGreaterThan(512);
  }, 45_000);

  it("transcribes synthesized speech through the registered media provider", async () => {
    const { speechProviders, mediaProviders } = registerOpenAIPlugin();
    const speechProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      speechProviders.find((entry) => (entry as any).id === "openai");
    const mediaProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      mediaProviders.find((entry) => (entry as any).id === "openai");

    expect(speechProvider).toBeDefined();
    expect(mediaProvider).toBeDefined();

    const cfg = createLiveConfig();
    const ttsConfig = createLiveTtsConfig();

    // Synthesize known text, then feed the audio back through transcription.
    // oxlint-disable-next-line typescript/no-explicit-any
    const synthesized = await (speechProvider as any).synthesize({
      text: "OpenClaw integration test OK.",
      cfg,
      config: ttsConfig,
      target: "audio-file",
    });

    // oxlint-disable-next-line typescript/no-explicit-any
    const transcription = await (mediaProvider as any).transcribeAudio?.({
      buffer: synthesized.audioBuffer,
      fileName: "openai-plugin-live.mp3",
      mime: "audio/mpeg",
      apiKey: OPENAI_API_KEY,
      timeoutMs: 30_000,
    });

    // Loose match: transcripts vary, but the key tokens must survive.
    const text = String(transcription?.text ?? "").toLowerCase();
    expect(text.length).toBeGreaterThan(0);
    expect(text).toContain("openclaw");
    expect(text).toMatch(/\bok\b/);
  }, 45_000);

  it("generates an image through the registered image provider", async () => {
    const { imageProviders } = registerOpenAIPlugin();
    const imageProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      imageProviders.find((entry) => (entry as any).id === "openai");

    expect(imageProvider).toBeDefined();

    const cfg = createLiveConfig();
    const agentDir = await createTempAgentDir();

    try {
      // oxlint-disable-next-line typescript/no-explicit-any
      const generated = await (imageProvider as any).generateImage({
        provider: "openai",
        model: LIVE_IMAGE_MODEL,
        prompt: "Create a minimal flat orange square centered on a white background.",
        cfg,
        agentDir,
        authStore: EMPTY_AUTH_STORE,
        timeoutMs: 45_000,
        size: "1024x1024",
      });

      // Structural checks only — image content itself is non-deterministic.
      expect(generated.model).toBe(LIVE_IMAGE_MODEL);
      expect(generated.images.length).toBeGreaterThan(0);
      expect(generated.images[0]?.mimeType).toBe("image/png");
      expect(generated.images[0]?.buffer.byteLength).toBeGreaterThan(1_000);
    } finally {
      // Always remove the temp agent dir, even on assertion failure.
      await fs.rm(agentDir, { recursive: true, force: true });
    }
  }, 60_000);

  it("describes a deterministic image through the registered media provider", async () => {
    const { mediaProviders } = registerOpenAIPlugin();
    const mediaProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      mediaProviders.find((entry) => (entry as any).id === "openai");

    expect(mediaProvider).toBeDefined();

    const cfg = createLiveConfig();
    const agentDir = await createTempAgentDir();

    try {
      // The fixture PNG has a known orange center, so the one-word color
      // answer is deterministic enough to assert on.
      // oxlint-disable-next-line typescript/no-explicit-any
      const description = await (mediaProvider as any).describeImage?.({
        buffer: createReferencePng(),
        fileName: "reference.png",
        mime: "image/png",
        prompt: "Reply with one lowercase word for the dominant center color.",
        timeoutMs: 30_000,
        agentDir,
        cfg,
        model: LIVE_VISION_MODEL,
        provider: "openai",
      });

      expect(String(description?.text ?? "").toLowerCase()).toContain("orange");
    } finally {
      await fs.rm(agentDir, { recursive: true, force: true });
    }
  }, 60_000);
});
|
||||
@ -1,6 +1,73 @@
|
||||
import OpenAI from "openai";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { buildOpenAIProvider } from "./openai-provider.js";
|
||||
|
||||
const OPENAI_API_KEY = process.env.OPENAI_API_KEY ?? "";
|
||||
const DEFAULT_LIVE_MODEL_IDS = ["gpt-5.4-mini", "gpt-5.4-nano"] as const;
|
||||
const liveEnabled = OPENAI_API_KEY.trim().length > 0 && process.env.OPENCLAW_LIVE_TEST === "1";
|
||||
const describeLive = liveEnabled ? describe : describe.skip;
|
||||
|
||||
type LiveModelCase = {
|
||||
modelId: string;
|
||||
templateId: string;
|
||||
templateName: string;
|
||||
cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
|
||||
contextWindow: number;
|
||||
maxTokens: number;
|
||||
};
|
||||
|
||||
function resolveLiveModelCase(modelId: string): LiveModelCase {
|
||||
switch (modelId) {
|
||||
case "gpt-5.4":
|
||||
return {
|
||||
modelId,
|
||||
templateId: "gpt-5.2",
|
||||
templateName: "GPT-5.2",
|
||||
cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
};
|
||||
case "gpt-5.4-pro":
|
||||
return {
|
||||
modelId,
|
||||
templateId: "gpt-5.2-pro",
|
||||
templateName: "GPT-5.2 Pro",
|
||||
cost: { input: 15, output: 60, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
};
|
||||
case "gpt-5.4-mini":
|
||||
return {
|
||||
modelId,
|
||||
templateId: "gpt-5-mini",
|
||||
templateName: "GPT-5 mini",
|
||||
cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
};
|
||||
case "gpt-5.4-nano":
|
||||
return {
|
||||
modelId,
|
||||
templateId: "gpt-5-nano",
|
||||
templateName: "GPT-5 nano",
|
||||
cost: { input: 0.5, output: 1, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 200_000,
|
||||
maxTokens: 64_000,
|
||||
};
|
||||
default:
|
||||
throw new Error(`Unsupported live OpenAI model: ${modelId}`);
|
||||
}
|
||||
}
|
||||
|
||||
function resolveLiveModelCases(raw?: string): LiveModelCase[] {
|
||||
const requested = raw
|
||||
?.split(",")
|
||||
.map((value) => value.trim())
|
||||
.filter(Boolean);
|
||||
const modelIds = requested?.length ? requested : [...DEFAULT_LIVE_MODEL_IDS];
|
||||
return [...new Set(modelIds)].map((modelId) => resolveLiveModelCase(modelId));
|
||||
}
|
||||
|
||||
describe("buildOpenAIProvider", () => {
|
||||
it("resolves gpt-5.4 mini and nano from GPT-5 small-model templates", () => {
|
||||
const provider = buildOpenAIProvider();
|
||||
@ -106,3 +173,69 @@ describe("buildOpenAIProvider", () => {
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describeLive("buildOpenAIProvider live", () => {
|
||||
it.each(resolveLiveModelCases(process.env.OPENCLAW_LIVE_OPENAI_MODELS))(
|
||||
"resolves %s and completes through the OpenAI responses API",
|
||||
async (liveCase) => {
|
||||
const provider = buildOpenAIProvider();
|
||||
const registry = {
|
||||
find(providerId: string, id: string) {
|
||||
if (providerId !== "openai") {
|
||||
return null;
|
||||
}
|
||||
if (id === liveCase.templateId) {
|
||||
return {
|
||||
id: liveCase.templateId,
|
||||
name: liveCase.templateName,
|
||||
provider: "openai",
|
||||
api: "openai-completions",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: liveCase.cost,
|
||||
contextWindow: liveCase.contextWindow,
|
||||
maxTokens: liveCase.maxTokens,
|
||||
};
|
||||
}
|
||||
return null;
|
||||
},
|
||||
};
|
||||
|
||||
const resolved = provider.resolveDynamicModel?.({
|
||||
provider: "openai",
|
||||
modelId: liveCase.modelId,
|
||||
modelRegistry: registry as never,
|
||||
});
|
||||
|
||||
expect(resolved).toBeDefined();
|
||||
|
||||
const normalized = provider.normalizeResolvedModel?.({
|
||||
provider: "openai",
|
||||
modelId: resolved!.id,
|
||||
model: resolved!,
|
||||
});
|
||||
|
||||
expect(normalized).toMatchObject({
|
||||
provider: "openai",
|
||||
id: liveCase.modelId,
|
||||
api: "openai-responses",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
});
|
||||
|
||||
const client = new OpenAI({
|
||||
apiKey: OPENAI_API_KEY,
|
||||
baseURL: normalized?.baseUrl,
|
||||
});
|
||||
|
||||
const response = await client.responses.create({
|
||||
model: normalized?.id ?? liveCase.modelId,
|
||||
input: "Reply with exactly OK.",
|
||||
max_output_tokens: 16,
|
||||
});
|
||||
|
||||
expect(response.output_text.trim()).toMatch(/^OK[.!]?$/);
|
||||
},
|
||||
30_000,
|
||||
);
|
||||
});
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import { resolveAgentRoute } from "openclaw/plugin-sdk/routing";
|
||||
import { normalizeE164 } from "openclaw/plugin-sdk/text-runtime";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../../../src/config/config.js";
|
||||
import { resolveAgentRoute } from "../../../src/routing/resolve-route.js";
|
||||
import { normalizeE164 } from "../../../src/utils.js";
|
||||
import type { SignalDaemonExitEvent } from "./daemon.js";
|
||||
import {
|
||||
createMockSignalDaemonHandle,
|
||||
@ -16,16 +16,14 @@ installSignalToolResultTestHooks();
|
||||
|
||||
// Import after the harness registers `vi.mock(...)` for Signal internals.
|
||||
vi.resetModules();
|
||||
const [{ peekSystemEvents }, { monitorSignalProvider }] = await Promise.all([
|
||||
import("openclaw/plugin-sdk/infra-runtime"),
|
||||
import("./monitor.js"),
|
||||
]);
|
||||
const { monitorSignalProvider } = await import("./monitor.js");
|
||||
|
||||
const {
|
||||
replyMock,
|
||||
sendMock,
|
||||
streamMock,
|
||||
updateLastRouteMock,
|
||||
enqueueSystemEventMock,
|
||||
upsertPairingRequestMock,
|
||||
waitForTransportReadyMock,
|
||||
spawnSignalDaemonMock,
|
||||
@ -109,14 +107,23 @@ async function receiveSignalPayloads(params: {
|
||||
await flush();
|
||||
}
|
||||
|
||||
function getDirectSignalEventsFor(sender: string) {
|
||||
function hasQueuedReactionEventFor(sender: string) {
|
||||
const route = resolveAgentRoute({
|
||||
cfg: config as OpenClawConfig,
|
||||
channel: "signal",
|
||||
accountId: "default",
|
||||
peer: { kind: "direct", id: normalizeE164(sender) },
|
||||
});
|
||||
return peekSystemEvents(route.sessionKey);
|
||||
return enqueueSystemEventMock.mock.calls.some(([text, options]) => {
|
||||
return (
|
||||
typeof text === "string" &&
|
||||
text.includes("Signal reaction added") &&
|
||||
typeof options === "object" &&
|
||||
options !== null &&
|
||||
"sessionKey" in options &&
|
||||
(options as { sessionKey?: string }).sessionKey === route.sessionKey
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
function makeBaseEnvelope(overrides: Record<string, unknown> = {}) {
|
||||
@ -383,8 +390,7 @@ describe("monitorSignalProvider tool results", () => {
|
||||
},
|
||||
});
|
||||
|
||||
const events = getDirectSignalEventsFor("+15550001111");
|
||||
expect(events.some((text) => text.includes("Signal reaction added"))).toBe(true);
|
||||
expect(hasQueuedReactionEventFor("+15550001111")).toBe(true);
|
||||
});
|
||||
|
||||
it.each([
|
||||
@ -424,8 +430,7 @@ describe("monitorSignalProvider tool results", () => {
|
||||
},
|
||||
});
|
||||
|
||||
const events = getDirectSignalEventsFor("+15550001111");
|
||||
expect(events.some((text) => text.includes("Signal reaction added"))).toBe(shouldEnqueue);
|
||||
expect(hasQueuedReactionEventFor("+15550001111")).toBe(shouldEnqueue);
|
||||
expect(sendMock).not.toHaveBeenCalled();
|
||||
expect(upsertPairingRequestMock).not.toHaveBeenCalled();
|
||||
});
|
||||
@ -442,8 +447,7 @@ describe("monitorSignalProvider tool results", () => {
|
||||
},
|
||||
});
|
||||
|
||||
const events = getDirectSignalEventsFor("+15550001111");
|
||||
expect(events.some((text) => text.includes("Signal reaction added"))).toBe(true);
|
||||
expect(hasQueuedReactionEventFor("+15550001111")).toBe(true);
|
||||
});
|
||||
|
||||
it("processes messages when reaction metadata is present", async () => {
|
||||
|
||||
@ -4,6 +4,7 @@ import type { SignalDaemonExitEvent, SignalDaemonHandle } from "./daemon.js";
|
||||
|
||||
type SignalToolResultTestMocks = {
|
||||
waitForTransportReadyMock: MockFn;
|
||||
enqueueSystemEventMock: MockFn;
|
||||
sendMock: MockFn;
|
||||
replyMock: MockFn;
|
||||
updateLastRouteMock: MockFn;
|
||||
@ -16,6 +17,7 @@ type SignalToolResultTestMocks = {
|
||||
};
|
||||
|
||||
const waitForTransportReadyMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
|
||||
const enqueueSystemEventMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
|
||||
const sendMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
|
||||
const replyMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
|
||||
const updateLastRouteMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
|
||||
@ -29,6 +31,7 @@ const spawnSignalDaemonMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
|
||||
export function getSignalToolResultTestMocks(): SignalToolResultTestMocks {
|
||||
return {
|
||||
waitForTransportReadyMock,
|
||||
enqueueSystemEventMock,
|
||||
sendMock,
|
||||
replyMock,
|
||||
updateLastRouteMock,
|
||||
@ -162,6 +165,10 @@ vi.mock("openclaw/plugin-sdk/infra-runtime", async () => {
|
||||
return {
|
||||
...actual,
|
||||
waitForTransportReady: (...args: unknown[]) => waitForTransportReadyMock(...args),
|
||||
enqueueSystemEvent: (...args: Parameters<typeof actual.enqueueSystemEvent>) => {
|
||||
enqueueSystemEventMock(...args);
|
||||
return actual.enqueueSystemEvent(...args);
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
@ -189,6 +196,7 @@ export function installSignalToolResultTestHooks() {
|
||||
readAllowFromStoreMock.mockReset().mockResolvedValue([]);
|
||||
upsertPairingRequestMock.mockReset().mockResolvedValue({ code: "PAIRCODE", created: true });
|
||||
waitForTransportReadyMock.mockReset().mockResolvedValue(undefined);
|
||||
enqueueSystemEventMock.mockReset();
|
||||
|
||||
resetSystemEventsForTest();
|
||||
});
|
||||
|
||||
@ -20,15 +20,6 @@ vi.mock("openclaw/plugin-sdk/reply-runtime", async (importOriginal) => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("openclaw/plugin-sdk/conversation-runtime", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("openclaw/plugin-sdk/conversation-runtime")>();
|
||||
return {
|
||||
...actual,
|
||||
readChannelAllowFromStore: (...args: unknown[]) => mocks.readAllowFromStoreMock(...args),
|
||||
upsertChannelPairingRequest: (...args: unknown[]) => mocks.upsertPairingRequestMock(...args),
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("openclaw/plugin-sdk/routing", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("openclaw/plugin-sdk/routing")>();
|
||||
return {
|
||||
|
||||
@ -21,8 +21,10 @@ const { resolveTelegramFetch } = vi.hoisted(() => ({
|
||||
resolveTelegramFetch: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../../../src/config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../../../src/config/config.js")>();
|
||||
vi.mock("openclaw/plugin-sdk/config-runtime", async () => {
|
||||
const actual = await vi.importActual<typeof import("openclaw/plugin-sdk/config-runtime")>(
|
||||
"openclaw/plugin-sdk/config-runtime",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
loadConfig,
|
||||
|
||||
@ -8,8 +8,10 @@ const readAllowFromStoreMock = vi.fn().mockResolvedValue([]);
|
||||
const upsertPairingRequestMock = vi.fn().mockResolvedValue({ code: "PAIRCODE", created: true });
|
||||
const saveMediaBufferSpy = vi.fn();
|
||||
|
||||
vi.mock("../../../src/config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../../../src/config/config.js")>();
|
||||
vi.mock("openclaw/plugin-sdk/config-runtime", async () => {
|
||||
const actual = await vi.importActual<typeof import("openclaw/plugin-sdk/config-runtime")>(
|
||||
"openclaw/plugin-sdk/config-runtime",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: vi.fn().mockReturnValue({
|
||||
@ -37,8 +39,10 @@ vi.mock("../../../src/pairing/pairing-store.js", () => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../../../src/media/store.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../../../src/media/store.js")>();
|
||||
vi.mock("openclaw/plugin-sdk/media-runtime", async () => {
|
||||
const actual = await vi.importActual<typeof import("openclaw/plugin-sdk/media-runtime")>(
|
||||
"openclaw/plugin-sdk/media-runtime",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
saveMediaBuffer: vi.fn(async (...args: Parameters<typeof actual.saveMediaBuffer>) => {
|
||||
|
||||
@ -19,25 +19,30 @@ function resolveTestAuthDir() {
|
||||
|
||||
const authDir = resolveTestAuthDir();
|
||||
|
||||
vi.mock("../../../src/config/config.js", () => ({
|
||||
loadConfig: () =>
|
||||
({
|
||||
channels: {
|
||||
whatsapp: {
|
||||
accounts: {
|
||||
default: { enabled: true, authDir: resolveTestAuthDir() },
|
||||
vi.mock("openclaw/plugin-sdk/config-runtime", async () => {
|
||||
const actual = await vi.importActual<typeof import("openclaw/plugin-sdk/config-runtime")>(
|
||||
"openclaw/plugin-sdk/config-runtime",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: () =>
|
||||
({
|
||||
channels: {
|
||||
whatsapp: {
|
||||
accounts: {
|
||||
default: { enabled: true, authDir: resolveTestAuthDir() },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}) as never,
|
||||
}));
|
||||
}) as never,
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("./session.js", () => {
|
||||
const authDir = resolveTestAuthDir();
|
||||
const sockA = { ws: { close: vi.fn() } };
|
||||
const sockB = { ws: { close: vi.fn() } };
|
||||
let call = 0;
|
||||
const createWaSocket = vi.fn(async () => (call++ === 0 ? sockA : sockB));
|
||||
const createWaSocket = vi.fn(async () => (createWaSocket.mock.calls.length <= 1 ? sockA : sockB));
|
||||
const waitForWaConnection = vi.fn();
|
||||
const formatError = vi.fn((err: unknown) => `formatted:${String(err)}`);
|
||||
const getStatusCode = vi.fn(
|
||||
@ -78,6 +83,10 @@ describe("loginWeb coverage", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
vi.clearAllMocks();
|
||||
createWaSocketMock.mockClear();
|
||||
waitForWaConnectionMock.mockReset().mockResolvedValue(undefined);
|
||||
waitForCredsSaveQueueWithTimeoutMock.mockReset().mockResolvedValue(undefined);
|
||||
formatErrorMock.mockReset().mockImplementation((err: unknown) => `formatted:${String(err)}`);
|
||||
rmMock.mockClear();
|
||||
});
|
||||
afterEach(() => {
|
||||
|
||||
17
package.json
17
package.json
@ -121,6 +121,10 @@
|
||||
"types": "./dist/plugin-sdk/infra-runtime.d.ts",
|
||||
"default": "./dist/plugin-sdk/infra-runtime.js"
|
||||
},
|
||||
"./plugin-sdk/ssrf-runtime": {
|
||||
"types": "./dist/plugin-sdk/ssrf-runtime.d.ts",
|
||||
"default": "./dist/plugin-sdk/ssrf-runtime.js"
|
||||
},
|
||||
"./plugin-sdk/media-runtime": {
|
||||
"types": "./dist/plugin-sdk/media-runtime.d.ts",
|
||||
"default": "./dist/plugin-sdk/media-runtime.js"
|
||||
@ -133,6 +137,18 @@
|
||||
"types": "./dist/plugin-sdk/conversation-runtime.d.ts",
|
||||
"default": "./dist/plugin-sdk/conversation-runtime.js"
|
||||
},
|
||||
"./plugin-sdk/matrix-runtime-heavy": {
|
||||
"types": "./dist/plugin-sdk/matrix-runtime-heavy.d.ts",
|
||||
"default": "./dist/plugin-sdk/matrix-runtime-heavy.js"
|
||||
},
|
||||
"./plugin-sdk/matrix-runtime-shared": {
|
||||
"types": "./dist/plugin-sdk/matrix-runtime-shared.d.ts",
|
||||
"default": "./dist/plugin-sdk/matrix-runtime-shared.js"
|
||||
},
|
||||
"./plugin-sdk/thread-bindings-runtime": {
|
||||
"types": "./dist/plugin-sdk/thread-bindings-runtime.d.ts",
|
||||
"default": "./dist/plugin-sdk/thread-bindings-runtime.js"
|
||||
},
|
||||
"./plugin-sdk/text-runtime": {
|
||||
"types": "./dist/plugin-sdk/text-runtime.d.ts",
|
||||
"default": "./dist/plugin-sdk/text-runtime.js"
|
||||
@ -577,6 +593,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@agentclientprotocol/sdk": "0.16.1",
|
||||
"@anthropic-ai/vertex-sdk": "^0.14.4",
|
||||
"@aws-sdk/client-bedrock": "^3.1011.0",
|
||||
"@clack/prompts": "^1.1.0",
|
||||
"@homebridge/ciao": "^1.3.5",
|
||||
|
||||
208
pnpm-lock.yaml
generated
208
pnpm-lock.yaml
generated
@ -29,6 +29,9 @@ importers:
|
||||
'@agentclientprotocol/sdk':
|
||||
specifier: 0.16.1
|
||||
version: 0.16.1(zod@4.3.6)
|
||||
'@anthropic-ai/vertex-sdk':
|
||||
specifier: ^0.14.4
|
||||
version: 0.14.4(zod@4.3.6)
|
||||
'@aws-sdk/client-bedrock':
|
||||
specifier: ^3.1011.0
|
||||
version: 3.1011.0
|
||||
@ -688,6 +691,9 @@ packages:
|
||||
zod:
|
||||
optional: true
|
||||
|
||||
'@anthropic-ai/vertex-sdk@0.14.4':
|
||||
resolution: {integrity: sha512-BZUPRWghZxfSFtAxU563wH+jfWBPoedAwsVxG35FhmNsjeV8tyfN+lFriWhCpcZApxA4NdT6Soov+PzfnxxD5g==}
|
||||
|
||||
'@asamuzakjp/css-color@5.0.1':
|
||||
resolution: {integrity: sha512-2SZFvqMyvboVV1d15lMf7XiI3m7SDqXUuKaTymJYLN6dSGadqp+fVojqJlVoMlbZnlTmu3S0TLwLTJpvBMO1Aw==}
|
||||
engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0}
|
||||
@ -1480,10 +1486,6 @@ packages:
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@isaacs/cliui@8.0.2':
|
||||
resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
'@isaacs/fs-minipass@4.0.1':
|
||||
resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==}
|
||||
engines: {node: '>=18.0.0'}
|
||||
@ -2619,10 +2621,6 @@ packages:
|
||||
'@pinojs/redact@0.4.0':
|
||||
resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==}
|
||||
|
||||
'@pkgjs/parseargs@0.11.0':
|
||||
resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==}
|
||||
engines: {node: '>=14'}
|
||||
|
||||
'@polka/url@1.0.0-next.29':
|
||||
resolution: {integrity: sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==}
|
||||
|
||||
@ -4125,9 +4123,6 @@ packages:
|
||||
resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
eastasianwidth@0.2.0:
|
||||
resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
|
||||
|
||||
ecdsa-sig-formatter@1.0.11:
|
||||
resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==}
|
||||
|
||||
@ -4140,9 +4135,6 @@ packages:
|
||||
emoji-regex@8.0.0:
|
||||
resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
|
||||
|
||||
emoji-regex@9.2.2:
|
||||
resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==}
|
||||
|
||||
empathic@2.0.0:
|
||||
resolution: {integrity: sha512-i6UzDscO/XfAcNYD75CfICkmfLedpyPDdozrLMmQc5ORaQcdMoc21OnlEylMIqI7U8eniKrPMxxtj8k0vhmJhA==}
|
||||
engines: {node: '>=14'}
|
||||
@ -4359,10 +4351,6 @@ packages:
|
||||
debug:
|
||||
optional: true
|
||||
|
||||
foreground-child@3.3.1:
|
||||
resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==}
|
||||
engines: {node: '>=14'}
|
||||
|
||||
form-data@2.5.4:
|
||||
resolution: {integrity: sha512-Y/3MmRiR8Nd+0CUtrbvcKtKzLWiUfpQ7DFVggH8PwmGt/0r7RSy32GuP4hpCJlQNEBusisSx1DLtD8uD386HJQ==}
|
||||
engines: {node: '>= 0.12'}
|
||||
@ -4409,14 +4397,18 @@ packages:
|
||||
engines: {node: '>=10'}
|
||||
deprecated: This package is no longer supported.
|
||||
|
||||
gaxios@7.1.3:
|
||||
resolution: {integrity: sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ==}
|
||||
engines: {node: '>=18'}
|
||||
gaxios@6.7.1:
|
||||
resolution: {integrity: sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==}
|
||||
engines: {node: '>=14'}
|
||||
|
||||
gaxios@7.1.4:
|
||||
resolution: {integrity: sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA==}
|
||||
engines: {node: '>=18'}
|
||||
|
||||
gcp-metadata@6.1.1:
|
||||
resolution: {integrity: sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==}
|
||||
engines: {node: '>=14'}
|
||||
|
||||
gcp-metadata@8.1.2:
|
||||
resolution: {integrity: sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==}
|
||||
engines: {node: '>=18'}
|
||||
@ -4459,11 +4451,6 @@ packages:
|
||||
resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
|
||||
engines: {node: '>= 6'}
|
||||
|
||||
glob@10.5.0:
|
||||
resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==}
|
||||
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
|
||||
hasBin: true
|
||||
|
||||
glob@13.0.6:
|
||||
resolution: {integrity: sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==}
|
||||
engines: {node: 18 || 20 || >=22}
|
||||
@ -4472,14 +4459,18 @@ packages:
|
||||
resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
|
||||
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
|
||||
|
||||
google-auth-library@10.6.1:
|
||||
resolution: {integrity: sha512-5awwuLrzNol+pFDmKJd0dKtZ0fPLAtoA5p7YO4ODsDu6ONJUVqbYwvv8y2ZBO5MBNp9TJXigB19710kYpBPdtA==}
|
||||
engines: {node: '>=18'}
|
||||
|
||||
google-auth-library@10.6.2:
|
||||
resolution: {integrity: sha512-e27Z6EThmVNNvtYASwQxose/G57rkRuaRbQyxM2bvYLLX/GqWZ5chWq2EBoUchJbCc57eC9ArzO5wMsEmWftCw==}
|
||||
engines: {node: '>=18'}
|
||||
|
||||
google-auth-library@9.15.1:
|
||||
resolution: {integrity: sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng==}
|
||||
engines: {node: '>=14'}
|
||||
|
||||
google-logging-utils@0.0.2:
|
||||
resolution: {integrity: sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ==}
|
||||
engines: {node: '>=14'}
|
||||
|
||||
google-logging-utils@1.1.3:
|
||||
resolution: {integrity: sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA==}
|
||||
engines: {node: '>=14'}
|
||||
@ -4495,6 +4486,10 @@ packages:
|
||||
resolution: {integrity: sha512-wcHAQ1e7svL3fJMpDchcQVcWUmywhuepOOjHUHmMmWAwUJEIyK5ea5sbSjZd+Gy1aMpZeP8VYJa+4tP+j1YptQ==}
|
||||
engines: {node: ^12.20.0 || >=14.13.1}
|
||||
|
||||
gtoken@7.1.0:
|
||||
resolution: {integrity: sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw==}
|
||||
engines: {node: '>=14.0.0'}
|
||||
|
||||
has-flag@4.0.0:
|
||||
resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==}
|
||||
engines: {node: '>=8'}
|
||||
@ -4721,9 +4716,6 @@ packages:
|
||||
resolution: {integrity: sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==}
|
||||
engines: {node: '>=8'}
|
||||
|
||||
jackspeak@3.4.3:
|
||||
resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==}
|
||||
|
||||
jimp@1.6.0:
|
||||
resolution: {integrity: sha512-YcwCHw1kiqEeI5xRpDlPPBGL2EOpBKLwO4yIBJcXWHPj5PnA5urGq0jbyhM5KoNpypQ6VboSoxc9D8HyfvngSg==}
|
||||
engines: {node: '>=18'}
|
||||
@ -4993,9 +4985,6 @@ packages:
|
||||
resolution: {integrity: sha512-neJAj8GwF0e8EpycYIDFqEPcx9Qz4GUho20jWFR7YiFeXzF1YMLdxB36PypcTSPMA+4+LvgyMacYhlr18Zlymw==}
|
||||
engines: {node: '>=18'}
|
||||
|
||||
lru-cache@10.4.3:
|
||||
resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==}
|
||||
|
||||
lru-cache@11.2.7:
|
||||
resolution: {integrity: sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==}
|
||||
engines: {node: 20 || >=22}
|
||||
@ -5423,9 +5412,6 @@ packages:
|
||||
resolution: {integrity: sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==}
|
||||
engines: {node: '>= 14'}
|
||||
|
||||
package-json-from-dist@1.0.1:
|
||||
resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==}
|
||||
|
||||
pako@1.0.11:
|
||||
resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==}
|
||||
|
||||
@ -5483,10 +5469,6 @@ packages:
|
||||
path-parse@1.0.7:
|
||||
resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
|
||||
|
||||
path-scurry@1.11.1:
|
||||
resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==}
|
||||
engines: {node: '>=16 || 14 >=14.18'}
|
||||
|
||||
path-scurry@2.0.2:
|
||||
resolution: {integrity: sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==}
|
||||
engines: {node: 18 || 20 || >=22}
|
||||
@ -5794,10 +5776,6 @@ packages:
|
||||
deprecated: Rimraf versions prior to v4 are no longer supported
|
||||
hasBin: true
|
||||
|
||||
rimraf@5.0.10:
|
||||
resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==}
|
||||
hasBin: true
|
||||
|
||||
rolldown-plugin-dts@0.22.5:
|
||||
resolution: {integrity: sha512-M/HXfM4cboo+jONx9Z0X+CUf3B5tCi7ni+kR5fUW50Fp9AlZk0oVLesibGWgCXDKFp5lpgQ9yhKoImUFjl3VZw==}
|
||||
engines: {node: '>=20.19.0'}
|
||||
@ -6089,10 +6067,6 @@ packages:
|
||||
resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
|
||||
engines: {node: '>=8'}
|
||||
|
||||
string-width@5.1.2:
|
||||
resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
string-width@7.2.0:
|
||||
resolution: {integrity: sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==}
|
||||
engines: {node: '>=18'}
|
||||
@ -6402,6 +6376,10 @@ packages:
|
||||
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
|
||||
hasBin: true
|
||||
|
||||
uuid@9.0.1:
|
||||
resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==}
|
||||
hasBin: true
|
||||
|
||||
validate-npm-package-name@7.0.2:
|
||||
resolution: {integrity: sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A==}
|
||||
engines: {node: ^20.17.0 || >=22.9.0}
|
||||
@ -6557,10 +6535,6 @@ packages:
|
||||
resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
|
||||
engines: {node: '>=10'}
|
||||
|
||||
wrap-ansi@8.1.0:
|
||||
resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
wrappy@1.0.2:
|
||||
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
|
||||
|
||||
@ -6668,6 +6642,15 @@ snapshots:
|
||||
optionalDependencies:
|
||||
zod: 4.3.6
|
||||
|
||||
'@anthropic-ai/vertex-sdk@0.14.4(zod@4.3.6)':
|
||||
dependencies:
|
||||
'@anthropic-ai/sdk': 0.73.0(zod@4.3.6)
|
||||
google-auth-library: 9.15.1
|
||||
transitivePeerDependencies:
|
||||
- encoding
|
||||
- supports-color
|
||||
- zod
|
||||
|
||||
'@asamuzakjp/css-color@5.0.1':
|
||||
dependencies:
|
||||
'@csstools/css-calc': 3.1.1(@csstools/css-parser-algorithms@4.0.0(@csstools/css-tokenizer@4.0.0))(@csstools/css-tokenizer@4.0.0)
|
||||
@ -7804,7 +7787,7 @@ snapshots:
|
||||
|
||||
'@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))':
|
||||
dependencies:
|
||||
google-auth-library: 10.6.1
|
||||
google-auth-library: 10.6.2
|
||||
p-retry: 4.6.2
|
||||
protobufjs: 7.5.4
|
||||
ws: 8.19.0
|
||||
@ -7969,15 +7952,6 @@ snapshots:
|
||||
'@img/sharp-win32-x64@0.34.5':
|
||||
optional: true
|
||||
|
||||
'@isaacs/cliui@8.0.2':
|
||||
dependencies:
|
||||
string-width: 5.1.2
|
||||
string-width-cjs: string-width@4.2.3
|
||||
strip-ansi: 7.2.0
|
||||
strip-ansi-cjs: strip-ansi@6.0.1
|
||||
wrap-ansi: 8.1.0
|
||||
wrap-ansi-cjs: wrap-ansi@7.0.0
|
||||
|
||||
'@isaacs/fs-minipass@4.0.1':
|
||||
dependencies:
|
||||
minipass: 7.1.3
|
||||
@ -9320,9 +9294,6 @@ snapshots:
|
||||
|
||||
'@pinojs/redact@0.4.0': {}
|
||||
|
||||
'@pkgjs/parseargs@0.11.0':
|
||||
optional: true
|
||||
|
||||
'@polka/url@1.0.0-next.29': {}
|
||||
|
||||
'@protobufjs/aspromise@1.1.2': {}
|
||||
@ -11012,8 +10983,6 @@ snapshots:
|
||||
es-errors: 1.3.0
|
||||
gopd: 1.2.0
|
||||
|
||||
eastasianwidth@0.2.0: {}
|
||||
|
||||
ecdsa-sig-formatter@1.0.11:
|
||||
dependencies:
|
||||
safe-buffer: 5.2.1
|
||||
@ -11024,8 +10993,6 @@ snapshots:
|
||||
|
||||
emoji-regex@8.0.0: {}
|
||||
|
||||
emoji-regex@9.2.2: {}
|
||||
|
||||
empathic@2.0.0: {}
|
||||
|
||||
encodeurl@2.0.0: {}
|
||||
@ -11278,11 +11245,6 @@ snapshots:
|
||||
|
||||
follow-redirects@1.15.11: {}
|
||||
|
||||
foreground-child@3.3.1:
|
||||
dependencies:
|
||||
cross-spawn: 7.0.6
|
||||
signal-exit: 4.1.0
|
||||
|
||||
form-data@2.5.4:
|
||||
dependencies:
|
||||
asynckit: 0.4.0
|
||||
@ -11336,13 +11298,15 @@ snapshots:
|
||||
wide-align: 1.1.5
|
||||
optional: true
|
||||
|
||||
gaxios@7.1.3:
|
||||
gaxios@6.7.1:
|
||||
dependencies:
|
||||
extend: 3.0.2
|
||||
https-proxy-agent: 7.0.6
|
||||
node-fetch: 3.3.2
|
||||
rimraf: 5.0.10
|
||||
is-stream: 2.0.1
|
||||
node-fetch: 2.7.0
|
||||
uuid: 9.0.1
|
||||
transitivePeerDependencies:
|
||||
- encoding
|
||||
- supports-color
|
||||
|
||||
gaxios@7.1.4:
|
||||
@ -11353,6 +11317,15 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
gcp-metadata@6.1.1:
|
||||
dependencies:
|
||||
gaxios: 6.7.1
|
||||
google-logging-utils: 0.0.2
|
||||
json-bigint: 1.0.0
|
||||
transitivePeerDependencies:
|
||||
- encoding
|
||||
- supports-color
|
||||
|
||||
gcp-metadata@8.1.2:
|
||||
dependencies:
|
||||
gaxios: 7.1.4
|
||||
@ -11411,15 +11384,6 @@ snapshots:
|
||||
dependencies:
|
||||
is-glob: 4.0.3
|
||||
|
||||
glob@10.5.0:
|
||||
dependencies:
|
||||
foreground-child: 3.3.1
|
||||
jackspeak: 3.4.3
|
||||
minimatch: 10.2.4
|
||||
minipass: 7.1.3
|
||||
package-json-from-dist: 1.0.1
|
||||
path-scurry: 1.11.1
|
||||
|
||||
glob@13.0.6:
|
||||
dependencies:
|
||||
minimatch: 10.2.4
|
||||
@ -11436,17 +11400,6 @@ snapshots:
|
||||
path-is-absolute: 1.0.1
|
||||
optional: true
|
||||
|
||||
google-auth-library@10.6.1:
|
||||
dependencies:
|
||||
base64-js: 1.5.1
|
||||
ecdsa-sig-formatter: 1.0.11
|
||||
gaxios: 7.1.3
|
||||
gcp-metadata: 8.1.2
|
||||
google-logging-utils: 1.1.3
|
||||
jws: 4.0.1
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
google-auth-library@10.6.2:
|
||||
dependencies:
|
||||
base64-js: 1.5.1
|
||||
@ -11458,6 +11411,20 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
google-auth-library@9.15.1:
|
||||
dependencies:
|
||||
base64-js: 1.5.1
|
||||
ecdsa-sig-formatter: 1.0.11
|
||||
gaxios: 6.7.1
|
||||
gcp-metadata: 6.1.1
|
||||
gtoken: 7.1.0
|
||||
jws: 4.0.1
|
||||
transitivePeerDependencies:
|
||||
- encoding
|
||||
- supports-color
|
||||
|
||||
google-logging-utils@0.0.2: {}
|
||||
|
||||
google-logging-utils@1.1.3: {}
|
||||
|
||||
gopd@1.2.0: {}
|
||||
@ -11474,6 +11441,14 @@ snapshots:
|
||||
- encoding
|
||||
- supports-color
|
||||
|
||||
gtoken@7.1.0:
|
||||
dependencies:
|
||||
gaxios: 6.7.1
|
||||
jws: 4.0.1
|
||||
transitivePeerDependencies:
|
||||
- encoding
|
||||
- supports-color
|
||||
|
||||
has-flag@4.0.0: {}
|
||||
|
||||
has-own@1.0.1: {}
|
||||
@ -11725,12 +11700,6 @@ snapshots:
|
||||
html-escaper: 2.0.2
|
||||
istanbul-lib-report: 3.0.1
|
||||
|
||||
jackspeak@3.4.3:
|
||||
dependencies:
|
||||
'@isaacs/cliui': 8.0.2
|
||||
optionalDependencies:
|
||||
'@pkgjs/parseargs': 0.11.0
|
||||
|
||||
jimp@1.6.0:
|
||||
dependencies:
|
||||
'@jimp/core': 1.6.0
|
||||
@ -12037,8 +12006,6 @@ snapshots:
|
||||
dependencies:
|
||||
steno: 4.0.2
|
||||
|
||||
lru-cache@10.4.3: {}
|
||||
|
||||
lru-cache@11.2.7: {}
|
||||
|
||||
lru-cache@6.0.0:
|
||||
@ -12634,8 +12601,6 @@ snapshots:
|
||||
degenerator: 5.0.1
|
||||
netmask: 2.0.2
|
||||
|
||||
package-json-from-dist@1.0.1: {}
|
||||
|
||||
pako@1.0.11: {}
|
||||
|
||||
pako@2.1.0: {}
|
||||
@ -12681,11 +12646,6 @@ snapshots:
|
||||
|
||||
path-parse@1.0.7: {}
|
||||
|
||||
path-scurry@1.11.1:
|
||||
dependencies:
|
||||
lru-cache: 10.4.3
|
||||
minipass: 7.1.3
|
||||
|
||||
path-scurry@2.0.2:
|
||||
dependencies:
|
||||
lru-cache: 11.2.7
|
||||
@ -13036,10 +12996,6 @@ snapshots:
|
||||
glob: 7.2.3
|
||||
optional: true
|
||||
|
||||
rimraf@5.0.10:
|
||||
dependencies:
|
||||
glob: 10.5.0
|
||||
|
||||
rolldown-plugin-dts@0.22.5(@typescript/native-preview@7.0.0-dev.20260317.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3):
|
||||
dependencies:
|
||||
'@babel/generator': 8.0.0-rc.2
|
||||
@ -13394,12 +13350,6 @@ snapshots:
|
||||
is-fullwidth-code-point: 3.0.0
|
||||
strip-ansi: 6.0.1
|
||||
|
||||
string-width@5.1.2:
|
||||
dependencies:
|
||||
eastasianwidth: 0.2.0
|
||||
emoji-regex: 9.2.2
|
||||
strip-ansi: 7.2.0
|
||||
|
||||
string-width@7.2.0:
|
||||
dependencies:
|
||||
emoji-regex: 10.6.0
|
||||
@ -13687,6 +13637,8 @@ snapshots:
|
||||
|
||||
uuid@8.3.2: {}
|
||||
|
||||
uuid@9.0.1: {}
|
||||
|
||||
validate-npm-package-name@7.0.2: {}
|
||||
|
||||
vary@1.1.2: {}
|
||||
@ -13809,12 +13761,6 @@ snapshots:
|
||||
string-width: 4.2.3
|
||||
strip-ansi: 6.0.1
|
||||
|
||||
wrap-ansi@8.1.0:
|
||||
dependencies:
|
||||
ansi-styles: 6.2.3
|
||||
string-width: 5.1.2
|
||||
strip-ansi: 7.2.0
|
||||
|
||||
wrappy@1.0.2: {}
|
||||
|
||||
ws@8.19.0: {}
|
||||
|
||||
@ -20,9 +20,13 @@
|
||||
"channel-runtime",
|
||||
"interactive-runtime",
|
||||
"infra-runtime",
|
||||
"ssrf-runtime",
|
||||
"media-runtime",
|
||||
"media-understanding-runtime",
|
||||
"conversation-runtime",
|
||||
"matrix-runtime-heavy",
|
||||
"matrix-runtime-shared",
|
||||
"thread-bindings-runtime",
|
||||
"text-runtime",
|
||||
"agent-runtime",
|
||||
"speech-runtime",
|
||||
|
||||
@ -365,11 +365,13 @@ const defaultSingletonBatchLaneCount =
|
||||
? 0
|
||||
: isCI
|
||||
? Math.ceil(unitSingletonBatchFiles.length / 6)
|
||||
: highMemLocalHost
|
||||
? Math.ceil(unitSingletonBatchFiles.length / 8)
|
||||
: lowMemLocalHost
|
||||
? Math.ceil(unitSingletonBatchFiles.length / 12)
|
||||
: Math.ceil(unitSingletonBatchFiles.length / 10);
|
||||
: testProfile === "low" && highMemLocalHost
|
||||
? Math.ceil(unitSingletonBatchFiles.length / 8) + 1
|
||||
: highMemLocalHost
|
||||
? Math.ceil(unitSingletonBatchFiles.length / 8)
|
||||
: lowMemLocalHost
|
||||
? Math.ceil(unitSingletonBatchFiles.length / 12)
|
||||
: Math.ceil(unitSingletonBatchFiles.length / 10);
|
||||
const singletonBatchLaneCount =
|
||||
unitSingletonBatchFiles.length === 0
|
||||
? 0
|
||||
|
||||
124
src/agents/anthropic-vertex-provider.ts
Normal file
124
src/agents/anthropic-vertex-provider.ts
Normal file
@ -0,0 +1,124 @@
|
||||
import { existsSync, readFileSync } from "node:fs";
|
||||
import { homedir, platform } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js";
|
||||
|
||||
const ANTHROPIC_VERTEX_DEFAULT_REGION = "global";
|
||||
const ANTHROPIC_VERTEX_REGION_RE = /^[a-z0-9-]+$/;
|
||||
const GCLOUD_DEFAULT_ADC_PATH = join(
|
||||
homedir(),
|
||||
".config",
|
||||
"gcloud",
|
||||
"application_default_credentials.json",
|
||||
);
|
||||
|
||||
type AdcProjectFile = {
|
||||
project_id?: unknown;
|
||||
quota_project_id?: unknown;
|
||||
};
|
||||
|
||||
export function resolveAnthropicVertexProjectId(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): string | undefined {
|
||||
return (
|
||||
normalizeOptionalSecretInput(env.ANTHROPIC_VERTEX_PROJECT_ID) ||
|
||||
normalizeOptionalSecretInput(env.GOOGLE_CLOUD_PROJECT) ||
|
||||
normalizeOptionalSecretInput(env.GOOGLE_CLOUD_PROJECT_ID) ||
|
||||
resolveAnthropicVertexProjectIdFromAdc(env)
|
||||
);
|
||||
}
|
||||
|
||||
export function resolveAnthropicVertexRegion(env: NodeJS.ProcessEnv = process.env): string {
|
||||
const region =
|
||||
normalizeOptionalSecretInput(env.GOOGLE_CLOUD_LOCATION) ||
|
||||
normalizeOptionalSecretInput(env.CLOUD_ML_REGION);
|
||||
|
||||
return region && ANTHROPIC_VERTEX_REGION_RE.test(region)
|
||||
? region
|
||||
: ANTHROPIC_VERTEX_DEFAULT_REGION;
|
||||
}
|
||||
|
||||
export function resolveAnthropicVertexRegionFromBaseUrl(baseUrl?: string): string | undefined {
|
||||
const trimmed = baseUrl?.trim();
|
||||
if (!trimmed) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
try {
|
||||
const host = new URL(trimmed).hostname.toLowerCase();
|
||||
if (host === "aiplatform.googleapis.com") {
|
||||
return "global";
|
||||
}
|
||||
const match = /^([a-z0-9-]+)-aiplatform\.googleapis\.com$/.exec(host);
|
||||
return match?.[1];
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveAnthropicVertexClientRegion(params?: {
|
||||
baseUrl?: string;
|
||||
env?: NodeJS.ProcessEnv;
|
||||
}): string {
|
||||
return (
|
||||
resolveAnthropicVertexRegionFromBaseUrl(params?.baseUrl) ||
|
||||
resolveAnthropicVertexRegion(params?.env)
|
||||
);
|
||||
}
|
||||
|
||||
function hasAnthropicVertexMetadataServerAdc(env: NodeJS.ProcessEnv = process.env): boolean {
|
||||
const explicitMetadataOptIn = normalizeOptionalSecretInput(env.ANTHROPIC_VERTEX_USE_GCP_METADATA);
|
||||
return explicitMetadataOptIn === "1" || explicitMetadataOptIn?.toLowerCase() === "true";
|
||||
}
|
||||
|
||||
function resolveAnthropicVertexDefaultAdcPath(env: NodeJS.ProcessEnv = process.env): string {
|
||||
return platform() === "win32"
|
||||
? join(
|
||||
env.APPDATA ?? join(homedir(), "AppData", "Roaming"),
|
||||
"gcloud",
|
||||
"application_default_credentials.json",
|
||||
)
|
||||
: GCLOUD_DEFAULT_ADC_PATH;
|
||||
}
|
||||
|
||||
function resolveAnthropicVertexAdcCredentialsPath(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): string | undefined {
|
||||
const explicitCredentialsPath = normalizeOptionalSecretInput(env.GOOGLE_APPLICATION_CREDENTIALS);
|
||||
if (explicitCredentialsPath) {
|
||||
return existsSync(explicitCredentialsPath) ? explicitCredentialsPath : undefined;
|
||||
}
|
||||
|
||||
const defaultAdcPath = resolveAnthropicVertexDefaultAdcPath(env);
|
||||
return existsSync(defaultAdcPath) ? defaultAdcPath : undefined;
|
||||
}
|
||||
|
||||
function resolveAnthropicVertexProjectIdFromAdc(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): string | undefined {
|
||||
const credentialsPath = resolveAnthropicVertexAdcCredentialsPath(env);
|
||||
if (!credentialsPath) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(readFileSync(credentialsPath, "utf8")) as AdcProjectFile;
|
||||
return (
|
||||
normalizeOptionalSecretInput(parsed.project_id) ||
|
||||
normalizeOptionalSecretInput(parsed.quota_project_id)
|
||||
);
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export function hasAnthropicVertexCredentials(env: NodeJS.ProcessEnv = process.env): boolean {
|
||||
return (
|
||||
hasAnthropicVertexMetadataServerAdc(env) ||
|
||||
resolveAnthropicVertexAdcCredentialsPath(env) !== undefined
|
||||
);
|
||||
}
|
||||
|
||||
export function hasAnthropicVertexAvailableAuth(env: NodeJS.ProcessEnv = process.env): boolean {
|
||||
return hasAnthropicVertexCredentials(env);
|
||||
}
|
||||
221
src/agents/anthropic-vertex-stream.test.ts
Normal file
221
src/agents/anthropic-vertex-stream.test.ts
Normal file
@ -0,0 +1,221 @@
|
||||
import type { Model } from "@mariozechner/pi-ai";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const hoisted = vi.hoisted(() => {
|
||||
const streamAnthropicMock = vi.fn<(model: unknown, context: unknown, options: unknown) => symbol>(
|
||||
() => Symbol("anthropic-vertex-stream"),
|
||||
);
|
||||
const anthropicVertexCtorMock = vi.fn();
|
||||
|
||||
return {
|
||||
streamAnthropicMock,
|
||||
anthropicVertexCtorMock,
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", () => {
|
||||
return {
|
||||
streamAnthropic: (model: unknown, context: unknown, options: unknown) =>
|
||||
hoisted.streamAnthropicMock(model, context, options),
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("@anthropic-ai/vertex-sdk", () => ({
|
||||
AnthropicVertex: vi.fn(function MockAnthropicVertex(options: unknown) {
|
||||
hoisted.anthropicVertexCtorMock(options);
|
||||
return { options };
|
||||
}),
|
||||
}));
|
||||
|
||||
import {
|
||||
resolveAnthropicVertexRegion,
|
||||
resolveAnthropicVertexRegionFromBaseUrl,
|
||||
} from "./anthropic-vertex-provider.js";
|
||||
import {
|
||||
createAnthropicVertexStreamFn,
|
||||
createAnthropicVertexStreamFnForModel,
|
||||
} from "./anthropic-vertex-stream.js";
|
||||
|
||||
function makeModel(params: { id: string; maxTokens?: number }): Model<"anthropic-messages"> {
|
||||
return {
|
||||
id: params.id,
|
||||
api: "anthropic-messages",
|
||||
provider: "anthropic-vertex",
|
||||
...(params.maxTokens !== undefined ? { maxTokens: params.maxTokens } : {}),
|
||||
} as Model<"anthropic-messages">;
|
||||
}
|
||||
|
||||
describe("createAnthropicVertexStreamFn", () => {
|
||||
beforeEach(() => {
|
||||
hoisted.streamAnthropicMock.mockClear();
|
||||
hoisted.anthropicVertexCtorMock.mockClear();
|
||||
});
|
||||
|
||||
it("omits projectId when ADC credentials are used without an explicit project", () => {
|
||||
const streamFn = createAnthropicVertexStreamFn(undefined, "global");
|
||||
|
||||
void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 128000 }), { messages: [] }, {});
|
||||
|
||||
expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
|
||||
region: "global",
|
||||
});
|
||||
});
|
||||
|
||||
it("passes an explicit baseURL through to the Vertex client", () => {
|
||||
const streamFn = createAnthropicVertexStreamFn(
|
||||
"vertex-project",
|
||||
"us-east5",
|
||||
"https://proxy.example.test/vertex/v1",
|
||||
);
|
||||
|
||||
void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 128000 }), { messages: [] }, {});
|
||||
|
||||
expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
|
||||
projectId: "vertex-project",
|
||||
region: "us-east5",
|
||||
baseURL: "https://proxy.example.test/vertex/v1",
|
||||
});
|
||||
});
|
||||
|
||||
it("defaults maxTokens to the model limit instead of the old 32000 cap", () => {
|
||||
const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
|
||||
const model = makeModel({ id: "claude-opus-4-6", maxTokens: 128000 });
|
||||
|
||||
void streamFn(model, { messages: [] }, {});
|
||||
|
||||
expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
|
||||
model,
|
||||
{ messages: [] },
|
||||
expect.objectContaining({
|
||||
maxTokens: 128000,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("clamps explicit maxTokens to the selected model limit", () => {
|
||||
const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
|
||||
const model = makeModel({ id: "claude-sonnet-4-6", maxTokens: 128000 });
|
||||
|
||||
void streamFn(model, { messages: [] }, { maxTokens: 999999 });
|
||||
|
||||
expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
|
||||
model,
|
||||
{ messages: [] },
|
||||
expect.objectContaining({
|
||||
maxTokens: 128000,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("maps xhigh reasoning to max effort for adaptive Opus models", () => {
|
||||
const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
|
||||
const model = makeModel({ id: "claude-opus-4-6", maxTokens: 64000 });
|
||||
|
||||
void streamFn(model, { messages: [] }, { reasoning: "xhigh" });
|
||||
|
||||
expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
|
||||
model,
|
||||
{ messages: [] },
|
||||
expect.objectContaining({
|
||||
thinkingEnabled: true,
|
||||
effort: "max",
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("omits maxTokens when neither the model nor request provide a finite limit", () => {
|
||||
const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
|
||||
const model = makeModel({ id: "claude-sonnet-4-6" });
|
||||
|
||||
void streamFn(model, { messages: [] }, { maxTokens: Number.NaN });
|
||||
|
||||
expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
|
||||
model,
|
||||
{ messages: [] },
|
||||
expect.not.objectContaining({
|
||||
maxTokens: expect.anything(),
|
||||
}),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveAnthropicVertexRegionFromBaseUrl", () => {
|
||||
it("accepts well-formed regional env values", () => {
|
||||
expect(
|
||||
resolveAnthropicVertexRegion({
|
||||
GOOGLE_CLOUD_LOCATION: "us-east1",
|
||||
} as NodeJS.ProcessEnv),
|
||||
).toBe("us-east1");
|
||||
});
|
||||
|
||||
it("falls back to the default region for malformed env values", () => {
|
||||
expect(
|
||||
resolveAnthropicVertexRegion({
|
||||
GOOGLE_CLOUD_LOCATION: "us-central1.attacker.example",
|
||||
} as NodeJS.ProcessEnv),
|
||||
).toBe("global");
|
||||
});
|
||||
|
||||
it("parses regional Vertex endpoints", () => {
|
||||
expect(
|
||||
resolveAnthropicVertexRegionFromBaseUrl("https://europe-west4-aiplatform.googleapis.com"),
|
||||
).toBe("europe-west4");
|
||||
});
|
||||
|
||||
it("treats the global Vertex endpoint as global", () => {
|
||||
expect(resolveAnthropicVertexRegionFromBaseUrl("https://aiplatform.googleapis.com")).toBe(
|
||||
"global",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("createAnthropicVertexStreamFnForModel", () => {
|
||||
beforeEach(() => {
|
||||
hoisted.anthropicVertexCtorMock.mockClear();
|
||||
});
|
||||
|
||||
it("derives project and region from the model and env", () => {
|
||||
const streamFn = createAnthropicVertexStreamFnForModel(
|
||||
{ baseUrl: "https://europe-west4-aiplatform.googleapis.com" },
|
||||
{ GOOGLE_CLOUD_PROJECT_ID: "vertex-project" } as NodeJS.ProcessEnv,
|
||||
);
|
||||
|
||||
void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 64000 }), { messages: [] }, {});
|
||||
|
||||
expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
|
||||
projectId: "vertex-project",
|
||||
region: "europe-west4",
|
||||
baseURL: "https://europe-west4-aiplatform.googleapis.com/v1",
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves explicit custom provider base URLs", () => {
|
||||
const streamFn = createAnthropicVertexStreamFnForModel(
|
||||
{ baseUrl: "https://proxy.example.test/custom-root/v1" },
|
||||
{ GOOGLE_CLOUD_PROJECT_ID: "vertex-project" } as NodeJS.ProcessEnv,
|
||||
);
|
||||
|
||||
void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 64000 }), { messages: [] }, {});
|
||||
|
||||
expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
|
||||
projectId: "vertex-project",
|
||||
region: "global",
|
||||
baseURL: "https://proxy.example.test/custom-root/v1",
|
||||
});
|
||||
});
|
||||
|
||||
it("adds /v1 for path-prefixed custom provider base URLs", () => {
|
||||
const streamFn = createAnthropicVertexStreamFnForModel(
|
||||
{ baseUrl: "https://proxy.example.test/custom-root" },
|
||||
{ GOOGLE_CLOUD_PROJECT_ID: "vertex-project" } as NodeJS.ProcessEnv,
|
||||
);
|
||||
|
||||
void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 64000 }), { messages: [] }, {});
|
||||
|
||||
expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
|
||||
projectId: "vertex-project",
|
||||
region: "global",
|
||||
baseURL: "https://proxy.example.test/custom-root/v1",
|
||||
});
|
||||
});
|
||||
});
|
||||
137
src/agents/anthropic-vertex-stream.ts
Normal file
137
src/agents/anthropic-vertex-stream.ts
Normal file
@ -0,0 +1,137 @@
|
||||
import { AnthropicVertex } from "@anthropic-ai/vertex-sdk";
|
||||
import type { StreamFn } from "@mariozechner/pi-agent-core";
|
||||
import { streamAnthropic, type AnthropicOptions, type Model } from "@mariozechner/pi-ai";
|
||||
import {
|
||||
resolveAnthropicVertexClientRegion,
|
||||
resolveAnthropicVertexProjectId,
|
||||
} from "./anthropic-vertex-provider.js";
|
||||
|
||||
type AnthropicVertexEffort = NonNullable<AnthropicOptions["effort"]>;
|
||||
|
||||
function resolveAnthropicVertexMaxTokens(params: {
|
||||
modelMaxTokens: number | undefined;
|
||||
requestedMaxTokens: number | undefined;
|
||||
}): number | undefined {
|
||||
const modelMax =
|
||||
typeof params.modelMaxTokens === "number" &&
|
||||
Number.isFinite(params.modelMaxTokens) &&
|
||||
params.modelMaxTokens > 0
|
||||
? Math.floor(params.modelMaxTokens)
|
||||
: undefined;
|
||||
const requested =
|
||||
typeof params.requestedMaxTokens === "number" &&
|
||||
Number.isFinite(params.requestedMaxTokens) &&
|
||||
params.requestedMaxTokens > 0
|
||||
? Math.floor(params.requestedMaxTokens)
|
||||
: undefined;
|
||||
|
||||
if (modelMax !== undefined && requested !== undefined) {
|
||||
return Math.min(requested, modelMax);
|
||||
}
|
||||
return requested ?? modelMax;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a StreamFn that routes through pi-ai's `streamAnthropic` with an
|
||||
* injected `AnthropicVertex` client. All streaming, message conversion, and
|
||||
* event handling is handled by pi-ai — we only supply the GCP-authenticated
|
||||
* client and map SimpleStreamOptions → AnthropicOptions.
|
||||
*/
|
||||
export function createAnthropicVertexStreamFn(
|
||||
projectId: string | undefined,
|
||||
region: string,
|
||||
baseURL?: string,
|
||||
): StreamFn {
|
||||
const client = new AnthropicVertex({
|
||||
region,
|
||||
...(baseURL ? { baseURL } : {}),
|
||||
...(projectId ? { projectId } : {}),
|
||||
});
|
||||
|
||||
return (model, context, options) => {
|
||||
const maxTokens = resolveAnthropicVertexMaxTokens({
|
||||
modelMaxTokens: model.maxTokens,
|
||||
requestedMaxTokens: options?.maxTokens,
|
||||
});
|
||||
const opts: AnthropicOptions = {
|
||||
client: client as unknown as AnthropicOptions["client"],
|
||||
temperature: options?.temperature,
|
||||
...(maxTokens !== undefined ? { maxTokens } : {}),
|
||||
signal: options?.signal,
|
||||
cacheRetention: options?.cacheRetention,
|
||||
sessionId: options?.sessionId,
|
||||
headers: options?.headers,
|
||||
onPayload: options?.onPayload,
|
||||
maxRetryDelayMs: options?.maxRetryDelayMs,
|
||||
metadata: options?.metadata,
|
||||
};
|
||||
|
||||
if (options?.reasoning) {
|
||||
const isAdaptive =
|
||||
model.id.includes("opus-4-6") ||
|
||||
model.id.includes("opus-4.6") ||
|
||||
model.id.includes("sonnet-4-6") ||
|
||||
model.id.includes("sonnet-4.6");
|
||||
|
||||
if (isAdaptive) {
|
||||
opts.thinkingEnabled = true;
|
||||
const effortMap: Record<string, AnthropicVertexEffort> = {
|
||||
minimal: "low",
|
||||
low: "low",
|
||||
medium: "medium",
|
||||
high: "high",
|
||||
xhigh: model.id.includes("opus-4-6") || model.id.includes("opus-4.6") ? "max" : "high",
|
||||
};
|
||||
opts.effort = effortMap[options.reasoning] ?? "high";
|
||||
} else {
|
||||
opts.thinkingEnabled = true;
|
||||
const budgets = options.thinkingBudgets;
|
||||
opts.thinkingBudgetTokens =
|
||||
(budgets && options.reasoning in budgets
|
||||
? budgets[options.reasoning as keyof typeof budgets]
|
||||
: undefined) ?? 10000;
|
||||
}
|
||||
} else {
|
||||
opts.thinkingEnabled = false;
|
||||
}
|
||||
|
||||
return streamAnthropic(model as Model<"anthropic-messages">, context, opts);
|
||||
};
|
||||
}
|
||||
|
||||
function resolveAnthropicVertexSdkBaseUrl(baseUrl?: string): string | undefined {
|
||||
const trimmed = baseUrl?.trim();
|
||||
if (!trimmed) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
try {
|
||||
const url = new URL(trimmed);
|
||||
const normalizedPath = url.pathname.replace(/\/+$/, "");
|
||||
if (!normalizedPath || normalizedPath === "") {
|
||||
url.pathname = "/v1";
|
||||
return url.toString().replace(/\/$/, "");
|
||||
}
|
||||
if (!normalizedPath.endsWith("/v1")) {
|
||||
url.pathname = `${normalizedPath}/v1`;
|
||||
return url.toString().replace(/\/$/, "");
|
||||
}
|
||||
return trimmed;
|
||||
} catch {
|
||||
return trimmed;
|
||||
}
|
||||
}
|
||||
|
||||
export function createAnthropicVertexStreamFnForModel(
|
||||
model: { baseUrl?: string },
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): StreamFn {
|
||||
return createAnthropicVertexStreamFn(
|
||||
resolveAnthropicVertexProjectId(env),
|
||||
resolveAnthropicVertexClientRegion({
|
||||
baseUrl: model.baseUrl,
|
||||
env,
|
||||
}),
|
||||
resolveAnthropicVertexSdkBaseUrl(model.baseUrl),
|
||||
);
|
||||
}
|
||||
@ -1,6 +1,7 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { listKnownProviderEnvApiKeyNames } from "./model-auth-env-vars.js";
|
||||
import {
|
||||
GCP_VERTEX_CREDENTIALS_MARKER,
|
||||
isKnownEnvApiKeyMarker,
|
||||
isNonSecretApiKeyMarker,
|
||||
NON_ENV_SECRETREF_MARKER,
|
||||
@ -13,6 +14,7 @@ describe("model auth markers", () => {
|
||||
expect(isNonSecretApiKeyMarker("qwen-oauth")).toBe(true);
|
||||
expect(isNonSecretApiKeyMarker(resolveOAuthApiKeyMarker("chutes"))).toBe(true);
|
||||
expect(isNonSecretApiKeyMarker("ollama-local")).toBe(true);
|
||||
expect(isNonSecretApiKeyMarker(GCP_VERTEX_CREDENTIALS_MARKER)).toBe(true);
|
||||
});
|
||||
|
||||
it("recognizes known env marker names but not arbitrary all-caps keys", () => {
|
||||
|
||||
@ -6,6 +6,7 @@ export const OAUTH_API_KEY_MARKER_PREFIX = "oauth:";
|
||||
export const QWEN_OAUTH_MARKER = "qwen-oauth";
|
||||
export const OLLAMA_LOCAL_AUTH_MARKER = "ollama-local";
|
||||
export const CUSTOM_LOCAL_AUTH_MARKER = "custom-local";
|
||||
export const GCP_VERTEX_CREDENTIALS_MARKER = "gcp-vertex-credentials";
|
||||
export const NON_ENV_SECRETREF_MARKER = "secretref-managed"; // pragma: allowlist secret
|
||||
export const SECRETREF_ENV_HEADER_MARKER_PREFIX = "secretref-env:"; // pragma: allowlist secret
|
||||
|
||||
@ -83,6 +84,7 @@ export function isNonSecretApiKeyMarker(
|
||||
isOAuthApiKeyMarker(trimmed) ||
|
||||
trimmed === OLLAMA_LOCAL_AUTH_MARKER ||
|
||||
trimmed === CUSTOM_LOCAL_AUTH_MARKER ||
|
||||
trimmed === GCP_VERTEX_CREDENTIALS_MARKER ||
|
||||
trimmed === NON_ENV_SECRETREF_MARKER ||
|
||||
isAwsSdkAuthMarker(trimmed);
|
||||
if (isKnownMarker) {
|
||||
|
||||
@ -506,4 +506,55 @@ describe("getApiKeyForModel", () => {
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it("resolveEnvApiKey('anthropic-vertex') uses the provided env snapshot", async () => {
|
||||
const resolved = resolveEnvApiKey("anthropic-vertex", {
|
||||
GOOGLE_CLOUD_PROJECT_ID: "vertex-project",
|
||||
} as NodeJS.ProcessEnv);
|
||||
|
||||
expect(resolved).toBeNull();
|
||||
});
|
||||
|
||||
it("resolveEnvApiKey('anthropic-vertex') accepts GOOGLE_APPLICATION_CREDENTIALS with project_id", async () => {
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = path.join(tempDir, "adc.json");
|
||||
await fs.writeFile(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
|
||||
|
||||
try {
|
||||
const resolved = resolveEnvApiKey("anthropic-vertex", {
|
||||
GOOGLE_APPLICATION_CREDENTIALS: credentialsPath,
|
||||
} as NodeJS.ProcessEnv);
|
||||
|
||||
expect(resolved?.apiKey).toBe("gcp-vertex-credentials");
|
||||
expect(resolved?.source).toBe("gcloud adc");
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("resolveEnvApiKey('anthropic-vertex') accepts GOOGLE_APPLICATION_CREDENTIALS without a local project field", async () => {
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = path.join(tempDir, "adc.json");
|
||||
await fs.writeFile(credentialsPath, "{}", "utf8");
|
||||
|
||||
try {
|
||||
const resolved = resolveEnvApiKey("anthropic-vertex", {
|
||||
GOOGLE_APPLICATION_CREDENTIALS: credentialsPath,
|
||||
} as NodeJS.ProcessEnv);
|
||||
|
||||
expect(resolved?.apiKey).toBe("gcp-vertex-credentials");
|
||||
expect(resolved?.source).toBe("gcloud adc");
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("resolveEnvApiKey('anthropic-vertex') accepts explicit metadata auth opt-in", async () => {
|
||||
const resolved = resolveEnvApiKey("anthropic-vertex", {
|
||||
ANTHROPIC_VERTEX_USE_GCP_METADATA: "true",
|
||||
} as NodeJS.ProcessEnv);
|
||||
|
||||
expect(resolved?.apiKey).toBe("gcp-vertex-credentials");
|
||||
expect(resolved?.source).toBe("gcloud adc");
|
||||
});
|
||||
});
|
||||
|
||||
@ -2,7 +2,11 @@ import { streamSimpleOpenAICompletions, type Model } from "@mariozechner/pi-ai";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { withFetchPreconnect } from "../test-utils/fetch-mock.js";
|
||||
import type { AuthProfileStore } from "./auth-profiles.js";
|
||||
import { CUSTOM_LOCAL_AUTH_MARKER, NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js";
|
||||
import {
|
||||
CUSTOM_LOCAL_AUTH_MARKER,
|
||||
GCP_VERTEX_CREDENTIALS_MARKER,
|
||||
NON_ENV_SECRETREF_MARKER,
|
||||
} from "./model-auth-markers.js";
|
||||
import {
|
||||
applyLocalNoAuthHeaderOverride,
|
||||
hasUsableCustomProviderApiKey,
|
||||
@ -169,6 +173,24 @@ describe("resolveUsableCustomProviderApiKey", () => {
|
||||
expect(resolved).toBeNull();
|
||||
});
|
||||
|
||||
it("does not treat the Vertex ADC marker as a usable models.json credential", () => {
|
||||
const resolved = resolveUsableCustomProviderApiKey({
|
||||
cfg: {
|
||||
models: {
|
||||
providers: {
|
||||
"anthropic-vertex": {
|
||||
baseUrl: "https://us-central1-aiplatform.googleapis.com",
|
||||
apiKey: GCP_VERTEX_CREDENTIALS_MARKER,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "anthropic-vertex",
|
||||
});
|
||||
expect(resolved).toBeNull();
|
||||
});
|
||||
|
||||
it("resolves known env marker names from process env for custom providers", () => {
|
||||
const previous = process.env.OPENAI_API_KEY;
|
||||
process.env.OPENAI_API_KEY = "sk-from-env"; // pragma: allowlist secret
|
||||
|
||||
@ -10,6 +10,7 @@ import {
|
||||
normalizeOptionalSecretInput,
|
||||
normalizeSecretInput,
|
||||
} from "../utils/normalize-secret-input.js";
|
||||
import { hasAnthropicVertexAvailableAuth } from "./anthropic-vertex-provider.js";
|
||||
import {
|
||||
type AuthProfileStore,
|
||||
ensureAuthProfileStore,
|
||||
@ -21,6 +22,7 @@ import {
|
||||
import { PROVIDER_ENV_API_KEY_CANDIDATES } from "./model-auth-env-vars.js";
|
||||
import {
|
||||
CUSTOM_LOCAL_AUTH_MARKER,
|
||||
GCP_VERTEX_CREDENTIALS_MARKER,
|
||||
isKnownEnvApiKeyMarker,
|
||||
isNonSecretApiKeyMarker,
|
||||
OLLAMA_LOCAL_AUTH_MARKER,
|
||||
@ -428,6 +430,16 @@ export function resolveEnvApiKey(
|
||||
}
|
||||
return { apiKey: envKey, source: "gcloud adc" };
|
||||
}
|
||||
|
||||
if (normalized === "anthropic-vertex") {
|
||||
// Vertex AI uses GCP credentials (SA JSON or ADC), not API keys.
|
||||
// Return a sentinel so the model resolver considers this provider available.
|
||||
if (hasAnthropicVertexAvailableAuth(env)) {
|
||||
return { apiKey: GCP_VERTEX_CREDENTIALS_MARKER, source: "gcloud adc" };
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
@ -112,9 +112,15 @@ export const MODELS_CONFIG_IMPLICIT_ENV_VARS = [
|
||||
"KIMI_API_KEY",
|
||||
"KIMICODE_API_KEY",
|
||||
"GEMINI_API_KEY",
|
||||
"GOOGLE_APPLICATION_CREDENTIALS",
|
||||
"GOOGLE_CLOUD_LOCATION",
|
||||
"GOOGLE_CLOUD_PROJECT",
|
||||
"GOOGLE_CLOUD_PROJECT_ID",
|
||||
"VENICE_API_KEY",
|
||||
"VLLM_API_KEY",
|
||||
"XIAOMI_API_KEY",
|
||||
"ANTHROPIC_VERTEX_PROJECT_ID",
|
||||
"CLOUD_ML_REGION",
|
||||
// Avoid ambient AWS creds unintentionally enabling Bedrock discovery.
|
||||
"AWS_ACCESS_KEY_ID",
|
||||
"AWS_CONFIG_FILE",
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
@ -333,6 +334,53 @@ describe("models-config", () => {
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it("fills anthropic-vertex apiKey with the ADC sentinel when models exist", async () => {
|
||||
await withTempHome(async () => {
|
||||
const adcDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = path.join(adcDir, "application_default_credentials.json");
|
||||
await fs.writeFile(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
|
||||
const previousCredentials = process.env.GOOGLE_APPLICATION_CREDENTIALS;
|
||||
|
||||
try {
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
|
||||
|
||||
await ensureOpenClawModelsJson({
|
||||
models: {
|
||||
providers: {
|
||||
"anthropic-vertex": {
|
||||
baseUrl: "https://us-central1-aiplatform.googleapis.com",
|
||||
api: "anthropic-messages",
|
||||
models: [
|
||||
{
|
||||
id: "claude-sonnet-4-6",
|
||||
name: "Claude Sonnet 4.6",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
|
||||
contextWindow: 200000,
|
||||
maxTokens: 64000,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers["anthropic-vertex"]?.apiKey).toBe("gcp-vertex-credentials");
|
||||
} finally {
|
||||
if (previousCredentials === undefined) {
|
||||
delete process.env.GOOGLE_APPLICATION_CREDENTIALS;
|
||||
} else {
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = previousCredentials;
|
||||
}
|
||||
await fs.rm(adcDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
it("merges providers by default", async () => {
|
||||
await withTempHome(async () => {
|
||||
await writeAgentModelsJson({
|
||||
|
||||
190
src/agents/models-config.providers.anthropic-vertex.test.ts
Normal file
190
src/agents/models-config.providers.anthropic-vertex.test.ts
Normal file
@ -0,0 +1,190 @@
|
||||
import { mkdtempSync, rmSync, writeFileSync } from "node:fs";
|
||||
import { tmpdir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { captureEnv } from "../test-utils/env.js";
|
||||
import { resolveImplicitProvidersForTest } from "./models-config.e2e-harness.js";
|
||||
|
||||
describe("anthropic-vertex implicit provider", () => {
|
||||
it("offers Claude models when GOOGLE_CLOUD_PROJECT_ID is set", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["GOOGLE_CLOUD_PROJECT_ID"]);
|
||||
process.env.GOOGLE_CLOUD_PROJECT_ID = "vertex-project";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({ agentDir });
|
||||
expect(providers?.["anthropic-vertex"]).toBeUndefined();
|
||||
} finally {
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("accepts ADC credentials when the file includes a project_id", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
|
||||
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = join(adcDir, "application_default_credentials.json");
|
||||
writeFileSync(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
|
||||
process.env.GOOGLE_CLOUD_LOCATION = "us-east1";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({ agentDir });
|
||||
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
|
||||
"https://us-east1-aiplatform.googleapis.com",
|
||||
);
|
||||
expect(providers?.["anthropic-vertex"]?.models).toMatchObject([
|
||||
{ id: "claude-opus-4-6", maxTokens: 128000, contextWindow: 1_000_000 },
|
||||
{ id: "claude-sonnet-4-6", maxTokens: 128000, contextWindow: 1_000_000 },
|
||||
]);
|
||||
} finally {
|
||||
rmSync(adcDir, { recursive: true, force: true });
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("accepts ADC credentials when the file only includes a quota_project_id", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
|
||||
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = join(adcDir, "application_default_credentials.json");
|
||||
writeFileSync(credentialsPath, JSON.stringify({ quota_project_id: "vertex-quota" }), "utf8");
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
|
||||
process.env.GOOGLE_CLOUD_LOCATION = "us-east5";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({ agentDir });
|
||||
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
|
||||
"https://us-east5-aiplatform.googleapis.com",
|
||||
);
|
||||
} finally {
|
||||
rmSync(adcDir, { recursive: true, force: true });
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("accepts ADC credentials when project_id is resolved at runtime", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
|
||||
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = join(adcDir, "application_default_credentials.json");
|
||||
writeFileSync(credentialsPath, "{}", "utf8");
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
|
||||
process.env.GOOGLE_CLOUD_LOCATION = "europe-west4";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({ agentDir });
|
||||
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
|
||||
"https://europe-west4-aiplatform.googleapis.com",
|
||||
);
|
||||
} finally {
|
||||
rmSync(adcDir, { recursive: true, force: true });
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("falls back to the default region when GOOGLE_CLOUD_LOCATION is invalid", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
|
||||
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = join(adcDir, "application_default_credentials.json");
|
||||
writeFileSync(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
|
||||
process.env.GOOGLE_CLOUD_LOCATION = "us-central1.attacker.example";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({ agentDir });
|
||||
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe("https://aiplatform.googleapis.com");
|
||||
} finally {
|
||||
rmSync(adcDir, { recursive: true, force: true });
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("uses the Vertex global endpoint when GOOGLE_CLOUD_LOCATION=global", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
|
||||
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = join(adcDir, "application_default_credentials.json");
|
||||
writeFileSync(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
|
||||
process.env.GOOGLE_CLOUD_LOCATION = "global";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({ agentDir });
|
||||
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe("https://aiplatform.googleapis.com");
|
||||
} finally {
|
||||
rmSync(adcDir, { recursive: true, force: true });
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("accepts explicit metadata auth opt-in without local credential files", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["ANTHROPIC_VERTEX_USE_GCP_METADATA", "GOOGLE_CLOUD_LOCATION"]);
|
||||
process.env.ANTHROPIC_VERTEX_USE_GCP_METADATA = "true";
|
||||
process.env.GOOGLE_CLOUD_LOCATION = "us-east5";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({ agentDir });
|
||||
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
|
||||
"https://us-east5-aiplatform.googleapis.com",
|
||||
);
|
||||
} finally {
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("merges the bundled catalog into explicit anthropic-vertex provider overrides", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
|
||||
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
|
||||
const credentialsPath = join(adcDir, "application_default_credentials.json");
|
||||
writeFileSync(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
|
||||
process.env.GOOGLE_CLOUD_LOCATION = "us-east5";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({
|
||||
agentDir,
|
||||
config: {
|
||||
models: {
|
||||
providers: {
|
||||
"anthropic-vertex": {
|
||||
baseUrl: "https://europe-west4-aiplatform.googleapis.com",
|
||||
headers: { "x-test-header": "1" },
|
||||
},
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig,
|
||||
});
|
||||
|
||||
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
|
||||
"https://europe-west4-aiplatform.googleapis.com",
|
||||
);
|
||||
expect(providers?.["anthropic-vertex"]?.headers).toEqual({ "x-test-header": "1" });
|
||||
expect(providers?.["anthropic-vertex"]?.models?.map((model) => model.id)).toEqual([
|
||||
"claude-opus-4-6",
|
||||
"claude-sonnet-4-6",
|
||||
]);
|
||||
} finally {
|
||||
rmSync(adcDir, { recursive: true, force: true });
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("does not accept generic Kubernetes env without a GCP ADC signal", async () => {
|
||||
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||
const envSnapshot = captureEnv(["KUBERNETES_SERVICE_HOST", "GOOGLE_CLOUD_LOCATION"]);
|
||||
process.env.KUBERNETES_SERVICE_HOST = "10.0.0.1";
|
||||
process.env.GOOGLE_CLOUD_LOCATION = "us-east5";
|
||||
|
||||
try {
|
||||
const providers = await resolveImplicitProvidersForTest({ agentDir });
|
||||
expect(providers?.["anthropic-vertex"]).toBeUndefined();
|
||||
} finally {
|
||||
envSnapshot.restore();
|
||||
}
|
||||
});
|
||||
});
|
||||
@ -1,3 +1,7 @@
|
||||
export {
|
||||
ANTHROPIC_VERTEX_DEFAULT_MODEL_ID,
|
||||
buildAnthropicVertexProvider,
|
||||
} from "../../extensions/anthropic-vertex/provider-catalog.js";
|
||||
export {
|
||||
buildBytePlusCodingProvider,
|
||||
buildBytePlusProvider,
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
import { buildAnthropicVertexProvider } from "../../extensions/anthropic-vertex/provider-catalog.js";
|
||||
import {
|
||||
QIANFAN_BASE_URL,
|
||||
QIANFAN_DEFAULT_MODEL_ID,
|
||||
@ -7,6 +8,7 @@ import type { OpenClawConfig } from "../config/config.js";
|
||||
import { coerceSecretRef, resolveSecretInputRef } from "../config/types.secrets.js";
|
||||
import { isRecord } from "../utils.js";
|
||||
import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js";
|
||||
import { hasAnthropicVertexAvailableAuth } from "./anthropic-vertex-provider.js";
|
||||
import { ensureAuthProfileStore, listProfilesForProvider } from "./auth-profiles.js";
|
||||
import { discoverBedrockModels } from "./bedrock-discovery.js";
|
||||
import { normalizeGoogleModelId, normalizeXaiModelId } from "./model-id-normalization.js";
|
||||
@ -552,7 +554,10 @@ export function normalizeProviders(params: {
|
||||
mutated = true;
|
||||
normalizedProvider = { ...normalizedProvider, apiKey };
|
||||
} else {
|
||||
const fromEnv = resolveEnvApiKeyVarName(normalizedKey, env);
|
||||
const fromEnv =
|
||||
normalizedKey === "anthropic-vertex"
|
||||
? resolveEnvApiKey(normalizedKey, env)?.apiKey
|
||||
: resolveEnvApiKeyVarName(normalizedKey, env);
|
||||
const apiKey = fromEnv ?? profileApiKey?.apiKey;
|
||||
if (apiKey?.trim()) {
|
||||
if (profileApiKey && profileApiKey.source !== "plaintext") {
|
||||
@ -812,9 +817,34 @@ export async function resolveImplicitProviders(
|
||||
: implicitBedrock;
|
||||
}
|
||||
|
||||
const implicitAnthropicVertex = resolveImplicitAnthropicVertexProvider({ env });
|
||||
if (implicitAnthropicVertex) {
|
||||
const existing = providers["anthropic-vertex"];
|
||||
providers["anthropic-vertex"] = existing
|
||||
? {
|
||||
...implicitAnthropicVertex,
|
||||
...existing,
|
||||
models:
|
||||
Array.isArray(existing.models) && existing.models.length > 0
|
||||
? existing.models
|
||||
: implicitAnthropicVertex.models,
|
||||
}
|
||||
: implicitAnthropicVertex;
|
||||
}
|
||||
|
||||
return providers;
|
||||
}
|
||||
|
||||
export function resolveImplicitAnthropicVertexProvider(params: {
|
||||
env?: NodeJS.ProcessEnv;
|
||||
}): ProviderConfig | null {
|
||||
const env = params.env ?? process.env;
|
||||
if (!hasAnthropicVertexAvailableAuth(env)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return buildAnthropicVertexProvider({ env });
|
||||
}
|
||||
export async function resolveImplicitBedrockProvider(params: {
|
||||
agentDir: string;
|
||||
config?: OpenClawConfig;
|
||||
|
||||
@ -623,6 +623,36 @@ describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("runs maintain after successful compaction with a transcript rewrite helper", async () => {
|
||||
const maintain = vi.fn(async (_params?: unknown) => ({
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
}));
|
||||
resolveContextEngineMock.mockResolvedValue({
|
||||
info: { ownsCompaction: true },
|
||||
compact: contextEngineCompactMock,
|
||||
maintain,
|
||||
} as never);
|
||||
|
||||
const result = await compactEmbeddedPiSession(wrappedCompactionArgs());
|
||||
|
||||
expect(result.ok).toBe(true);
|
||||
expect(maintain).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
sessionKey: TEST_SESSION_KEY,
|
||||
sessionFile: TEST_SESSION_FILE,
|
||||
runtimeContext: expect.objectContaining({
|
||||
workspaceDir: TEST_WORKSPACE_DIR,
|
||||
}),
|
||||
}),
|
||||
);
|
||||
const runtimeContext = (
|
||||
maintain.mock.calls[0]?.[0] as { runtimeContext?: Record<string, unknown> } | undefined
|
||||
)?.runtimeContext;
|
||||
expect(typeof runtimeContext?.rewriteTranscriptEntries).toBe("function");
|
||||
});
|
||||
|
||||
it("does not fire after_compaction when compaction fails", async () => {
|
||||
hookRunner.hasHooks.mockReturnValue(true);
|
||||
const sync = vi.fn(async () => {});
|
||||
|
||||
@ -83,6 +83,7 @@ import {
|
||||
compactWithSafetyTimeout,
|
||||
resolveCompactionTimeoutMs,
|
||||
} from "./compaction-safety-timeout.js";
|
||||
import { runContextEngineMaintenance } from "./context-engine-maintenance.js";
|
||||
import { buildEmbeddedExtensionFactories } from "./extensions.js";
|
||||
import {
|
||||
logToolSchemasForGoogle,
|
||||
@ -1226,6 +1227,16 @@ export async function compactEmbeddedPiSession(
|
||||
force: params.trigger === "manual",
|
||||
runtimeContext: params as Record<string, unknown>,
|
||||
});
|
||||
if (result.ok && result.compacted) {
|
||||
await runContextEngineMaintenance({
|
||||
contextEngine,
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
sessionFile: params.sessionFile,
|
||||
reason: "compaction",
|
||||
runtimeContext: params as Record<string, unknown>,
|
||||
});
|
||||
}
|
||||
if (engineOwnsCompaction && result.ok && result.compacted) {
|
||||
await runPostCompactionSideEffects({
|
||||
config: params.config,
|
||||
|
||||
150
src/agents/pi-embedded-runner/context-engine-maintenance.test.ts
Normal file
150
src/agents/pi-embedded-runner/context-engine-maintenance.test.ts
Normal file
@ -0,0 +1,150 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const rewriteTranscriptEntriesInSessionManagerMock = vi.fn((_params?: unknown) => ({
|
||||
changed: true,
|
||||
bytesFreed: 77,
|
||||
rewrittenEntries: 1,
|
||||
}));
|
||||
const rewriteTranscriptEntriesInSessionFileMock = vi.fn(async (_params?: unknown) => ({
|
||||
changed: true,
|
||||
bytesFreed: 123,
|
||||
rewrittenEntries: 2,
|
||||
}));
|
||||
|
||||
vi.mock("./transcript-rewrite.js", () => ({
|
||||
rewriteTranscriptEntriesInSessionManager: (params: unknown) =>
|
||||
rewriteTranscriptEntriesInSessionManagerMock(params),
|
||||
rewriteTranscriptEntriesInSessionFile: (params: unknown) =>
|
||||
rewriteTranscriptEntriesInSessionFileMock(params),
|
||||
}));
|
||||
|
||||
import {
|
||||
buildContextEngineMaintenanceRuntimeContext,
|
||||
runContextEngineMaintenance,
|
||||
} from "./context-engine-maintenance.js";
|
||||
|
||||
describe("buildContextEngineMaintenanceRuntimeContext", () => {
|
||||
beforeEach(() => {
|
||||
rewriteTranscriptEntriesInSessionManagerMock.mockClear();
|
||||
rewriteTranscriptEntriesInSessionFileMock.mockClear();
|
||||
});
|
||||
|
||||
it("adds a transcript rewrite helper that targets the current session file", async () => {
|
||||
const runtimeContext = buildContextEngineMaintenanceRuntimeContext({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
runtimeContext: { workspaceDir: "/tmp/workspace" },
|
||||
});
|
||||
|
||||
expect(runtimeContext.workspaceDir).toBe("/tmp/workspace");
|
||||
expect(typeof runtimeContext.rewriteTranscriptEntries).toBe("function");
|
||||
|
||||
const result = await runtimeContext.rewriteTranscriptEntries?.({
|
||||
replacements: [
|
||||
{ entryId: "entry-1", message: { role: "user", content: "hi", timestamp: 1 } },
|
||||
],
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
changed: true,
|
||||
bytesFreed: 123,
|
||||
rewrittenEntries: 2,
|
||||
});
|
||||
expect(rewriteTranscriptEntriesInSessionFileMock).toHaveBeenCalledWith({
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
request: {
|
||||
replacements: [
|
||||
{ entryId: "entry-1", message: { role: "user", content: "hi", timestamp: 1 } },
|
||||
],
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("reuses the active session manager when one is provided", async () => {
|
||||
const sessionManager = { appendMessage: vi.fn() } as unknown as Parameters<
|
||||
typeof buildContextEngineMaintenanceRuntimeContext
|
||||
>[0]["sessionManager"];
|
||||
const runtimeContext = buildContextEngineMaintenanceRuntimeContext({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
sessionManager,
|
||||
});
|
||||
|
||||
const result = await runtimeContext.rewriteTranscriptEntries?.({
|
||||
replacements: [
|
||||
{ entryId: "entry-1", message: { role: "user", content: "hi", timestamp: 1 } },
|
||||
],
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
changed: true,
|
||||
bytesFreed: 77,
|
||||
rewrittenEntries: 1,
|
||||
});
|
||||
expect(rewriteTranscriptEntriesInSessionManagerMock).toHaveBeenCalledWith({
|
||||
sessionManager,
|
||||
replacements: [
|
||||
{ entryId: "entry-1", message: { role: "user", content: "hi", timestamp: 1 } },
|
||||
],
|
||||
});
|
||||
expect(rewriteTranscriptEntriesInSessionFileMock).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe("runContextEngineMaintenance", () => {
|
||||
beforeEach(() => {
|
||||
rewriteTranscriptEntriesInSessionManagerMock.mockClear();
|
||||
rewriteTranscriptEntriesInSessionFileMock.mockClear();
|
||||
});
|
||||
|
||||
it("passes a rewrite-capable runtime context into maintain()", async () => {
|
||||
const maintain = vi.fn(async (_params?: unknown) => ({
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
}));
|
||||
|
||||
const result = await runContextEngineMaintenance({
|
||||
contextEngine: {
|
||||
info: { id: "test", name: "Test Engine" },
|
||||
ingest: async () => ({ ingested: true }),
|
||||
assemble: async ({ messages }) => ({ messages, estimatedTokens: 0 }),
|
||||
compact: async () => ({ ok: true, compacted: false }),
|
||||
maintain,
|
||||
},
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
reason: "turn",
|
||||
runtimeContext: { workspaceDir: "/tmp/workspace" },
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
});
|
||||
expect(maintain).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
runtimeContext: expect.objectContaining({
|
||||
workspaceDir: "/tmp/workspace",
|
||||
}),
|
||||
}),
|
||||
);
|
||||
const runtimeContext = (
|
||||
maintain.mock.calls[0]?.[0] as
|
||||
| { runtimeContext?: { rewriteTranscriptEntries?: (request: unknown) => Promise<unknown> } }
|
||||
| undefined
|
||||
)?.runtimeContext as
|
||||
| { rewriteTranscriptEntries?: (request: unknown) => Promise<unknown> }
|
||||
| undefined;
|
||||
expect(typeof runtimeContext?.rewriteTranscriptEntries).toBe("function");
|
||||
});
|
||||
});
|
||||
83
src/agents/pi-embedded-runner/context-engine-maintenance.ts
Normal file
83
src/agents/pi-embedded-runner/context-engine-maintenance.ts
Normal file
@ -0,0 +1,83 @@
|
||||
import type {
|
||||
ContextEngine,
|
||||
ContextEngineMaintenanceResult,
|
||||
ContextEngineRuntimeContext,
|
||||
} from "../../context-engine/types.js";
|
||||
import { log } from "./logger.js";
|
||||
import {
|
||||
rewriteTranscriptEntriesInSessionFile,
|
||||
rewriteTranscriptEntriesInSessionManager,
|
||||
} from "./transcript-rewrite.js";
|
||||
|
||||
/**
|
||||
* Attach runtime-owned transcript rewrite helpers to an existing
|
||||
* context-engine runtime context payload.
|
||||
*/
|
||||
export function buildContextEngineMaintenanceRuntimeContext(params: {
|
||||
sessionId: string;
|
||||
sessionKey?: string;
|
||||
sessionFile: string;
|
||||
sessionManager?: Parameters<typeof rewriteTranscriptEntriesInSessionManager>[0]["sessionManager"];
|
||||
runtimeContext?: ContextEngineRuntimeContext;
|
||||
}): ContextEngineRuntimeContext {
|
||||
return {
|
||||
...params.runtimeContext,
|
||||
rewriteTranscriptEntries: async (request) => {
|
||||
if (params.sessionManager) {
|
||||
return rewriteTranscriptEntriesInSessionManager({
|
||||
sessionManager: params.sessionManager,
|
||||
replacements: request.replacements,
|
||||
});
|
||||
}
|
||||
return await rewriteTranscriptEntriesInSessionFile({
|
||||
sessionFile: params.sessionFile,
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
request,
|
||||
});
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Run optional context-engine transcript maintenance and normalize the result.
|
||||
*/
|
||||
export async function runContextEngineMaintenance(params: {
|
||||
contextEngine?: ContextEngine;
|
||||
sessionId: string;
|
||||
sessionKey?: string;
|
||||
sessionFile: string;
|
||||
reason: "bootstrap" | "compaction" | "turn";
|
||||
sessionManager?: Parameters<typeof rewriteTranscriptEntriesInSessionManager>[0]["sessionManager"];
|
||||
runtimeContext?: ContextEngineRuntimeContext;
|
||||
}): Promise<ContextEngineMaintenanceResult | undefined> {
|
||||
if (typeof params.contextEngine?.maintain !== "function") {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await params.contextEngine.maintain({
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
sessionFile: params.sessionFile,
|
||||
runtimeContext: buildContextEngineMaintenanceRuntimeContext({
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
sessionFile: params.sessionFile,
|
||||
sessionManager: params.sessionManager,
|
||||
runtimeContext: params.runtimeContext,
|
||||
}),
|
||||
});
|
||||
if (result.changed) {
|
||||
log.info(
|
||||
`[context-engine] maintenance(${params.reason}) changed transcript ` +
|
||||
`rewrittenEntries=${result.rewrittenEntries} bytesFreed=${result.bytesFreed} ` +
|
||||
`sessionKey=${params.sessionKey ?? params.sessionId ?? "unknown"}`,
|
||||
);
|
||||
}
|
||||
return result;
|
||||
} catch (err) {
|
||||
log.warn(`context engine maintain failed (${params.reason}): ${String(err)}`);
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
@ -66,6 +66,7 @@ export const mockedEnsureRuntimePluginsLoaded = vi.fn<(params?: unknown) => void
|
||||
export const mockedPrepareProviderRuntimeAuth = vi.fn(async () => undefined);
|
||||
export const mockedRunEmbeddedAttempt =
|
||||
vi.fn<(params: unknown) => Promise<EmbeddedRunAttemptResult>>();
|
||||
export const mockedRunContextEngineMaintenance = vi.fn(async () => undefined);
|
||||
export const mockedSessionLikelyHasOversizedToolResults = vi.fn(() => false);
|
||||
export const mockedTruncateOversizedToolResultsInSession = vi.fn<
|
||||
() => Promise<MockTruncateOversizedToolResultsResult>
|
||||
@ -173,6 +174,8 @@ export function resetRunOverflowCompactionHarnessMocks(): void {
|
||||
mockedPrepareProviderRuntimeAuth.mockReset();
|
||||
mockedPrepareProviderRuntimeAuth.mockResolvedValue(undefined);
|
||||
mockedRunEmbeddedAttempt.mockReset();
|
||||
mockedRunContextEngineMaintenance.mockReset();
|
||||
mockedRunContextEngineMaintenance.mockResolvedValue(undefined);
|
||||
mockedSessionLikelyHasOversizedToolResults.mockReset();
|
||||
mockedSessionLikelyHasOversizedToolResults.mockReturnValue(false);
|
||||
mockedTruncateOversizedToolResultsInSession.mockReset();
|
||||
@ -303,6 +306,10 @@ export async function loadRunOverflowCompactionHarness(): Promise<{
|
||||
runEmbeddedAttempt: mockedRunEmbeddedAttempt,
|
||||
}));
|
||||
|
||||
vi.doMock("./context-engine-maintenance.js", () => ({
|
||||
runContextEngineMaintenance: mockedRunContextEngineMaintenance,
|
||||
}));
|
||||
|
||||
vi.doMock("./model.js", () => ({
|
||||
resolveModelAsync: vi.fn(async () => ({
|
||||
model: {
|
||||
|
||||
@ -16,6 +16,7 @@ import {
|
||||
mockedContextEngine,
|
||||
mockedCompactDirect,
|
||||
mockedRunEmbeddedAttempt,
|
||||
mockedRunContextEngineMaintenance,
|
||||
resetRunOverflowCompactionHarnessMocks,
|
||||
mockedSessionLikelyHasOversizedToolResults,
|
||||
mockedTruncateOversizedToolResultsInSession,
|
||||
@ -35,6 +36,7 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => {
|
||||
|
||||
beforeEach(() => {
|
||||
mockedRunEmbeddedAttempt.mockReset();
|
||||
mockedRunContextEngineMaintenance.mockReset();
|
||||
mockedCompactDirect.mockReset();
|
||||
mockedCoerceToFailoverError.mockReset();
|
||||
mockedDescribeFailoverError.mockReset();
|
||||
@ -50,6 +52,7 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => {
|
||||
compacted: false,
|
||||
reason: "nothing to compact",
|
||||
});
|
||||
mockedRunContextEngineMaintenance.mockResolvedValue(undefined);
|
||||
mockedCoerceToFailoverError.mockReturnValue(null);
|
||||
mockedDescribeFailoverError.mockImplementation((err: unknown) => ({
|
||||
message: err instanceof Error ? err.message : String(err),
|
||||
@ -241,6 +244,37 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("runs maintenance after successful overflow-recovery compaction", async () => {
|
||||
mockedContextEngine.info.ownsCompaction = true;
|
||||
mockedRunEmbeddedAttempt
|
||||
.mockResolvedValueOnce(makeAttemptResult({ promptError: makeOverflowError() }))
|
||||
.mockResolvedValueOnce(makeAttemptResult({ promptError: null }));
|
||||
mockedCompactDirect.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
compacted: true,
|
||||
result: {
|
||||
summary: "engine-owned compaction",
|
||||
tokensAfter: 50,
|
||||
},
|
||||
});
|
||||
|
||||
await runEmbeddedPiAgent(overflowBaseRunParams);
|
||||
|
||||
expect(mockedRunContextEngineMaintenance).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
contextEngine: mockedContextEngine,
|
||||
sessionId: "test-session",
|
||||
sessionKey: "test-key",
|
||||
sessionFile: "/tmp/session.json",
|
||||
reason: "compaction",
|
||||
runtimeContext: expect.objectContaining({
|
||||
trigger: "overflow",
|
||||
authProfileId: "test-profile",
|
||||
}),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("guards thrown engine-owned overflow compaction attempts", async () => {
|
||||
mockedContextEngine.info.ownsCompaction = true;
|
||||
mockedGlobalHookRunner.hasHooks.mockImplementation(
|
||||
|
||||
@ -66,6 +66,7 @@ import { ensureRuntimePluginsLoaded } from "../runtime-plugins.js";
|
||||
import { derivePromptTokens, normalizeUsage, type UsageLike } from "../usage.js";
|
||||
import { redactRunIdentifier, resolveRunWorkspaceDir } from "../workspace-run.js";
|
||||
import { buildEmbeddedCompactionRuntimeContext } from "./compaction-runtime-context.js";
|
||||
import { runContextEngineMaintenance } from "./context-engine-maintenance.js";
|
||||
import { resolveGlobalLane, resolveSessionLane } from "./lanes.js";
|
||||
import { log } from "./logger.js";
|
||||
import { resolveModelAsync } from "./model.js";
|
||||
@ -1131,6 +1132,39 @@ export async function runEmbeddedPiAgent(
|
||||
}
|
||||
}
|
||||
try {
|
||||
const overflowCompactionRuntimeContext = {
|
||||
...buildEmbeddedCompactionRuntimeContext({
|
||||
sessionKey: params.sessionKey,
|
||||
messageChannel: params.messageChannel,
|
||||
messageProvider: params.messageProvider,
|
||||
agentAccountId: params.agentAccountId,
|
||||
currentChannelId: params.currentChannelId,
|
||||
currentThreadTs: params.currentThreadTs,
|
||||
currentMessageId: params.currentMessageId,
|
||||
authProfileId: lastProfileId,
|
||||
workspaceDir: resolvedWorkspace,
|
||||
agentDir,
|
||||
config: params.config,
|
||||
skillsSnapshot: params.skillsSnapshot,
|
||||
senderIsOwner: params.senderIsOwner,
|
||||
senderId: params.senderId,
|
||||
provider,
|
||||
modelId,
|
||||
thinkLevel,
|
||||
reasoningLevel: params.reasoningLevel,
|
||||
bashElevated: params.bashElevated,
|
||||
extraSystemPrompt: params.extraSystemPrompt,
|
||||
ownerNumbers: params.ownerNumbers,
|
||||
}),
|
||||
runId: params.runId,
|
||||
trigger: "overflow",
|
||||
...(observedOverflowTokens !== undefined
|
||||
? { currentTokenCount: observedOverflowTokens }
|
||||
: {}),
|
||||
diagId: overflowDiagId,
|
||||
attempt: overflowCompactionAttempts,
|
||||
maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS,
|
||||
};
|
||||
compactResult = await contextEngine.compact({
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
@ -1141,40 +1175,18 @@ export async function runEmbeddedPiAgent(
|
||||
: {}),
|
||||
force: true,
|
||||
compactionTarget: "budget",
|
||||
runtimeContext: {
|
||||
...buildEmbeddedCompactionRuntimeContext({
|
||||
sessionKey: params.sessionKey,
|
||||
messageChannel: params.messageChannel,
|
||||
messageProvider: params.messageProvider,
|
||||
agentAccountId: params.agentAccountId,
|
||||
currentChannelId: params.currentChannelId,
|
||||
currentThreadTs: params.currentThreadTs,
|
||||
currentMessageId: params.currentMessageId,
|
||||
authProfileId: lastProfileId,
|
||||
workspaceDir: resolvedWorkspace,
|
||||
agentDir,
|
||||
config: params.config,
|
||||
skillsSnapshot: params.skillsSnapshot,
|
||||
senderIsOwner: params.senderIsOwner,
|
||||
senderId: params.senderId,
|
||||
provider,
|
||||
modelId,
|
||||
thinkLevel,
|
||||
reasoningLevel: params.reasoningLevel,
|
||||
bashElevated: params.bashElevated,
|
||||
extraSystemPrompt: params.extraSystemPrompt,
|
||||
ownerNumbers: params.ownerNumbers,
|
||||
}),
|
||||
runId: params.runId,
|
||||
trigger: "overflow",
|
||||
...(observedOverflowTokens !== undefined
|
||||
? { currentTokenCount: observedOverflowTokens }
|
||||
: {}),
|
||||
diagId: overflowDiagId,
|
||||
attempt: overflowCompactionAttempts,
|
||||
maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS,
|
||||
},
|
||||
runtimeContext: overflowCompactionRuntimeContext,
|
||||
});
|
||||
if (compactResult.ok && compactResult.compacted) {
|
||||
await runContextEngineMaintenance({
|
||||
contextEngine,
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
sessionFile: params.sessionFile,
|
||||
reason: "compaction",
|
||||
runtimeContext: overflowCompactionRuntimeContext,
|
||||
});
|
||||
}
|
||||
} catch (compactErr) {
|
||||
log.warn(
|
||||
`contextEngine.compact() threw during overflow recovery for ${provider}/${modelId}: ${String(compactErr)}`,
|
||||
|
||||
@ -40,6 +40,7 @@ const hoisted = vi.hoisted(() => {
|
||||
}));
|
||||
const getGlobalHookRunnerMock = vi.fn<() => unknown>(() => undefined);
|
||||
const initializeGlobalHookRunnerMock = vi.fn();
|
||||
const runContextEngineMaintenanceMock = vi.fn(async (_params?: unknown) => undefined);
|
||||
const sessionManager = {
|
||||
getLeafEntry: vi.fn(() => null),
|
||||
branch: vi.fn(),
|
||||
@ -57,6 +58,7 @@ const hoisted = vi.hoisted(() => {
|
||||
resolveBootstrapContextForRunMock,
|
||||
getGlobalHookRunnerMock,
|
||||
initializeGlobalHookRunnerMock,
|
||||
runContextEngineMaintenanceMock,
|
||||
sessionManager,
|
||||
};
|
||||
});
|
||||
@ -126,6 +128,10 @@ vi.mock("../skills-runtime.js", () => ({
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock("../context-engine-maintenance.js", () => ({
|
||||
runContextEngineMaintenance: (params: unknown) => hoisted.runContextEngineMaintenanceMock(params),
|
||||
}));
|
||||
|
||||
vi.mock("../../docs-path.js", () => ({
|
||||
resolveOpenClawDocsPath: async () => undefined,
|
||||
}));
|
||||
@ -300,6 +306,7 @@ function resetEmbeddedAttemptHarness(
|
||||
contextFiles: [],
|
||||
});
|
||||
hoisted.getGlobalHookRunnerMock.mockReset().mockReturnValue(undefined);
|
||||
hoisted.runContextEngineMaintenanceMock.mockReset().mockResolvedValue(undefined);
|
||||
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
|
||||
hoisted.sessionManager.branch.mockReset();
|
||||
hoisted.sessionManager.resetLeaf.mockReset();
|
||||
@ -852,4 +859,55 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => {
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("skips maintenance when afterTurn fails", async () => {
|
||||
const { bootstrap, assemble } = createContextEngineBootstrapAndAssemble();
|
||||
const afterTurn = vi.fn(async () => {
|
||||
throw new Error("afterTurn failed");
|
||||
});
|
||||
|
||||
const result = await runAttemptWithContextEngine({
|
||||
bootstrap,
|
||||
assemble,
|
||||
afterTurn,
|
||||
});
|
||||
|
||||
expect(result.promptError).toBeNull();
|
||||
expect(afterTurn).toHaveBeenCalled();
|
||||
expect(hoisted.runContextEngineMaintenanceMock).not.toHaveBeenCalledWith(
|
||||
expect.objectContaining({ reason: "turn" }),
|
||||
);
|
||||
});
|
||||
|
||||
it("runs startup maintenance for existing sessions even without bootstrap()", async () => {
|
||||
const { assemble } = createContextEngineBootstrapAndAssemble();
|
||||
|
||||
const result = await runAttemptWithContextEngine({
|
||||
assemble,
|
||||
});
|
||||
|
||||
expect(result.promptError).toBeNull();
|
||||
expect(hoisted.runContextEngineMaintenanceMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ reason: "bootstrap" }),
|
||||
);
|
||||
});
|
||||
|
||||
it("skips maintenance when ingestBatch fails", async () => {
|
||||
const { bootstrap, assemble } = createContextEngineBootstrapAndAssemble();
|
||||
const ingestBatch = vi.fn(async () => {
|
||||
throw new Error("ingestBatch failed");
|
||||
});
|
||||
|
||||
const result = await runAttemptWithContextEngine({
|
||||
bootstrap,
|
||||
assemble,
|
||||
ingestBatch,
|
||||
});
|
||||
|
||||
expect(result.promptError).toBeNull();
|
||||
expect(ingestBatch).toHaveBeenCalled();
|
||||
expect(hoisted.runContextEngineMaintenanceMock).not.toHaveBeenCalledWith(
|
||||
expect.objectContaining({ reason: "turn" }),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@ -36,6 +36,7 @@ import { isReasoningTagProvider } from "../../../utils/provider-utils.js";
|
||||
import { resolveOpenClawAgentDir } from "../../agent-paths.js";
|
||||
import { resolveSessionAgentIds } from "../../agent-scope.js";
|
||||
import { createAnthropicPayloadLogger } from "../../anthropic-payload-log.js";
|
||||
import { createAnthropicVertexStreamFnForModel } from "../../anthropic-vertex-stream.js";
|
||||
import {
|
||||
analyzeBootstrapBudget,
|
||||
buildBootstrapPromptWarning,
|
||||
@ -105,6 +106,7 @@ import { appendCacheTtlTimestamp, isCacheTtlEligibleProvider } from "../cache-tt
|
||||
import type { CompactEmbeddedPiSessionParams } from "../compact.js";
|
||||
import { buildEmbeddedCompactionRuntimeContext } from "../compaction-runtime-context.js";
|
||||
import { resolveCompactionTimeoutMs } from "../compaction-safety-timeout.js";
|
||||
import { runContextEngineMaintenance } from "../context-engine-maintenance.js";
|
||||
import { buildEmbeddedExtensionFactories } from "../extensions.js";
|
||||
import { applyExtraParamsToAgent } from "../extra-params.js";
|
||||
import {
|
||||
@ -2034,12 +2036,27 @@ export async function runEmbeddedAttempt(
|
||||
});
|
||||
trackSessionManagerAccess(params.sessionFile);
|
||||
|
||||
if (hadSessionFile && params.contextEngine?.bootstrap) {
|
||||
if (hadSessionFile && (params.contextEngine?.bootstrap || params.contextEngine?.maintain)) {
|
||||
try {
|
||||
await params.contextEngine.bootstrap({
|
||||
if (typeof params.contextEngine?.bootstrap === "function") {
|
||||
await params.contextEngine.bootstrap({
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
sessionFile: params.sessionFile,
|
||||
});
|
||||
}
|
||||
await runContextEngineMaintenance({
|
||||
contextEngine: params.contextEngine,
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
sessionFile: params.sessionFile,
|
||||
reason: "bootstrap",
|
||||
sessionManager,
|
||||
runtimeContext: buildAfterTurnRuntimeContext({
|
||||
attempt: params,
|
||||
workspaceDir: effectiveWorkspace,
|
||||
agentDir,
|
||||
}),
|
||||
});
|
||||
} catch (bootstrapErr) {
|
||||
log.warn(`context engine bootstrap failed: ${String(bootstrapErr)}`);
|
||||
@ -2196,6 +2213,10 @@ export async function runEmbeddedAttempt(
|
||||
log.warn(`[ws-stream] no API key for provider=${params.provider}; using HTTP transport`);
|
||||
activeSession.agent.streamFn = streamSimple;
|
||||
}
|
||||
} else if (params.model.provider === "anthropic-vertex") {
|
||||
// Anthropic Vertex AI: inject AnthropicVertex client into pi-ai's
|
||||
// streamAnthropic for GCP IAM auth instead of Anthropic API keys.
|
||||
activeSession.agent.streamFn = createAnthropicVertexStreamFnForModel(params.model);
|
||||
} else {
|
||||
// Force a stable streamFn reference so vitest can reliably mock @mariozechner/pi-ai.
|
||||
activeSession.agent.streamFn = streamSimple;
|
||||
@ -2405,6 +2426,7 @@ export async function runEmbeddedAttempt(
|
||||
messages: activeSession.messages,
|
||||
tokenBudget: params.contextTokenBudget,
|
||||
model: params.modelId,
|
||||
...(params.prompt !== undefined ? { prompt: params.prompt } : {}),
|
||||
});
|
||||
if (assembled.messages !== activeSession.messages) {
|
||||
activeSession.agent.replaceMessages(assembled.messages);
|
||||
@ -2973,6 +2995,7 @@ export async function runEmbeddedAttempt(
|
||||
workspaceDir: effectiveWorkspace,
|
||||
agentDir,
|
||||
});
|
||||
let postTurnFinalizationSucceeded = true;
|
||||
|
||||
if (typeof params.contextEngine.afterTurn === "function") {
|
||||
try {
|
||||
@ -2986,6 +3009,7 @@ export async function runEmbeddedAttempt(
|
||||
runtimeContext: afterTurnRuntimeContext,
|
||||
});
|
||||
} catch (afterTurnErr) {
|
||||
postTurnFinalizationSucceeded = false;
|
||||
log.warn(`context engine afterTurn failed: ${String(afterTurnErr)}`);
|
||||
}
|
||||
} else {
|
||||
@ -3000,6 +3024,7 @@ export async function runEmbeddedAttempt(
|
||||
messages: newMessages,
|
||||
});
|
||||
} catch (ingestErr) {
|
||||
postTurnFinalizationSucceeded = false;
|
||||
log.warn(`context engine ingest failed: ${String(ingestErr)}`);
|
||||
}
|
||||
} else {
|
||||
@ -3011,12 +3036,25 @@ export async function runEmbeddedAttempt(
|
||||
message: msg,
|
||||
});
|
||||
} catch (ingestErr) {
|
||||
postTurnFinalizationSucceeded = false;
|
||||
log.warn(`context engine ingest failed: ${String(ingestErr)}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!promptError && !aborted && !yieldAborted && postTurnFinalizationSucceeded) {
|
||||
await runContextEngineMaintenance({
|
||||
contextEngine: params.contextEngine,
|
||||
sessionId: sessionIdUsed,
|
||||
sessionKey: params.sessionKey,
|
||||
sessionFile: params.sessionFile,
|
||||
reason: "turn",
|
||||
sessionManager,
|
||||
runtimeContext: afterTurnRuntimeContext,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
cacheTrace?.recordStage("session:after", {
|
||||
|
||||
@ -1,13 +1,26 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import type { AssistantMessage, ToolResultMessage, UserMessage } from "@mariozechner/pi-ai";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { SessionManager } from "@mariozechner/pi-coding-agent";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { onSessionTranscriptUpdate } from "../../sessions/transcript-events.js";
|
||||
import { makeAgentAssistantMessage } from "../test-helpers/agent-message-fixtures.js";
|
||||
|
||||
const acquireSessionWriteLockReleaseMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const acquireSessionWriteLockMock = vi.hoisted(() =>
|
||||
vi.fn(async (_params?: unknown) => ({ release: acquireSessionWriteLockReleaseMock })),
|
||||
);
|
||||
|
||||
vi.mock("../session-write-lock.js", () => ({
|
||||
acquireSessionWriteLock: (params: unknown) => acquireSessionWriteLockMock(params),
|
||||
}));
|
||||
|
||||
import {
|
||||
truncateToolResultText,
|
||||
truncateToolResultMessage,
|
||||
calculateMaxToolResultChars,
|
||||
getToolResultTextLength,
|
||||
truncateOversizedToolResultsInMessages,
|
||||
truncateOversizedToolResultsInSession,
|
||||
isOversizedToolResult,
|
||||
sessionLikelyHasOversizedToolResults,
|
||||
HARD_MAX_TOOL_RESULT_CHARS,
|
||||
@ -16,6 +29,12 @@ import {
|
||||
let testTimestamp = 1;
|
||||
const nextTimestamp = () => testTimestamp++;
|
||||
|
||||
beforeEach(() => {
|
||||
testTimestamp = 1;
|
||||
acquireSessionWriteLockMock.mockClear();
|
||||
acquireSessionWriteLockReleaseMock.mockClear();
|
||||
});
|
||||
|
||||
function makeToolResult(text: string, toolCallId = "call_1"): ToolResultMessage {
|
||||
return {
|
||||
role: "toolResult",
|
||||
@ -248,6 +267,54 @@ describe("truncateOversizedToolResultsInMessages", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("truncateOversizedToolResultsInSession", () => {
|
||||
it("acquires the session write lock before rewriting oversized tool results", async () => {
|
||||
const sessionFile = "/tmp/tool-result-truncation-session.jsonl";
|
||||
const sessionManager = SessionManager.inMemory();
|
||||
sessionManager.appendMessage(makeUserMessage("hello"));
|
||||
sessionManager.appendMessage(makeAssistantMessage("reading file"));
|
||||
sessionManager.appendMessage(makeToolResult("x".repeat(500_000)));
|
||||
|
||||
const openSpy = vi
|
||||
.spyOn(SessionManager, "open")
|
||||
.mockReturnValue(sessionManager as unknown as ReturnType<typeof SessionManager.open>);
|
||||
const listener = vi.fn();
|
||||
const cleanup = onSessionTranscriptUpdate(listener);
|
||||
|
||||
try {
|
||||
const result = await truncateOversizedToolResultsInSession({
|
||||
sessionFile,
|
||||
contextWindowTokens: 128_000,
|
||||
sessionKey: "agent:main:test",
|
||||
});
|
||||
|
||||
expect(result.truncated).toBe(true);
|
||||
expect(result.truncatedCount).toBe(1);
|
||||
expect(acquireSessionWriteLockMock).toHaveBeenCalledWith({ sessionFile });
|
||||
expect(acquireSessionWriteLockReleaseMock).toHaveBeenCalledTimes(1);
|
||||
expect(listener).toHaveBeenCalledWith({ sessionFile });
|
||||
|
||||
const branch = sessionManager.getBranch();
|
||||
const rewrittenToolResult = branch.find(
|
||||
(entry) => entry.type === "message" && entry.message.role === "toolResult",
|
||||
);
|
||||
expect(rewrittenToolResult?.type).toBe("message");
|
||||
if (
|
||||
rewrittenToolResult?.type !== "message" ||
|
||||
rewrittenToolResult.message.role !== "toolResult"
|
||||
) {
|
||||
throw new Error("expected rewritten tool result");
|
||||
}
|
||||
const rewrittenText = getFirstToolResultText(rewrittenToolResult.message);
|
||||
expect(rewrittenText.length).toBeLessThan(500_000);
|
||||
expect(rewrittenText).toContain("truncated");
|
||||
} finally {
|
||||
cleanup();
|
||||
openSpy.mockRestore();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe("sessionLikelyHasOversizedToolResults", () => {
|
||||
it("returns false when no tool results are oversized", () => {
|
||||
const messages = [makeUserMessage("hello"), makeToolResult("small result")];
|
||||
|
||||
@ -1,7 +1,10 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import type { TextContent } from "@mariozechner/pi-ai";
|
||||
import { SessionManager } from "@mariozechner/pi-coding-agent";
|
||||
import { emitSessionTranscriptUpdate } from "../../sessions/transcript-events.js";
|
||||
import { acquireSessionWriteLock } from "../session-write-lock.js";
|
||||
import { log } from "./logger.js";
|
||||
import { rewriteTranscriptEntriesInSessionManager } from "./transcript-rewrite.js";
|
||||
|
||||
/**
|
||||
* Maximum share of the context window a single tool result should occupy.
|
||||
@ -211,8 +214,10 @@ export async function truncateOversizedToolResultsInSession(params: {
|
||||
}): Promise<{ truncated: boolean; truncatedCount: number; reason?: string }> {
|
||||
const { sessionFile, contextWindowTokens } = params;
|
||||
const maxChars = calculateMaxToolResultChars(contextWindowTokens);
|
||||
let sessionLock: Awaited<ReturnType<typeof acquireSessionWriteLock>> | undefined;
|
||||
|
||||
try {
|
||||
sessionLock = await acquireSessionWriteLock({ sessionFile });
|
||||
const sessionManager = SessionManager.open(sessionFile);
|
||||
const branch = sessionManager.getBranch();
|
||||
|
||||
@ -246,87 +251,46 @@ export async function truncateOversizedToolResultsInSession(params: {
|
||||
return { truncated: false, truncatedCount: 0, reason: "no oversized tool results" };
|
||||
}
|
||||
|
||||
// Branch from the parent of the first oversized entry
|
||||
const firstOversizedIdx = oversizedIndices[0];
|
||||
const firstOversizedEntry = branch[firstOversizedIdx];
|
||||
const branchFromId = firstOversizedEntry.parentId;
|
||||
|
||||
if (!branchFromId) {
|
||||
// The oversized entry is the root - very unusual but handle it
|
||||
sessionManager.resetLeaf();
|
||||
} else {
|
||||
sessionManager.branch(branchFromId);
|
||||
}
|
||||
|
||||
// Re-append all entries from the first oversized one onwards,
|
||||
// with truncated tool results
|
||||
const oversizedSet = new Set(oversizedIndices);
|
||||
let truncatedCount = 0;
|
||||
|
||||
for (let i = firstOversizedIdx; i < branch.length; i++) {
|
||||
const entry = branch[i];
|
||||
|
||||
if (entry.type === "message") {
|
||||
let message = entry.message;
|
||||
|
||||
if (oversizedSet.has(i)) {
|
||||
message = truncateToolResultMessage(message, maxChars);
|
||||
truncatedCount++;
|
||||
const newLength = getToolResultTextLength(message);
|
||||
log.info(
|
||||
`[tool-result-truncation] Truncated tool result: ` +
|
||||
`originalEntry=${entry.id} newChars=${newLength} ` +
|
||||
`sessionKey=${params.sessionKey ?? params.sessionId ?? "unknown"}`,
|
||||
);
|
||||
}
|
||||
|
||||
// appendMessage expects Message | CustomMessage | BashExecutionMessage
|
||||
sessionManager.appendMessage(message as Parameters<typeof sessionManager.appendMessage>[0]);
|
||||
} else if (entry.type === "compaction") {
|
||||
sessionManager.appendCompaction(
|
||||
entry.summary,
|
||||
entry.firstKeptEntryId,
|
||||
entry.tokensBefore,
|
||||
entry.details,
|
||||
entry.fromHook,
|
||||
);
|
||||
} else if (entry.type === "thinking_level_change") {
|
||||
sessionManager.appendThinkingLevelChange(entry.thinkingLevel);
|
||||
} else if (entry.type === "model_change") {
|
||||
sessionManager.appendModelChange(entry.provider, entry.modelId);
|
||||
} else if (entry.type === "custom") {
|
||||
sessionManager.appendCustomEntry(entry.customType, entry.data);
|
||||
} else if (entry.type === "custom_message") {
|
||||
sessionManager.appendCustomMessageEntry(
|
||||
entry.customType,
|
||||
entry.content,
|
||||
entry.display,
|
||||
entry.details,
|
||||
);
|
||||
} else if (entry.type === "branch_summary") {
|
||||
// Branch summaries reference specific entry IDs - skip to avoid inconsistency
|
||||
continue;
|
||||
} else if (entry.type === "label") {
|
||||
// Labels reference specific entry IDs - skip to avoid inconsistency
|
||||
continue;
|
||||
} else if (entry.type === "session_info") {
|
||||
if (entry.name) {
|
||||
sessionManager.appendSessionInfo(entry.name);
|
||||
}
|
||||
const replacements = oversizedIndices.flatMap((index) => {
|
||||
const entry = branch[index];
|
||||
if (!entry || entry.type !== "message") {
|
||||
return [];
|
||||
}
|
||||
const message = truncateToolResultMessage(entry.message, maxChars);
|
||||
const newLength = getToolResultTextLength(message);
|
||||
log.info(
|
||||
`[tool-result-truncation] Truncated tool result: ` +
|
||||
`originalEntry=${entry.id} newChars=${newLength} ` +
|
||||
`sessionKey=${params.sessionKey ?? params.sessionId ?? "unknown"}`,
|
||||
);
|
||||
return [{ entryId: entry.id, message }];
|
||||
});
|
||||
|
||||
const rewriteResult = rewriteTranscriptEntriesInSessionManager({
|
||||
sessionManager,
|
||||
replacements,
|
||||
});
|
||||
if (rewriteResult.changed) {
|
||||
emitSessionTranscriptUpdate(sessionFile);
|
||||
}
|
||||
|
||||
log.info(
|
||||
`[tool-result-truncation] Truncated ${truncatedCount} tool result(s) in session ` +
|
||||
`[tool-result-truncation] Truncated ${rewriteResult.rewrittenEntries} tool result(s) in session ` +
|
||||
`(contextWindow=${contextWindowTokens} maxChars=${maxChars}) ` +
|
||||
`sessionKey=${params.sessionKey ?? params.sessionId ?? "unknown"}`,
|
||||
);
|
||||
|
||||
return { truncated: true, truncatedCount };
|
||||
return {
|
||||
truncated: rewriteResult.changed,
|
||||
truncatedCount: rewriteResult.rewrittenEntries,
|
||||
reason: rewriteResult.reason,
|
||||
};
|
||||
} catch (err) {
|
||||
const errMsg = err instanceof Error ? err.message : String(err);
|
||||
log.warn(`[tool-result-truncation] Failed to truncate: ${errMsg}`);
|
||||
return { truncated: false, truncatedCount: 0, reason: errMsg };
|
||||
} finally {
|
||||
await sessionLock?.release();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
402
src/agents/pi-embedded-runner/transcript-rewrite.test.ts
Normal file
402
src/agents/pi-embedded-runner/transcript-rewrite.test.ts
Normal file
@ -0,0 +1,402 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import { SessionManager } from "@mariozechner/pi-coding-agent";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { onSessionTranscriptUpdate } from "../../sessions/transcript-events.js";
|
||||
import { installSessionToolResultGuard } from "../session-tool-result-guard.js";
|
||||
|
||||
const acquireSessionWriteLockReleaseMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const acquireSessionWriteLockMock = vi.hoisted(() =>
|
||||
vi.fn(async (_params?: unknown) => ({ release: acquireSessionWriteLockReleaseMock })),
|
||||
);
|
||||
|
||||
vi.mock("../session-write-lock.js", () => ({
|
||||
acquireSessionWriteLock: (params: unknown) => acquireSessionWriteLockMock(params),
|
||||
}));
|
||||
|
||||
import {
|
||||
rewriteTranscriptEntriesInSessionFile,
|
||||
rewriteTranscriptEntriesInSessionManager,
|
||||
} from "./transcript-rewrite.js";
|
||||
|
||||
type AppendMessage = Parameters<SessionManager["appendMessage"]>[0];
|
||||
|
||||
function asAppendMessage(message: unknown): AppendMessage {
|
||||
return message as AppendMessage;
|
||||
}
|
||||
|
||||
function getBranchMessages(sessionManager: SessionManager): AgentMessage[] {
|
||||
return sessionManager
|
||||
.getBranch()
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message);
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
acquireSessionWriteLockMock.mockClear();
|
||||
acquireSessionWriteLockReleaseMock.mockClear();
|
||||
});
|
||||
|
||||
describe("rewriteTranscriptEntriesInSessionManager", () => {
|
||||
it("branches from the first replaced message and re-appends the remaining suffix", () => {
|
||||
const sessionManager = SessionManager.inMemory();
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "user",
|
||||
content: "read file",
|
||||
timestamp: 1,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "toolCall", id: "call_1", name: "read", arguments: {} }],
|
||||
timestamp: 2,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "x".repeat(8_000) }],
|
||||
isError: false,
|
||||
timestamp: 3,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "summarized" }],
|
||||
timestamp: 4,
|
||||
}),
|
||||
);
|
||||
|
||||
const toolResultEntry = sessionManager
|
||||
.getBranch()
|
||||
.find((entry) => entry.type === "message" && entry.message.role === "toolResult");
|
||||
expect(toolResultEntry).toBeDefined();
|
||||
|
||||
const result = rewriteTranscriptEntriesInSessionManager({
|
||||
sessionManager,
|
||||
replacements: [
|
||||
{
|
||||
entryId: toolResultEntry!.id,
|
||||
message: {
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "[externalized file_123]" }],
|
||||
isError: false,
|
||||
timestamp: 3,
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
expect(result).toMatchObject({
|
||||
changed: true,
|
||||
rewrittenEntries: 1,
|
||||
});
|
||||
expect(result.bytesFreed).toBeGreaterThan(0);
|
||||
|
||||
const branchMessages = getBranchMessages(sessionManager);
|
||||
expect(branchMessages.map((message) => message.role)).toEqual([
|
||||
"user",
|
||||
"assistant",
|
||||
"toolResult",
|
||||
"assistant",
|
||||
]);
|
||||
const rewrittenToolResult = branchMessages[2] as Extract<AgentMessage, { role: "toolResult" }>;
|
||||
expect(rewrittenToolResult.content).toEqual([
|
||||
{ type: "text", text: "[externalized file_123]" },
|
||||
]);
|
||||
});
|
||||
|
||||
it("preserves active-branch labels after rewritten entries are re-appended", () => {
|
||||
const sessionManager = SessionManager.inMemory();
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "user",
|
||||
content: "read file",
|
||||
timestamp: 1,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "toolCall", id: "call_1", name: "read", arguments: {} }],
|
||||
timestamp: 2,
|
||||
}),
|
||||
);
|
||||
const toolResultEntryId = sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "x".repeat(8_000) }],
|
||||
isError: false,
|
||||
timestamp: 3,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "summarized" }],
|
||||
timestamp: 4,
|
||||
}),
|
||||
);
|
||||
|
||||
const summaryEntry = sessionManager
|
||||
.getBranch()
|
||||
.find(
|
||||
(entry) =>
|
||||
entry.type === "message" &&
|
||||
entry.message.role === "assistant" &&
|
||||
Array.isArray(entry.message.content) &&
|
||||
entry.message.content.some((part) => part.type === "text" && part.text === "summarized"),
|
||||
);
|
||||
expect(summaryEntry).toBeDefined();
|
||||
sessionManager.appendLabelChange(summaryEntry!.id, "bookmark");
|
||||
|
||||
const result = rewriteTranscriptEntriesInSessionManager({
|
||||
sessionManager,
|
||||
replacements: [
|
||||
{
|
||||
entryId: toolResultEntryId,
|
||||
message: {
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "[externalized file_123]" }],
|
||||
isError: false,
|
||||
timestamp: 3,
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
expect(result.changed).toBe(true);
|
||||
const rewrittenSummaryEntry = sessionManager
|
||||
.getBranch()
|
||||
.find(
|
||||
(entry) =>
|
||||
entry.type === "message" &&
|
||||
entry.message.role === "assistant" &&
|
||||
Array.isArray(entry.message.content) &&
|
||||
entry.message.content.some((part) => part.type === "text" && part.text === "summarized"),
|
||||
);
|
||||
expect(rewrittenSummaryEntry).toBeDefined();
|
||||
expect(sessionManager.getLabel(rewrittenSummaryEntry!.id)).toBe("bookmark");
|
||||
expect(sessionManager.getBranch().some((entry) => entry.type === "label")).toBe(true);
|
||||
});
|
||||
|
||||
it("remaps compaction keep markers when rewritten entries change ids", () => {
|
||||
const sessionManager = SessionManager.inMemory();
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "user",
|
||||
content: "read file",
|
||||
timestamp: 1,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "toolCall", id: "call_1", name: "read", arguments: {} }],
|
||||
timestamp: 2,
|
||||
}),
|
||||
);
|
||||
const toolResultEntryId = sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "x".repeat(8_000) }],
|
||||
isError: false,
|
||||
timestamp: 3,
|
||||
}),
|
||||
);
|
||||
const keptAssistantEntryId = sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "keep me" }],
|
||||
timestamp: 4,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendCompaction("summary", keptAssistantEntryId, 123);
|
||||
|
||||
const result = rewriteTranscriptEntriesInSessionManager({
|
||||
sessionManager,
|
||||
replacements: [
|
||||
{
|
||||
entryId: toolResultEntryId,
|
||||
message: {
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "[externalized file_123]" }],
|
||||
isError: false,
|
||||
timestamp: 3,
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
expect(result.changed).toBe(true);
|
||||
const branch = sessionManager.getBranch();
|
||||
const keptAssistantEntry = branch.find(
|
||||
(entry) =>
|
||||
entry.type === "message" &&
|
||||
entry.message.role === "assistant" &&
|
||||
Array.isArray(entry.message.content) &&
|
||||
entry.message.content.some((part) => part.type === "text" && part.text === "keep me"),
|
||||
);
|
||||
const compactionEntry = branch.find((entry) => entry.type === "compaction");
|
||||
|
||||
expect(keptAssistantEntry).toBeDefined();
|
||||
expect(compactionEntry).toBeDefined();
|
||||
expect(compactionEntry?.firstKeptEntryId).toBe(keptAssistantEntry?.id);
|
||||
expect(compactionEntry?.firstKeptEntryId).not.toBe(keptAssistantEntryId);
|
||||
});
|
||||
|
||||
it("bypasses persistence hooks when replaying rewritten messages", () => {
|
||||
const sessionManager = SessionManager.inMemory();
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "user",
|
||||
content: "run tool",
|
||||
timestamp: 1,
|
||||
}),
|
||||
);
|
||||
const toolResultEntryId = sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "exec",
|
||||
content: [{ type: "text", text: "before rewrite" }],
|
||||
isError: false,
|
||||
timestamp: 2,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "summarized" }],
|
||||
timestamp: 3,
|
||||
}),
|
||||
);
|
||||
installSessionToolResultGuard(sessionManager, {
|
||||
transformToolResultForPersistence: (message) => ({
|
||||
...(message as Extract<AgentMessage, { role: "toolResult" }>),
|
||||
content: [{ type: "text", text: "[hook transformed]" }],
|
||||
}),
|
||||
beforeMessageWriteHook: ({ message }) =>
|
||||
message.role === "assistant" ? { block: true } : undefined,
|
||||
});
|
||||
|
||||
const result = rewriteTranscriptEntriesInSessionManager({
|
||||
sessionManager,
|
||||
replacements: [
|
||||
{
|
||||
entryId: toolResultEntryId,
|
||||
message: {
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "exec",
|
||||
content: [{ type: "text", text: "[exact replacement]" }],
|
||||
isError: false,
|
||||
timestamp: 2,
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
expect(result.changed).toBe(true);
|
||||
const branchMessages = getBranchMessages(sessionManager);
|
||||
expect(branchMessages.map((message) => message.role)).toEqual([
|
||||
"user",
|
||||
"toolResult",
|
||||
"assistant",
|
||||
]);
|
||||
expect((branchMessages[1] as Extract<AgentMessage, { role: "toolResult" }>).content).toEqual([
|
||||
{ type: "text", text: "[exact replacement]" },
|
||||
]);
|
||||
expect(branchMessages[2]).toMatchObject({
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "summarized" }],
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("rewriteTranscriptEntriesInSessionFile", () => {
|
||||
it("emits transcript updates when the active branch changes", async () => {
|
||||
const sessionFile = "/tmp/session.jsonl";
|
||||
const sessionManager = SessionManager.inMemory();
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "user",
|
||||
content: "run tool",
|
||||
timestamp: 1,
|
||||
}),
|
||||
);
|
||||
sessionManager.appendMessage(
|
||||
asAppendMessage({
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "exec",
|
||||
content: [{ type: "text", text: "y".repeat(6_000) }],
|
||||
isError: false,
|
||||
timestamp: 2,
|
||||
}),
|
||||
);
|
||||
|
||||
const toolResultEntry = sessionManager
|
||||
.getBranch()
|
||||
.find((entry) => entry.type === "message" && entry.message.role === "toolResult");
|
||||
expect(toolResultEntry).toBeDefined();
|
||||
|
||||
const openSpy = vi
|
||||
.spyOn(SessionManager, "open")
|
||||
.mockReturnValue(sessionManager as unknown as ReturnType<typeof SessionManager.open>);
|
||||
const listener = vi.fn();
|
||||
const cleanup = onSessionTranscriptUpdate(listener);
|
||||
|
||||
try {
|
||||
const result = await rewriteTranscriptEntriesInSessionFile({
|
||||
sessionFile,
|
||||
sessionKey: "agent:main:test",
|
||||
request: {
|
||||
replacements: [
|
||||
{
|
||||
entryId: toolResultEntry!.id,
|
||||
message: {
|
||||
role: "toolResult",
|
||||
toolCallId: "call_1",
|
||||
toolName: "exec",
|
||||
content: [{ type: "text", text: "[file_ref:file_abc]" }],
|
||||
isError: false,
|
||||
timestamp: 2,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
||||
|
||||
expect(result.changed).toBe(true);
|
||||
expect(acquireSessionWriteLockMock).toHaveBeenCalledWith({
|
||||
sessionFile,
|
||||
});
|
||||
expect(acquireSessionWriteLockReleaseMock).toHaveBeenCalledTimes(1);
|
||||
expect(listener).toHaveBeenCalledWith({ sessionFile });
|
||||
|
||||
const rewrittenToolResult = getBranchMessages(sessionManager)[1] as Extract<
|
||||
AgentMessage,
|
||||
{ role: "toolResult" }
|
||||
>;
|
||||
expect(rewrittenToolResult.content).toEqual([{ type: "text", text: "[file_ref:file_abc]" }]);
|
||||
} finally {
|
||||
cleanup();
|
||||
openSpy.mockRestore();
|
||||
}
|
||||
});
|
||||
});
|
||||
232
src/agents/pi-embedded-runner/transcript-rewrite.ts
Normal file
232
src/agents/pi-embedded-runner/transcript-rewrite.ts
Normal file
@ -0,0 +1,232 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import { SessionManager } from "@mariozechner/pi-coding-agent";
|
||||
import type {
|
||||
TranscriptRewriteReplacement,
|
||||
TranscriptRewriteRequest,
|
||||
TranscriptRewriteResult,
|
||||
} from "../../context-engine/types.js";
|
||||
import { emitSessionTranscriptUpdate } from "../../sessions/transcript-events.js";
|
||||
import { getRawSessionAppendMessage } from "../session-tool-result-guard.js";
|
||||
import { acquireSessionWriteLock } from "../session-write-lock.js";
|
||||
import { log } from "./logger.js";
|
||||
|
||||
type SessionManagerLike = ReturnType<typeof SessionManager.open>;
|
||||
type SessionBranchEntry = ReturnType<SessionManagerLike["getBranch"]>[number];
|
||||
|
||||
function estimateMessageBytes(message: AgentMessage): number {
|
||||
return Buffer.byteLength(JSON.stringify(message), "utf8");
|
||||
}
|
||||
|
||||
function remapEntryId(
|
||||
entryId: string | null | undefined,
|
||||
rewrittenEntryIds: ReadonlyMap<string, string>,
|
||||
): string | null {
|
||||
if (!entryId) {
|
||||
return null;
|
||||
}
|
||||
return rewrittenEntryIds.get(entryId) ?? entryId;
|
||||
}
|
||||
|
||||
function appendBranchEntry(params: {
|
||||
sessionManager: SessionManagerLike;
|
||||
entry: SessionBranchEntry;
|
||||
rewrittenEntryIds: ReadonlyMap<string, string>;
|
||||
appendMessage: SessionManagerLike["appendMessage"];
|
||||
}): string {
|
||||
const { sessionManager, entry, rewrittenEntryIds, appendMessage } = params;
|
||||
if (entry.type === "message") {
|
||||
return appendMessage(entry.message as Parameters<typeof sessionManager.appendMessage>[0]);
|
||||
}
|
||||
if (entry.type === "compaction") {
|
||||
return sessionManager.appendCompaction(
|
||||
entry.summary,
|
||||
remapEntryId(entry.firstKeptEntryId, rewrittenEntryIds) ?? entry.firstKeptEntryId,
|
||||
entry.tokensBefore,
|
||||
entry.details,
|
||||
entry.fromHook,
|
||||
);
|
||||
}
|
||||
if (entry.type === "thinking_level_change") {
|
||||
return sessionManager.appendThinkingLevelChange(entry.thinkingLevel);
|
||||
}
|
||||
if (entry.type === "model_change") {
|
||||
return sessionManager.appendModelChange(entry.provider, entry.modelId);
|
||||
}
|
||||
if (entry.type === "custom") {
|
||||
return sessionManager.appendCustomEntry(entry.customType, entry.data);
|
||||
}
|
||||
if (entry.type === "custom_message") {
|
||||
return sessionManager.appendCustomMessageEntry(
|
||||
entry.customType,
|
||||
entry.content,
|
||||
entry.display,
|
||||
entry.details,
|
||||
);
|
||||
}
|
||||
if (entry.type === "session_info") {
|
||||
if (entry.name) {
|
||||
return sessionManager.appendSessionInfo(entry.name);
|
||||
}
|
||||
return sessionManager.appendSessionInfo("");
|
||||
}
|
||||
if (entry.type === "branch_summary") {
|
||||
return sessionManager.branchWithSummary(
|
||||
remapEntryId(entry.parentId, rewrittenEntryIds),
|
||||
entry.summary,
|
||||
entry.details,
|
||||
entry.fromHook,
|
||||
);
|
||||
}
|
||||
return sessionManager.appendLabelChange(
|
||||
remapEntryId(entry.targetId, rewrittenEntryIds) ?? entry.targetId,
|
||||
entry.label,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Safely rewrites transcript message entries on the active branch by branching
|
||||
* from the first rewritten message's parent and re-appending the suffix.
|
||||
*/
|
||||
export function rewriteTranscriptEntriesInSessionManager(params: {
|
||||
sessionManager: SessionManagerLike;
|
||||
replacements: TranscriptRewriteReplacement[];
|
||||
}): TranscriptRewriteResult {
|
||||
const replacementsById = new Map(
|
||||
params.replacements
|
||||
.filter((replacement) => replacement.entryId.trim().length > 0)
|
||||
.map((replacement) => [replacement.entryId, replacement.message]),
|
||||
);
|
||||
if (replacementsById.size === 0) {
|
||||
return {
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
reason: "no replacements requested",
|
||||
};
|
||||
}
|
||||
|
||||
const branch = params.sessionManager.getBranch();
|
||||
if (branch.length === 0) {
|
||||
return {
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
reason: "empty session",
|
||||
};
|
||||
}
|
||||
|
||||
const matchedIndices: number[] = [];
|
||||
let bytesFreed = 0;
|
||||
|
||||
for (let index = 0; index < branch.length; index++) {
|
||||
const entry = branch[index];
|
||||
if (entry.type !== "message") {
|
||||
continue;
|
||||
}
|
||||
const replacement = replacementsById.get(entry.id);
|
||||
if (!replacement) {
|
||||
continue;
|
||||
}
|
||||
const originalBytes = estimateMessageBytes(entry.message);
|
||||
const replacementBytes = estimateMessageBytes(replacement);
|
||||
matchedIndices.push(index);
|
||||
bytesFreed += Math.max(0, originalBytes - replacementBytes);
|
||||
}
|
||||
|
||||
if (matchedIndices.length === 0) {
|
||||
return {
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
reason: "no matching message entries",
|
||||
};
|
||||
}
|
||||
|
||||
const firstMatchedEntry = branch[matchedIndices[0]] as
|
||||
| Extract<SessionBranchEntry, { type: "message" }>
|
||||
| undefined;
|
||||
// matchedIndices only contains indices of branch "message" entries.
|
||||
if (!firstMatchedEntry) {
|
||||
return {
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
reason: "invalid first rewrite target",
|
||||
};
|
||||
}
|
||||
|
||||
if (!firstMatchedEntry.parentId) {
|
||||
params.sessionManager.resetLeaf();
|
||||
} else {
|
||||
params.sessionManager.branch(firstMatchedEntry.parentId);
|
||||
}
|
||||
|
||||
// Maintenance rewrites should preserve the exact requested history without
|
||||
// re-running persistence hooks or size truncation on replayed messages.
|
||||
const appendMessage = getRawSessionAppendMessage(params.sessionManager);
|
||||
const rewrittenEntryIds = new Map<string, string>();
|
||||
for (let index = matchedIndices[0]; index < branch.length; index++) {
|
||||
const entry = branch[index];
|
||||
const replacement = entry.type === "message" ? replacementsById.get(entry.id) : undefined;
|
||||
const newEntryId =
|
||||
replacement === undefined
|
||||
? appendBranchEntry({
|
||||
sessionManager: params.sessionManager,
|
||||
entry,
|
||||
rewrittenEntryIds,
|
||||
appendMessage,
|
||||
})
|
||||
: appendMessage(replacement as Parameters<typeof params.sessionManager.appendMessage>[0]);
|
||||
rewrittenEntryIds.set(entry.id, newEntryId);
|
||||
}
|
||||
|
||||
return {
|
||||
changed: true,
|
||||
bytesFreed,
|
||||
rewrittenEntries: matchedIndices.length,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Open a transcript file, rewrite message entries on the active branch, and
|
||||
* emit a transcript update when the active branch changed.
|
||||
*/
|
||||
export async function rewriteTranscriptEntriesInSessionFile(params: {
|
||||
sessionFile: string;
|
||||
sessionId?: string;
|
||||
sessionKey?: string;
|
||||
request: TranscriptRewriteRequest;
|
||||
}): Promise<TranscriptRewriteResult> {
|
||||
let sessionLock: Awaited<ReturnType<typeof acquireSessionWriteLock>> | undefined;
|
||||
try {
|
||||
sessionLock = await acquireSessionWriteLock({
|
||||
sessionFile: params.sessionFile,
|
||||
});
|
||||
const sessionManager = SessionManager.open(params.sessionFile);
|
||||
const result = rewriteTranscriptEntriesInSessionManager({
|
||||
sessionManager,
|
||||
replacements: params.request.replacements,
|
||||
});
|
||||
if (result.changed) {
|
||||
emitSessionTranscriptUpdate(params.sessionFile);
|
||||
log.info(
|
||||
`[transcript-rewrite] rewrote ${result.rewrittenEntries} entr` +
|
||||
`${result.rewrittenEntries === 1 ? "y" : "ies"} ` +
|
||||
`bytesFreed=${result.bytesFreed} ` +
|
||||
`sessionKey=${params.sessionKey ?? params.sessionId ?? "unknown"}`,
|
||||
);
|
||||
}
|
||||
return result;
|
||||
} catch (err) {
|
||||
const reason = err instanceof Error ? err.message : String(err);
|
||||
log.warn(`[transcript-rewrite] failed: ${reason}`);
|
||||
return {
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
reason,
|
||||
};
|
||||
} finally {
|
||||
await sessionLock?.release();
|
||||
}
|
||||
}
|
||||
@ -69,6 +69,18 @@ describe("resolveProviderCapabilities", () => {
|
||||
geminiThoughtSignatureModelHints: [],
|
||||
dropThinkingBlockModelHints: ["claude"],
|
||||
});
|
||||
expect(resolveProviderCapabilities("anthropic-vertex")).toEqual({
|
||||
anthropicToolSchemaMode: "native",
|
||||
anthropicToolChoiceMode: "native",
|
||||
providerFamily: "anthropic",
|
||||
preserveAnthropicThinkingSignatures: true,
|
||||
openAiCompatTurnValidation: true,
|
||||
geminiThoughtSignatureSanitization: false,
|
||||
transcriptToolCallIdMode: "default",
|
||||
transcriptToolCallIdModelHints: [],
|
||||
geminiThoughtSignatureModelHints: [],
|
||||
dropThinkingBlockModelHints: ["claude"],
|
||||
});
|
||||
expect(resolveProviderCapabilities("amazon-bedrock")).toEqual({
|
||||
anthropicToolSchemaMode: "native",
|
||||
anthropicToolChoiceMode: "native",
|
||||
@ -136,6 +148,7 @@ describe("resolveProviderCapabilities", () => {
|
||||
|
||||
it("tracks provider families and model-specific transcript quirks in the registry", () => {
|
||||
expect(isOpenAiProviderFamily("openai")).toBe(true);
|
||||
expect(isAnthropicProviderFamily("anthropic-vertex")).toBe(true);
|
||||
expect(isAnthropicProviderFamily("amazon-bedrock")).toBe(true);
|
||||
expect(
|
||||
shouldDropThinkingBlocksForModel({
|
||||
@ -143,6 +156,12 @@ describe("resolveProviderCapabilities", () => {
|
||||
modelId: "claude-opus-4-6",
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
shouldDropThinkingBlocksForModel({
|
||||
provider: "anthropic-vertex",
|
||||
modelId: "claude-sonnet-4-6",
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
shouldDropThinkingBlocksForModel({
|
||||
provider: "amazon-bedrock",
|
||||
|
||||
@ -35,6 +35,10 @@ const DEFAULT_PROVIDER_CAPABILITIES: ProviderCapabilities = {
|
||||
};
|
||||
|
||||
const CORE_PROVIDER_CAPABILITIES: Record<string, Partial<ProviderCapabilities>> = {
|
||||
"anthropic-vertex": {
|
||||
providerFamily: "anthropic",
|
||||
dropThinkingBlockModelHints: ["claude"],
|
||||
},
|
||||
"amazon-bedrock": {
|
||||
providerFamily: "anthropic",
|
||||
dropThinkingBlockModelHints: ["claude"],
|
||||
|
||||
@ -16,6 +16,11 @@ import { extractToolCallsFromAssistant, extractToolResultId } from "./tool-call-
|
||||
const GUARD_TRUNCATION_SUFFIX =
|
||||
"\n\n⚠️ [Content truncated during persistence — original exceeded size limit. " +
|
||||
"Use offset/limit parameters or request specific sections for large content.]";
|
||||
const RAW_APPEND_MESSAGE = Symbol("openclaw.session.rawAppendMessage");
|
||||
|
||||
type SessionManagerWithRawAppend = SessionManager & {
|
||||
[RAW_APPEND_MESSAGE]?: SessionManager["appendMessage"];
|
||||
};
|
||||
|
||||
/**
|
||||
* Truncate oversized text content blocks in a tool result message.
|
||||
@ -68,6 +73,16 @@ function normalizePersistedToolResultName(
|
||||
return toolResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the unguarded appendMessage implementation for a session manager.
|
||||
*/
|
||||
export function getRawSessionAppendMessage(
|
||||
sessionManager: SessionManager,
|
||||
): SessionManager["appendMessage"] {
|
||||
const rawAppend = (sessionManager as SessionManagerWithRawAppend)[RAW_APPEND_MESSAGE];
|
||||
return rawAppend ?? sessionManager.appendMessage.bind(sessionManager);
|
||||
}
|
||||
|
||||
export function installSessionToolResultGuard(
|
||||
sessionManager: SessionManager,
|
||||
opts?: {
|
||||
@ -109,7 +124,8 @@ export function installSessionToolResultGuard(
|
||||
clearPendingToolResults: () => void;
|
||||
getPendingIds: () => string[];
|
||||
} {
|
||||
const originalAppend = sessionManager.appendMessage.bind(sessionManager);
|
||||
const originalAppend = getRawSessionAppendMessage(sessionManager);
|
||||
(sessionManager as SessionManagerWithRawAppend)[RAW_APPEND_MESSAGE] = originalAppend;
|
||||
const pendingState = createPendingToolCallState();
|
||||
const persistMessage = (message: AgentMessage) => {
|
||||
const transformer = opts?.transformMessageForPersistence;
|
||||
|
||||
@ -13,14 +13,49 @@ type RegistrablePlugin = {
|
||||
};
|
||||
|
||||
export const bundledWebSearchPluginRegistrations: ReadonlyArray<{
|
||||
plugin: RegistrablePlugin;
|
||||
readonly plugin: RegistrablePlugin;
|
||||
credentialValue: unknown;
|
||||
}> = [
|
||||
{ plugin: bravePlugin, credentialValue: "BSA-test" },
|
||||
{ plugin: firecrawlPlugin, credentialValue: "fc-test" },
|
||||
{ plugin: googlePlugin, credentialValue: "AIza-test" },
|
||||
{ plugin: moonshotPlugin, credentialValue: "sk-test" },
|
||||
{ plugin: perplexityPlugin, credentialValue: "pplx-test" },
|
||||
{ plugin: tavilyPlugin, credentialValue: "tvly-test" },
|
||||
{ plugin: xaiPlugin, credentialValue: "xai-test" },
|
||||
{
|
||||
get plugin() {
|
||||
return bravePlugin;
|
||||
},
|
||||
credentialValue: "BSA-test",
|
||||
},
|
||||
{
|
||||
get plugin() {
|
||||
return firecrawlPlugin;
|
||||
},
|
||||
credentialValue: "fc-test",
|
||||
},
|
||||
{
|
||||
get plugin() {
|
||||
return googlePlugin;
|
||||
},
|
||||
credentialValue: "AIza-test",
|
||||
},
|
||||
{
|
||||
get plugin() {
|
||||
return moonshotPlugin;
|
||||
},
|
||||
credentialValue: "sk-test",
|
||||
},
|
||||
{
|
||||
get plugin() {
|
||||
return perplexityPlugin;
|
||||
},
|
||||
credentialValue: "pplx-test",
|
||||
},
|
||||
{
|
||||
get plugin() {
|
||||
return tavilyPlugin;
|
||||
},
|
||||
credentialValue: "tvly-test",
|
||||
},
|
||||
{
|
||||
get plugin() {
|
||||
return xaiPlugin;
|
||||
},
|
||||
credentialValue: "xai-test",
|
||||
},
|
||||
];
|
||||
|
||||
@ -136,6 +136,35 @@ function pluginWebSearchApiKey(
|
||||
}
|
||||
|
||||
describe("web search provider config", () => {
|
||||
it("does not warn for legacy brave config when bundled web search allowlist compat applies", () => {
|
||||
const res = validateConfigObjectWithPlugins({
|
||||
plugins: {
|
||||
allow: ["bluebubbles", "memory-core"],
|
||||
},
|
||||
tools: {
|
||||
web: {
|
||||
search: {
|
||||
enabled: true,
|
||||
apiKey: "test-brave-key", // pragma: allowlist secret
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(res.ok).toBe(true);
|
||||
if (!res.ok) {
|
||||
return;
|
||||
}
|
||||
expect(res.warnings).not.toContainEqual(
|
||||
expect.objectContaining({
|
||||
path: "plugins.entries.brave",
|
||||
message: expect.stringContaining(
|
||||
"plugin disabled (not in allowlist) but config is present",
|
||||
),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("accepts perplexity provider and config", () => {
|
||||
const res = validateConfigObjectWithPlugins(
|
||||
buildWebSearchProviderConfig({
|
||||
|
||||
@ -1,6 +1,8 @@
|
||||
import path from "node:path";
|
||||
import { resolveAgentWorkspaceDir, resolveDefaultAgentId } from "../agents/agent-scope.js";
|
||||
import { CHANNEL_IDS, normalizeChatChannelId } from "../channels/registry.js";
|
||||
import { withBundledPluginAllowlistCompat } from "../plugins/bundled-compat.js";
|
||||
import { resolveBundledWebSearchPluginIds } from "../plugins/bundled-web-search.js";
|
||||
import {
|
||||
normalizePluginsConfig,
|
||||
resolveEffectiveEnableState,
|
||||
@ -351,15 +353,38 @@ function validateConfigObjectWithPluginsBase(
|
||||
};
|
||||
|
||||
let registryInfo: RegistryInfo | null = null;
|
||||
let compatConfig: OpenClawConfig | null | undefined;
|
||||
|
||||
const ensureCompatConfig = (): OpenClawConfig => {
|
||||
if (compatConfig !== undefined) {
|
||||
return compatConfig ?? config;
|
||||
}
|
||||
|
||||
const workspaceDir = resolveAgentWorkspaceDir(config, resolveDefaultAgentId(config));
|
||||
const bundledWebSearchPluginIds = resolveBundledWebSearchPluginIds({
|
||||
config,
|
||||
workspaceDir: workspaceDir ?? undefined,
|
||||
env: opts.env,
|
||||
});
|
||||
compatConfig = withBundledPluginAllowlistCompat({
|
||||
config,
|
||||
pluginIds: bundledWebSearchPluginIds,
|
||||
});
|
||||
return compatConfig ?? config;
|
||||
};
|
||||
|
||||
const ensureRegistry = (): RegistryInfo => {
|
||||
if (registryInfo) {
|
||||
return registryInfo;
|
||||
}
|
||||
|
||||
const workspaceDir = resolveAgentWorkspaceDir(config, resolveDefaultAgentId(config));
|
||||
const effectiveConfig = ensureCompatConfig();
|
||||
const workspaceDir = resolveAgentWorkspaceDir(
|
||||
effectiveConfig,
|
||||
resolveDefaultAgentId(effectiveConfig),
|
||||
);
|
||||
const registry = loadPluginManifestRegistry({
|
||||
config,
|
||||
config: effectiveConfig,
|
||||
workspaceDir: workspaceDir ?? undefined,
|
||||
env: opts.env,
|
||||
});
|
||||
@ -393,7 +418,7 @@ function validateConfigObjectWithPluginsBase(
|
||||
const ensureNormalizedPlugins = (): ReturnType<typeof normalizePluginsConfig> => {
|
||||
const info = ensureRegistry();
|
||||
if (!info.normalizedPlugins) {
|
||||
info.normalizedPlugins = normalizePluginsConfig(config.plugins);
|
||||
info.normalizedPlugins = normalizePluginsConfig(ensureCompatConfig().plugins);
|
||||
}
|
||||
return info.normalizedPlugins;
|
||||
};
|
||||
|
||||
@ -20,6 +20,7 @@ import type {
|
||||
ContextEngineInfo,
|
||||
AssembleResult,
|
||||
CompactResult,
|
||||
ContextEngineMaintenanceResult,
|
||||
IngestResult,
|
||||
} from "./types.js";
|
||||
|
||||
@ -118,6 +119,7 @@ class LegacySessionKeyStrictEngine implements ContextEngine {
|
||||
readonly ingestCalls: Array<Record<string, unknown>> = [];
|
||||
readonly assembleCalls: Array<Record<string, unknown>> = [];
|
||||
readonly compactCalls: Array<Record<string, unknown>> = [];
|
||||
readonly maintainCalls: Array<Record<string, unknown>> = [];
|
||||
readonly ingestedMessages: AgentMessage[] = [];
|
||||
|
||||
private rejectSessionKey(params: { sessionKey?: string }): void {
|
||||
@ -143,6 +145,7 @@ class LegacySessionKeyStrictEngine implements ContextEngine {
|
||||
sessionKey?: string;
|
||||
messages: AgentMessage[];
|
||||
tokenBudget?: number;
|
||||
prompt?: string;
|
||||
}): Promise<AssembleResult> {
|
||||
this.assembleCalls.push({ ...params });
|
||||
this.rejectSessionKey(params);
|
||||
@ -172,6 +175,21 @@ class LegacySessionKeyStrictEngine implements ContextEngine {
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async maintain(params: {
|
||||
sessionId: string;
|
||||
sessionKey?: string;
|
||||
sessionFile: string;
|
||||
runtimeContext?: Record<string, unknown>;
|
||||
}): Promise<ContextEngineMaintenanceResult> {
|
||||
this.maintainCalls.push({ ...params });
|
||||
this.rejectSessionKey(params);
|
||||
return {
|
||||
changed: false,
|
||||
bytesFreed: 0,
|
||||
rewrittenEntries: 0,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
class SessionKeyRuntimeErrorEngine implements ContextEngine {
|
||||
@ -217,6 +235,58 @@ class SessionKeyRuntimeErrorEngine implements ContextEngine {
|
||||
}
|
||||
}
|
||||
|
||||
class LegacyAssembleStrictEngine implements ContextEngine {
|
||||
readonly info: ContextEngineInfo = {
|
||||
id: "legacy-assemble-strict",
|
||||
name: "Legacy Assemble Strict Engine",
|
||||
};
|
||||
readonly assembleCalls: Array<Record<string, unknown>> = [];
|
||||
|
||||
async ingest(_params: {
|
||||
sessionId: string;
|
||||
sessionKey?: string;
|
||||
message: AgentMessage;
|
||||
isHeartbeat?: boolean;
|
||||
}): Promise<IngestResult> {
|
||||
return { ingested: true };
|
||||
}
|
||||
|
||||
async assemble(params: {
|
||||
sessionId: string;
|
||||
sessionKey?: string;
|
||||
messages: AgentMessage[];
|
||||
tokenBudget?: number;
|
||||
prompt?: string;
|
||||
}): Promise<AssembleResult> {
|
||||
this.assembleCalls.push({ ...params });
|
||||
if (Object.prototype.hasOwnProperty.call(params, "sessionKey")) {
|
||||
throw new Error("Unrecognized key(s) in object: 'sessionKey'");
|
||||
}
|
||||
if (Object.prototype.hasOwnProperty.call(params, "prompt")) {
|
||||
throw new Error("Unrecognized key(s) in object: 'prompt'");
|
||||
}
|
||||
return {
|
||||
messages: params.messages,
|
||||
estimatedTokens: 3,
|
||||
};
|
||||
}
|
||||
|
||||
async compact(_params: {
|
||||
sessionId: string;
|
||||
sessionKey?: string;
|
||||
sessionFile: string;
|
||||
tokenBudget?: number;
|
||||
compactionTarget?: "budget" | "threshold";
|
||||
customInstructions?: string;
|
||||
runtimeContext?: Record<string, unknown>;
|
||||
}): Promise<CompactResult> {
|
||||
return {
|
||||
ok: true,
|
||||
compacted: false,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// 1. Engine contract tests
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
@ -463,6 +533,24 @@ describe("Legacy sessionKey compatibility", () => {
|
||||
expect(strictEngine.ingestedMessages).toEqual([firstMessage, secondMessage]);
|
||||
});
|
||||
|
||||
it("retries strict maintain once and memoizes legacy mode there too", async () => {
|
||||
const engineId = `legacy-sessionkey-maintain-${Date.now().toString(36)}`;
|
||||
const strictEngine = new LegacySessionKeyStrictEngine();
|
||||
registerContextEngine(engineId, () => strictEngine);
|
||||
|
||||
const engine = await resolveContextEngine(configWithSlot(engineId));
|
||||
|
||||
await engine.maintain?.({
|
||||
sessionId: "s1",
|
||||
sessionKey: "agent:main:test",
|
||||
sessionFile: "/tmp/session.json",
|
||||
});
|
||||
|
||||
expect(strictEngine.maintainCalls).toHaveLength(2);
|
||||
expect(strictEngine.maintainCalls[0]).toHaveProperty("sessionKey", "agent:main:test");
|
||||
expect(strictEngine.maintainCalls[1]).not.toHaveProperty("sessionKey");
|
||||
});
|
||||
|
||||
it("does not retry non-compat runtime errors", async () => {
|
||||
const engineId = `sessionkey-runtime-${Date.now().toString(36)}`;
|
||||
const runtimeErrorEngine = new SessionKeyRuntimeErrorEngine();
|
||||
@ -605,6 +693,124 @@ describe("LegacyContextEngine parity", () => {
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// 5b. assemble() prompt forwarding
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
describe("assemble() prompt forwarding", () => {
|
||||
it("forwards prompt to the underlying engine", async () => {
|
||||
const engineId = `prompt-fwd-${Date.now().toString(36)}`;
|
||||
const calls: Array<Record<string, unknown>> = [];
|
||||
registerContextEngine(engineId, () => ({
|
||||
info: { id: engineId, name: "Prompt Tracker", version: "0.0.0" },
|
||||
async ingest() {
|
||||
return { ingested: false };
|
||||
},
|
||||
async assemble(params) {
|
||||
calls.push({ ...params });
|
||||
return { messages: params.messages, estimatedTokens: 0 };
|
||||
},
|
||||
async compact() {
|
||||
return { ok: true, compacted: false };
|
||||
},
|
||||
}));
|
||||
|
||||
const engine = await resolveContextEngine(configWithSlot(engineId));
|
||||
await engine.assemble({
|
||||
sessionId: "s1",
|
||||
messages: [makeMockMessage("user", "hello")],
|
||||
prompt: "hello",
|
||||
});
|
||||
|
||||
expect(calls).toHaveLength(1);
|
||||
expect(calls[0]).toHaveProperty("prompt", "hello");
|
||||
});
|
||||
|
||||
it("omits prompt when not provided", async () => {
|
||||
const engineId = `prompt-omit-${Date.now().toString(36)}`;
|
||||
const calls: Array<Record<string, unknown>> = [];
|
||||
registerContextEngine(engineId, () => ({
|
||||
info: { id: engineId, name: "Prompt Tracker", version: "0.0.0" },
|
||||
async ingest() {
|
||||
return { ingested: false };
|
||||
},
|
||||
async assemble(params) {
|
||||
calls.push({ ...params });
|
||||
return { messages: params.messages, estimatedTokens: 0 };
|
||||
},
|
||||
async compact() {
|
||||
return { ok: true, compacted: false };
|
||||
},
|
||||
}));
|
||||
|
||||
const engine = await resolveContextEngine(configWithSlot(engineId));
|
||||
await engine.assemble({
|
||||
sessionId: "s1",
|
||||
messages: [makeMockMessage("user", "hello")],
|
||||
});
|
||||
|
||||
expect(calls).toHaveLength(1);
|
||||
expect(calls[0]).not.toHaveProperty("prompt");
|
||||
});
|
||||
|
||||
it("does not leak prompt key when caller spreads undefined", async () => {
|
||||
// Guards against the pattern `{ prompt: params.prompt }` when params.prompt
|
||||
// is undefined — JavaScript keeps the key present with value undefined,
|
||||
// which breaks engines that guard with `'prompt' in params`.
|
||||
const engineId = `prompt-undef-${Date.now().toString(36)}`;
|
||||
const calls: Array<Record<string, unknown>> = [];
|
||||
registerContextEngine(engineId, () => ({
|
||||
info: { id: engineId, name: "Prompt Tracker", version: "0.0.0" },
|
||||
async ingest() {
|
||||
return { ingested: false };
|
||||
},
|
||||
async assemble(params) {
|
||||
calls.push({ ...params });
|
||||
return { messages: params.messages, estimatedTokens: 0 };
|
||||
},
|
||||
async compact() {
|
||||
return { ok: true, compacted: false };
|
||||
},
|
||||
}));
|
||||
|
||||
const engine = await resolveContextEngine(configWithSlot(engineId));
|
||||
// Simulate the attempt.ts call-site pattern: conditional spread
|
||||
const callerPrompt: string | undefined = undefined;
|
||||
await engine.assemble({
|
||||
sessionId: "s1",
|
||||
messages: [makeMockMessage("user", "hello")],
|
||||
...(callerPrompt !== undefined ? { prompt: callerPrompt } : {}),
|
||||
});
|
||||
|
||||
expect(calls).toHaveLength(1);
|
||||
expect(calls[0]).not.toHaveProperty("prompt");
|
||||
expect(Object.keys(calls[0] as object)).not.toContain("prompt");
|
||||
});
|
||||
|
||||
it("retries strict legacy assemble without sessionKey and prompt", async () => {
|
||||
const engineId = `prompt-legacy-${Date.now().toString(36)}`;
|
||||
const strictEngine = new LegacyAssembleStrictEngine();
|
||||
registerContextEngine(engineId, () => strictEngine);
|
||||
|
||||
const engine = await resolveContextEngine(configWithSlot(engineId));
|
||||
const result = await engine.assemble({
|
||||
sessionId: "s1",
|
||||
sessionKey: "agent:main:test",
|
||||
messages: [makeMockMessage("user", "hello")],
|
||||
prompt: "hello",
|
||||
});
|
||||
|
||||
expect(result.estimatedTokens).toBe(3);
|
||||
expect(strictEngine.assembleCalls).toHaveLength(3);
|
||||
expect(strictEngine.assembleCalls[0]).toHaveProperty("sessionKey", "agent:main:test");
|
||||
expect(strictEngine.assembleCalls[0]).toHaveProperty("prompt", "hello");
|
||||
expect(strictEngine.assembleCalls[1]).not.toHaveProperty("sessionKey");
|
||||
expect(strictEngine.assembleCalls[1]).toHaveProperty("prompt", "hello");
|
||||
expect(strictEngine.assembleCalls[2]).not.toHaveProperty("sessionKey");
|
||||
expect(strictEngine.assembleCalls[2]).not.toHaveProperty("prompt");
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// 6. Initialization guard
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@ -3,7 +3,12 @@ export type {
|
||||
ContextEngineInfo,
|
||||
AssembleResult,
|
||||
CompactResult,
|
||||
ContextEngineMaintenanceResult,
|
||||
ContextEngineRuntimeContext,
|
||||
IngestResult,
|
||||
TranscriptRewriteReplacement,
|
||||
TranscriptRewriteRequest,
|
||||
TranscriptRewriteResult,
|
||||
} from "./types.js";
|
||||
|
||||
export {
|
||||
|
||||
@ -16,17 +16,31 @@ type RegisterContextEngineForOwnerOptions = {
|
||||
const LEGACY_SESSION_KEY_COMPAT = Symbol.for("openclaw.contextEngine.sessionKeyCompat");
|
||||
const SESSION_KEY_COMPAT_METHODS = [
|
||||
"bootstrap",
|
||||
"maintain",
|
||||
"ingest",
|
||||
"ingestBatch",
|
||||
"afterTurn",
|
||||
"assemble",
|
||||
"compact",
|
||||
] as const;
|
||||
const LEGACY_COMPAT_PARAMS = ["sessionKey", "prompt"] as const;
|
||||
const LEGACY_COMPAT_METHOD_KEYS = {
|
||||
bootstrap: ["sessionKey"],
|
||||
maintain: ["sessionKey"],
|
||||
ingest: ["sessionKey"],
|
||||
ingestBatch: ["sessionKey"],
|
||||
afterTurn: ["sessionKey"],
|
||||
assemble: ["sessionKey", "prompt"],
|
||||
compact: ["sessionKey"],
|
||||
} as const;
|
||||
|
||||
type SessionKeyCompatMethodName = (typeof SESSION_KEY_COMPAT_METHODS)[number];
|
||||
type SessionKeyCompatParams = {
|
||||
sessionKey?: string;
|
||||
prompt?: string;
|
||||
};
|
||||
type LegacyCompatKey = (typeof LEGACY_COMPAT_PARAMS)[number];
|
||||
type LegacyCompatParamMap = Partial<Record<LegacyCompatKey, unknown>>;
|
||||
|
||||
function isSessionKeyCompatMethodName(value: PropertyKey): value is SessionKeyCompatMethodName {
|
||||
return (
|
||||
@ -34,21 +48,29 @@ function isSessionKeyCompatMethodName(value: PropertyKey): value is SessionKeyCo
|
||||
);
|
||||
}
|
||||
|
||||
function hasOwnSessionKey(params: unknown): params is SessionKeyCompatParams {
|
||||
function hasOwnLegacyCompatKey<K extends LegacyCompatKey>(
|
||||
params: unknown,
|
||||
key: K,
|
||||
): params is SessionKeyCompatParams & Required<Pick<LegacyCompatParamMap, K>> {
|
||||
return (
|
||||
params !== null &&
|
||||
typeof params === "object" &&
|
||||
Object.prototype.hasOwnProperty.call(params, "sessionKey")
|
||||
Object.prototype.hasOwnProperty.call(params, key)
|
||||
);
|
||||
}
|
||||
|
||||
function withoutSessionKey<T extends SessionKeyCompatParams>(params: T): T {
|
||||
function withoutLegacyCompatKeys<T extends SessionKeyCompatParams>(
|
||||
params: T,
|
||||
keys: Iterable<LegacyCompatKey>,
|
||||
): T {
|
||||
const legacyParams = { ...params };
|
||||
delete legacyParams.sessionKey;
|
||||
for (const key of keys) {
|
||||
delete legacyParams[key];
|
||||
}
|
||||
return legacyParams;
|
||||
}
|
||||
|
||||
function issueRejectsSessionKeyStrictly(issue: unknown): boolean {
|
||||
function issueRejectsLegacyCompatKeyStrictly(issue: unknown, key: LegacyCompatKey): boolean {
|
||||
if (!issue || typeof issue !== "object") {
|
||||
return false;
|
||||
}
|
||||
@ -61,12 +83,12 @@ function issueRejectsSessionKeyStrictly(issue: unknown): boolean {
|
||||
if (
|
||||
issueRecord.code === "unrecognized_keys" &&
|
||||
Array.isArray(issueRecord.keys) &&
|
||||
issueRecord.keys.some((key) => key === "sessionKey")
|
||||
issueRecord.keys.some((issueKey) => issueKey === key)
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return isSessionKeyCompatibilityError(issueRecord.message);
|
||||
return isLegacyCompatErrorForKey(issueRecord.message, key);
|
||||
}
|
||||
|
||||
function* iterateErrorChain(error: unknown) {
|
||||
@ -82,31 +104,45 @@ function* iterateErrorChain(error: unknown) {
|
||||
}
|
||||
}
|
||||
|
||||
const SESSION_KEY_UNKNOWN_FIELD_PATTERNS = [
|
||||
/\bunrecognized key(?:\(s\)|s)? in object:.*['"`]sessionKey['"`]/i,
|
||||
/\badditional propert(?:y|ies)\b.*['"`]sessionKey['"`]/i,
|
||||
/\bmust not have additional propert(?:y|ies)\b.*['"`]sessionKey['"`]/i,
|
||||
/\b(?:unexpected|extraneous)\s+(?:property|properties|field|fields|key|keys)\b.*['"`]sessionKey['"`]/i,
|
||||
/\b(?:unknown|invalid)\s+(?:property|properties|field|fields|key|keys)\b.*['"`]sessionKey['"`]/i,
|
||||
/['"`]sessionKey['"`].*\b(?:was|is)\s+not allowed\b/i,
|
||||
/"code"\s*:\s*"unrecognized_keys"[^]*"sessionKey"/i,
|
||||
] as const;
|
||||
const LEGACY_UNKNOWN_FIELD_PATTERNS: Record<LegacyCompatKey, readonly RegExp[]> = {
|
||||
sessionKey: [
|
||||
/\bunrecognized key(?:\(s\)|s)? in object:.*['"`]sessionKey['"`]/i,
|
||||
/\badditional propert(?:y|ies)\b.*['"`]sessionKey['"`]/i,
|
||||
/\bmust not have additional propert(?:y|ies)\b.*['"`]sessionKey['"`]/i,
|
||||
/\b(?:unexpected|extraneous)\s+(?:property|properties|field|fields|key|keys)\b.*['"`]sessionKey['"`]/i,
|
||||
/\b(?:unknown|invalid)\s+(?:property|properties|field|fields|key|keys)\b.*['"`]sessionKey['"`]/i,
|
||||
/['"`]sessionKey['"`].*\b(?:was|is)\s+not allowed\b/i,
|
||||
/"code"\s*:\s*"unrecognized_keys"[^]*"sessionKey"/i,
|
||||
],
|
||||
prompt: [
|
||||
/\bunrecognized key(?:\(s\)|s)? in object:.*['"`]prompt['"`]/i,
|
||||
/\badditional propert(?:y|ies)\b.*['"`]prompt['"`]/i,
|
||||
/\bmust not have additional propert(?:y|ies)\b.*['"`]prompt['"`]/i,
|
||||
/\b(?:unexpected|extraneous)\s+(?:property|properties|field|fields|key|keys)\b.*['"`]prompt['"`]/i,
|
||||
/\b(?:unknown|invalid)\s+(?:property|properties|field|fields|key|keys)\b.*['"`]prompt['"`]/i,
|
||||
/['"`]prompt['"`].*\b(?:was|is)\s+not allowed\b/i,
|
||||
/"code"\s*:\s*"unrecognized_keys"[^]*"prompt"/i,
|
||||
],
|
||||
} as const;
|
||||
|
||||
function isSessionKeyUnknownFieldValidationMessage(message: string): boolean {
|
||||
return SESSION_KEY_UNKNOWN_FIELD_PATTERNS.some((pattern) => pattern.test(message));
|
||||
function isLegacyCompatUnknownFieldValidationMessage(
|
||||
message: string,
|
||||
key: LegacyCompatKey,
|
||||
): boolean {
|
||||
return LEGACY_UNKNOWN_FIELD_PATTERNS[key].some((pattern) => pattern.test(message));
|
||||
}
|
||||
|
||||
function isSessionKeyCompatibilityError(error: unknown): boolean {
|
||||
function isLegacyCompatErrorForKey(error: unknown, key: LegacyCompatKey): boolean {
|
||||
for (const candidate of iterateErrorChain(error)) {
|
||||
if (Array.isArray(candidate)) {
|
||||
if (candidate.some((entry) => issueRejectsSessionKeyStrictly(entry))) {
|
||||
if (candidate.some((entry) => issueRejectsLegacyCompatKeyStrictly(entry, key))) {
|
||||
return true;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (typeof candidate === "string") {
|
||||
if (isSessionKeyUnknownFieldValidationMessage(candidate)) {
|
||||
if (isLegacyCompatUnknownFieldValidationMessage(candidate, key)) {
|
||||
return true;
|
||||
}
|
||||
continue;
|
||||
@ -124,21 +160,21 @@ function isSessionKeyCompatibilityError(error: unknown): boolean {
|
||||
|
||||
if (
|
||||
Array.isArray(issueContainer.issues) &&
|
||||
issueContainer.issues.some((issue) => issueRejectsSessionKeyStrictly(issue))
|
||||
issueContainer.issues.some((issue) => issueRejectsLegacyCompatKeyStrictly(issue, key))
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (
|
||||
Array.isArray(issueContainer.errors) &&
|
||||
issueContainer.errors.some((issue) => issueRejectsSessionKeyStrictly(issue))
|
||||
issueContainer.errors.some((issue) => issueRejectsLegacyCompatKeyStrictly(issue, key))
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (
|
||||
typeof issueContainer.message === "string" &&
|
||||
isSessionKeyUnknownFieldValidationMessage(issueContainer.message)
|
||||
isLegacyCompatUnknownFieldValidationMessage(issueContainer.message, key)
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
@ -147,25 +183,66 @@ function isSessionKeyCompatibilityError(error: unknown): boolean {
|
||||
return false;
|
||||
}
|
||||
|
||||
async function invokeWithLegacySessionKeyCompat<TResult, TParams extends SessionKeyCompatParams>(
|
||||
function detectRejectedLegacyCompatKeys(
|
||||
error: unknown,
|
||||
allowedKeys: readonly LegacyCompatKey[],
|
||||
): Set<LegacyCompatKey> {
|
||||
const rejectedKeys = new Set<LegacyCompatKey>();
|
||||
for (const key of allowedKeys) {
|
||||
if (isLegacyCompatErrorForKey(error, key)) {
|
||||
rejectedKeys.add(key);
|
||||
}
|
||||
}
|
||||
return rejectedKeys;
|
||||
}
|
||||
|
||||
async function invokeWithLegacyCompat<TResult, TParams extends SessionKeyCompatParams>(
|
||||
method: (params: TParams) => Promise<TResult> | TResult,
|
||||
params: TParams,
|
||||
allowedKeys: readonly LegacyCompatKey[],
|
||||
opts?: {
|
||||
onLegacyModeDetected?: () => void;
|
||||
onLegacyKeysDetected?: (keys: Set<LegacyCompatKey>) => void;
|
||||
rejectedKeys?: ReadonlySet<LegacyCompatKey>;
|
||||
},
|
||||
): Promise<TResult> {
|
||||
if (!hasOwnSessionKey(params)) {
|
||||
const activeRejectedKeys = new Set(opts?.rejectedKeys ?? []);
|
||||
const availableKeys = allowedKeys.filter((key) => hasOwnLegacyCompatKey(params, key));
|
||||
if (availableKeys.length === 0) {
|
||||
return await method(params);
|
||||
}
|
||||
|
||||
let currentParams =
|
||||
activeRejectedKeys.size > 0 ? withoutLegacyCompatKeys(params, activeRejectedKeys) : params;
|
||||
|
||||
try {
|
||||
return await method(params);
|
||||
return await method(currentParams);
|
||||
} catch (error) {
|
||||
if (!isSessionKeyCompatibilityError(error)) {
|
||||
throw error;
|
||||
let currentError = error;
|
||||
while (true) {
|
||||
const rejectedKeys = detectRejectedLegacyCompatKeys(currentError, availableKeys);
|
||||
let learnedNewKey = false;
|
||||
for (const key of rejectedKeys) {
|
||||
if (!activeRejectedKeys.has(key)) {
|
||||
activeRejectedKeys.add(key);
|
||||
learnedNewKey = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!learnedNewKey) {
|
||||
throw currentError;
|
||||
}
|
||||
|
||||
opts?.onLegacyModeDetected?.();
|
||||
opts?.onLegacyKeysDetected?.(rejectedKeys);
|
||||
currentParams = withoutLegacyCompatKeys(params, activeRejectedKeys);
|
||||
|
||||
try {
|
||||
return await method(currentParams);
|
||||
} catch (retryError) {
|
||||
currentError = retryError;
|
||||
}
|
||||
}
|
||||
opts?.onLegacyModeDetected?.();
|
||||
return await method(withoutSessionKey(params));
|
||||
}
|
||||
}
|
||||
|
||||
@ -178,6 +255,7 @@ function wrapContextEngineWithSessionKeyCompat(engine: ContextEngine): ContextEn
|
||||
}
|
||||
|
||||
let isLegacy = false;
|
||||
const rejectedKeys = new Set<LegacyCompatKey>();
|
||||
const proxy: ContextEngine = new Proxy(engine, {
|
||||
get(target, property, receiver) {
|
||||
if (property === LEGACY_SESSION_KEY_COMPAT) {
|
||||
@ -195,13 +273,23 @@ function wrapContextEngineWithSessionKeyCompat(engine: ContextEngine): ContextEn
|
||||
|
||||
return (params: SessionKeyCompatParams) => {
|
||||
const method = value.bind(target) as (params: SessionKeyCompatParams) => unknown;
|
||||
if (isLegacy && hasOwnSessionKey(params)) {
|
||||
return method(withoutSessionKey(params));
|
||||
const allowedKeys = LEGACY_COMPAT_METHOD_KEYS[property];
|
||||
if (
|
||||
isLegacy &&
|
||||
allowedKeys.some((key) => rejectedKeys.has(key) && hasOwnLegacyCompatKey(params, key))
|
||||
) {
|
||||
return method(withoutLegacyCompatKeys(params, rejectedKeys));
|
||||
}
|
||||
return invokeWithLegacySessionKeyCompat(method, params, {
|
||||
return invokeWithLegacyCompat(method, params, allowedKeys, {
|
||||
onLegacyModeDetected: () => {
|
||||
isLegacy = true;
|
||||
},
|
||||
onLegacyKeysDetected: (keys) => {
|
||||
for (const key of keys) {
|
||||
rejectedKeys.add(key);
|
||||
}
|
||||
},
|
||||
rejectedKeys,
|
||||
});
|
||||
};
|
||||
},
|
||||
|
||||
@ -57,7 +57,43 @@ export type SubagentSpawnPreparation = {
|
||||
};
|
||||
|
||||
export type SubagentEndReason = "deleted" | "completed" | "swept" | "released";
|
||||
export type ContextEngineRuntimeContext = Record<string, unknown>;
|
||||
|
||||
export type TranscriptRewriteReplacement = {
|
||||
/** Existing transcript entry id to replace on the active branch. */
|
||||
entryId: string;
|
||||
/** Replacement message content for that entry. */
|
||||
message: AgentMessage;
|
||||
};
|
||||
|
||||
export type TranscriptRewriteRequest = {
|
||||
/** Message entry replacements to apply in one branch-and-reappend pass. */
|
||||
replacements: TranscriptRewriteReplacement[];
|
||||
};
|
||||
|
||||
export type TranscriptRewriteResult = {
|
||||
/** Whether the active branch changed. */
|
||||
changed: boolean;
|
||||
/** Estimated bytes removed from the active branch message payloads. */
|
||||
bytesFreed: number;
|
||||
/** Number of transcript message entries rewritten. */
|
||||
rewrittenEntries: number;
|
||||
/** Optional reason when no rewrite occurred. */
|
||||
reason?: string;
|
||||
};
|
||||
|
||||
export type ContextEngineMaintenanceResult = TranscriptRewriteResult;
|
||||
|
||||
export type ContextEngineRuntimeContext = Record<string, unknown> & {
|
||||
/**
|
||||
* Safe transcript rewrite helper implemented by the runtime.
|
||||
*
|
||||
* Engines decide what is safe to rewrite; the runtime owns how the session
|
||||
* DAG is updated on disk.
|
||||
*/
|
||||
rewriteTranscriptEntries?: (
|
||||
request: TranscriptRewriteRequest,
|
||||
) => Promise<TranscriptRewriteResult>;
|
||||
};
|
||||
|
||||
/**
|
||||
* ContextEngine defines the pluggable contract for context management.
|
||||
@ -78,6 +114,19 @@ export interface ContextEngine {
|
||||
sessionFile: string;
|
||||
}): Promise<BootstrapResult>;
|
||||
|
||||
/**
|
||||
* Run transcript maintenance after bootstrap, successful turns, or compaction.
|
||||
*
|
||||
* Engines can use runtimeContext.rewriteTranscriptEntries() to request safe
|
||||
* branch-and-reappend transcript rewrites without depending on Pi internals.
|
||||
*/
|
||||
maintain?(params: {
|
||||
sessionId: string;
|
||||
sessionKey?: string;
|
||||
sessionFile: string;
|
||||
runtimeContext?: ContextEngineRuntimeContext;
|
||||
}): Promise<ContextEngineMaintenanceResult>;
|
||||
|
||||
/**
|
||||
* Ingest a single message into the engine's store.
|
||||
*/
|
||||
@ -134,6 +183,8 @@ export interface ContextEngine {
|
||||
/** Current model identifier (e.g. "claude-opus-4", "gpt-4o", "qwen2.5-7b").
|
||||
* Allows context engine plugins to adapt formatting per model. */
|
||||
model?: string;
|
||||
/** The incoming user prompt for this turn (useful for retrieval-oriented engines). */
|
||||
prompt?: string;
|
||||
}): Promise<AssembleResult>;
|
||||
|
||||
/**
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
import {
|
||||
clearFastTestEnv,
|
||||
loadRunCronIsolatedAgentTurn,
|
||||
@ -8,11 +8,7 @@ import {
|
||||
runWithModelFallbackMock,
|
||||
} from "./run.test-harness.js";
|
||||
|
||||
type RunModule = typeof import("./run.js");
|
||||
type SandboxConfigModule = typeof import("../../agents/sandbox/config.js");
|
||||
|
||||
let runCronIsolatedAgentTurn: RunModule["runCronIsolatedAgentTurn"];
|
||||
let resolveSandboxConfigForAgent: SandboxConfigModule["resolveSandboxConfigForAgent"];
|
||||
const runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn();
|
||||
|
||||
function makeJob(overrides?: Record<string, unknown>) {
|
||||
return {
|
||||
@ -85,10 +81,7 @@ function expectDefaultSandboxPreserved(
|
||||
describe("runCronIsolatedAgentTurn sandbox config preserved", () => {
|
||||
let previousFastTestEnv: string | undefined;
|
||||
|
||||
beforeEach(async () => {
|
||||
vi.resetModules();
|
||||
runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn();
|
||||
({ resolveSandboxConfigForAgent } = await import("../../agents/sandbox/config.js"));
|
||||
beforeEach(() => {
|
||||
previousFastTestEnv = clearFastTestEnv();
|
||||
resetRunCronIsolatedAgentTurnHarness();
|
||||
});
|
||||
@ -132,6 +125,7 @@ describe("runCronIsolatedAgentTurn sandbox config preserved", () => {
|
||||
|
||||
expect(runWithModelFallbackMock).toHaveBeenCalledTimes(1);
|
||||
const runCfg = runWithModelFallbackMock.mock.calls[0]?.[0]?.cfg;
|
||||
const { resolveSandboxConfigForAgent } = await import("../../agents/sandbox/config.js");
|
||||
const resolvedSandbox = resolveSandboxConfigForAgent(runCfg, "specialist");
|
||||
|
||||
expectDefaultSandboxPreserved(runCfg);
|
||||
|
||||
@ -58,6 +58,13 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProviderPlu
|
||||
throw new Error("OpenAI API key missing");
|
||||
}
|
||||
|
||||
const controller = new AbortController();
|
||||
const timeoutMs = req.timeoutMs;
|
||||
const timeout =
|
||||
typeof timeoutMs === "number" && Number.isFinite(timeoutMs) && timeoutMs > 0
|
||||
? setTimeout(() => controller.abort(), timeoutMs)
|
||||
: undefined;
|
||||
|
||||
const response = await fetch(`${resolveOpenAIBaseUrl(req.cfg)}/images/generations`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
@ -70,6 +77,9 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProviderPlu
|
||||
n: req.count ?? 1,
|
||||
size: req.size ?? DEFAULT_SIZE,
|
||||
}),
|
||||
signal: controller.signal,
|
||||
}).finally(() => {
|
||||
clearTimeout(timeout);
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
|
||||
@ -25,6 +25,7 @@ export type ImageGenerationRequest = {
|
||||
cfg: OpenClawConfig;
|
||||
agentDir?: string;
|
||||
authStore?: AuthProfileStore;
|
||||
timeoutMs?: number;
|
||||
count?: number;
|
||||
size?: string;
|
||||
aspectRatio?: string;
|
||||
|
||||
@ -318,14 +318,16 @@ async function handleBroadcastAction(
|
||||
throw new Error("Broadcast requires at least one target in --targets.");
|
||||
}
|
||||
const channelHint = readStringParam(params, "channel");
|
||||
const configured = await listConfiguredMessageChannels(input.cfg);
|
||||
if (configured.length === 0) {
|
||||
throw new Error("Broadcast requires at least one configured channel.");
|
||||
}
|
||||
const targetChannels =
|
||||
channelHint && channelHint.trim().toLowerCase() !== "all"
|
||||
? [await resolveChannel(input.cfg, { channel: channelHint }, input.toolContext)]
|
||||
: configured;
|
||||
: await (async () => {
|
||||
const configured = await listConfiguredMessageChannels(input.cfg);
|
||||
if (configured.length === 0) {
|
||||
throw new Error("Broadcast requires at least one configured channel.");
|
||||
}
|
||||
return configured;
|
||||
})();
|
||||
const results: Array<{
|
||||
channel: ChannelId;
|
||||
to: string;
|
||||
|
||||
@ -1,58 +1,72 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const completeMock = vi.fn();
|
||||
const minimaxUnderstandImageMock = vi.fn();
|
||||
const ensureOpenClawModelsJsonMock = vi.fn(async () => {});
|
||||
const getApiKeyForModelMock = vi.fn(async () => ({
|
||||
apiKey: "oauth-test", // pragma: allowlist secret
|
||||
source: "test",
|
||||
mode: "oauth",
|
||||
const hoisted = vi.hoisted(() => ({
|
||||
completeMock: vi.fn(),
|
||||
minimaxUnderstandImageMock: vi.fn(),
|
||||
ensureOpenClawModelsJsonMock: vi.fn(async () => {}),
|
||||
getApiKeyForModelMock: vi.fn(async () => ({
|
||||
apiKey: "oauth-test", // pragma: allowlist secret
|
||||
source: "test",
|
||||
mode: "oauth",
|
||||
})),
|
||||
resolveApiKeyForProviderMock: vi.fn(async () => ({
|
||||
apiKey: "oauth-test", // pragma: allowlist secret
|
||||
source: "test",
|
||||
mode: "oauth",
|
||||
})),
|
||||
requireApiKeyMock: vi.fn((auth: { apiKey?: string }) => auth.apiKey ?? ""),
|
||||
setRuntimeApiKeyMock: vi.fn(),
|
||||
discoverModelsMock: vi.fn(),
|
||||
}));
|
||||
const resolveApiKeyForProviderMock = vi.fn(async () => ({
|
||||
apiKey: "oauth-test", // pragma: allowlist secret
|
||||
source: "test",
|
||||
mode: "oauth",
|
||||
}));
|
||||
const requireApiKeyMock = vi.fn((auth: { apiKey?: string }) => auth.apiKey ?? "");
|
||||
const setRuntimeApiKeyMock = vi.fn();
|
||||
const discoverModelsMock = vi.fn();
|
||||
type ImageModule = typeof import("./image.js");
|
||||
const {
|
||||
completeMock,
|
||||
minimaxUnderstandImageMock,
|
||||
ensureOpenClawModelsJsonMock,
|
||||
getApiKeyForModelMock,
|
||||
resolveApiKeyForProviderMock,
|
||||
requireApiKeyMock,
|
||||
setRuntimeApiKeyMock,
|
||||
discoverModelsMock,
|
||||
} = hoisted;
|
||||
|
||||
let describeImageWithModel: ImageModule["describeImageWithModel"];
|
||||
vi.mock("@mariozechner/pi-ai", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("@mariozechner/pi-ai")>();
|
||||
return {
|
||||
...actual,
|
||||
complete: completeMock,
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../../agents/minimax-vlm.js", () => ({
|
||||
isMinimaxVlmProvider: (provider: string) =>
|
||||
provider === "minimax" || provider === "minimax-portal",
|
||||
isMinimaxVlmModel: (provider: string, modelId: string) =>
|
||||
(provider === "minimax" || provider === "minimax-portal") && modelId === "MiniMax-VL-01",
|
||||
minimaxUnderstandImage: minimaxUnderstandImageMock,
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/models-config.js", () => ({
|
||||
ensureOpenClawModelsJson: ensureOpenClawModelsJsonMock,
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/model-auth.js", () => ({
|
||||
getApiKeyForModel: getApiKeyForModelMock,
|
||||
resolveApiKeyForProvider: resolveApiKeyForProviderMock,
|
||||
requireApiKey: requireApiKeyMock,
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/pi-model-discovery-runtime.js", () => ({
|
||||
discoverAuthStorage: () => ({
|
||||
setRuntimeApiKey: setRuntimeApiKeyMock,
|
||||
}),
|
||||
discoverModels: discoverModelsMock,
|
||||
}));
|
||||
|
||||
const { describeImageWithModel } = await import("./image.js");
|
||||
|
||||
describe("describeImageWithModel", () => {
|
||||
beforeEach(async () => {
|
||||
vi.resetModules();
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.doMock("@mariozechner/pi-ai", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("@mariozechner/pi-ai")>();
|
||||
return {
|
||||
...actual,
|
||||
complete: completeMock,
|
||||
};
|
||||
});
|
||||
vi.doMock("../../agents/minimax-vlm.js", () => ({
|
||||
isMinimaxVlmProvider: (provider: string) =>
|
||||
provider === "minimax" || provider === "minimax-portal",
|
||||
isMinimaxVlmModel: (provider: string, modelId: string) =>
|
||||
(provider === "minimax" || provider === "minimax-portal") && modelId === "MiniMax-VL-01",
|
||||
minimaxUnderstandImage: minimaxUnderstandImageMock,
|
||||
}));
|
||||
vi.doMock("../../agents/models-config.js", () => ({
|
||||
ensureOpenClawModelsJson: ensureOpenClawModelsJsonMock,
|
||||
}));
|
||||
vi.doMock("../../agents/model-auth.js", () => ({
|
||||
getApiKeyForModel: getApiKeyForModelMock,
|
||||
resolveApiKeyForProvider: resolveApiKeyForProviderMock,
|
||||
requireApiKey: requireApiKeyMock,
|
||||
}));
|
||||
vi.doMock("../../agents/pi-model-discovery-runtime.js", () => ({
|
||||
discoverAuthStorage: () => ({
|
||||
setRuntimeApiKey: setRuntimeApiKeyMock,
|
||||
}),
|
||||
discoverModels: discoverModelsMock,
|
||||
}));
|
||||
({ describeImageWithModel } = await import("./image.js"));
|
||||
minimaxUnderstandImageMock.mockResolvedValue("portal ok");
|
||||
discoverModelsMock.mockReturnValue({
|
||||
find: vi.fn(() => ({
|
||||
|
||||
@ -196,9 +196,19 @@ export async function describeImagesWithModel(
|
||||
}
|
||||
|
||||
const context = buildImageContext(prompt, params.images);
|
||||
const controller = new AbortController();
|
||||
const timeout =
|
||||
typeof params.timeoutMs === "number" &&
|
||||
Number.isFinite(params.timeoutMs) &&
|
||||
params.timeoutMs > 0
|
||||
? setTimeout(() => controller.abort(), params.timeoutMs)
|
||||
: undefined;
|
||||
const message = await complete(model, context, {
|
||||
apiKey,
|
||||
maxTokens: resolveImageToolMaxTokens(model.maxTokens, params.maxTokens ?? 512),
|
||||
signal: controller.signal,
|
||||
}).finally(() => {
|
||||
clearTimeout(timeout);
|
||||
});
|
||||
const text = coerceImageAssistantText({
|
||||
message,
|
||||
|
||||
@ -3,6 +3,19 @@ import { withEnv } from "../test-utils/env.js";
|
||||
import { decodeCapturedOutputBuffer, parseWindowsCodePage, sanitizeEnv } from "./invoke.js";
|
||||
import { buildNodeInvokeResultParams } from "./runner.js";
|
||||
|
||||
function getEnvValueCaseInsensitive(
|
||||
env: Record<string, string>,
|
||||
expectedKey: string,
|
||||
): string | undefined {
|
||||
const direct = env[expectedKey];
|
||||
if (direct !== undefined) {
|
||||
return direct;
|
||||
}
|
||||
const upper = expectedKey.toUpperCase();
|
||||
const actualKey = Object.keys(env).find((key) => key.toUpperCase() === upper);
|
||||
return actualKey ? env[actualKey] : undefined;
|
||||
}
|
||||
|
||||
describe("node-host sanitizeEnv", () => {
|
||||
it("ignores PATH overrides", () => {
|
||||
withEnv({ PATH: "/usr/bin" }, () => {
|
||||
@ -55,7 +68,7 @@ describe("node-host sanitizeEnv", () => {
|
||||
it("preserves inherited non-portable Windows-style env keys", () => {
|
||||
withEnv({ "ProgramFiles(x86)": "C:\\Program Files (x86)" }, () => {
|
||||
const env = sanitizeEnv(undefined);
|
||||
expect(env["ProgramFiles(x86)"]).toBe("C:\\Program Files (x86)");
|
||||
expect(getEnvValueCaseInsensitive(env, "ProgramFiles(x86)")).toBe("C:\\Program Files (x86)");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@ -65,6 +65,15 @@ export type { ReplyPayload } from "../auto-reply/types.js";
|
||||
export type { WizardPrompter } from "../wizard/prompts.js";
|
||||
export type { ContextEngineFactory } from "../context-engine/registry.js";
|
||||
export type { DiagnosticEventPayload } from "../infra/diagnostic-events.js";
|
||||
export type {
|
||||
ContextEngine,
|
||||
ContextEngineInfo,
|
||||
ContextEngineMaintenanceResult,
|
||||
ContextEngineRuntimeContext,
|
||||
TranscriptRewriteReplacement,
|
||||
TranscriptRewriteRequest,
|
||||
TranscriptRewriteResult,
|
||||
} from "../context-engine/types.js";
|
||||
|
||||
export { emptyPluginConfigSchema } from "../plugins/config-schema.js";
|
||||
export { registerContextEngine } from "../context-engine/registry.js";
|
||||
|
||||
7
src/plugin-sdk/matrix-runtime-heavy.ts
Normal file
7
src/plugin-sdk/matrix-runtime-heavy.ts
Normal file
@ -0,0 +1,7 @@
|
||||
// Matrix runtime helpers that are needed internally by the bundled extension
|
||||
// but are too heavy for the light external runtime-api surface.
|
||||
|
||||
export { ensureConfiguredAcpBindingReady } from "../acp/persistent-bindings.lifecycle.js";
|
||||
export { resolveConfiguredAcpBindingRecord } from "../acp/persistent-bindings.resolve.js";
|
||||
export { maybeCreateMatrixMigrationSnapshot } from "../infra/matrix-migration-snapshot.js";
|
||||
export { dispatchReplyFromConfigWithSettledDispatcher } from "./inbound-reply-dispatch.js";
|
||||
11
src/plugin-sdk/matrix-runtime-shared.ts
Normal file
11
src/plugin-sdk/matrix-runtime-shared.ts
Normal file
@ -0,0 +1,11 @@
|
||||
// Narrow shared Matrix runtime exports for light runtime-api consumers.
|
||||
|
||||
export type {
|
||||
ChannelDirectoryEntry,
|
||||
ChannelMessageActionContext,
|
||||
} from "../channels/plugins/types.js";
|
||||
export type { OpenClawConfig } from "../config/config.js";
|
||||
export { formatZonedTimestamp } from "../infra/format-time/format-datetime.js";
|
||||
export type { PluginRuntime, RuntimeLogger } from "../plugins/runtime/types.js";
|
||||
export type { RuntimeEnv } from "../runtime.js";
|
||||
export type { WizardPrompter } from "../wizard/prompts.js";
|
||||
@ -27,8 +27,6 @@ export {
|
||||
patchAllowlistUsersInConfigEntries,
|
||||
summarizeMapping,
|
||||
} from "../channels/allowlists/resolve-utils.js";
|
||||
export { ensureConfiguredAcpBindingReady } from "../acp/persistent-bindings.lifecycle.js";
|
||||
export { resolveConfiguredAcpBindingRecord } from "../acp/persistent-bindings.resolve.js";
|
||||
export { resolveControlCommandGate } from "../channels/command-gating.js";
|
||||
export type { NormalizedLocation } from "../channels/location.js";
|
||||
export { formatLocationText, toLocationContext } from "../channels/location.js";
|
||||
@ -112,7 +110,6 @@ export { ToolPolicySchema } from "../config/zod-schema.agent-runtime.js";
|
||||
export { MarkdownConfigSchema } from "../config/zod-schema.core.js";
|
||||
export { formatZonedTimestamp } from "../infra/format-time/format-datetime.js";
|
||||
export { fetchWithSsrFGuard } from "../infra/net/fetch-guard.js";
|
||||
export { maybeCreateMatrixMigrationSnapshot } from "../infra/matrix-migration-snapshot.js";
|
||||
export {
|
||||
getSessionBindingService,
|
||||
registerSessionBindingAdapter,
|
||||
@ -150,7 +147,6 @@ export { readJsonFileWithFallback, writeJsonFileAtomically } from "./json-store.
|
||||
export { formatResolvedUnresolvedNote } from "./resolution-notes.js";
|
||||
export { runPluginCommandWithTimeout } from "./run-command.js";
|
||||
export { createLoggerBackedRuntime, resolveRuntimeEnv } from "./runtime.js";
|
||||
export { dispatchReplyFromConfigWithSettledDispatcher } from "./inbound-reply-dispatch.js";
|
||||
export {
|
||||
buildProbeChannelStatusSummary,
|
||||
collectStatusIssuesFromLastError,
|
||||
|
||||
@ -41,6 +41,7 @@ export {
|
||||
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF,
|
||||
resolveCloudflareAiGatewayBaseUrl,
|
||||
} from "../agents/cloudflare-ai-gateway.js";
|
||||
export { resolveAnthropicVertexRegion } from "../agents/anthropic-vertex-provider.js";
|
||||
export {
|
||||
discoverHuggingfaceModels,
|
||||
HUGGINGFACE_BASE_URL,
|
||||
|
||||
@ -38,7 +38,7 @@ const RUNTIME_API_EXPORT_GUARDS: Record<string, readonly string[]> = {
|
||||
"extensions/matrix/runtime-api.ts": [
|
||||
'export * from "./src/auth-precedence.js";',
|
||||
'export * from "./helper-api.js";',
|
||||
'export { assertHttpUrlTargetsPrivateNetwork, closeDispatcher, createPinnedDispatcher, resolvePinnedHostnameWithPolicy, ssrfPolicyFromAllowPrivateNetwork, type LookupFn, type SsrFPolicy } from "openclaw/plugin-sdk/infra-runtime";',
|
||||
'export { assertHttpUrlTargetsPrivateNetwork, closeDispatcher, createPinnedDispatcher, resolvePinnedHostnameWithPolicy, ssrfPolicyFromAllowPrivateNetwork, type LookupFn, type SsrFPolicy } from "openclaw/plugin-sdk/ssrf-runtime";',
|
||||
'export { setMatrixThreadBindingIdleTimeoutBySessionKey, setMatrixThreadBindingMaxAgeBySessionKey } from "./thread-bindings-runtime.js";',
|
||||
'export { writeJsonFileAtomically } from "../../src/plugin-sdk/json-store.js";',
|
||||
'export type { ChannelDirectoryEntry, ChannelMessageActionContext, OpenClawConfig, PluginRuntime, RuntimeLogger, RuntimeEnv, WizardPrompter } from "../../src/plugin-sdk/matrix.js";',
|
||||
|
||||
14
src/plugin-sdk/ssrf-runtime.ts
Normal file
14
src/plugin-sdk/ssrf-runtime.ts
Normal file
@ -0,0 +1,14 @@
|
||||
// Narrow SSRF helpers for extensions that need pinned-dispatcher and policy
|
||||
// utilities without loading the full infra-runtime surface.
|
||||
|
||||
export {
|
||||
closeDispatcher,
|
||||
createPinnedDispatcher,
|
||||
resolvePinnedHostnameWithPolicy,
|
||||
type LookupFn,
|
||||
type SsrFPolicy,
|
||||
} from "../infra/net/ssrf.js";
|
||||
export {
|
||||
assertHttpUrlTargetsPrivateNetwork,
|
||||
ssrfPolicyFromAllowPrivateNetwork,
|
||||
} from "./ssrf-policy.js";
|
||||
@ -36,6 +36,7 @@ import type {
|
||||
import * as directoryRuntimeSdk from "openclaw/plugin-sdk/directory-runtime";
|
||||
import * as infraRuntimeSdk from "openclaw/plugin-sdk/infra-runtime";
|
||||
import * as lazyRuntimeSdk from "openclaw/plugin-sdk/lazy-runtime";
|
||||
import * as matrixRuntimeSharedSdk from "openclaw/plugin-sdk/matrix-runtime-shared";
|
||||
import * as mediaRuntimeSdk from "openclaw/plugin-sdk/media-runtime";
|
||||
import * as ollamaSetupSdk from "openclaw/plugin-sdk/ollama-setup";
|
||||
import * as providerAuthSdk from "openclaw/plugin-sdk/provider-auth";
|
||||
@ -50,7 +51,9 @@ import * as sandboxSdk from "openclaw/plugin-sdk/sandbox";
|
||||
import * as secretInputSdk from "openclaw/plugin-sdk/secret-input";
|
||||
import * as selfHostedProviderSetupSdk from "openclaw/plugin-sdk/self-hosted-provider-setup";
|
||||
import * as setupSdk from "openclaw/plugin-sdk/setup";
|
||||
import * as ssrfRuntimeSdk from "openclaw/plugin-sdk/ssrf-runtime";
|
||||
import * as testingSdk from "openclaw/plugin-sdk/testing";
|
||||
import * as threadBindingsRuntimeSdk from "openclaw/plugin-sdk/thread-bindings-runtime";
|
||||
import * as webhookIngressSdk from "openclaw/plugin-sdk/webhook-ingress";
|
||||
import { describe, expect, expectTypeOf, it } from "vitest";
|
||||
import type { ChannelMessageActionContext } from "../channels/plugins/types.js";
|
||||
@ -523,6 +526,22 @@ describe("plugin-sdk subpath exports", () => {
|
||||
expect(typeof conversationRuntimeSdk.createTopLevelChannelReplyToModeResolver).toBe("function");
|
||||
});
|
||||
|
||||
it("exports narrow binding lifecycle helpers from the dedicated subpath", () => {
|
||||
expect(typeof threadBindingsRuntimeSdk.resolveThreadBindingLifecycle).toBe("function");
|
||||
});
|
||||
|
||||
it("exports narrow matrix runtime helpers from the dedicated subpath", () => {
|
||||
expect(typeof matrixRuntimeSharedSdk.formatZonedTimestamp).toBe("function");
|
||||
});
|
||||
|
||||
it("exports narrow ssrf helpers from the dedicated subpath", () => {
|
||||
expect(typeof ssrfRuntimeSdk.closeDispatcher).toBe("function");
|
||||
expect(typeof ssrfRuntimeSdk.createPinnedDispatcher).toBe("function");
|
||||
expect(typeof ssrfRuntimeSdk.resolvePinnedHostnameWithPolicy).toBe("function");
|
||||
expect(typeof ssrfRuntimeSdk.assertHttpUrlTargetsPrivateNetwork).toBe("function");
|
||||
expect(typeof ssrfRuntimeSdk.ssrfPolicyFromAllowPrivateNetwork).toBe("function");
|
||||
});
|
||||
|
||||
it("exports provider setup helpers from the dedicated subpath", () => {
|
||||
expect(typeof providerSetupSdk.buildVllmProvider).toBe("function");
|
||||
expect(typeof providerSetupSdk.discoverOpenAICompatibleSelfHostedProvider).toBe("function");
|
||||
|
||||
9
src/plugin-sdk/thread-bindings-runtime.ts
Normal file
9
src/plugin-sdk/thread-bindings-runtime.ts
Normal file
@ -0,0 +1,9 @@
|
||||
// Narrow thread-binding lifecycle helpers for extensions that need binding
|
||||
// expiry and session-binding record types without loading the full
|
||||
// conversation-runtime surface.
|
||||
|
||||
export { resolveThreadBindingLifecycle } from "../channels/thread-bindings-policy.js";
|
||||
export type {
|
||||
BindingTargetKind,
|
||||
SessionBindingRecord,
|
||||
} from "../infra/outbound/session-binding-service.js";
|
||||
@ -4,23 +4,58 @@ import type { PluginLoadOptions } from "./loader.js";
|
||||
import { loadPluginManifestRegistry } from "./manifest-registry.js";
|
||||
import type { PluginWebSearchProviderEntry } from "./types.js";
|
||||
|
||||
export const BUNDLED_WEB_SEARCH_PLUGIN_IDS = bundledWebSearchPluginRegistrations
|
||||
.map((entry) => entry.plugin.id)
|
||||
.toSorted((left, right) => left.localeCompare(right));
|
||||
|
||||
const bundledWebSearchPluginIdSet = new Set<string>(BUNDLED_WEB_SEARCH_PLUGIN_IDS);
|
||||
|
||||
type BundledWebSearchProviderEntry = PluginWebSearchProviderEntry & { pluginId: string };
|
||||
type BundledWebSearchPluginRegistration = (typeof bundledWebSearchPluginRegistrations)[number];
|
||||
|
||||
let bundledWebSearchProvidersCache: BundledWebSearchProviderEntry[] | null = null;
|
||||
let bundledWebSearchPluginIdsCache: string[] | null = null;
|
||||
|
||||
function resolveBundledWebSearchPlugin(
|
||||
entry: BundledWebSearchPluginRegistration,
|
||||
): BundledWebSearchPluginRegistration["plugin"] | null {
|
||||
try {
|
||||
return entry.plugin;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function listBundledWebSearchPluginRegistrations() {
|
||||
return bundledWebSearchPluginRegistrations
|
||||
.map((entry) => {
|
||||
const plugin = resolveBundledWebSearchPlugin(entry);
|
||||
return plugin ? { ...entry, plugin } : null;
|
||||
})
|
||||
.filter(
|
||||
(
|
||||
entry,
|
||||
): entry is BundledWebSearchPluginRegistration & {
|
||||
plugin: BundledWebSearchPluginRegistration["plugin"];
|
||||
} => Boolean(entry),
|
||||
);
|
||||
}
|
||||
|
||||
function loadBundledWebSearchPluginIds(): string[] {
|
||||
if (!bundledWebSearchPluginIdsCache) {
|
||||
bundledWebSearchPluginIdsCache = listBundledWebSearchPluginRegistrations()
|
||||
.map(({ plugin }) => plugin.id)
|
||||
.toSorted((left, right) => left.localeCompare(right));
|
||||
}
|
||||
return bundledWebSearchPluginIdsCache;
|
||||
}
|
||||
|
||||
export function listBundledWebSearchPluginIds(): string[] {
|
||||
return loadBundledWebSearchPluginIds();
|
||||
}
|
||||
|
||||
function loadBundledWebSearchProviders(): BundledWebSearchProviderEntry[] {
|
||||
if (!bundledWebSearchProvidersCache) {
|
||||
bundledWebSearchProvidersCache = bundledWebSearchPluginRegistrations.flatMap(({ plugin }) =>
|
||||
capturePluginRegistration(plugin).webSearchProviders.map((provider) => ({
|
||||
...provider,
|
||||
pluginId: plugin.id,
|
||||
})),
|
||||
bundledWebSearchProvidersCache = listBundledWebSearchPluginRegistrations().flatMap(
|
||||
({ plugin }) =>
|
||||
capturePluginRegistration(plugin).webSearchProviders.map((provider) => ({
|
||||
...provider,
|
||||
pluginId: plugin.id,
|
||||
})),
|
||||
);
|
||||
}
|
||||
return bundledWebSearchProvidersCache;
|
||||
@ -36,6 +71,7 @@ export function resolveBundledWebSearchPluginIds(params: {
|
||||
workspaceDir: params.workspaceDir,
|
||||
env: params.env,
|
||||
});
|
||||
const bundledWebSearchPluginIdSet = new Set<string>(loadBundledWebSearchPluginIds());
|
||||
return registry.plugins
|
||||
.filter((plugin) => plugin.origin === "bundled" && bundledWebSearchPluginIdSet.has(plugin.id))
|
||||
.map((plugin) => plugin.id)
|
||||
|
||||
129
src/plugins/runtime/runtime-matrix-boundary.ts
Normal file
129
src/plugins/runtime/runtime-matrix-boundary.ts
Normal file
@ -0,0 +1,129 @@
|
||||
import fs from "node:fs";
|
||||
import path from "node:path";
|
||||
import { createJiti } from "jiti";
|
||||
import { loadConfig } from "../../config/config.js";
|
||||
import { loadPluginManifestRegistry } from "../manifest-registry.js";
|
||||
import {
|
||||
buildPluginLoaderJitiOptions,
|
||||
resolvePluginSdkAliasFile,
|
||||
resolvePluginSdkScopedAliasMap,
|
||||
shouldPreferNativeJiti,
|
||||
} from "../sdk-alias.js";
|
||||
|
||||
const MATRIX_PLUGIN_ID = "matrix";
|
||||
|
||||
type MatrixModule = typeof import("../../../extensions/matrix/runtime-api.js");
|
||||
|
||||
type MatrixPluginRecord = {
|
||||
rootDir?: string;
|
||||
source: string;
|
||||
};
|
||||
|
||||
let cachedModulePath: string | null = null;
|
||||
let cachedModule: MatrixModule | null = null;
|
||||
|
||||
const jitiLoaders = new Map<boolean, ReturnType<typeof createJiti>>();
|
||||
|
||||
function readConfigSafely() {
|
||||
try {
|
||||
return loadConfig();
|
||||
} catch {
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
function resolveMatrixPluginRecord(): MatrixPluginRecord | null {
|
||||
const manifestRegistry = loadPluginManifestRegistry({
|
||||
config: readConfigSafely(),
|
||||
cache: true,
|
||||
});
|
||||
const record = manifestRegistry.plugins.find((plugin) => plugin.id === MATRIX_PLUGIN_ID);
|
||||
if (!record?.source) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
rootDir: record.rootDir,
|
||||
source: record.source,
|
||||
};
|
||||
}
|
||||
|
||||
function resolveMatrixRuntimeModulePath(record: MatrixPluginRecord): string | null {
|
||||
const candidates = [
|
||||
path.join(path.dirname(record.source), "runtime-api.js"),
|
||||
path.join(path.dirname(record.source), "runtime-api.ts"),
|
||||
...(record.rootDir
|
||||
? [path.join(record.rootDir, "runtime-api.js"), path.join(record.rootDir, "runtime-api.ts")]
|
||||
: []),
|
||||
];
|
||||
for (const candidate of candidates) {
|
||||
if (fs.existsSync(candidate)) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function getJiti(modulePath: string) {
|
||||
const tryNative = shouldPreferNativeJiti(modulePath);
|
||||
const cached = jitiLoaders.get(tryNative);
|
||||
if (cached) {
|
||||
return cached;
|
||||
}
|
||||
const pluginSdkAlias = resolvePluginSdkAliasFile({
|
||||
srcFile: "root-alias.cjs",
|
||||
distFile: "root-alias.cjs",
|
||||
modulePath,
|
||||
});
|
||||
const aliasMap = {
|
||||
...(pluginSdkAlias ? { "openclaw/plugin-sdk": pluginSdkAlias } : {}),
|
||||
...resolvePluginSdkScopedAliasMap({ modulePath }),
|
||||
};
|
||||
const loader = createJiti(import.meta.url, {
|
||||
...buildPluginLoaderJitiOptions(aliasMap),
|
||||
tryNative,
|
||||
});
|
||||
jitiLoaders.set(tryNative, loader);
|
||||
return loader;
|
||||
}
|
||||
|
||||
function loadWithJiti<TModule>(modulePath: string): TModule {
|
||||
return getJiti(modulePath)(modulePath) as TModule;
|
||||
}
|
||||
|
||||
function loadMatrixModule(): MatrixModule | null {
|
||||
const record = resolveMatrixPluginRecord();
|
||||
if (!record) {
|
||||
return null;
|
||||
}
|
||||
const modulePath = resolveMatrixRuntimeModulePath(record);
|
||||
if (!modulePath) {
|
||||
return null;
|
||||
}
|
||||
if (cachedModule && cachedModulePath === modulePath) {
|
||||
return cachedModule;
|
||||
}
|
||||
const loaded = loadWithJiti<MatrixModule>(modulePath);
|
||||
cachedModulePath = modulePath;
|
||||
cachedModule = loaded;
|
||||
return loaded;
|
||||
}
|
||||
|
||||
export function setMatrixThreadBindingIdleTimeoutBySessionKey(
|
||||
...args: Parameters<MatrixModule["setMatrixThreadBindingIdleTimeoutBySessionKey"]>
|
||||
): ReturnType<MatrixModule["setMatrixThreadBindingIdleTimeoutBySessionKey"]> {
|
||||
const fn = loadMatrixModule()?.setMatrixThreadBindingIdleTimeoutBySessionKey;
|
||||
if (typeof fn !== "function") {
|
||||
return [];
|
||||
}
|
||||
return fn(...args);
|
||||
}
|
||||
|
||||
export function setMatrixThreadBindingMaxAgeBySessionKey(
|
||||
...args: Parameters<MatrixModule["setMatrixThreadBindingMaxAgeBySessionKey"]>
|
||||
): ReturnType<MatrixModule["setMatrixThreadBindingMaxAgeBySessionKey"]> {
|
||||
const fn = loadMatrixModule()?.setMatrixThreadBindingMaxAgeBySessionKey;
|
||||
if (typeof fn !== "function") {
|
||||
return [];
|
||||
}
|
||||
return fn(...args);
|
||||
}
|
||||
@ -1,7 +1,7 @@
|
||||
import {
|
||||
setMatrixThreadBindingIdleTimeoutBySessionKey,
|
||||
setMatrixThreadBindingMaxAgeBySessionKey,
|
||||
} from "../../../extensions/matrix/runtime-api.js";
|
||||
} from "./runtime-matrix-boundary.js";
|
||||
import type { PluginRuntimeChannel } from "./types-channel.js";
|
||||
|
||||
export function createRuntimeMatrix(): PluginRuntimeChannel["matrix"] {
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { resolveSecretInputRef } from "../config/types.secrets.js";
|
||||
import {
|
||||
BUNDLED_WEB_SEARCH_PLUGIN_IDS,
|
||||
listBundledWebSearchPluginIds,
|
||||
resolveBundledWebSearchPluginId,
|
||||
} from "../plugins/bundled-web-search.js";
|
||||
import type {
|
||||
@ -82,7 +82,7 @@ function hasCustomWebSearchPluginRisk(config: OpenClawConfig): boolean {
|
||||
return true;
|
||||
}
|
||||
|
||||
const bundledPluginIds = new Set<string>(BUNDLED_WEB_SEARCH_PLUGIN_IDS);
|
||||
const bundledPluginIds = new Set<string>(listBundledWebSearchPluginIds());
|
||||
const hasNonBundledPluginId = (pluginId: string) => !bundledPluginIds.has(pluginId.trim());
|
||||
if (Array.isArray(plugins.allow) && plugins.allow.some(hasNonBundledPluginId)) {
|
||||
return true;
|
||||
|
||||
4
test/fixtures/test-parallel.behavior.json
vendored
4
test/fixtures/test-parallel.behavior.json
vendored
@ -333,6 +333,10 @@
|
||||
"file": "src/infra/outbound/message-action-runner.poll.test.ts",
|
||||
"reason": "Terminates cleanly under threads, but not process forks on this host."
|
||||
},
|
||||
{
|
||||
"file": "src/infra/outbound/message-action-runner.context.test.ts",
|
||||
"reason": "Terminates cleanly under threads, but not process forks on this host."
|
||||
},
|
||||
{
|
||||
"file": "src/tts/tts.test.ts",
|
||||
"reason": "Terminates cleanly under threads, but not process forks on this host."
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user