Merge remote-tracking branch 'origin/main' into codex/assemble-runtime-context-budget

This commit is contained in:
Codex 2026-03-21 13:53:08 +08:00
commit 21a79d6076
75 changed files with 4503 additions and 616 deletions

View File

@ -153,6 +153,7 @@ Docs: https://docs.openclaw.ai
- Hardening: refresh stale device pairing requests and pending metadata (#50695) Thanks @smaeljaish771 and @joshavant.
- Gateway: harden OpenResponses file-context escaping (#50782) Thanks @YLChen-007 and @joshavant.
- LINE: harden Express webhook parsing to verified raw body (#51202) Thanks @gladiator9797 and @joshavant.
- Exec: harden host env override handling across gateway and node (#51207) Thanks @gladiator9797 and @joshavant.
- xAI/models: rename the bundled Grok 4.20 catalog entries to the GA IDs and normalize saved deprecated beta IDs at runtime so existing configs and sessions keep resolving. (#50772) Thanks @Jaaneek.
### Fixes
@ -186,6 +187,7 @@ Docs: https://docs.openclaw.ai
- Agents/compaction: add an opt-in post-compaction session JSONL truncation step that drops summarized transcript entries while preserving the retained branch tail and live session metadata. (#41021) Thanks @thirumaleshp.
- Telegram/routing: fail loud when `message send` targets an unknown non-default Telegram `accountId`, instead of silently falling back to the channel-level bot token and sending through the wrong bot. (#50853) Thanks @hclsys.
- Web search: align onboarding, configure, and finalize with plugin-owned provider contracts, including disabled-provider recovery, config-aware credential hooks, and runtime-visible summaries. (#50935) Thanks @gumadeiras.
- Agents/replay: sanitize malformed assistant tool-call replay blocks before provider replay so follow-up Anthropic requests do not inherit the downstream `replace` crash. (#50005) Thanks @jalehman.
### Breaking

View File

@ -1,5 +1,10 @@
import Foundation
/// Result of a dry-run inspection of requested host env overrides
/// (see `HostEnvSanitizer.inspectOverrides`). Both lists are sorted and
/// de-duplicated by the producer.
struct HostEnvOverrideDiagnostics: Equatable {
    // Uppercased override keys refused by the security policy (e.g. PATH, CLASSPATH).
    var blockedKeys: [String]
    // Override keys whose spelling is not a portable env-var identifier.
    var invalidKeys: [String]
}
enum HostEnvSanitizer {
/// Generated from src/infra/host-env-security-policy.json via scripts/generate-host-env-security-policy-swift.mjs.
/// Parity is validated by src/infra/host-env-security.policy-parity.test.ts.
@ -41,6 +46,67 @@ enum HostEnvSanitizer {
return filtered.isEmpty ? nil : filtered
}
/// True when `scalar` may start a portable environment variable name:
/// an ASCII underscore or ASCII letter.
private static func isPortableHead(_ scalar: UnicodeScalar) -> Bool {
    switch scalar {
    case "_", "A"..."Z", "a"..."z":
        return true
    default:
        return false
    }
}
/// True when `scalar` may appear after the first character of a portable
/// environment variable name: head characters plus ASCII digits.
private static func isPortableTail(_ scalar: UnicodeScalar) -> Bool {
    if case "0"..."9" = scalar { return true }
    return self.isPortableHead(scalar)
}
/// Trims `rawKey` and returns it when it spells a portable env-var name:
/// a portable head scalar followed by portable tail scalars, with `(`/`)`
/// additionally allowed in the tail (Windows-style names like
/// `ProgramFiles(x86)`). Returns nil for empty or non-portable names.
private static func normalizeOverrideKey(_ rawKey: String) -> String? {
    let trimmed = rawKey.trimmingCharacters(in: .whitespacesAndNewlines)
    let scalars = trimmed.unicodeScalars
    guard let head = scalars.first, self.isPortableHead(head) else {
        // Covers the empty-string case too: no first scalar means no key.
        return nil
    }
    let tailIsPortable = scalars.dropFirst().allSatisfy { scalar in
        self.isPortableTail(scalar) || scalar == "(" || scalar == ")"
    }
    return tailIsPortable ? trimmed : nil
}
/// De-duplicates `values` and returns them in ascending order.
private static func sortedUnique(_ values: [String]) -> [String] {
    Set(values).sorted()
}
/// Dry-run inspection of requested env overrides: classifies each key as
/// policy-blocked or non-portable without mutating anything, so callers can
/// reject a request before executing it.
/// - Parameters:
///   - overrides: Requested env overrides; nil yields empty diagnostics.
///   - blockPathOverrides: When true (default), PATH is always reported as
///     blocked because it is part of the command-resolution security boundary.
/// - Returns: Sorted, de-duplicated blocked and invalid key lists.
static func inspectOverrides(
    overrides: [String: String]?,
    blockPathOverrides: Bool = true) -> HostEnvOverrideDiagnostics
{
    guard let overrides else {
        return HostEnvOverrideDiagnostics(blockedKeys: [], invalidKeys: [])
    }
    var blockedKeys: [String] = []
    var invalidKeys: [String] = []
    for rawKey in overrides.keys {
        guard let normalizedKey = self.normalizeOverrideKey(rawKey) else {
            // Report the trimmed spelling when available; fall back to the
            // raw key for all-whitespace names so the report is non-empty.
            let trimmed = rawKey.trimmingCharacters(in: .whitespacesAndNewlines)
            invalidKeys.append(trimmed.isEmpty ? rawKey : trimmed)
            continue
        }
        let upperKey = normalizedKey.uppercased()
        let pathBlocked = blockPathOverrides && upperKey == "PATH"
        if pathBlocked || self.isBlockedOverride(upperKey) || self.isBlocked(upperKey) {
            blockedKeys.append(upperKey)
        }
    }
    return HostEnvOverrideDiagnostics(
        blockedKeys: self.sortedUnique(blockedKeys),
        invalidKeys: self.sortedUnique(invalidKeys))
}
static func sanitize(overrides: [String: String]?, shellWrapper: Bool = false) -> [String: String] {
var merged: [String: String] = [:]
for (rawKey, value) in ProcessInfo.processInfo.environment {
@ -57,8 +123,7 @@ enum HostEnvSanitizer {
guard let effectiveOverrides else { return merged }
for (rawKey, value) in effectiveOverrides {
let key = rawKey.trimmingCharacters(in: .whitespacesAndNewlines)
guard !key.isEmpty else { continue }
guard let key = self.normalizeOverrideKey(rawKey) else { continue }
let upper = key.uppercased()
// PATH is part of the security boundary (command resolution + safe-bin checks). Never
// allow request-scoped PATH overrides from agents/gateways.

View File

@ -63,7 +63,23 @@ enum HostEnvSecurityPolicy {
"OPENSSL_ENGINES",
"PYTHONSTARTUP",
"WGETRC",
"CURL_HOME"
"CURL_HOME",
"CLASSPATH",
"CGO_CFLAGS",
"CGO_LDFLAGS",
"GOFLAGS",
"CORECLR_PROFILER_PATH",
"PHPRC",
"PHP_INI_SCAN_DIR",
"DENO_DIR",
"BUN_CONFIG_REGISTRY",
"LUA_PATH",
"LUA_CPATH",
"GEM_HOME",
"GEM_PATH",
"BUNDLE_GEMFILE",
"COMPOSER_HOME",
"XDG_CONFIG_HOME"
]
static let blockedOverridePrefixes: [String] = [

View File

@ -465,6 +465,23 @@ actor MacNodeRuntime {
? params.sessionKey!.trimmingCharacters(in: .whitespacesAndNewlines)
: self.mainSessionKey
let runId = UUID().uuidString
let envOverrideDiagnostics = HostEnvSanitizer.inspectOverrides(
overrides: params.env,
blockPathOverrides: true)
if !envOverrideDiagnostics.blockedKeys.isEmpty || !envOverrideDiagnostics.invalidKeys.isEmpty {
var details: [String] = []
if !envOverrideDiagnostics.blockedKeys.isEmpty {
details.append("blocked override keys: \(envOverrideDiagnostics.blockedKeys.joined(separator: ", "))")
}
if !envOverrideDiagnostics.invalidKeys.isEmpty {
details.append(
"invalid non-portable override keys: \(envOverrideDiagnostics.invalidKeys.joined(separator: ", "))")
}
return Self.errorResponse(
req,
code: .invalidRequest,
message: "SYSTEM_RUN_DENIED: environment override rejected (\(details.joined(separator: "; ")))")
}
let evaluation = await ExecApprovalEvaluator.evaluate(
command: command,
rawCommand: params.rawCommand,

View File

@ -33,4 +33,24 @@ struct HostEnvSanitizerTests {
let env = HostEnvSanitizer.sanitize(overrides: ["OPENCLAW_TOKEN": "secret"])
#expect(env["OPENCLAW_TOKEN"] == "secret")
}
@Test func `inspect overrides rejects blocked and invalid keys`() {
    // CLASSPATH is on the blocked-override list, BAD-KEY is not a portable
    // name, and the Windows-style parenthesized key should pass both checks.
    let requested: [String: String] = [
        "CLASSPATH": "/tmp/evil-classpath",
        "BAD-KEY": "x",
        "ProgramFiles(x86)": "C:\\Program Files (x86)",
    ]
    let diagnostics = HostEnvSanitizer.inspectOverrides(overrides: requested)
    #expect(diagnostics.blockedKeys == ["CLASSPATH"])
    #expect(diagnostics.invalidKeys == ["BAD-KEY"])
}
@Test func `sanitize accepts Windows-style override key names`() {
    // Parenthesized Windows env names must survive sanitization unchanged.
    let overrides = [
        "ProgramFiles(x86)": "D:\\SDKs",
        "CommonProgramFiles(x86)": "D:\\Common",
    ]
    let env = HostEnvSanitizer.sanitize(overrides: overrides)
    for (key, value) in overrides {
        #expect(env[key] == value)
    }
}
}

View File

@ -21,6 +21,32 @@ struct MacNodeRuntimeTests {
#expect(response.ok == false)
}
@Test func `handle invoke rejects blocked system run env override before execution`() async throws {
    let runtime = MacNodeRuntime()
    // CLASSPATH is a policy-blocked override key, so the run must be denied
    // up front with a SYSTEM_RUN_DENIED error naming the offending key.
    let params = OpenClawSystemRunParams(
        command: ["/bin/sh", "-lc", "echo ok"],
        env: ["CLASSPATH": "/tmp/evil-classpath"])
    let paramsJSON = try String(data: JSONEncoder().encode(params), encoding: .utf8)
    let request = BridgeInvokeRequest(
        id: "req-2c",
        command: OpenClawSystemCommand.run.rawValue,
        paramsJSON: paramsJSON)
    let response = await runtime.handleInvoke(request)
    #expect(response.ok == false)
    let message = response.error?.message ?? ""
    #expect(message.contains("SYSTEM_RUN_DENIED: environment override rejected"))
    #expect(message.contains("CLASSPATH"))
}
@Test func `handle invoke rejects invalid system run env override key before execution`() async throws {
    let runtime = MacNodeRuntime()
    // "BAD-KEY" is not a portable env-var name, so the run must be denied
    // before execution with the invalid key echoed in the error.
    let params = OpenClawSystemRunParams(
        command: ["/bin/sh", "-lc", "echo ok"],
        env: ["BAD-KEY": "x"])
    let paramsJSON = try String(data: JSONEncoder().encode(params), encoding: .utf8)
    let request = BridgeInvokeRequest(
        id: "req-2d",
        command: OpenClawSystemCommand.run.rawValue,
        paramsJSON: paramsJSON)
    let response = await runtime.handleInvoke(request)
    #expect(response.ok == false)
    let message = response.error?.message ?? ""
    #expect(message.contains("SYSTEM_RUN_DENIED: environment override rejected"))
    #expect(message.contains("BAD-KEY"))
}
@Test func `handle invoke rejects empty system which`() async throws {
let runtime = MacNodeRuntime()
let params = OpenClawSystemWhichParams(bins: [])

View File

@ -0,0 +1,65 @@
import type {
ModelDefinitionConfig,
ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-models";
import { resolveAnthropicVertexRegion } from "openclaw/plugin-sdk/provider-models";
export const ANTHROPIC_VERTEX_DEFAULT_MODEL_ID = "claude-sonnet-4-6";
const ANTHROPIC_VERTEX_DEFAULT_CONTEXT_WINDOW = 1_000_000;
const GCP_VERTEX_CREDENTIALS_MARKER = "gcp-vertex-credentials";
/**
 * Builds one Anthropic-on-Vertex model entry, stamping the shared
 * default context window onto the caller-provided fields.
 */
function buildAnthropicVertexModel(params: {
  id: string;
  name: string;
  reasoning: boolean;
  input: ModelDefinitionConfig["input"];
  cost: ModelDefinitionConfig["cost"];
  maxTokens: number;
}): ModelDefinitionConfig {
  const { id, name, reasoning, input, cost, maxTokens } = params;
  return {
    id,
    name,
    reasoning,
    input,
    cost,
    contextWindow: ANTHROPIC_VERTEX_DEFAULT_CONTEXT_WINDOW,
    maxTokens,
  };
}
/**
 * Returns the bundled Anthropic-on-Vertex catalog: Opus 4.6 and the default
 * Sonnet 4.6 entry, sharing the same capability profile.
 */
function buildAnthropicVertexCatalog(): ModelDefinitionConfig[] {
  // Capabilities common to both bundled models.
  const shared = {
    reasoning: true,
    input: ["text", "image"] as ModelDefinitionConfig["input"],
    maxTokens: 128000,
  };
  return [
    buildAnthropicVertexModel({
      ...shared,
      id: "claude-opus-4-6",
      name: "Claude Opus 4.6",
      cost: { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
    }),
    buildAnthropicVertexModel({
      ...shared,
      id: ANTHROPIC_VERTEX_DEFAULT_MODEL_ID,
      name: "Claude Sonnet 4.6",
      cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
    }),
  ];
}
/**
 * Builds the Anthropic-on-Vertex provider config. The Vertex endpoint host is
 * derived from the resolved region; the special "global" region maps to the
 * un-prefixed aiplatform host.
 */
export function buildAnthropicVertexProvider(params?: {
  env?: NodeJS.ProcessEnv;
}): ModelProviderConfig {
  const region = resolveAnthropicVertexRegion(params?.env);
  const host =
    region.toLowerCase() === "global"
      ? "aiplatform.googleapis.com"
      : `${region}-aiplatform.googleapis.com`;
  return {
    baseUrl: `https://${host}`,
    api: "anthropic-messages",
    // Marker value: real GCP credentials are resolved elsewhere at runtime.
    apiKey: GCP_VERTEX_CREDENTIALS_MARKER,
    models: buildAnthropicVertexCatalog(),
  };
}

View File

@ -1,6 +1,6 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js";
import "./test-mocks.js";
import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js";
import type { PluginRuntime } from "./runtime-api.js";
import { clearBlueBubblesRuntime, setBlueBubblesRuntime } from "./runtime.js";
import { sendMessageBlueBubbles, resolveChatGuidForTarget, createChatForHandle } from "./send.js";

View File

@ -62,14 +62,16 @@ export function createBlueBubblesProbeMockModule(): BlueBubblesProbeMockModule {
export function installBlueBubblesFetchTestHooks(params: {
mockFetch: ReturnType<typeof vi.fn>;
privateApiStatusMock: {
mockReset: () => unknown;
mockReset?: () => unknown;
mockClear?: () => unknown;
mockReturnValue: (value: boolean | null) => unknown;
};
}) {
beforeEach(() => {
vi.stubGlobal("fetch", params.mockFetch);
params.mockFetch.mockReset();
params.privateApiStatusMock.mockReset();
params.privateApiStatusMock.mockReset?.();
params.privateApiStatusMock.mockClear?.();
params.privateApiStatusMock.mockReturnValue(BLUE_BUBBLES_PRIVATE_API_STATUS.unknown);
});

View File

@ -11,7 +11,7 @@ export {
ssrfPolicyFromAllowPrivateNetwork,
type LookupFn,
type SsrFPolicy,
} from "openclaw/plugin-sdk/infra-runtime";
} from "openclaw/plugin-sdk/ssrf-runtime";
export {
setMatrixThreadBindingIdleTimeoutBySessionKey,
setMatrixThreadBindingMaxAgeBySessionKey,

View File

@ -53,11 +53,19 @@ function createHandlerHarness() {
dispatcher: {},
replyOptions: {},
markDispatchIdle: vi.fn(),
markRunComplete: vi.fn(),
}),
resolveHumanDelayConfig: vi.fn().mockReturnValue(undefined),
dispatchReplyFromConfig: vi
.fn()
.mockResolvedValue({ queuedFinal: false, counts: { final: 0, block: 0, tool: 0 } }),
withReplyDispatcher: vi.fn().mockImplementation(async ({ run, onSettled }) => {
try {
return await run();
} finally {
await onSettled?.();
}
}),
},
commands: {
shouldHandleTextCommands: vi.fn().mockReturnValue(true),

View File

@ -1,8 +1,8 @@
import type {
BindingTargetKind,
SessionBindingRecord,
} from "openclaw/plugin-sdk/conversation-runtime";
import { resolveThreadBindingLifecycle } from "openclaw/plugin-sdk/conversation-runtime";
} from "openclaw/plugin-sdk/thread-bindings-runtime";
import { resolveThreadBindingLifecycle } from "openclaw/plugin-sdk/thread-bindings-runtime";
export type MatrixThreadBindingTargetKind = "subagent" | "acp";

View File

@ -16,30 +16,14 @@ import {
setMatrixThreadBindingMaxAgeBySessionKey,
} from "./thread-bindings.js";
const pluginSdkActual = vi.hoisted(() => ({
writeJsonFileAtomically: null as null | ((filePath: string, value: unknown) => Promise<void>),
}));
const sendMessageMatrixMock = vi.hoisted(() =>
vi.fn(async (_to: string, _message: string, opts?: { threadId?: string }) => ({
messageId: opts?.threadId ? "$reply" : "$root",
roomId: "!room:example",
})),
);
const writeJsonFileAtomicallyMock = vi.hoisted(() =>
vi.fn<(filePath: string, value: unknown) => Promise<void>>(),
);
vi.mock("../../runtime-api.js", async () => {
const actual =
await vi.importActual<typeof import("../../runtime-api.js")>("../../runtime-api.js");
pluginSdkActual.writeJsonFileAtomically = actual.writeJsonFileAtomically;
return {
...actual,
writeJsonFileAtomically: (filePath: string, value: unknown) =>
writeJsonFileAtomicallyMock(filePath, value),
};
});
const actualRename = fs.rename.bind(fs);
const renameMock = vi.spyOn(fs, "rename");
vi.mock("./send.js", async () => {
const actual = await vi.importActual<typeof import("./send.js")>("./send.js");
@ -82,10 +66,8 @@ describe("matrix thread bindings", () => {
__testing.resetSessionBindingAdaptersForTests();
resetMatrixThreadBindingsForTests();
sendMessageMatrixMock.mockClear();
writeJsonFileAtomicallyMock.mockReset();
writeJsonFileAtomicallyMock.mockImplementation(async (filePath: string, value: unknown) => {
await pluginSdkActual.writeJsonFileAtomically?.(filePath, value);
});
renameMock.mockReset();
renameMock.mockImplementation(actualRename);
setMatrixRuntime({
state: {
resolveStateDir: () => stateDir,
@ -216,7 +198,7 @@ describe("matrix thread bindings", () => {
}
});
it("persists a batch of expired bindings once per sweep", async () => {
it("persists expired bindings after a sweep", async () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-03-08T12:00:00.000Z"));
try {
@ -251,12 +233,8 @@ describe("matrix thread bindings", () => {
placement: "current",
});
writeJsonFileAtomicallyMock.mockClear();
await vi.advanceTimersByTimeAsync(61_000);
await vi.waitFor(() => {
expect(writeJsonFileAtomicallyMock).toHaveBeenCalledTimes(1);
});
await Promise.resolve();
await vi.waitFor(async () => {
const persistedRaw = await fs.readFile(resolveBindingsFilePath(), "utf-8");
@ -296,13 +274,23 @@ describe("matrix thread bindings", () => {
placement: "current",
});
writeJsonFileAtomicallyMock.mockClear();
writeJsonFileAtomicallyMock.mockRejectedValueOnce(new Error("disk full"));
renameMock.mockRejectedValueOnce(new Error("disk full"));
await vi.advanceTimersByTimeAsync(61_000);
await Promise.resolve();
await vi.waitFor(() => {
expect(
logVerboseMessage.mock.calls.some(
([message]) =>
typeof message === "string" &&
message.includes("failed auto-unbinding expired bindings"),
),
).toBe(true);
});
await vi.waitFor(() => {
expect(logVerboseMessage).toHaveBeenCalledWith(
expect.stringContaining("failed auto-unbinding expired bindings"),
expect.stringContaining("matrix: auto-unbinding $thread due to idle-expired"),
);
});

View File

@ -8,6 +8,12 @@ export {
type LookupFn,
type SsrFPolicy,
} from "openclaw/plugin-sdk/infra-runtime";
export {
dispatchReplyFromConfigWithSettledDispatcher,
ensureConfiguredAcpBindingReady,
maybeCreateMatrixMigrationSnapshot,
resolveConfiguredAcpBindingRecord,
} from "openclaw/plugin-sdk/matrix-runtime-heavy";
// Keep auth-precedence available internally without re-exporting helper-api
// twice through both plugin-sdk/matrix and ../runtime-api.js.
export * from "./auth-precedence.js";

View File

@ -0,0 +1,397 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import OpenAI from "openai";
import { describe, expect, it } from "vitest";
import type { OpenClawConfig } from "../../src/config/config.js";
import { loadConfig } from "../../src/config/config.js";
import { encodePngRgba, fillPixel } from "../../src/media/png-encode.js";
import type { ResolvedTtsConfig } from "../../src/tts/tts.js";
import { createTestPluginApi } from "../../test/helpers/extensions/plugin-api.js";
import plugin from "./index.js";
const OPENAI_API_KEY = process.env.OPENAI_API_KEY ?? "";
const LIVE_MODEL_ID = process.env.OPENCLAW_LIVE_OPENAI_PLUGIN_MODEL?.trim() || "gpt-5.4-nano";
const LIVE_IMAGE_MODEL = process.env.OPENCLAW_LIVE_OPENAI_IMAGE_MODEL?.trim() || "gpt-image-1";
const LIVE_VISION_MODEL = process.env.OPENCLAW_LIVE_OPENAI_VISION_MODEL?.trim() || "gpt-4.1-mini";
const liveEnabled = OPENAI_API_KEY.trim().length > 0 && process.env.OPENCLAW_LIVE_TEST === "1";
const describeLive = liveEnabled ? describe : describe.skip;
const EMPTY_AUTH_STORE = { version: 1, profiles: {} } as const;
/**
 * Returns the GPT-5-generation template model the given live model id is
 * resolved from; throws for ids this suite does not support.
 */
function createTemplateModel(modelId: string) {
  // Fields shared by every supported template.
  const shared = {
    provider: "openai",
    api: "openai-completions",
    baseUrl: "https://api.openai.com/v1",
    reasoning: true,
    input: ["text", "image"],
  };
  if (modelId === "gpt-5.4") {
    return {
      id: "gpt-5.2",
      name: "GPT-5.2",
      ...shared,
      cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 400_000,
      maxTokens: 128_000,
    };
  }
  if (modelId === "gpt-5.4-mini") {
    return {
      id: "gpt-5-mini",
      name: "GPT-5 mini",
      ...shared,
      cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 400_000,
      maxTokens: 128_000,
    };
  }
  if (modelId === "gpt-5.4-nano") {
    return {
      id: "gpt-5-nano",
      name: "GPT-5 nano",
      ...shared,
      cost: { input: 0.5, output: 1, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 200_000,
      maxTokens: 64_000,
    };
  }
  throw new Error(`Unsupported live OpenAI plugin model: ${modelId}`);
}
/**
 * Registers the OpenAI plugin against a test plugin API and captures every
 * provider surface the plugin registers, grouped by kind.
 */
function registerOpenAIPlugin() {
  const captured = {
    providers: [] as unknown[],
    speechProviders: [] as unknown[],
    mediaProviders: [] as unknown[],
    imageProviders: [] as unknown[],
  };
  plugin.register(
    createTestPluginApi({
      id: "openai",
      name: "OpenAI Provider",
      source: "test",
      config: {},
      runtime: {} as never,
      registerProvider: (provider) => {
        captured.providers.push(provider);
      },
      registerSpeechProvider: (provider) => {
        captured.speechProviders.push(provider);
      },
      registerMediaUnderstandingProvider: (provider) => {
        captured.mediaProviders.push(provider);
      },
      registerImageGenerationProvider: (provider) => {
        captured.imageProviders.push(provider);
      },
    }),
  );
  return captured;
}
/**
 * Renders a deterministic 96x96 PNG: a pale-blue field with an orange square
 * centered in it, used to ground the vision-model color assertion.
 */
function createReferencePng(): Buffer {
  const width = 96;
  const height = 96;
  const rgba = Buffer.alloc(width * height * 4, 255);
  // Paints the half-open rect [x0, x1) x [y0, y1) with an opaque color.
  const paintRect = (
    x0: number,
    y0: number,
    x1: number,
    y1: number,
    r: number,
    g: number,
    b: number,
  ) => {
    for (let y = y0; y < y1; y += 1) {
      for (let x = x0; x < x1; x += 1) {
        fillPixel(rgba, x, y, width, r, g, b, 255);
      }
    }
  };
  paintRect(0, 0, width, height, 225, 242, 255);
  paintRect(24, 24, 72, 72, 255, 153, 51);
  return encodePngRgba(rgba, width, height);
}
/**
 * Loads the on-disk config and overlays live OpenAI credentials so provider
 * calls in this suite hit the real API.
 */
function createLiveConfig(): OpenClawConfig {
  const base = loadConfig();
  const openai = {
    ...base.models?.providers?.openai,
    apiKey: OPENAI_API_KEY,
    baseUrl: "https://api.openai.com/v1",
  };
  const models = {
    ...base.models,
    providers: {
      ...base.models?.providers,
      openai,
    },
  };
  return { ...base, models } as OpenClawConfig;
}
// Builds a fully-resolved TTS config targeting live OpenAI speech synthesis.
// The elevenlabs/edge sections carry inert defaults (edge is disabled, the
// ElevenLabs voiceId is empty) — presumably required by ResolvedTtsConfig even
// when the active provider is "openai"; confirm against the type definition.
function createLiveTtsConfig(): ResolvedTtsConfig {
  return {
    auto: "off",
    mode: "final",
    provider: "openai",
    providerSource: "config",
    // Every per-model override is enabled so live tests can steer synthesis.
    modelOverrides: {
      enabled: true,
      allowText: true,
      allowProvider: true,
      allowVoice: true,
      allowModelId: true,
      allowVoiceSettings: true,
      allowNormalization: true,
      allowSeed: true,
    },
    elevenlabs: {
      baseUrl: "https://api.elevenlabs.io",
      voiceId: "",
      modelId: "eleven_multilingual_v2",
      voiceSettings: {
        stability: 0.5,
        similarityBoost: 0.75,
        style: 0,
        useSpeakerBoost: true,
        speed: 1,
      },
    },
    // Live credentials: the key comes from the suite-level OPENAI_API_KEY env.
    openai: {
      apiKey: OPENAI_API_KEY,
      baseUrl: "https://api.openai.com/v1",
      model: "gpt-4o-mini-tts",
      voice: "alloy",
    },
    edge: {
      enabled: false,
      voice: "en-US-AriaNeural",
      lang: "en-US",
      outputFormat: "audio-24khz-48kbitrate-mono-mp3",
      outputFormatConfigured: false,
      saveSubtitles: false,
    },
    maxTextLength: 4_000,
    timeoutMs: 30_000,
  };
}
/** Creates a unique throwaway agent directory under the OS temp root. */
async function createTempAgentDir(): Promise<string> {
  const prefix = path.join(os.tmpdir(), "openai-plugin-live-");
  return fs.mkdtemp(prefix);
}
// Offline registration smoke test: no network, runs unconditionally.
describe("openai plugin", () => {
  it("registers the expected provider surfaces", () => {
    const { providers, speechProviders, mediaProviders, imageProviders } = registerOpenAIPlugin();
    // Two model providers: the plain API-key provider plus the Codex variant.
    expect(providers).toHaveLength(2);
    expect(
      providers.map(
        (provider) =>
          // oxlint-disable-next-line typescript/no-explicit-any
          (provider as any).id,
      ),
    ).toEqual(["openai", "openai-codex"]);
    // One provider each for speech, media understanding, and image generation.
    expect(speechProviders).toHaveLength(1);
    expect(mediaProviders).toHaveLength(1);
    expect(imageProviders).toHaveLength(1);
  });
});
// Live suite: only runs when the describeLive gate is enabled
// (OPENCLAW_LIVE_TEST=1 and a non-empty OPENAI_API_KEY). Each test calls the
// real OpenAI API, so timeouts are generous and assertions are loose enough
// to tolerate model-output variation.
describeLive("openai plugin live", () => {
  it("registers an OpenAI provider that can complete a live request", async () => {
    const { providers } = registerOpenAIPlugin();
    const provider =
      // oxlint-disable-next-line typescript/no-explicit-any
      providers.find((entry) => (entry as any).id === "openai");
    expect(provider).toBeDefined();
    // Resolve the dynamic live model id against a stub registry that only
    // knows the single matching GPT-5 template.
    // oxlint-disable-next-line typescript/no-explicit-any
    const resolved = (provider as any).resolveDynamicModel?.({
      provider: "openai",
      modelId: LIVE_MODEL_ID,
      modelRegistry: {
        find(providerId: string, id: string) {
          if (providerId !== "openai") {
            return null;
          }
          const template = createTemplateModel(LIVE_MODEL_ID);
          return id === template.id ? template : null;
        },
      },
    });
    expect(resolved).toBeDefined();
    // oxlint-disable-next-line typescript/no-explicit-any
    const normalized = (provider as any).normalizeResolvedModel?.({
      provider: "openai",
      modelId: resolved.id,
      model: resolved,
    });
    // Normalization should expose the live id on the responses API.
    expect(normalized).toMatchObject({
      provider: "openai",
      id: LIVE_MODEL_ID,
      api: "openai-responses",
      baseUrl: "https://api.openai.com/v1",
    });
    const client = new OpenAI({
      apiKey: OPENAI_API_KEY,
      baseURL: normalized?.baseUrl,
    });
    const response = await client.responses.create({
      model: normalized?.id ?? LIVE_MODEL_ID,
      input: "Reply with exactly OK.",
      max_output_tokens: 16,
    });
    expect(response.output_text.trim()).toMatch(/^OK[.!]?$/);
  }, 30_000);
  it("lists voices and synthesizes audio through the registered speech provider", async () => {
    const { speechProviders } = registerOpenAIPlugin();
    const speechProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      speechProviders.find((entry) => (entry as any).id === "openai");
    expect(speechProvider).toBeDefined();
    // oxlint-disable-next-line typescript/no-explicit-any
    const voices = await (speechProvider as any).listVoices?.({});
    expect(Array.isArray(voices)).toBe(true);
    expect(voices.map((voice: { id: string }) => voice.id)).toContain("alloy");
    const cfg = createLiveConfig();
    const ttsConfig = createLiveTtsConfig();
    // File-target synthesis should yield a non-trivial MP3 payload.
    // oxlint-disable-next-line typescript/no-explicit-any
    const audioFile = await (speechProvider as any).synthesize({
      text: "OpenClaw integration test OK.",
      cfg,
      config: ttsConfig,
      target: "audio-file",
    });
    expect(audioFile.outputFormat).toBe("mp3");
    expect(audioFile.fileExtension).toBe(".mp3");
    expect(audioFile.audioBuffer.byteLength).toBeGreaterThan(512);
    // Telephony synthesis should yield raw 24 kHz PCM.
    // oxlint-disable-next-line typescript/no-explicit-any
    const telephony = await (speechProvider as any).synthesizeTelephony?.({
      text: "Telephony check OK.",
      cfg,
      config: ttsConfig,
    });
    expect(telephony?.outputFormat).toBe("pcm");
    expect(telephony?.sampleRate).toBe(24_000);
    expect(telephony?.audioBuffer.byteLength).toBeGreaterThan(512);
  }, 45_000);
  it("transcribes synthesized speech through the registered media provider", async () => {
    const { speechProviders, mediaProviders } = registerOpenAIPlugin();
    const speechProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      speechProviders.find((entry) => (entry as any).id === "openai");
    const mediaProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      mediaProviders.find((entry) => (entry as any).id === "openai");
    expect(speechProvider).toBeDefined();
    expect(mediaProvider).toBeDefined();
    const cfg = createLiveConfig();
    const ttsConfig = createLiveTtsConfig();
    // Round-trip: synthesize known text, then transcribe the audio back.
    // oxlint-disable-next-line typescript/no-explicit-any
    const synthesized = await (speechProvider as any).synthesize({
      text: "OpenClaw integration test OK.",
      cfg,
      config: ttsConfig,
      target: "audio-file",
    });
    // oxlint-disable-next-line typescript/no-explicit-any
    const transcription = await (mediaProvider as any).transcribeAudio?.({
      buffer: synthesized.audioBuffer,
      fileName: "openai-plugin-live.mp3",
      mime: "audio/mpeg",
      apiKey: OPENAI_API_KEY,
      timeoutMs: 30_000,
    });
    // Case-insensitive, word-level checks tolerate transcription variance.
    const text = String(transcription?.text ?? "").toLowerCase();
    expect(text.length).toBeGreaterThan(0);
    expect(text).toContain("openclaw");
    expect(text).toMatch(/\bok\b/);
  }, 45_000);
  it("generates an image through the registered image provider", async () => {
    const { imageProviders } = registerOpenAIPlugin();
    const imageProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      imageProviders.find((entry) => (entry as any).id === "openai");
    expect(imageProvider).toBeDefined();
    const cfg = createLiveConfig();
    const agentDir = await createTempAgentDir();
    try {
      // oxlint-disable-next-line typescript/no-explicit-any
      const generated = await (imageProvider as any).generateImage({
        provider: "openai",
        model: LIVE_IMAGE_MODEL,
        prompt: "Create a minimal flat orange square centered on a white background.",
        cfg,
        agentDir,
        authStore: EMPTY_AUTH_STORE,
        timeoutMs: 45_000,
        size: "1024x1024",
      });
      expect(generated.model).toBe(LIVE_IMAGE_MODEL);
      expect(generated.images.length).toBeGreaterThan(0);
      expect(generated.images[0]?.mimeType).toBe("image/png");
      expect(generated.images[0]?.buffer.byteLength).toBeGreaterThan(1_000);
    } finally {
      // Always remove the throwaway agent dir, even on assertion failure.
      await fs.rm(agentDir, { recursive: true, force: true });
    }
  }, 60_000);
  it("describes a deterministic image through the registered media provider", async () => {
    const { mediaProviders } = registerOpenAIPlugin();
    const mediaProvider =
      // oxlint-disable-next-line typescript/no-explicit-any
      mediaProviders.find((entry) => (entry as any).id === "openai");
    expect(mediaProvider).toBeDefined();
    const cfg = createLiveConfig();
    const agentDir = await createTempAgentDir();
    try {
      // The reference PNG has an orange center square, so the one-word color
      // answer should contain "orange".
      // oxlint-disable-next-line typescript/no-explicit-any
      const description = await (mediaProvider as any).describeImage?.({
        buffer: createReferencePng(),
        fileName: "reference.png",
        mime: "image/png",
        prompt: "Reply with one lowercase word for the dominant center color.",
        timeoutMs: 30_000,
        agentDir,
        cfg,
        model: LIVE_VISION_MODEL,
        provider: "openai",
      });
      expect(String(description?.text ?? "").toLowerCase()).toContain("orange");
    } finally {
      await fs.rm(agentDir, { recursive: true, force: true });
    }
  }, 60_000);
});

View File

@ -1,6 +1,73 @@
import OpenAI from "openai";
import { describe, expect, it } from "vitest";
import { buildOpenAIProvider } from "./openai-provider.js";
const OPENAI_API_KEY = process.env.OPENAI_API_KEY ?? "";
const DEFAULT_LIVE_MODEL_IDS = ["gpt-5.4-mini", "gpt-5.4-nano"] as const;
const liveEnabled = OPENAI_API_KEY.trim().length > 0 && process.env.OPENCLAW_LIVE_TEST === "1";
const describeLive = liveEnabled ? describe : describe.skip;
type LiveModelCase = {
modelId: string;
templateId: string;
templateName: string;
cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
contextWindow: number;
maxTokens: number;
};
/**
 * Maps a live GPT-5.4-era model id to the GPT-5-generation template it should
 * resolve from, plus the expected pricing/window metadata.
 * Throws for ids this suite does not cover.
 */
function resolveLiveModelCase(modelId: string): LiveModelCase {
  const cases: Record<string, Omit<LiveModelCase, "modelId">> = {
    "gpt-5.4": {
      templateId: "gpt-5.2",
      templateName: "GPT-5.2",
      cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 400_000,
      maxTokens: 128_000,
    },
    "gpt-5.4-pro": {
      templateId: "gpt-5.2-pro",
      templateName: "GPT-5.2 Pro",
      cost: { input: 15, output: 60, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 400_000,
      maxTokens: 128_000,
    },
    "gpt-5.4-mini": {
      templateId: "gpt-5-mini",
      templateName: "GPT-5 mini",
      cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 400_000,
      maxTokens: 128_000,
    },
    "gpt-5.4-nano": {
      templateId: "gpt-5-nano",
      templateName: "GPT-5 nano",
      cost: { input: 0.5, output: 1, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 200_000,
      maxTokens: 64_000,
    },
  };
  const found = cases[modelId];
  if (!found) {
    throw new Error(`Unsupported live OpenAI model: ${modelId}`);
  }
  return { modelId, ...found };
}
/**
 * Parses the comma-separated model-id override (OPENCLAW_LIVE_OPENAI_MODELS)
 * into unique live cases, falling back to the default mini/nano pair when the
 * override is absent or empty.
 */
function resolveLiveModelCases(raw?: string): LiveModelCase[] {
  const requestedIds = (raw ?? "")
    .split(",")
    .map((entry) => entry.trim())
    .filter((entry) => entry.length > 0);
  const ids = requestedIds.length > 0 ? requestedIds : [...DEFAULT_LIVE_MODEL_IDS];
  return [...new Set(ids)].map((id) => resolveLiveModelCase(id));
}
describe("buildOpenAIProvider", () => {
it("resolves gpt-5.4 mini and nano from GPT-5 small-model templates", () => {
const provider = buildOpenAIProvider();
@ -106,3 +173,69 @@ describe("buildOpenAIProvider", () => {
});
});
});
// Live suite: gated by describeLive (OPENCLAW_LIVE_TEST=1 + OPENAI_API_KEY).
// Parameterized over the model ids from OPENCLAW_LIVE_OPENAI_MODELS (or the
// default mini/nano pair); each case performs a real responses-API call.
describeLive("buildOpenAIProvider live", () => {
  it.each(resolveLiveModelCases(process.env.OPENCLAW_LIVE_OPENAI_MODELS))(
    "resolves %s and completes through the OpenAI responses API",
    async (liveCase) => {
      const provider = buildOpenAIProvider();
      // Stub registry that only knows the single template for this case.
      const registry = {
        find(providerId: string, id: string) {
          if (providerId !== "openai") {
            return null;
          }
          if (id === liveCase.templateId) {
            return {
              id: liveCase.templateId,
              name: liveCase.templateName,
              provider: "openai",
              api: "openai-completions",
              baseUrl: "https://api.openai.com/v1",
              reasoning: true,
              input: ["text", "image"],
              cost: liveCase.cost,
              contextWindow: liveCase.contextWindow,
              maxTokens: liveCase.maxTokens,
            };
          }
          return null;
        },
      };
      const resolved = provider.resolveDynamicModel?.({
        provider: "openai",
        modelId: liveCase.modelId,
        modelRegistry: registry as never,
      });
      expect(resolved).toBeDefined();
      const normalized = provider.normalizeResolvedModel?.({
        provider: "openai",
        modelId: resolved!.id,
        model: resolved!,
      });
      // Normalization should surface the live id on the responses API.
      expect(normalized).toMatchObject({
        provider: "openai",
        id: liveCase.modelId,
        api: "openai-responses",
        baseUrl: "https://api.openai.com/v1",
      });
      const client = new OpenAI({
        apiKey: OPENAI_API_KEY,
        baseURL: normalized?.baseUrl,
      });
      const response = await client.responses.create({
        model: normalized?.id ?? liveCase.modelId,
        input: "Reply with exactly OK.",
        max_output_tokens: 16,
      });
      expect(response.output_text.trim()).toMatch(/^OK[.!]?$/);
    },
    30_000,
  );
});

View File

@ -0,0 +1,101 @@
import OpenAI from "openai";
import { describe, expect, it } from "vitest";
import { createTestPluginApi } from "../../test/helpers/extensions/plugin-api.js";
import plugin from "./index.js";
const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY ?? "";
const LIVE_MODEL_ID =
process.env.OPENCLAW_LIVE_OPENROUTER_PLUGIN_MODEL?.trim() || "openai/gpt-5.4-nano";
const liveEnabled = OPENROUTER_API_KEY.trim().length > 0 && process.env.OPENCLAW_LIVE_TEST === "1";
const describeLive = liveEnabled ? describe : describe.skip;
/**
 * Registers the OpenRouter plugin against a test plugin API and captures
 * every provider surface the plugin registers, grouped by kind.
 */
function registerOpenRouterPlugin() {
  const captured = {
    providers: [] as unknown[],
    speechProviders: [] as unknown[],
    mediaProviders: [] as unknown[],
    imageProviders: [] as unknown[],
  };
  plugin.register(
    createTestPluginApi({
      id: "openrouter",
      name: "OpenRouter Provider",
      source: "test",
      config: {},
      runtime: {} as never,
      registerProvider: (provider) => {
        captured.providers.push(provider);
      },
      registerSpeechProvider: (provider) => {
        captured.speechProviders.push(provider);
      },
      registerMediaUnderstandingProvider: (provider) => {
        captured.mediaProviders.push(provider);
      },
      registerImageGenerationProvider: (provider) => {
        captured.imageProviders.push(provider);
      },
    }),
  );
  return captured;
}
// Offline registration smoke test: no network, runs unconditionally.
describe("openrouter plugin", () => {
  it("registers the expected provider surfaces", () => {
    const { providers, speechProviders, mediaProviders, imageProviders } =
      registerOpenRouterPlugin();
    // OpenRouter contributes a single model provider and no speech, media, or
    // image surfaces.
    expect(providers).toHaveLength(1);
    expect(
      providers.map(
        (provider) =>
          // oxlint-disable-next-line typescript/no-explicit-any
          (provider as any).id,
      ),
    ).toEqual(["openrouter"]);
    expect(speechProviders).toHaveLength(0);
    expect(mediaProviders).toHaveLength(0);
    expect(imageProviders).toHaveLength(0);
  });
});
describeLive("openrouter plugin live", () => {
  it("registers an OpenRouter provider that can complete a live request", async () => {
    const { providers } = registerOpenRouterPlugin();
    // Locate the registered OpenRouter provider among the captured surfaces.
    const provider =
      // oxlint-disable-next-line typescript/no-explicit-any
      providers.find((entry) => (entry as any).id === "openrouter");
    expect(provider).toBeDefined();
    // Resolve the model through the provider with a registry that always
    // misses, forcing the provider's own dynamic-resolution path.
    // oxlint-disable-next-line typescript/no-explicit-any
    const resolved = (provider as any).resolveDynamicModel?.({
      provider: "openrouter",
      modelId: LIVE_MODEL_ID,
      modelRegistry: {
        find() {
          return null;
        },
      },
    });
    expect(resolved).toMatchObject({
      provider: "openrouter",
      id: LIVE_MODEL_ID,
      api: "openai-completions",
      baseUrl: "https://openrouter.ai/api/v1",
    });
    // Drive a real chat completion through the resolved base URL.
    const client = new OpenAI({
      apiKey: OPENROUTER_API_KEY,
      baseURL: resolved?.baseUrl,
    });
    const response = await client.chat.completions.create({
      model: resolved?.id ?? LIVE_MODEL_ID,
      messages: [{ role: "user", content: "Reply with exactly OK." }],
      max_tokens: 16,
    });
    // Tolerate trailing punctuation in the model's "OK" reply.
    expect(response.choices[0]?.message?.content?.trim()).toMatch(/^OK[.!]?$/);
  }, 30_000); // generous timeout for the live network round-trip
});

View File

@ -1,7 +1,7 @@
import { resolveAgentRoute } from "openclaw/plugin-sdk/routing";
import { normalizeE164 } from "openclaw/plugin-sdk/text-runtime";
import { describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../../../src/config/config.js";
import { resolveAgentRoute } from "../../../src/routing/resolve-route.js";
import { normalizeE164 } from "../../../src/utils.js";
import type { SignalDaemonExitEvent } from "./daemon.js";
import {
createMockSignalDaemonHandle,
@ -16,16 +16,14 @@ installSignalToolResultTestHooks();
// Import after the harness registers `vi.mock(...)` for Signal internals.
vi.resetModules();
const [{ peekSystemEvents }, { monitorSignalProvider }] = await Promise.all([
import("openclaw/plugin-sdk/infra-runtime"),
import("./monitor.js"),
]);
const { monitorSignalProvider } = await import("./monitor.js");
const {
replyMock,
sendMock,
streamMock,
updateLastRouteMock,
enqueueSystemEventMock,
upsertPairingRequestMock,
waitForTransportReadyMock,
spawnSignalDaemonMock,
@ -109,14 +107,23 @@ async function receiveSignalPayloads(params: {
await flush();
}
function getDirectSignalEventsFor(sender: string) {
function hasQueuedReactionEventFor(sender: string) {
const route = resolveAgentRoute({
cfg: config as OpenClawConfig,
channel: "signal",
accountId: "default",
peer: { kind: "direct", id: normalizeE164(sender) },
});
return peekSystemEvents(route.sessionKey);
return enqueueSystemEventMock.mock.calls.some(([text, options]) => {
return (
typeof text === "string" &&
text.includes("Signal reaction added") &&
typeof options === "object" &&
options !== null &&
"sessionKey" in options &&
(options as { sessionKey?: string }).sessionKey === route.sessionKey
);
});
}
function makeBaseEnvelope(overrides: Record<string, unknown> = {}) {
@ -383,8 +390,7 @@ describe("monitorSignalProvider tool results", () => {
},
});
const events = getDirectSignalEventsFor("+15550001111");
expect(events.some((text) => text.includes("Signal reaction added"))).toBe(true);
expect(hasQueuedReactionEventFor("+15550001111")).toBe(true);
});
it.each([
@ -424,8 +430,7 @@ describe("monitorSignalProvider tool results", () => {
},
});
const events = getDirectSignalEventsFor("+15550001111");
expect(events.some((text) => text.includes("Signal reaction added"))).toBe(shouldEnqueue);
expect(hasQueuedReactionEventFor("+15550001111")).toBe(shouldEnqueue);
expect(sendMock).not.toHaveBeenCalled();
expect(upsertPairingRequestMock).not.toHaveBeenCalled();
});
@ -442,8 +447,7 @@ describe("monitorSignalProvider tool results", () => {
},
});
const events = getDirectSignalEventsFor("+15550001111");
expect(events.some((text) => text.includes("Signal reaction added"))).toBe(true);
expect(hasQueuedReactionEventFor("+15550001111")).toBe(true);
});
it("processes messages when reaction metadata is present", async () => {

View File

@ -4,6 +4,7 @@ import type { SignalDaemonExitEvent, SignalDaemonHandle } from "./daemon.js";
type SignalToolResultTestMocks = {
waitForTransportReadyMock: MockFn;
enqueueSystemEventMock: MockFn;
sendMock: MockFn;
replyMock: MockFn;
updateLastRouteMock: MockFn;
@ -16,6 +17,7 @@ type SignalToolResultTestMocks = {
};
const waitForTransportReadyMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
const enqueueSystemEventMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
const sendMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
const replyMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
const updateLastRouteMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
@ -29,6 +31,7 @@ const spawnSignalDaemonMock = vi.hoisted(() => vi.fn()) as unknown as MockFn;
export function getSignalToolResultTestMocks(): SignalToolResultTestMocks {
return {
waitForTransportReadyMock,
enqueueSystemEventMock,
sendMock,
replyMock,
updateLastRouteMock,
@ -162,6 +165,10 @@ vi.mock("openclaw/plugin-sdk/infra-runtime", async () => {
return {
...actual,
waitForTransportReady: (...args: unknown[]) => waitForTransportReadyMock(...args),
enqueueSystemEvent: (...args: Parameters<typeof actual.enqueueSystemEvent>) => {
enqueueSystemEventMock(...args);
return actual.enqueueSystemEvent(...args);
},
};
});
@ -189,6 +196,7 @@ export function installSignalToolResultTestHooks() {
readAllowFromStoreMock.mockReset().mockResolvedValue([]);
upsertPairingRequestMock.mockReset().mockResolvedValue({ code: "PAIRCODE", created: true });
waitForTransportReadyMock.mockReset().mockResolvedValue(undefined);
enqueueSystemEventMock.mockReset();
resetSystemEventsForTest();
});

View File

@ -21,8 +21,10 @@ const { resolveTelegramFetch } = vi.hoisted(() => ({
resolveTelegramFetch: vi.fn(),
}));
vi.mock("../../../src/config/config.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("../../../src/config/config.js")>();
vi.mock("openclaw/plugin-sdk/config-runtime", async () => {
const actual = await vi.importActual<typeof import("openclaw/plugin-sdk/config-runtime")>(
"openclaw/plugin-sdk/config-runtime",
);
return {
...actual,
loadConfig,

View File

@ -8,8 +8,10 @@ const readAllowFromStoreMock = vi.fn().mockResolvedValue([]);
const upsertPairingRequestMock = vi.fn().mockResolvedValue({ code: "PAIRCODE", created: true });
const saveMediaBufferSpy = vi.fn();
vi.mock("../../../src/config/config.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("../../../src/config/config.js")>();
vi.mock("openclaw/plugin-sdk/config-runtime", async () => {
const actual = await vi.importActual<typeof import("openclaw/plugin-sdk/config-runtime")>(
"openclaw/plugin-sdk/config-runtime",
);
return {
...actual,
loadConfig: vi.fn().mockReturnValue({
@ -37,8 +39,10 @@ vi.mock("../../../src/pairing/pairing-store.js", () => {
};
});
vi.mock("../../../src/media/store.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("../../../src/media/store.js")>();
vi.mock("openclaw/plugin-sdk/media-runtime", async () => {
const actual = await vi.importActual<typeof import("openclaw/plugin-sdk/media-runtime")>(
"openclaw/plugin-sdk/media-runtime",
);
return {
...actual,
saveMediaBuffer: vi.fn(async (...args: Parameters<typeof actual.saveMediaBuffer>) => {

View File

@ -19,25 +19,30 @@ function resolveTestAuthDir() {
const authDir = resolveTestAuthDir();
vi.mock("../../../src/config/config.js", () => ({
loadConfig: () =>
({
channels: {
whatsapp: {
accounts: {
default: { enabled: true, authDir: resolveTestAuthDir() },
vi.mock("openclaw/plugin-sdk/config-runtime", async () => {
const actual = await vi.importActual<typeof import("openclaw/plugin-sdk/config-runtime")>(
"openclaw/plugin-sdk/config-runtime",
);
return {
...actual,
loadConfig: () =>
({
channels: {
whatsapp: {
accounts: {
default: { enabled: true, authDir: resolveTestAuthDir() },
},
},
},
},
}) as never,
}));
}) as never,
};
});
vi.mock("./session.js", () => {
const authDir = resolveTestAuthDir();
const sockA = { ws: { close: vi.fn() } };
const sockB = { ws: { close: vi.fn() } };
let call = 0;
const createWaSocket = vi.fn(async () => (call++ === 0 ? sockA : sockB));
const createWaSocket = vi.fn(async () => (createWaSocket.mock.calls.length <= 1 ? sockA : sockB));
const waitForWaConnection = vi.fn();
const formatError = vi.fn((err: unknown) => `formatted:${String(err)}`);
const getStatusCode = vi.fn(
@ -78,6 +83,10 @@ describe("loginWeb coverage", () => {
beforeEach(() => {
vi.useFakeTimers();
vi.clearAllMocks();
createWaSocketMock.mockClear();
waitForWaConnectionMock.mockReset().mockResolvedValue(undefined);
waitForCredsSaveQueueWithTimeoutMock.mockReset().mockResolvedValue(undefined);
formatErrorMock.mockReset().mockImplementation((err: unknown) => `formatted:${String(err)}`);
rmMock.mockClear();
});
afterEach(() => {

View File

@ -121,6 +121,10 @@
"types": "./dist/plugin-sdk/infra-runtime.d.ts",
"default": "./dist/plugin-sdk/infra-runtime.js"
},
"./plugin-sdk/ssrf-runtime": {
"types": "./dist/plugin-sdk/ssrf-runtime.d.ts",
"default": "./dist/plugin-sdk/ssrf-runtime.js"
},
"./plugin-sdk/media-runtime": {
"types": "./dist/plugin-sdk/media-runtime.d.ts",
"default": "./dist/plugin-sdk/media-runtime.js"
@ -133,6 +137,18 @@
"types": "./dist/plugin-sdk/conversation-runtime.d.ts",
"default": "./dist/plugin-sdk/conversation-runtime.js"
},
"./plugin-sdk/matrix-runtime-heavy": {
"types": "./dist/plugin-sdk/matrix-runtime-heavy.d.ts",
"default": "./dist/plugin-sdk/matrix-runtime-heavy.js"
},
"./plugin-sdk/matrix-runtime-shared": {
"types": "./dist/plugin-sdk/matrix-runtime-shared.d.ts",
"default": "./dist/plugin-sdk/matrix-runtime-shared.js"
},
"./plugin-sdk/thread-bindings-runtime": {
"types": "./dist/plugin-sdk/thread-bindings-runtime.d.ts",
"default": "./dist/plugin-sdk/thread-bindings-runtime.js"
},
"./plugin-sdk/text-runtime": {
"types": "./dist/plugin-sdk/text-runtime.d.ts",
"default": "./dist/plugin-sdk/text-runtime.js"
@ -577,6 +593,7 @@
},
"dependencies": {
"@agentclientprotocol/sdk": "0.16.1",
"@anthropic-ai/vertex-sdk": "^0.14.4",
"@aws-sdk/client-bedrock": "^3.1011.0",
"@clack/prompts": "^1.1.0",
"@homebridge/ciao": "^1.3.5",

208
pnpm-lock.yaml generated
View File

@ -29,6 +29,9 @@ importers:
'@agentclientprotocol/sdk':
specifier: 0.16.1
version: 0.16.1(zod@4.3.6)
'@anthropic-ai/vertex-sdk':
specifier: ^0.14.4
version: 0.14.4(zod@4.3.6)
'@aws-sdk/client-bedrock':
specifier: ^3.1011.0
version: 3.1011.0
@ -688,6 +691,9 @@ packages:
zod:
optional: true
'@anthropic-ai/vertex-sdk@0.14.4':
resolution: {integrity: sha512-BZUPRWghZxfSFtAxU563wH+jfWBPoedAwsVxG35FhmNsjeV8tyfN+lFriWhCpcZApxA4NdT6Soov+PzfnxxD5g==}
'@asamuzakjp/css-color@5.0.1':
resolution: {integrity: sha512-2SZFvqMyvboVV1d15lMf7XiI3m7SDqXUuKaTymJYLN6dSGadqp+fVojqJlVoMlbZnlTmu3S0TLwLTJpvBMO1Aw==}
engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0}
@ -1480,10 +1486,6 @@ packages:
cpu: [x64]
os: [win32]
'@isaacs/cliui@8.0.2':
resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
engines: {node: '>=12'}
'@isaacs/fs-minipass@4.0.1':
resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==}
engines: {node: '>=18.0.0'}
@ -2619,10 +2621,6 @@ packages:
'@pinojs/redact@0.4.0':
resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==}
'@pkgjs/parseargs@0.11.0':
resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==}
engines: {node: '>=14'}
'@polka/url@1.0.0-next.29':
resolution: {integrity: sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==}
@ -4125,9 +4123,6 @@ packages:
resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==}
engines: {node: '>= 0.4'}
eastasianwidth@0.2.0:
resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
ecdsa-sig-formatter@1.0.11:
resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==}
@ -4140,9 +4135,6 @@ packages:
emoji-regex@8.0.0:
resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
emoji-regex@9.2.2:
resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==}
empathic@2.0.0:
resolution: {integrity: sha512-i6UzDscO/XfAcNYD75CfICkmfLedpyPDdozrLMmQc5ORaQcdMoc21OnlEylMIqI7U8eniKrPMxxtj8k0vhmJhA==}
engines: {node: '>=14'}
@ -4359,10 +4351,6 @@ packages:
debug:
optional: true
foreground-child@3.3.1:
resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==}
engines: {node: '>=14'}
form-data@2.5.4:
resolution: {integrity: sha512-Y/3MmRiR8Nd+0CUtrbvcKtKzLWiUfpQ7DFVggH8PwmGt/0r7RSy32GuP4hpCJlQNEBusisSx1DLtD8uD386HJQ==}
engines: {node: '>= 0.12'}
@ -4409,14 +4397,18 @@ packages:
engines: {node: '>=10'}
deprecated: This package is no longer supported.
gaxios@7.1.3:
resolution: {integrity: sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ==}
engines: {node: '>=18'}
gaxios@6.7.1:
resolution: {integrity: sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==}
engines: {node: '>=14'}
gaxios@7.1.4:
resolution: {integrity: sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA==}
engines: {node: '>=18'}
gcp-metadata@6.1.1:
resolution: {integrity: sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==}
engines: {node: '>=14'}
gcp-metadata@8.1.2:
resolution: {integrity: sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==}
engines: {node: '>=18'}
@ -4459,11 +4451,6 @@ packages:
resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
engines: {node: '>= 6'}
glob@10.5.0:
resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==}
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
hasBin: true
glob@13.0.6:
resolution: {integrity: sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==}
engines: {node: 18 || 20 || >=22}
@ -4472,14 +4459,18 @@ packages:
resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
google-auth-library@10.6.1:
resolution: {integrity: sha512-5awwuLrzNol+pFDmKJd0dKtZ0fPLAtoA5p7YO4ODsDu6ONJUVqbYwvv8y2ZBO5MBNp9TJXigB19710kYpBPdtA==}
engines: {node: '>=18'}
google-auth-library@10.6.2:
resolution: {integrity: sha512-e27Z6EThmVNNvtYASwQxose/G57rkRuaRbQyxM2bvYLLX/GqWZ5chWq2EBoUchJbCc57eC9ArzO5wMsEmWftCw==}
engines: {node: '>=18'}
google-auth-library@9.15.1:
resolution: {integrity: sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng==}
engines: {node: '>=14'}
google-logging-utils@0.0.2:
resolution: {integrity: sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ==}
engines: {node: '>=14'}
google-logging-utils@1.1.3:
resolution: {integrity: sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA==}
engines: {node: '>=14'}
@ -4495,6 +4486,10 @@ packages:
resolution: {integrity: sha512-wcHAQ1e7svL3fJMpDchcQVcWUmywhuepOOjHUHmMmWAwUJEIyK5ea5sbSjZd+Gy1aMpZeP8VYJa+4tP+j1YptQ==}
engines: {node: ^12.20.0 || >=14.13.1}
gtoken@7.1.0:
resolution: {integrity: sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw==}
engines: {node: '>=14.0.0'}
has-flag@4.0.0:
resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==}
engines: {node: '>=8'}
@ -4721,9 +4716,6 @@ packages:
resolution: {integrity: sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==}
engines: {node: '>=8'}
jackspeak@3.4.3:
resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==}
jimp@1.6.0:
resolution: {integrity: sha512-YcwCHw1kiqEeI5xRpDlPPBGL2EOpBKLwO4yIBJcXWHPj5PnA5urGq0jbyhM5KoNpypQ6VboSoxc9D8HyfvngSg==}
engines: {node: '>=18'}
@ -4993,9 +4985,6 @@ packages:
resolution: {integrity: sha512-neJAj8GwF0e8EpycYIDFqEPcx9Qz4GUho20jWFR7YiFeXzF1YMLdxB36PypcTSPMA+4+LvgyMacYhlr18Zlymw==}
engines: {node: '>=18'}
lru-cache@10.4.3:
resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==}
lru-cache@11.2.7:
resolution: {integrity: sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==}
engines: {node: 20 || >=22}
@ -5423,9 +5412,6 @@ packages:
resolution: {integrity: sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==}
engines: {node: '>= 14'}
package-json-from-dist@1.0.1:
resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==}
pako@1.0.11:
resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==}
@ -5483,10 +5469,6 @@ packages:
path-parse@1.0.7:
resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
path-scurry@1.11.1:
resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==}
engines: {node: '>=16 || 14 >=14.18'}
path-scurry@2.0.2:
resolution: {integrity: sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==}
engines: {node: 18 || 20 || >=22}
@ -5794,10 +5776,6 @@ packages:
deprecated: Rimraf versions prior to v4 are no longer supported
hasBin: true
rimraf@5.0.10:
resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==}
hasBin: true
rolldown-plugin-dts@0.22.5:
resolution: {integrity: sha512-M/HXfM4cboo+jONx9Z0X+CUf3B5tCi7ni+kR5fUW50Fp9AlZk0oVLesibGWgCXDKFp5lpgQ9yhKoImUFjl3VZw==}
engines: {node: '>=20.19.0'}
@ -6089,10 +6067,6 @@ packages:
resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
engines: {node: '>=8'}
string-width@5.1.2:
resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==}
engines: {node: '>=12'}
string-width@7.2.0:
resolution: {integrity: sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==}
engines: {node: '>=18'}
@ -6402,6 +6376,10 @@ packages:
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
hasBin: true
uuid@9.0.1:
resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==}
hasBin: true
validate-npm-package-name@7.0.2:
resolution: {integrity: sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A==}
engines: {node: ^20.17.0 || >=22.9.0}
@ -6557,10 +6535,6 @@ packages:
resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
engines: {node: '>=10'}
wrap-ansi@8.1.0:
resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==}
engines: {node: '>=12'}
wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
@ -6668,6 +6642,15 @@ snapshots:
optionalDependencies:
zod: 4.3.6
'@anthropic-ai/vertex-sdk@0.14.4(zod@4.3.6)':
dependencies:
'@anthropic-ai/sdk': 0.73.0(zod@4.3.6)
google-auth-library: 9.15.1
transitivePeerDependencies:
- encoding
- supports-color
- zod
'@asamuzakjp/css-color@5.0.1':
dependencies:
'@csstools/css-calc': 3.1.1(@csstools/css-parser-algorithms@4.0.0(@csstools/css-tokenizer@4.0.0))(@csstools/css-tokenizer@4.0.0)
@ -7804,7 +7787,7 @@ snapshots:
'@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))':
dependencies:
google-auth-library: 10.6.1
google-auth-library: 10.6.2
p-retry: 4.6.2
protobufjs: 7.5.4
ws: 8.19.0
@ -7969,15 +7952,6 @@ snapshots:
'@img/sharp-win32-x64@0.34.5':
optional: true
'@isaacs/cliui@8.0.2':
dependencies:
string-width: 5.1.2
string-width-cjs: string-width@4.2.3
strip-ansi: 7.2.0
strip-ansi-cjs: strip-ansi@6.0.1
wrap-ansi: 8.1.0
wrap-ansi-cjs: wrap-ansi@7.0.0
'@isaacs/fs-minipass@4.0.1':
dependencies:
minipass: 7.1.3
@ -9320,9 +9294,6 @@ snapshots:
'@pinojs/redact@0.4.0': {}
'@pkgjs/parseargs@0.11.0':
optional: true
'@polka/url@1.0.0-next.29': {}
'@protobufjs/aspromise@1.1.2': {}
@ -11012,8 +10983,6 @@ snapshots:
es-errors: 1.3.0
gopd: 1.2.0
eastasianwidth@0.2.0: {}
ecdsa-sig-formatter@1.0.11:
dependencies:
safe-buffer: 5.2.1
@ -11024,8 +10993,6 @@ snapshots:
emoji-regex@8.0.0: {}
emoji-regex@9.2.2: {}
empathic@2.0.0: {}
encodeurl@2.0.0: {}
@ -11278,11 +11245,6 @@ snapshots:
follow-redirects@1.15.11: {}
foreground-child@3.3.1:
dependencies:
cross-spawn: 7.0.6
signal-exit: 4.1.0
form-data@2.5.4:
dependencies:
asynckit: 0.4.0
@ -11336,13 +11298,15 @@ snapshots:
wide-align: 1.1.5
optional: true
gaxios@7.1.3:
gaxios@6.7.1:
dependencies:
extend: 3.0.2
https-proxy-agent: 7.0.6
node-fetch: 3.3.2
rimraf: 5.0.10
is-stream: 2.0.1
node-fetch: 2.7.0
uuid: 9.0.1
transitivePeerDependencies:
- encoding
- supports-color
gaxios@7.1.4:
@ -11353,6 +11317,15 @@ snapshots:
transitivePeerDependencies:
- supports-color
gcp-metadata@6.1.1:
dependencies:
gaxios: 6.7.1
google-logging-utils: 0.0.2
json-bigint: 1.0.0
transitivePeerDependencies:
- encoding
- supports-color
gcp-metadata@8.1.2:
dependencies:
gaxios: 7.1.4
@ -11411,15 +11384,6 @@ snapshots:
dependencies:
is-glob: 4.0.3
glob@10.5.0:
dependencies:
foreground-child: 3.3.1
jackspeak: 3.4.3
minimatch: 10.2.4
minipass: 7.1.3
package-json-from-dist: 1.0.1
path-scurry: 1.11.1
glob@13.0.6:
dependencies:
minimatch: 10.2.4
@ -11436,17 +11400,6 @@ snapshots:
path-is-absolute: 1.0.1
optional: true
google-auth-library@10.6.1:
dependencies:
base64-js: 1.5.1
ecdsa-sig-formatter: 1.0.11
gaxios: 7.1.3
gcp-metadata: 8.1.2
google-logging-utils: 1.1.3
jws: 4.0.1
transitivePeerDependencies:
- supports-color
google-auth-library@10.6.2:
dependencies:
base64-js: 1.5.1
@ -11458,6 +11411,20 @@ snapshots:
transitivePeerDependencies:
- supports-color
google-auth-library@9.15.1:
dependencies:
base64-js: 1.5.1
ecdsa-sig-formatter: 1.0.11
gaxios: 6.7.1
gcp-metadata: 6.1.1
gtoken: 7.1.0
jws: 4.0.1
transitivePeerDependencies:
- encoding
- supports-color
google-logging-utils@0.0.2: {}
google-logging-utils@1.1.3: {}
gopd@1.2.0: {}
@ -11474,6 +11441,14 @@ snapshots:
- encoding
- supports-color
gtoken@7.1.0:
dependencies:
gaxios: 6.7.1
jws: 4.0.1
transitivePeerDependencies:
- encoding
- supports-color
has-flag@4.0.0: {}
has-own@1.0.1: {}
@ -11725,12 +11700,6 @@ snapshots:
html-escaper: 2.0.2
istanbul-lib-report: 3.0.1
jackspeak@3.4.3:
dependencies:
'@isaacs/cliui': 8.0.2
optionalDependencies:
'@pkgjs/parseargs': 0.11.0
jimp@1.6.0:
dependencies:
'@jimp/core': 1.6.0
@ -12037,8 +12006,6 @@ snapshots:
dependencies:
steno: 4.0.2
lru-cache@10.4.3: {}
lru-cache@11.2.7: {}
lru-cache@6.0.0:
@ -12634,8 +12601,6 @@ snapshots:
degenerator: 5.0.1
netmask: 2.0.2
package-json-from-dist@1.0.1: {}
pako@1.0.11: {}
pako@2.1.0: {}
@ -12681,11 +12646,6 @@ snapshots:
path-parse@1.0.7: {}
path-scurry@1.11.1:
dependencies:
lru-cache: 10.4.3
minipass: 7.1.3
path-scurry@2.0.2:
dependencies:
lru-cache: 11.2.7
@ -13036,10 +12996,6 @@ snapshots:
glob: 7.2.3
optional: true
rimraf@5.0.10:
dependencies:
glob: 10.5.0
rolldown-plugin-dts@0.22.5(@typescript/native-preview@7.0.0-dev.20260317.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3):
dependencies:
'@babel/generator': 8.0.0-rc.2
@ -13394,12 +13350,6 @@ snapshots:
is-fullwidth-code-point: 3.0.0
strip-ansi: 6.0.1
string-width@5.1.2:
dependencies:
eastasianwidth: 0.2.0
emoji-regex: 9.2.2
strip-ansi: 7.2.0
string-width@7.2.0:
dependencies:
emoji-regex: 10.6.0
@ -13687,6 +13637,8 @@ snapshots:
uuid@8.3.2: {}
uuid@9.0.1: {}
validate-npm-package-name@7.0.2: {}
vary@1.1.2: {}
@ -13809,12 +13761,6 @@ snapshots:
string-width: 4.2.3
strip-ansi: 6.0.1
wrap-ansi@8.1.0:
dependencies:
ansi-styles: 6.2.3
string-width: 5.1.2
strip-ansi: 7.2.0
wrappy@1.0.2: {}
ws@8.19.0: {}

View File

@ -20,9 +20,13 @@
"channel-runtime",
"interactive-runtime",
"infra-runtime",
"ssrf-runtime",
"media-runtime",
"media-understanding-runtime",
"conversation-runtime",
"matrix-runtime-heavy",
"matrix-runtime-shared",
"thread-bindings-runtime",
"text-runtime",
"agent-runtime",
"speech-runtime",

View File

@ -297,7 +297,7 @@ const defaultHeavyUnitFileLimit =
: isMacMiniProfile
? 90
: testProfile === "low"
? 32
? 36
: highMemLocalHost
? 80
: 60;
@ -307,7 +307,7 @@ const defaultHeavyUnitLaneCount =
: isMacMiniProfile
? 6
: testProfile === "low"
? 3
? 4
: highMemLocalHost
? 5
: 4;
@ -365,11 +365,13 @@ const defaultSingletonBatchLaneCount =
? 0
: isCI
? Math.ceil(unitSingletonBatchFiles.length / 6)
: highMemLocalHost
? Math.ceil(unitSingletonBatchFiles.length / 8)
: lowMemLocalHost
? Math.ceil(unitSingletonBatchFiles.length / 12)
: Math.ceil(unitSingletonBatchFiles.length / 10);
: testProfile === "low" && highMemLocalHost
? Math.ceil(unitSingletonBatchFiles.length / 8) + 1
: highMemLocalHost
? Math.ceil(unitSingletonBatchFiles.length / 8)
: lowMemLocalHost
? Math.ceil(unitSingletonBatchFiles.length / 12)
: Math.ceil(unitSingletonBatchFiles.length / 10);
const singletonBatchLaneCount =
unitSingletonBatchFiles.length === 0
? 0
@ -437,6 +439,22 @@ const unitSingletonEntries = unitSingletonBuckets.map((files, index) => ({
unitSingletonBuckets.length === 1 ? "unit-singleton" : `unit-singleton-${String(index + 1)}`,
args: ["vitest", "run", "--config", "vitest.unit.config.ts", "--pool=forks", ...files],
}));
const unitThreadEntries =
unitThreadSingletonFiles.length > 0
? [
{
name: "unit-threads",
args: [
"vitest",
"run",
"--config",
"vitest.unit.config.ts",
"--pool=threads",
...unitThreadSingletonFiles,
],
},
]
: [];
const baseRuns = [
...(shouldSplitUnitRuns
? [
@ -469,10 +487,7 @@ const baseRuns = [
file,
],
})),
...unitThreadSingletonFiles.map((file) => ({
name: `${path.basename(file, ".test.ts")}-threads`,
args: ["vitest", "run", "--config", "vitest.unit.config.ts", "--pool=threads", file],
})),
...unitThreadEntries,
...unitVmForkSingletonFiles.map((file) => ({
name: `${path.basename(file, ".test.ts")}-vmforks`,
args: [
@ -695,7 +710,9 @@ const defaultTopLevelParallelLimit =
testProfile === "serial"
? 1
: testProfile === "low"
? 2
? lowMemLocalHost
? 2
: 3
: testProfile === "max"
? 5
: highMemLocalHost
@ -1287,9 +1304,16 @@ if (serialPrefixRuns.length > 0) {
if (failedSerialPrefix !== undefined) {
process.exit(failedSerialPrefix);
}
const deferredRunConcurrency = isMacMiniProfile ? 3 : testProfile === "low" ? 2 : undefined;
const failedDeferredParallel = isMacMiniProfile
? await runEntriesWithLimit(deferredParallelRuns, passthroughOptionArgs, 3)
: await runEntries(deferredParallelRuns, passthroughOptionArgs);
? await runEntriesWithLimit(deferredParallelRuns, passthroughOptionArgs, deferredRunConcurrency)
: deferredRunConcurrency
? await runEntriesWithLimit(
deferredParallelRuns,
passthroughOptionArgs,
deferredRunConcurrency,
)
: await runEntries(deferredParallelRuns, passthroughOptionArgs);
if (failedDeferredParallel !== undefined) {
process.exit(failedDeferredParallel);
}

View File

@ -25,14 +25,25 @@ const readJson = (filePath, fallback) => {
};
const normalizeRepoPath = (value) => value.split(path.sep).join("/");
const repoRoot = path.resolve(process.cwd());
// Normalize a manifest path to a repo-relative, forward-slash form.
// Absolute paths are rebased onto the repo root; anything that escapes the
// repo (or normalizes to "" / an absolute string) keeps its raw input text.
const normalizeTrackedRepoPath = (value) => {
  const raw = typeof value === "string" ? value : String(value ?? "");
  let candidate = raw;
  if (path.isAbsolute(raw)) {
    candidate = path.relative(repoRoot, path.resolve(raw));
  }
  const escapesRepo =
    candidate === "" || candidate.startsWith("..") || path.isAbsolute(candidate);
  return normalizeRepoPath(escapesRepo ? raw : candidate);
};
const normalizeManifestEntries = (entries) =>
entries
.map((entry) =>
typeof entry === "string"
? { file: normalizeRepoPath(entry), reason: "" }
? { file: normalizeTrackedRepoPath(entry), reason: "" }
: {
file: normalizeRepoPath(String(entry?.file ?? "")),
file: normalizeTrackedRepoPath(String(entry?.file ?? "")),
reason: typeof entry?.reason === "string" ? entry.reason : "",
},
)
@ -60,7 +71,7 @@ export function loadUnitTimingManifest() {
const files = Object.fromEntries(
Object.entries(raw.files ?? {})
.map(([file, value]) => {
const normalizedFile = normalizeRepoPath(file);
const normalizedFile = normalizeTrackedRepoPath(file);
const durationMs =
Number.isFinite(value?.durationMs) && value.durationMs >= 0 ? value.durationMs : null;
const testCount =
@ -97,7 +108,7 @@ export function loadUnitMemoryHotspotManifest() {
const files = Object.fromEntries(
Object.entries(raw.files ?? {})
.map(([file, value]) => {
const normalizedFile = normalizeRepoPath(file);
const normalizedFile = normalizeTrackedRepoPath(file);
const deltaKb =
Number.isFinite(value?.deltaKb) && value.deltaKb > 0 ? Math.round(value.deltaKb) : null;
const sources = Array.isArray(value?.sources)

View File

@ -57,10 +57,24 @@ function parseArgs(argv) {
return args;
}
const normalizeRepoPath = (value) => value.split(path.sep).join("/");
const repoRoot = path.resolve(process.cwd());
// Normalize a tracked file path to a repo-relative, forward-slash form.
// Absolute inputs are rebased onto the repo root; inputs that escape the
// repo (or normalize to "" / an absolute path) keep their original text.
const normalizeTrackedRepoPath = (value) => {
  // Coerce non-strings defensively; null/undefined become "".
  const normalizedValue = typeof value === "string" ? value : String(value ?? "");
  // Rebase absolute paths onto the repo root so manifests stay portable.
  const repoRelative = path.isAbsolute(normalizedValue)
    ? path.relative(repoRoot, path.resolve(normalizedValue))
    : normalizedValue;
  // A result outside the repo (or empty) is not rebased — fall back to the raw value.
  if (path.isAbsolute(repoRelative) || repoRelative.startsWith("..") || repoRelative === "") {
    return normalizeRepoPath(normalizedValue);
  }
  return normalizeRepoPath(repoRelative);
};
function mergeHotspotEntry(aggregated, file, value) {
if (!(Number.isFinite(value?.deltaKb) && value.deltaKb > 0)) {
return;
}
const normalizedFile = normalizeTrackedRepoPath(file);
const normalizeSourceLabel = (source) => {
const separator = source.lastIndexOf(":");
if (separator === -1) {
@ -75,9 +89,9 @@ function mergeHotspotEntry(aggregated, file, value) {
.filter((source) => typeof source === "string" && source.length > 0)
.map(normalizeSourceLabel)
: [];
const previous = aggregated.get(file);
const previous = aggregated.get(normalizedFile);
if (!previous) {
aggregated.set(file, {
aggregated.set(normalizedFile, {
deltaKb: Math.round(value.deltaKb),
sources: [...new Set(nextSources)],
});

View File

@ -9,7 +9,7 @@ function parseArgs(argv) {
config: "vitest.unit.config.ts",
out: unitTimingManifestPath,
reportPath: "",
limit: 128,
limit: 256,
defaultDurationMs: 250,
};
for (let i = 0; i < argv.length; i += 1) {
@ -50,6 +50,17 @@ function parseArgs(argv) {
}
const normalizeRepoPath = (value) => value.split(path.sep).join("/");
const repoRoot = path.resolve(process.cwd());
const normalizeTrackedRepoPath = (value) => {
const normalizedValue = typeof value === "string" ? value : String(value ?? "");
const repoRelative = path.isAbsolute(normalizedValue)
? path.relative(repoRoot, path.resolve(normalizedValue))
: normalizedValue;
if (path.isAbsolute(repoRelative) || repoRelative.startsWith("..") || repoRelative === "") {
return normalizeRepoPath(normalizedValue);
}
return normalizeRepoPath(repoRelative);
};
const opts = parseArgs(process.argv.slice(2));
const reportPath =
@ -74,7 +85,7 @@ const report = JSON.parse(fs.readFileSync(reportPath, "utf8"));
const files = Object.fromEntries(
(report.testResults ?? [])
.map((result) => {
const file = typeof result.name === "string" ? normalizeRepoPath(result.name) : "";
const file = typeof result.name === "string" ? normalizeTrackedRepoPath(result.name) : "";
const start = typeof result.startTime === "number" ? result.startTime : 0;
const end = typeof result.endTime === "number" ? result.endTime : 0;
const testCount = Array.isArray(result.assertionResults) ? result.assertionResults.length : 0;

View File

@ -0,0 +1,124 @@
import { existsSync, readFileSync } from "node:fs";
import { homedir, platform } from "node:os";
import { join } from "node:path";
import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js";
// Vertex falls back to the global endpoint when no explicit region is configured.
const ANTHROPIC_VERTEX_DEFAULT_REGION = "global";
// GCP region identifiers: lowercase alphanumerics and dashes only.
const ANTHROPIC_VERTEX_REGION_RE = /^[a-z0-9-]+$/;
// Default gcloud Application Default Credentials location on POSIX systems.
const GCLOUD_DEFAULT_ADC_PATH = join(
  homedir(),
  ".config",
  "gcloud",
  "application_default_credentials.json",
);
// Minimal shape of the ADC JSON fields we read; values are validated at use.
type AdcProjectFile = {
  project_id?: unknown;
  quota_project_id?: unknown;
};
/**
 * Resolve the GCP project id for Anthropic-on-Vertex.
 * Explicit env vars win (in priority order); otherwise fall back to reading
 * the project from the on-disk ADC credentials file.
 */
export function resolveAnthropicVertexProjectId(
  env: NodeJS.ProcessEnv = process.env,
): string | undefined {
  const envCandidates = [
    env.ANTHROPIC_VERTEX_PROJECT_ID,
    env.GOOGLE_CLOUD_PROJECT,
    env.GOOGLE_CLOUD_PROJECT_ID,
  ];
  for (const candidate of envCandidates) {
    const normalized = normalizeOptionalSecretInput(candidate);
    if (normalized) {
      return normalized;
    }
  }
  return resolveAnthropicVertexProjectIdFromAdc(env);
}
/**
 * Resolve the Vertex region from the environment.
 * GOOGLE_CLOUD_LOCATION takes precedence over the legacy CLOUD_ML_REGION;
 * malformed values fall back to the default ("global") region.
 */
export function resolveAnthropicVertexRegion(env: NodeJS.ProcessEnv = process.env): string {
  const candidate =
    normalizeOptionalSecretInput(env.GOOGLE_CLOUD_LOCATION) ||
    normalizeOptionalSecretInput(env.CLOUD_ML_REGION);
  if (candidate && ANTHROPIC_VERTEX_REGION_RE.test(candidate)) {
    return candidate;
  }
  return ANTHROPIC_VERTEX_DEFAULT_REGION;
}
/**
 * Derive a Vertex region from a googleapis base URL, if possible.
 * Returns "global" for the bare aiplatform host, the region prefix for
 * `<region>-aiplatform.googleapis.com` hosts, and undefined otherwise
 * (including blank or unparseable input).
 */
export function resolveAnthropicVertexRegionFromBaseUrl(baseUrl?: string): string | undefined {
  const candidate = baseUrl?.trim();
  if (!candidate) {
    return undefined;
  }
  let hostname: string;
  try {
    hostname = new URL(candidate).hostname.toLowerCase();
  } catch {
    // Not an absolute URL — no region can be derived.
    return undefined;
  }
  if (hostname === "aiplatform.googleapis.com") {
    return "global";
  }
  const regionalMatch = hostname.match(/^([a-z0-9-]+)-aiplatform\.googleapis\.com$/);
  return regionalMatch?.[1];
}
/**
 * Pick the client region: a region baked into the base URL wins; otherwise
 * consult the environment (which itself falls back to the default region).
 */
export function resolveAnthropicVertexClientRegion(params?: {
  baseUrl?: string;
  env?: NodeJS.ProcessEnv;
}): string {
  const regionFromUrl = resolveAnthropicVertexRegionFromBaseUrl(params?.baseUrl);
  return regionFromUrl ?? resolveAnthropicVertexRegion(params?.env);
}
// True when the user has explicitly opted into GCP metadata-server auth
// via ANTHROPIC_VERTEX_USE_GCP_METADATA ("1" or case-insensitive "true").
function hasAnthropicVertexMetadataServerAdc(env: NodeJS.ProcessEnv = process.env): boolean {
  const optIn = normalizeOptionalSecretInput(env.ANTHROPIC_VERTEX_USE_GCP_METADATA)?.toLowerCase();
  return optIn === "1" || optIn === "true";
}
// Platform-default gcloud ADC file path: %APPDATA%\gcloud on Windows,
// ~/.config/gcloud elsewhere.
function resolveAnthropicVertexDefaultAdcPath(env: NodeJS.ProcessEnv = process.env): string {
  if (platform() !== "win32") {
    return GCLOUD_DEFAULT_ADC_PATH;
  }
  const appData = env.APPDATA ?? join(homedir(), "AppData", "Roaming");
  return join(appData, "gcloud", "application_default_credentials.json");
}
// Locate an ADC credentials file that actually exists on disk.
// An explicit GOOGLE_APPLICATION_CREDENTIALS path is authoritative — if it is
// set but missing we do NOT fall back to the platform-default location.
function resolveAnthropicVertexAdcCredentialsPath(
  env: NodeJS.ProcessEnv = process.env,
): string | undefined {
  const candidate =
    normalizeOptionalSecretInput(env.GOOGLE_APPLICATION_CREDENTIALS) ||
    resolveAnthropicVertexDefaultAdcPath(env);
  return existsSync(candidate) ? candidate : undefined;
}
// Best-effort read of the project id from the ADC JSON file.
// Missing, unreadable, or invalid files simply yield undefined; project_id is
// preferred over quota_project_id when both are present.
function resolveAnthropicVertexProjectIdFromAdc(
  env: NodeJS.ProcessEnv = process.env,
): string | undefined {
  const adcPath = resolveAnthropicVertexAdcCredentialsPath(env);
  if (!adcPath) {
    return undefined;
  }
  try {
    const parsed = JSON.parse(readFileSync(adcPath, "utf8")) as AdcProjectFile;
    const projectId = normalizeOptionalSecretInput(parsed.project_id);
    return projectId || normalizeOptionalSecretInput(parsed.quota_project_id);
  } catch {
    // Unreadable or malformed ADC file — treat as "no project configured".
    return undefined;
  }
}
/**
 * True when GCP credentials are available: either an explicit metadata-server
 * opt-in, or an ADC credentials file present on disk.
 */
export function hasAnthropicVertexCredentials(env: NodeJS.ProcessEnv = process.env): boolean {
  if (hasAnthropicVertexMetadataServerAdc(env)) {
    return true;
  }
  return resolveAnthropicVertexAdcCredentialsPath(env) !== undefined;
}
// Alias kept for API clarity: "available auth" currently means exactly
// "has credentials" (metadata opt-in or on-disk ADC file).
export function hasAnthropicVertexAvailableAuth(env: NodeJS.ProcessEnv = process.env): boolean {
  return hasAnthropicVertexCredentials(env);
}

View File

@ -0,0 +1,221 @@
import type { Model } from "@mariozechner/pi-ai";
import { beforeEach, describe, expect, it, vi } from "vitest";
// Hoisted spies so the vi.mock factories below can reference them safely
// (vi.mock calls are hoisted above regular module initialization).
const hoisted = vi.hoisted(() => {
  // Records (model, context, options) passed into the Anthropic stream call.
  const streamAnthropicMock = vi.fn<(model: unknown, context: unknown, options: unknown) => symbol>(
    () => Symbol("anthropic-vertex-stream"),
  );
  // Records the constructor options used to build the Vertex client.
  const anthropicVertexCtorMock = vi.fn();
  return {
    streamAnthropicMock,
    anthropicVertexCtorMock,
  };
});
// Route pi-ai's streamAnthropic through the recording spy.
vi.mock("@mariozechner/pi-ai", () => {
  return {
    streamAnthropic: (model: unknown, context: unknown, options: unknown) =>
      hoisted.streamAnthropicMock(model, context, options),
  };
});
// Replace the real Vertex SDK client with a constructor-recording stub.
vi.mock("@anthropic-ai/vertex-sdk", () => ({
  AnthropicVertex: vi.fn(function MockAnthropicVertex(options: unknown) {
    hoisted.anthropicVertexCtorMock(options);
    return { options };
  }),
}));
import {
resolveAnthropicVertexRegion,
resolveAnthropicVertexRegionFromBaseUrl,
} from "./anthropic-vertex-provider.js";
import {
createAnthropicVertexStreamFn,
createAnthropicVertexStreamFnForModel,
} from "./anthropic-vertex-stream.js";
// Minimal anthropic-vertex model stub for tests; maxTokens is only present
// when the caller provides one, mirroring real catalog entries.
function makeModel(params: { id: string; maxTokens?: number }): Model<"anthropic-messages"> {
  const stub: Record<string, unknown> = {
    id: params.id,
    api: "anthropic-messages",
    provider: "anthropic-vertex",
  };
  if (params.maxTokens !== undefined) {
    stub.maxTokens = params.maxTokens;
  }
  return stub as Model<"anthropic-messages">;
}
// Unit tests for the Vertex-backed StreamFn: client construction and the
// SimpleStreamOptions-to-AnthropicOptions mapping (maxTokens, effort).
describe("createAnthropicVertexStreamFn", () => {
  beforeEach(() => {
    // Reset recorded calls so each test asserts only its own interactions.
    hoisted.streamAnthropicMock.mockClear();
    hoisted.anthropicVertexCtorMock.mockClear();
  });
  it("omits projectId when ADC credentials are used without an explicit project", () => {
    const streamFn = createAnthropicVertexStreamFn(undefined, "global");
    void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 128000 }), { messages: [] }, {});
    // Exact match: the ctor options must not contain a projectId key at all.
    expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
      region: "global",
    });
  });
  it("passes an explicit baseURL through to the Vertex client", () => {
    const streamFn = createAnthropicVertexStreamFn(
      "vertex-project",
      "us-east5",
      "https://proxy.example.test/vertex/v1",
    );
    void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 128000 }), { messages: [] }, {});
    expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
      projectId: "vertex-project",
      region: "us-east5",
      baseURL: "https://proxy.example.test/vertex/v1",
    });
  });
  it("defaults maxTokens to the model limit instead of the old 32000 cap", () => {
    const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
    const model = makeModel({ id: "claude-opus-4-6", maxTokens: 128000 });
    void streamFn(model, { messages: [] }, {});
    expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
      model,
      { messages: [] },
      expect.objectContaining({
        maxTokens: 128000,
      }),
    );
  });
  it("clamps explicit maxTokens to the selected model limit", () => {
    const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
    const model = makeModel({ id: "claude-sonnet-4-6", maxTokens: 128000 });
    // Requested 999999 exceeds the model's 128000 limit and must be clamped.
    void streamFn(model, { messages: [] }, { maxTokens: 999999 });
    expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
      model,
      { messages: [] },
      expect.objectContaining({
        maxTokens: 128000,
      }),
    );
  });
  it("maps xhigh reasoning to max effort for adaptive Opus models", () => {
    const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
    const model = makeModel({ id: "claude-opus-4-6", maxTokens: 64000 });
    void streamFn(model, { messages: [] }, { reasoning: "xhigh" });
    expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
      model,
      { messages: [] },
      expect.objectContaining({
        thinkingEnabled: true,
        effort: "max",
      }),
    );
  });
  it("omits maxTokens when neither the model nor request provide a finite limit", () => {
    const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
    // Model stub has no maxTokens; NaN in the request must not leak through.
    const model = makeModel({ id: "claude-sonnet-4-6" });
    void streamFn(model, { messages: [] }, { maxTokens: Number.NaN });
    expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
      model,
      { messages: [] },
      expect.not.objectContaining({
        maxTokens: expect.anything(),
      }),
    );
  });
});
// Region resolution tests: env-derived regions and base-URL host parsing.
// NOTE(review): this suite also exercises resolveAnthropicVertexRegion even
// though the describe title names only the base-URL variant.
describe("resolveAnthropicVertexRegionFromBaseUrl", () => {
  it("accepts well-formed regional env values", () => {
    expect(
      resolveAnthropicVertexRegion({
        GOOGLE_CLOUD_LOCATION: "us-east1",
      } as NodeJS.ProcessEnv),
    ).toBe("us-east1");
  });
  it("falls back to the default region for malformed env values", () => {
    // A hostname-shaped value with dots fails the region regex.
    expect(
      resolveAnthropicVertexRegion({
        GOOGLE_CLOUD_LOCATION: "us-central1.attacker.example",
      } as NodeJS.ProcessEnv),
    ).toBe("global");
  });
  it("parses regional Vertex endpoints", () => {
    expect(
      resolveAnthropicVertexRegionFromBaseUrl("https://europe-west4-aiplatform.googleapis.com"),
    ).toBe("europe-west4");
  });
  it("treats the global Vertex endpoint as global", () => {
    expect(resolveAnthropicVertexRegionFromBaseUrl("https://aiplatform.googleapis.com")).toBe(
      "global",
    );
  });
});
// Model-level factory tests: project/region/baseURL derivation from the model
// definition and environment before the Vertex client is constructed.
describe("createAnthropicVertexStreamFnForModel", () => {
  beforeEach(() => {
    hoisted.anthropicVertexCtorMock.mockClear();
  });
  it("derives project and region from the model and env", () => {
    const streamFn = createAnthropicVertexStreamFnForModel(
      { baseUrl: "https://europe-west4-aiplatform.googleapis.com" },
      { GOOGLE_CLOUD_PROJECT_ID: "vertex-project" } as NodeJS.ProcessEnv,
    );
    void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 64000 }), { messages: [] }, {});
    // The googleapis host yields the region; "/v1" is appended for the SDK.
    expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
      projectId: "vertex-project",
      region: "europe-west4",
      baseURL: "https://europe-west4-aiplatform.googleapis.com/v1",
    });
  });
  it("preserves explicit custom provider base URLs", () => {
    const streamFn = createAnthropicVertexStreamFnForModel(
      { baseUrl: "https://proxy.example.test/custom-root/v1" },
      { GOOGLE_CLOUD_PROJECT_ID: "vertex-project" } as NodeJS.ProcessEnv,
    );
    void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 64000 }), { messages: [] }, {});
    // Non-googleapis hosts cannot provide a region, so "global" is used.
    expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
      projectId: "vertex-project",
      region: "global",
      baseURL: "https://proxy.example.test/custom-root/v1",
    });
  });
  it("adds /v1 for path-prefixed custom provider base URLs", () => {
    const streamFn = createAnthropicVertexStreamFnForModel(
      { baseUrl: "https://proxy.example.test/custom-root" },
      { GOOGLE_CLOUD_PROJECT_ID: "vertex-project" } as NodeJS.ProcessEnv,
    );
    void streamFn(makeModel({ id: "claude-sonnet-4-6", maxTokens: 64000 }), { messages: [] }, {});
    expect(hoisted.anthropicVertexCtorMock).toHaveBeenCalledWith({
      projectId: "vertex-project",
      region: "global",
      baseURL: "https://proxy.example.test/custom-root/v1",
    });
  });
});

View File

@ -0,0 +1,137 @@
import { AnthropicVertex } from "@anthropic-ai/vertex-sdk";
import type { StreamFn } from "@mariozechner/pi-agent-core";
import { streamAnthropic, type AnthropicOptions, type Model } from "@mariozechner/pi-ai";
import {
resolveAnthropicVertexClientRegion,
resolveAnthropicVertexProjectId,
} from "./anthropic-vertex-provider.js";
// Effort level accepted by pi-ai's Anthropic options (non-nullable form).
type AnthropicVertexEffort = NonNullable<AnthropicOptions["effort"]>;
// Compute the effective maxTokens: both candidates are normalized to positive
// integers (else discarded), and an explicit request is clamped to the
// model's own limit. Returns undefined when neither candidate is usable.
function resolveAnthropicVertexMaxTokens(params: {
  modelMaxTokens: number | undefined;
  requestedMaxTokens: number | undefined;
}): number | undefined {
  const asPositiveInt = (value: number | undefined): number | undefined =>
    typeof value === "number" && Number.isFinite(value) && value > 0
      ? Math.floor(value)
      : undefined;
  const modelLimit = asPositiveInt(params.modelMaxTokens);
  const requested = asPositiveInt(params.requestedMaxTokens);
  if (modelLimit === undefined) {
    return requested;
  }
  if (requested === undefined) {
    return modelLimit;
  }
  // Never exceed the model's advertised output limit.
  return Math.min(requested, modelLimit);
}
/**
 * Create a StreamFn that routes through pi-ai's `streamAnthropic` with an
 * injected `AnthropicVertex` client. All streaming, message conversion, and
 * event handling is handled by pi-ai — we only supply the GCP-authenticated
 * client and map SimpleStreamOptions → AnthropicOptions.
 */
export function createAnthropicVertexStreamFn(
  projectId: string | undefined,
  region: string,
  baseURL?: string,
): StreamFn {
  // One shared client per stream fn; optional ctor fields are only set when
  // provided so the SDK's own defaults/ADC discovery apply otherwise.
  const vertexClient = new AnthropicVertex({
    region,
    ...(baseURL ? { baseURL } : {}),
    ...(projectId ? { projectId } : {}),
  });
  return (model, context, options) => {
    const isOpusAdaptive = model.id.includes("opus-4-6") || model.id.includes("opus-4.6");
    // "Adaptive" models accept an effort level instead of a thinking budget.
    const isAdaptive =
      isOpusAdaptive || model.id.includes("sonnet-4-6") || model.id.includes("sonnet-4.6");
    const maxTokens = resolveAnthropicVertexMaxTokens({
      modelMaxTokens: model.maxTokens,
      requestedMaxTokens: options?.maxTokens,
    });
    const streamOptions: AnthropicOptions = {
      client: vertexClient as unknown as AnthropicOptions["client"],
      temperature: options?.temperature,
      // Omit maxTokens entirely when no finite limit could be resolved.
      ...(maxTokens !== undefined ? { maxTokens } : {}),
      signal: options?.signal,
      cacheRetention: options?.cacheRetention,
      sessionId: options?.sessionId,
      headers: options?.headers,
      onPayload: options?.onPayload,
      maxRetryDelayMs: options?.maxRetryDelayMs,
      metadata: options?.metadata,
    };
    if (!options?.reasoning) {
      streamOptions.thinkingEnabled = false;
    } else if (isAdaptive) {
      streamOptions.thinkingEnabled = true;
      let effort: AnthropicVertexEffort;
      switch (options.reasoning) {
        case "minimal":
        case "low":
          effort = "low";
          break;
        case "medium":
          effort = "medium";
          break;
        case "xhigh":
          // Only adaptive Opus supports "max"; adaptive Sonnet tops out at "high".
          effort = isOpusAdaptive ? "max" : "high";
          break;
        default:
          effort = "high";
          break;
      }
      streamOptions.effort = effort;
    } else {
      // Non-adaptive models take an explicit thinking-token budget;
      // fall back to 10000 tokens when no budget is configured.
      streamOptions.thinkingEnabled = true;
      const budgets = options.thinkingBudgets;
      const configuredBudget =
        budgets && options.reasoning in budgets
          ? budgets[options.reasoning as keyof typeof budgets]
          : undefined;
      streamOptions.thinkingBudgetTokens = configuredBudget ?? 10000;
    }
    return streamAnthropic(model as Model<"anthropic-messages">, context, streamOptions);
  };
}
// Normalize a provider base URL for the Vertex SDK so it targets a "/v1" path.
// Blank input yields undefined; unparseable input is passed through unchanged;
// URLs already ending in "/v1" keep the caller's exact string.
function resolveAnthropicVertexSdkBaseUrl(baseUrl?: string): string | undefined {
  const candidate = baseUrl?.trim();
  if (!candidate) {
    return undefined;
  }
  let url: URL;
  try {
    url = new URL(candidate);
  } catch {
    // Not a valid absolute URL — hand it to the SDK as-is.
    return candidate;
  }
  const basePath = url.pathname.replace(/\/+$/, "");
  if (basePath.endsWith("/v1")) {
    // Already versioned (basePath is non-empty here) — preserve verbatim.
    return candidate;
  }
  url.pathname = basePath ? `${basePath}/v1` : "/v1";
  return url.toString().replace(/\/$/, "");
}
/**
 * Build a Vertex-backed StreamFn for a model definition: resolves the GCP
 * project from the environment, the region from the model's base URL (falling
 * back to the env), and normalizes the base URL for the SDK.
 */
export function createAnthropicVertexStreamFnForModel(
  model: { baseUrl?: string },
  env: NodeJS.ProcessEnv = process.env,
): StreamFn {
  const projectId = resolveAnthropicVertexProjectId(env);
  const region = resolveAnthropicVertexClientRegion({ baseUrl: model.baseUrl, env });
  const sdkBaseUrl = resolveAnthropicVertexSdkBaseUrl(model.baseUrl);
  return createAnthropicVertexStreamFn(projectId, region, sdkBaseUrl);
}

View File

@ -130,6 +130,22 @@ describe("exec PATH login shell merge", () => {
expect(shellPathMock).not.toHaveBeenCalled();
});
it("fails closed when a blocked runtime override key is requested", async () => {
if (isWin) {
return;
}
const tool = createExecTool({ host: "gateway", security: "full", ask: "off" });
await expect(
tool.execute("call-blocked-runtime-env", {
command: "echo ok",
env: { CLASSPATH: "/tmp/evil-classpath" },
}),
).rejects.toThrow(
/Security Violation: Environment variable 'CLASSPATH' is forbidden during host execution\./,
);
});
it("does not apply login-shell PATH when probe rejects unregistered absolute SHELL", async () => {
if (isWin) {
return;

View File

@ -3,6 +3,7 @@ import path from "node:path";
import type { AgentTool, AgentToolResult } from "@mariozechner/pi-agent-core";
import { type ExecHost, loadExecApprovals, maxAsk, minSecurity } from "../infra/exec-approvals.js";
import { resolveExecSafeBinRuntimePolicy } from "../infra/exec-safe-bin-runtime-policy.js";
import { sanitizeHostExecEnvWithDiagnostics } from "../infra/host-env-security.js";
import {
getShellPathFromLoginShell,
resolveShellEnvFallbackTimeoutMs,
@ -25,9 +26,7 @@ import {
renderExecHostLabel,
resolveApprovalRunningNoticeMs,
runExecProcess,
sanitizeHostBaseEnv,
execSchema,
validateHostEnv,
} from "./bash-tools.exec-runtime.js";
import type {
ExecElevatedDefaults,
@ -362,24 +361,58 @@ export function createExecTool(
}
const inheritedBaseEnv = coerceEnv(process.env);
const baseEnv = host === "sandbox" ? inheritedBaseEnv : sanitizeHostBaseEnv(inheritedBaseEnv);
// Logic: Sandbox gets raw env. Host (gateway/node) must pass validation.
// We validate BEFORE merging to prevent any dangerous vars from entering the stream.
if (host !== "sandbox" && params.env) {
validateHostEnv(params.env);
const hostEnvResult =
host === "sandbox"
? null
: sanitizeHostExecEnvWithDiagnostics({
baseEnv: inheritedBaseEnv,
overrides: params.env,
blockPathOverrides: true,
});
if (
hostEnvResult &&
params.env &&
(hostEnvResult.rejectedOverrideBlockedKeys.length > 0 ||
hostEnvResult.rejectedOverrideInvalidKeys.length > 0)
) {
const blockedKeys = hostEnvResult.rejectedOverrideBlockedKeys;
const invalidKeys = hostEnvResult.rejectedOverrideInvalidKeys;
const pathBlocked = blockedKeys.includes("PATH");
if (pathBlocked && blockedKeys.length === 1 && invalidKeys.length === 0) {
throw new Error(
"Security Violation: Custom 'PATH' variable is forbidden during host execution.",
);
}
if (blockedKeys.length === 1 && invalidKeys.length === 0) {
throw new Error(
`Security Violation: Environment variable '${blockedKeys[0]}' is forbidden during host execution.`,
);
}
const details: string[] = [];
if (blockedKeys.length > 0) {
details.push(`blocked override keys: ${blockedKeys.join(", ")}`);
}
if (invalidKeys.length > 0) {
details.push(`invalid non-portable override keys: ${invalidKeys.join(", ")}`);
}
const suffix = details.join("; ");
if (pathBlocked) {
throw new Error(
`Security Violation: Custom 'PATH' variable is forbidden during host execution (${suffix}).`,
);
}
throw new Error(`Security Violation: ${suffix}.`);
}
const mergedEnv = params.env ? { ...baseEnv, ...params.env } : baseEnv;
const env = sandbox
? buildSandboxEnv({
defaultPath: DEFAULT_PATH,
paramsEnv: params.env,
sandboxEnv: sandbox.env,
containerWorkdir: containerWorkdir ?? sandbox.containerWorkdir,
})
: mergedEnv;
const env =
sandbox && host === "sandbox"
? buildSandboxEnv({
defaultPath: DEFAULT_PATH,
paramsEnv: params.env,
sandboxEnv: sandbox.env,
containerWorkdir: containerWorkdir ?? sandbox.containerWorkdir,
})
: (hostEnvResult?.env ?? inheritedBaseEnv);
if (!sandbox && host === "gateway" && !params.env?.PATH) {
const shellPath = getShellPathFromLoginShell({

View File

@ -1,6 +1,7 @@
import { describe, expect, it } from "vitest";
import { listKnownProviderEnvApiKeyNames } from "./model-auth-env-vars.js";
import {
GCP_VERTEX_CREDENTIALS_MARKER,
isKnownEnvApiKeyMarker,
isNonSecretApiKeyMarker,
NON_ENV_SECRETREF_MARKER,
@ -13,6 +14,7 @@ describe("model auth markers", () => {
expect(isNonSecretApiKeyMarker("qwen-oauth")).toBe(true);
expect(isNonSecretApiKeyMarker(resolveOAuthApiKeyMarker("chutes"))).toBe(true);
expect(isNonSecretApiKeyMarker("ollama-local")).toBe(true);
expect(isNonSecretApiKeyMarker(GCP_VERTEX_CREDENTIALS_MARKER)).toBe(true);
});
it("recognizes known env marker names but not arbitrary all-caps keys", () => {

View File

@ -6,6 +6,7 @@ export const OAUTH_API_KEY_MARKER_PREFIX = "oauth:";
export const QWEN_OAUTH_MARKER = "qwen-oauth";
export const OLLAMA_LOCAL_AUTH_MARKER = "ollama-local";
export const CUSTOM_LOCAL_AUTH_MARKER = "custom-local";
export const GCP_VERTEX_CREDENTIALS_MARKER = "gcp-vertex-credentials";
export const NON_ENV_SECRETREF_MARKER = "secretref-managed"; // pragma: allowlist secret
export const SECRETREF_ENV_HEADER_MARKER_PREFIX = "secretref-env:"; // pragma: allowlist secret
@ -83,6 +84,7 @@ export function isNonSecretApiKeyMarker(
isOAuthApiKeyMarker(trimmed) ||
trimmed === OLLAMA_LOCAL_AUTH_MARKER ||
trimmed === CUSTOM_LOCAL_AUTH_MARKER ||
trimmed === GCP_VERTEX_CREDENTIALS_MARKER ||
trimmed === NON_ENV_SECRETREF_MARKER ||
isAwsSdkAuthMarker(trimmed);
if (isKnownMarker) {

View File

@ -506,4 +506,55 @@ describe("getApiKeyForModel", () => {
},
);
});
it("resolveEnvApiKey('anthropic-vertex') uses the provided env snapshot", async () => {
const resolved = resolveEnvApiKey("anthropic-vertex", {
GOOGLE_CLOUD_PROJECT_ID: "vertex-project",
} as NodeJS.ProcessEnv);
expect(resolved).toBeNull();
});
it("resolveEnvApiKey('anthropic-vertex') accepts GOOGLE_APPLICATION_CREDENTIALS with project_id", async () => {
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-adc-"));
const credentialsPath = path.join(tempDir, "adc.json");
await fs.writeFile(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
try {
const resolved = resolveEnvApiKey("anthropic-vertex", {
GOOGLE_APPLICATION_CREDENTIALS: credentialsPath,
} as NodeJS.ProcessEnv);
expect(resolved?.apiKey).toBe("gcp-vertex-credentials");
expect(resolved?.source).toBe("gcloud adc");
} finally {
await fs.rm(tempDir, { recursive: true, force: true });
}
});
it("resolveEnvApiKey('anthropic-vertex') accepts GOOGLE_APPLICATION_CREDENTIALS without a local project field", async () => {
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-adc-"));
const credentialsPath = path.join(tempDir, "adc.json");
await fs.writeFile(credentialsPath, "{}", "utf8");
try {
const resolved = resolveEnvApiKey("anthropic-vertex", {
GOOGLE_APPLICATION_CREDENTIALS: credentialsPath,
} as NodeJS.ProcessEnv);
expect(resolved?.apiKey).toBe("gcp-vertex-credentials");
expect(resolved?.source).toBe("gcloud adc");
} finally {
await fs.rm(tempDir, { recursive: true, force: true });
}
});
it("resolveEnvApiKey('anthropic-vertex') accepts explicit metadata auth opt-in", async () => {
const resolved = resolveEnvApiKey("anthropic-vertex", {
ANTHROPIC_VERTEX_USE_GCP_METADATA: "true",
} as NodeJS.ProcessEnv);
expect(resolved?.apiKey).toBe("gcp-vertex-credentials");
expect(resolved?.source).toBe("gcloud adc");
});
});

View File

@ -2,7 +2,11 @@ import { streamSimpleOpenAICompletions, type Model } from "@mariozechner/pi-ai";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withFetchPreconnect } from "../test-utils/fetch-mock.js";
import type { AuthProfileStore } from "./auth-profiles.js";
import { CUSTOM_LOCAL_AUTH_MARKER, NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js";
import {
CUSTOM_LOCAL_AUTH_MARKER,
GCP_VERTEX_CREDENTIALS_MARKER,
NON_ENV_SECRETREF_MARKER,
} from "./model-auth-markers.js";
import {
applyLocalNoAuthHeaderOverride,
hasUsableCustomProviderApiKey,
@ -169,6 +173,24 @@ describe("resolveUsableCustomProviderApiKey", () => {
expect(resolved).toBeNull();
});
it("does not treat the Vertex ADC marker as a usable models.json credential", () => {
const resolved = resolveUsableCustomProviderApiKey({
cfg: {
models: {
providers: {
"anthropic-vertex": {
baseUrl: "https://us-central1-aiplatform.googleapis.com",
apiKey: GCP_VERTEX_CREDENTIALS_MARKER,
models: [],
},
},
},
},
provider: "anthropic-vertex",
});
expect(resolved).toBeNull();
});
it("resolves known env marker names from process env for custom providers", () => {
const previous = process.env.OPENAI_API_KEY;
process.env.OPENAI_API_KEY = "sk-from-env"; // pragma: allowlist secret

View File

@ -10,6 +10,7 @@ import {
normalizeOptionalSecretInput,
normalizeSecretInput,
} from "../utils/normalize-secret-input.js";
import { hasAnthropicVertexAvailableAuth } from "./anthropic-vertex-provider.js";
import {
type AuthProfileStore,
ensureAuthProfileStore,
@ -21,6 +22,7 @@ import {
import { PROVIDER_ENV_API_KEY_CANDIDATES } from "./model-auth-env-vars.js";
import {
CUSTOM_LOCAL_AUTH_MARKER,
GCP_VERTEX_CREDENTIALS_MARKER,
isKnownEnvApiKeyMarker,
isNonSecretApiKeyMarker,
OLLAMA_LOCAL_AUTH_MARKER,
@ -428,6 +430,16 @@ export function resolveEnvApiKey(
}
return { apiKey: envKey, source: "gcloud adc" };
}
if (normalized === "anthropic-vertex") {
// Vertex AI uses GCP credentials (SA JSON or ADC), not API keys.
// Return a sentinel so the model resolver considers this provider available.
if (hasAnthropicVertexAvailableAuth(env)) {
return { apiKey: GCP_VERTEX_CREDENTIALS_MARKER, source: "gcloud adc" };
}
return null;
}
return null;
}

View File

@ -112,9 +112,15 @@ export const MODELS_CONFIG_IMPLICIT_ENV_VARS = [
"KIMI_API_KEY",
"KIMICODE_API_KEY",
"GEMINI_API_KEY",
"GOOGLE_APPLICATION_CREDENTIALS",
"GOOGLE_CLOUD_LOCATION",
"GOOGLE_CLOUD_PROJECT",
"GOOGLE_CLOUD_PROJECT_ID",
"VENICE_API_KEY",
"VLLM_API_KEY",
"XIAOMI_API_KEY",
"ANTHROPIC_VERTEX_PROJECT_ID",
"CLOUD_ML_REGION",
// Avoid ambient AWS creds unintentionally enabling Bedrock discovery.
"AWS_ACCESS_KEY_ID",
"AWS_CONFIG_FILE",

View File

@ -1,4 +1,5 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
@ -333,6 +334,53 @@ describe("models-config", () => {
});
});
});
it("fills anthropic-vertex apiKey with the ADC sentinel when models exist", async () => {
await withTempHome(async () => {
const adcDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-adc-"));
const credentialsPath = path.join(adcDir, "application_default_credentials.json");
await fs.writeFile(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
const previousCredentials = process.env.GOOGLE_APPLICATION_CREDENTIALS;
try {
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
await ensureOpenClawModelsJson({
models: {
providers: {
"anthropic-vertex": {
baseUrl: "https://us-central1-aiplatform.googleapis.com",
api: "anthropic-messages",
models: [
{
id: "claude-sonnet-4-6",
name: "Claude Sonnet 4.6",
reasoning: true,
input: ["text", "image"],
cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
contextWindow: 200000,
maxTokens: 64000,
},
],
},
},
},
});
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(parsed.providers["anthropic-vertex"]?.apiKey).toBe("gcp-vertex-credentials");
} finally {
if (previousCredentials === undefined) {
delete process.env.GOOGLE_APPLICATION_CREDENTIALS;
} else {
process.env.GOOGLE_APPLICATION_CREDENTIALS = previousCredentials;
}
await fs.rm(adcDir, { recursive: true, force: true });
}
});
});
it("merges providers by default", async () => {
await withTempHome(async () => {
await writeAgentModelsJson({

View File

@ -0,0 +1,190 @@
import { mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { describe, expect, it } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import { captureEnv } from "../test-utils/env.js";
import { resolveImplicitProvidersForTest } from "./models-config.e2e-harness.js";
describe("anthropic-vertex implicit provider", () => {
it("offers Claude models when GOOGLE_CLOUD_PROJECT_ID is set", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["GOOGLE_CLOUD_PROJECT_ID"]);
process.env.GOOGLE_CLOUD_PROJECT_ID = "vertex-project";
try {
const providers = await resolveImplicitProvidersForTest({ agentDir });
expect(providers?.["anthropic-vertex"]).toBeUndefined();
} finally {
envSnapshot.restore();
}
});
it("accepts ADC credentials when the file includes a project_id", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
const credentialsPath = join(adcDir, "application_default_credentials.json");
writeFileSync(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
process.env.GOOGLE_CLOUD_LOCATION = "us-east1";
try {
const providers = await resolveImplicitProvidersForTest({ agentDir });
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
"https://us-east1-aiplatform.googleapis.com",
);
expect(providers?.["anthropic-vertex"]?.models).toMatchObject([
{ id: "claude-opus-4-6", maxTokens: 128000, contextWindow: 1_000_000 },
{ id: "claude-sonnet-4-6", maxTokens: 128000, contextWindow: 1_000_000 },
]);
} finally {
rmSync(adcDir, { recursive: true, force: true });
envSnapshot.restore();
}
});
it("accepts ADC credentials when the file only includes a quota_project_id", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
const credentialsPath = join(adcDir, "application_default_credentials.json");
writeFileSync(credentialsPath, JSON.stringify({ quota_project_id: "vertex-quota" }), "utf8");
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
process.env.GOOGLE_CLOUD_LOCATION = "us-east5";
try {
const providers = await resolveImplicitProvidersForTest({ agentDir });
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
"https://us-east5-aiplatform.googleapis.com",
);
} finally {
rmSync(adcDir, { recursive: true, force: true });
envSnapshot.restore();
}
});
it("accepts ADC credentials when project_id is resolved at runtime", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
const credentialsPath = join(adcDir, "application_default_credentials.json");
writeFileSync(credentialsPath, "{}", "utf8");
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
process.env.GOOGLE_CLOUD_LOCATION = "europe-west4";
try {
const providers = await resolveImplicitProvidersForTest({ agentDir });
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
"https://europe-west4-aiplatform.googleapis.com",
);
} finally {
rmSync(adcDir, { recursive: true, force: true });
envSnapshot.restore();
}
});
it("falls back to the default region when GOOGLE_CLOUD_LOCATION is invalid", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
const credentialsPath = join(adcDir, "application_default_credentials.json");
writeFileSync(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
process.env.GOOGLE_CLOUD_LOCATION = "us-central1.attacker.example";
try {
const providers = await resolveImplicitProvidersForTest({ agentDir });
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe("https://aiplatform.googleapis.com");
} finally {
rmSync(adcDir, { recursive: true, force: true });
envSnapshot.restore();
}
});
// "global" is a special Vertex location that maps to the regionless endpoint,
// not to a "global-" prefixed regional hostname.
it("uses the Vertex global endpoint when GOOGLE_CLOUD_LOCATION=global", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
const credentialsPath = join(adcDir, "application_default_credentials.json");
writeFileSync(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
process.env.GOOGLE_CLOUD_LOCATION = "global";
try {
const providers = await resolveImplicitProvidersForTest({ agentDir });
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe("https://aiplatform.googleapis.com");
} finally {
rmSync(adcDir, { recursive: true, force: true });
envSnapshot.restore();
}
});
// On GCE/GKE there may be no local ADC credentials file; setting
// ANTHROPIC_VERTEX_USE_GCP_METADATA=true is an explicit opt-in that should
// enable the provider via metadata-server auth alone.
it("accepts explicit metadata auth opt-in without local credential files", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["ANTHROPIC_VERTEX_USE_GCP_METADATA", "GOOGLE_CLOUD_LOCATION"]);
process.env.ANTHROPIC_VERTEX_USE_GCP_METADATA = "true";
process.env.GOOGLE_CLOUD_LOCATION = "us-east5";
try {
const providers = await resolveImplicitProvidersForTest({ agentDir });
// The configured region is still honored when auth comes from metadata.
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
"https://us-east5-aiplatform.googleapis.com",
);
} finally {
envSnapshot.restore();
}
});
// Explicit provider overrides must win for connection settings (baseUrl,
// headers) while the bundled model catalog backfills the model list whenever
// the override does not supply one.
it("merges the bundled catalog into explicit anthropic-vertex provider overrides", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["GOOGLE_APPLICATION_CREDENTIALS", "GOOGLE_CLOUD_LOCATION"]);
const adcDir = mkdtempSync(join(tmpdir(), "openclaw-adc-"));
const credentialsPath = join(adcDir, "application_default_credentials.json");
writeFileSync(credentialsPath, JSON.stringify({ project_id: "vertex-project" }), "utf8");
process.env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
process.env.GOOGLE_CLOUD_LOCATION = "us-east5";
try {
const providers = await resolveImplicitProvidersForTest({
agentDir,
config: {
models: {
providers: {
"anthropic-vertex": {
baseUrl: "https://europe-west4-aiplatform.googleapis.com",
headers: { "x-test-header": "1" },
},
},
},
} as unknown as OpenClawConfig,
});
// The explicit baseUrl overrides the env-derived us-east5 endpoint.
expect(providers?.["anthropic-vertex"]?.baseUrl).toBe(
"https://europe-west4-aiplatform.googleapis.com",
);
expect(providers?.["anthropic-vertex"]?.headers).toEqual({ "x-test-header": "1" });
// The override omitted `models`, so the bundled catalog supplies them.
expect(providers?.["anthropic-vertex"]?.models?.map((model) => model.id)).toEqual([
"claude-opus-4-6",
"claude-sonnet-4-6",
]);
} finally {
rmSync(adcDir, { recursive: true, force: true });
envSnapshot.restore();
}
});
// KUBERNETES_SERVICE_HOST alone does not imply GCP: without an ADC signal the
// implicit anthropic-vertex provider must stay disabled, avoiding false
// positives on non-GCP Kubernetes clusters.
it("does not accept generic Kubernetes env without a GCP ADC signal", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const envSnapshot = captureEnv(["KUBERNETES_SERVICE_HOST", "GOOGLE_CLOUD_LOCATION"]);
process.env.KUBERNETES_SERVICE_HOST = "10.0.0.1";
process.env.GOOGLE_CLOUD_LOCATION = "us-east5";
try {
const providers = await resolveImplicitProvidersForTest({ agentDir });
expect(providers?.["anthropic-vertex"]).toBeUndefined();
} finally {
envSnapshot.restore();
}
});
});

View File

@ -1,3 +1,7 @@
export {
ANTHROPIC_VERTEX_DEFAULT_MODEL_ID,
buildAnthropicVertexProvider,
} from "../../extensions/anthropic-vertex/provider-catalog.js";
export {
buildBytePlusCodingProvider,
buildBytePlusProvider,

View File

@ -1,3 +1,4 @@
import { buildAnthropicVertexProvider } from "../../extensions/anthropic-vertex/provider-catalog.js";
import {
QIANFAN_BASE_URL,
QIANFAN_DEFAULT_MODEL_ID,
@ -7,6 +8,7 @@ import type { OpenClawConfig } from "../config/config.js";
import { coerceSecretRef, resolveSecretInputRef } from "../config/types.secrets.js";
import { isRecord } from "../utils.js";
import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js";
import { hasAnthropicVertexAvailableAuth } from "./anthropic-vertex-provider.js";
import { ensureAuthProfileStore, listProfilesForProvider } from "./auth-profiles.js";
import { discoverBedrockModels } from "./bedrock-discovery.js";
import { normalizeGoogleModelId, normalizeXaiModelId } from "./model-id-normalization.js";
@ -552,7 +554,10 @@ export function normalizeProviders(params: {
mutated = true;
normalizedProvider = { ...normalizedProvider, apiKey };
} else {
const fromEnv = resolveEnvApiKeyVarName(normalizedKey, env);
const fromEnv =
normalizedKey === "anthropic-vertex"
? resolveEnvApiKey(normalizedKey, env)?.apiKey
: resolveEnvApiKeyVarName(normalizedKey, env);
const apiKey = fromEnv ?? profileApiKey?.apiKey;
if (apiKey?.trim()) {
if (profileApiKey && profileApiKey.source !== "plaintext") {
@ -812,9 +817,34 @@ export async function resolveImplicitProviders(
: implicitBedrock;
}
const implicitAnthropicVertex = resolveImplicitAnthropicVertexProvider({ env });
if (implicitAnthropicVertex) {
const existing = providers["anthropic-vertex"];
providers["anthropic-vertex"] = existing
? {
...implicitAnthropicVertex,
...existing,
models:
Array.isArray(existing.models) && existing.models.length > 0
? existing.models
: implicitAnthropicVertex.models,
}
: implicitAnthropicVertex;
}
return providers;
}
/**
 * Builds the implicit `anthropic-vertex` provider entry when the host
 * environment advertises usable GCP auth (ADC file, metadata opt-in, etc.);
 * returns null otherwise so the provider is simply omitted from the implicit
 * catalog.
 */
export function resolveImplicitAnthropicVertexProvider(params: {
env?: NodeJS.ProcessEnv;
}): ProviderConfig | null {
  const resolvedEnv = params.env ?? process.env;
  // Only surface the provider when Vertex auth is actually available.
  return hasAnthropicVertexAvailableAuth(resolvedEnv)
    ? buildAnthropicVertexProvider({ env: resolvedEnv })
    : null;
}
export async function resolveImplicitBedrockProvider(params: {
agentDir: string;
config?: OpenClawConfig;

View File

@ -16,6 +16,7 @@ import {
decodeHtmlEntitiesInObject,
wrapOllamaCompatNumCtx,
wrapStreamFnRepairMalformedToolCallArguments,
wrapStreamFnSanitizeMalformedToolCalls,
wrapStreamFnTrimToolCallNames,
} from "./attempt.js";
@ -779,6 +780,552 @@ describe("wrapStreamFnTrimToolCallNames", () => {
});
});
describe("wrapStreamFnSanitizeMalformedToolCalls", () => {
// A tool call replayed without an id can never be paired with a tool result,
// so the sanitizer must drop the whole malformed assistant turn before the
// context reaches the provider.
it("drops malformed assistant tool calls from outbound context before provider replay", async () => {
const messages = [
{
role: "assistant",
stopReason: "error",
content: [{ type: "toolCall", name: "read", arguments: {} }],
},
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as { messages: unknown[] };
// Only the user turn survives; the id-less tool call took its turn with it.
expect(seenContext.messages).toEqual([
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
]);
// The caller's array must not be mutated; a fresh copy is forwarded.
expect(seenContext.messages).not.toBe(messages);
});
// When every replayed tool call is well-formed the wrapper must pass the
// original messages array through untouched (same reference, no copy).
it("preserves outbound context when all assistant tool calls are valid", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolCall", id: "call_1", name: "read", arguments: {} }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as { messages: unknown[] };
// Identity check: no defensive copy is made when nothing needed sanitizing.
expect(seenContext.messages).toBe(messages);
});
it("preserves sessions_spawn attachment payloads on replay", async () => {
const attachmentContent = "INLINE_ATTACHMENT_PAYLOAD";
const messages = [
{
role: "assistant",
content: [
{
type: "toolUse",
id: "call_1",
name: " SESSIONS_SPAWN ",
input: {
task: "inspect attachment",
attachments: [{ name: "snapshot.txt", content: attachmentContent }],
},
},
],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(
baseFn as never,
new Set(["sessions_spawn"]),
);
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ content?: Array<Record<string, unknown>> }>;
};
const toolCall = seenContext.messages[0]?.content?.[0] as {
name?: string;
input?: { attachments?: Array<{ content?: string }> };
};
expect(toolCall.name).toBe("sessions_spawn");
expect(toolCall.input?.attachments?.[0]?.content).toBe(attachmentContent);
});
it("preserves allowlisted tool names that contain punctuation", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolUse", id: "call_1", name: "admin.export", input: { scope: "all" } }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(
baseFn as never,
new Set(["admin.export"]),
);
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as { messages: unknown[] };
expect(seenContext.messages).toBe(messages);
});
it("normalizes provider-prefixed replayed tool names before provider replay", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolUse", id: "call_1", name: "functions.read", input: { path: "." } }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ content?: Array<{ name?: string }> }>;
};
expect(seenContext.messages[0]?.content?.[0]?.name).toBe("read");
});
it("canonicalizes mixed-case allowlisted tool names on replay", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolCall", id: "call_1", name: "readfile", arguments: {} }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["ReadFile"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ content?: Array<{ name?: string }> }>;
};
expect(seenContext.messages[0]?.content?.[0]?.name).toBe("ReadFile");
});
// Some providers replay a blank tool name but keep a recoverable id (here
// "functionswrite4" embeds the allowlisted name "write"); the sanitizer
// should reconstruct the name from the id instead of dropping the call.
it("recovers blank replayed tool names from their ids", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolCall", id: "functionswrite4", name: " ", arguments: {} }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["write"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ content?: Array<{ name?: string }> }>;
};
expect(seenContext.messages[0]?.content?.[0]?.name).toBe("write");
});
it("recovers mangled replayed tool names before dropping the call", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolCall", id: "call_1", name: "functionsread3", arguments: {} }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ content?: Array<{ name?: string }> }>;
};
expect(seenContext.messages[0]?.content?.[0]?.name).toBe("read");
});
it("drops orphaned tool results after replay sanitization removes a tool-call turn", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolCall", name: "read", arguments: {} }],
stopReason: "error",
},
{
role: "toolResult",
toolCallId: "call_missing",
toolName: "read",
content: [{ type: "text", text: "stale result" }],
isError: false,
},
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ role?: string }>;
};
expect(seenContext.messages).toEqual([
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
]);
});
it("drops replayed tool calls that are no longer allowlisted", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolCall", id: "call_1", name: "write", arguments: {} }],
},
{
role: "toolResult",
toolCallId: "call_1",
toolName: "write",
content: [{ type: "text", text: "stale result" }],
isError: false,
},
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ role?: string }>;
};
expect(seenContext.messages).toEqual([
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
]);
});
it("drops replayed tool names that are no longer allowlisted", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolUse", id: "call_1", name: "unknown_tool", input: { path: "." } }],
},
{
role: "toolResult",
toolCallId: "call_1",
toolName: "unknown_tool",
content: [{ type: "text", text: "stale result" }],
isError: false,
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as { messages: unknown[] };
expect(seenContext.messages).toEqual([]);
});
it("drops ambiguous mangled replay names instead of guessing a tool", async () => {
const messages = [
{
role: "assistant",
content: [{ type: "toolCall", id: "call_1", name: "functions.exec2", arguments: {} }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(
baseFn as never,
new Set(["exec", "exec2"]),
);
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as { messages: unknown[] };
expect(seenContext.messages).toEqual([]);
});
it("preserves matching tool results for retained errored assistant turns", async () => {
const messages = [
{
role: "assistant",
stopReason: "error",
content: [
{ type: "toolCall", id: "call_1", name: "read", arguments: {} },
{ type: "toolCall", name: "read", arguments: {} },
],
},
{
role: "toolResult",
toolCallId: "call_1",
toolName: "read",
content: [{ type: "text", text: "kept result" }],
isError: false,
},
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]));
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as { messages: unknown[] };
expect(seenContext.messages).toEqual([
{
role: "assistant",
stopReason: "error",
content: [{ type: "toolCall", id: "call_1", name: "read", arguments: {} }],
},
{
role: "toolResult",
toolCallId: "call_1",
toolName: "read",
content: [{ type: "text", text: "kept result" }],
isError: false,
},
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
]);
});
it("revalidates turn ordering after dropping an assistant replay turn", async () => {
const messages = [
{
role: "user",
content: [{ type: "text", text: "first" }],
},
{
role: "assistant",
stopReason: "error",
content: [{ type: "toolCall", name: "read", arguments: {} }],
},
{
role: "user",
content: [{ type: "text", text: "second" }],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]), {
validateGeminiTurns: false,
validateAnthropicTurns: true,
});
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ role?: string; content?: unknown[] }>;
};
expect(seenContext.messages).toEqual([
{
role: "user",
content: [
{ type: "text", text: "first" },
{ type: "text", text: "second" },
],
},
]);
});
// After sanitization removes the id-less toolUse block from the previous
// assistant turn, the user turn's toolResult pointing at it is orphaned and
// must be dropped: Anthropic rejects tool_result blocks with no matching
// tool_use in the preceding assistant message.
it("drops orphaned Anthropic user tool_result blocks after replay sanitization", async () => {
const messages = [
{
role: "assistant",
content: [
{ type: "text", text: "partial response" },
{ type: "toolUse", name: "read", input: { path: "." } },
],
},
{
role: "user",
content: [
{ type: "toolResult", toolUseId: "call_1", content: [{ type: "text", text: "stale" }] },
{ type: "text", text: "retry" },
],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]), {
validateGeminiTurns: false,
validateAnthropicTurns: true,
});
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ role?: string; content?: unknown[] }>;
};
// The assistant keeps its text; the user keeps only the non-result block.
expect(seenContext.messages).toEqual([
{
role: "assistant",
content: [{ type: "text", text: "partial response" }],
},
{
role: "user",
content: [{ type: "text", text: "retry" }],
},
]);
});
it("drops orphaned Anthropic user tool_result blocks after dropping an assistant replay turn", async () => {
const messages = [
{
role: "user",
content: [{ type: "text", text: "first" }],
},
{
role: "assistant",
stopReason: "error",
content: [{ type: "toolUse", name: "read", input: { path: "." } }],
},
{
role: "user",
content: [
{ type: "toolResult", toolUseId: "call_1", content: [{ type: "text", text: "stale" }] },
{ type: "text", text: "second" },
],
},
];
const baseFn = vi.fn((_model, _context) =>
createFakeStream({ events: [], resultMessage: { role: "assistant", content: [] } }),
);
const wrapped = wrapStreamFnSanitizeMalformedToolCalls(baseFn as never, new Set(["read"]), {
validateGeminiTurns: false,
validateAnthropicTurns: true,
});
const stream = wrapped({} as never, { messages } as never, {} as never) as
| FakeWrappedStream
| Promise<FakeWrappedStream>;
await Promise.resolve(stream);
expect(baseFn).toHaveBeenCalledTimes(1);
const seenContext = baseFn.mock.calls[0]?.[1] as {
messages: Array<{ role?: string; content?: unknown[] }>;
};
expect(seenContext.messages).toEqual([
{
role: "user",
content: [
{ type: "text", text: "first" },
{ type: "text", text: "second" },
],
},
]);
});
});
describe("wrapStreamFnRepairMalformedToolCallArguments", () => {
async function invokeWrappedStream(baseFn: (...args: never[]) => unknown) {
return await invokeWrappedTestStream(

View File

@ -36,6 +36,7 @@ import { isReasoningTagProvider } from "../../../utils/provider-utils.js";
import { resolveOpenClawAgentDir } from "../../agent-paths.js";
import { resolveSessionAgentIds } from "../../agent-scope.js";
import { createAnthropicPayloadLogger } from "../../anthropic-payload-log.js";
import { createAnthropicVertexStreamFnForModel } from "../../anthropic-vertex-stream.js";
import {
analyzeBootstrapBudget,
buildBootstrapPromptWarning,
@ -97,6 +98,7 @@ import { buildSystemPromptReport } from "../../system-prompt-report.js";
import { sanitizeToolCallIdsForCloudCodeAssist } from "../../tool-call-id.js";
import { resolveEffectiveToolFsWorkspaceOnly } from "../../tool-fs-policy.js";
import { normalizeToolName } from "../../tool-policy.js";
import type { TranscriptPolicy } from "../../transcript-policy.js";
import { resolveTranscriptPolicy } from "../../transcript-policy.js";
import { DEFAULT_BOOTSTRAP_FILENAME } from "../../workspace.js";
import { isRunnerAbortError } from "../abort.js";
@ -649,6 +651,200 @@ function isToolCallBlockType(type: unknown): boolean {
return type === "toolCall" || type === "toolUse" || type === "functionCall";
}
// Upper bound for a replayed tool-call name; anything longer is treated as
// mangled (twice this value is the hard reject threshold before recovery).
const REPLAY_TOOL_CALL_NAME_MAX_CHARS = 64;
// Loose shape of a tool-call content block as it appears in replayed
// transcripts. Fields are `unknown` because session files may carry malformed
// or provider-specific data (`input` vs `arguments`, missing ids, etc.).
type ReplayToolCallBlock = {
type?: unknown;
id?: unknown;
name?: unknown;
input?: unknown;
arguments?: unknown;
};
// Result of sanitizing replayed tool calls: the (possibly copied) message list
// plus how many assistant messages were dropped entirely.
type ReplayToolCallSanitizeReport = {
messages: AgentMessage[];
droppedAssistantMessages: number;
};
// Minimal view of an Anthropic-style tool_result block inside a user message.
type AnthropicToolResultContentBlock = {
type?: unknown;
toolUseId?: unknown;
};
// Type guard: true when `block` is an object whose `type` marks it as a
// tool-call content block ("toolCall" / "toolUse" / "functionCall").
function isReplayToolCallBlock(block: unknown): block is ReplayToolCallBlock {
  return (
    typeof block === "object" &&
    block !== null &&
    isToolCallBlockType((block as { type?: unknown }).type)
  );
}
// A replayed tool call is only usable when it carries a non-nullish payload in
// either `input` (Anthropic-style) or `arguments` (OpenAI-style).
function replayToolCallHasInput(block: ReplayToolCallBlock): boolean {
  const nonNullish = (key: "input" | "arguments"): boolean =>
    key in block && block[key] !== undefined && block[key] !== null;
  return nonNullish("input") || nonNullish("arguments");
}
// Narrows `value` to string and requires at least one non-whitespace character.
function replayToolCallNonEmptyString(value: unknown): value is string {
  if (typeof value !== "string") {
    return false;
  }
  return value.trim() !== "";
}
/**
 * Resolves a replayed tool-call name to a dispatchable tool name, or null when
 * the call should be dropped.
 *
 * - Names longer than twice the cap are considered hopelessly mangled.
 * - Otherwise the name is normalized for dispatch; this may recover a blank or
 *   provider-prefixed name from `rawId` (see normalizeToolCallNameForDispatch).
 * - The normalized name must be non-empty, within the cap, and contain no
 *   whitespace.
 * - With a non-empty allowlist, the name must resolve to exactly one
 *   allowlisted tool; ambiguous matches yield null (drop, don't guess).
 */
function resolveReplayToolCallName(
rawName: string,
rawId: string,
allowedToolNames?: Set<string>,
): string | null {
if (rawName.length > REPLAY_TOOL_CALL_NAME_MAX_CHARS * 2) {
return null;
}
const normalized = normalizeToolCallNameForDispatch(rawName, allowedToolNames, rawId);
const trimmed = normalized.trim();
if (!trimmed || trimmed.length > REPLAY_TOOL_CALL_NAME_MAX_CHARS || /\s/.test(trimmed)) {
return null;
}
// No allowlist configured: accept any well-formed normalized name.
if (!allowedToolNames || allowedToolNames.size === 0) {
return trimmed;
}
return resolveExactAllowedToolName(trimmed, allowedToolNames);
}
/**
 * Walks replayed assistant messages and repairs or removes malformed tool-call
 * blocks before the transcript is sent back to a provider.
 *
 * Per tool-call block:
 * - dropped when it lacks an input/arguments payload or a non-empty string id;
 * - dropped when its name cannot be resolved to a valid (allowlisted) tool;
 * - rewritten in place (shallow copy) when the resolved name differs from the
 *   stored one (e.g. "functions.read" -> "read").
 *
 * Assistant messages whose content becomes empty are removed entirely and
 * counted in `droppedAssistantMessages` so the caller can re-validate turn
 * ordering. Returns the original array by reference when nothing changed
 * (copy-on-write).
 */
function sanitizeReplayToolCallInputs(
messages: AgentMessage[],
allowedToolNames?: Set<string>,
): ReplayToolCallSanitizeReport {
let changed = false;
let droppedAssistantMessages = 0;
const out: AgentMessage[] = [];
for (const message of messages) {
// Only assistant messages with array content can carry tool-call blocks.
if (!message || typeof message !== "object" || message.role !== "assistant") {
out.push(message);
continue;
}
if (!Array.isArray(message.content)) {
out.push(message);
continue;
}
const nextContent: typeof message.content = [];
let messageChanged = false;
for (const block of message.content) {
if (!isReplayToolCallBlock(block)) {
nextContent.push(block);
continue;
}
const replayBlock = block as ReplayToolCallBlock;
// No payload or no usable id: the call cannot be replayed or paired.
if (!replayToolCallHasInput(replayBlock) || !replayToolCallNonEmptyString(replayBlock.id)) {
changed = true;
messageChanged = true;
continue;
}
const rawName = typeof replayBlock.name === "string" ? replayBlock.name : "";
const resolvedName = resolveReplayToolCallName(rawName, replayBlock.id, allowedToolNames);
if (!resolvedName) {
changed = true;
messageChanged = true;
continue;
}
// Name recovered/canonicalized: rewrite the block with the resolved name.
if (replayBlock.name !== resolvedName) {
nextContent.push({ ...(block as object), name: resolvedName } as typeof block);
changed = true;
messageChanged = true;
continue;
}
nextContent.push(block);
}
if (messageChanged) {
changed = true;
if (nextContent.length > 0) {
out.push({ ...message, content: nextContent });
} else {
// All content blocks were dropped; remove the assistant turn entirely.
droppedAssistantMessages += 1;
}
continue;
}
out.push(message);
}
return {
messages: changed ? out : messages,
droppedAssistantMessages,
};
}
function sanitizeAnthropicReplayToolResults(messages: AgentMessage[]): AgentMessage[] {
let changed = false;
const out: AgentMessage[] = [];
for (let index = 0; index < messages.length; index += 1) {
const message = messages[index];
if (!message || typeof message !== "object" || message.role !== "user") {
out.push(message);
continue;
}
if (!Array.isArray(message.content)) {
out.push(message);
continue;
}
const previous = messages[index - 1];
const validToolUseIds = new Set<string>();
if (previous && typeof previous === "object" && previous.role === "assistant") {
const previousContent = (previous as { content?: unknown }).content;
if (Array.isArray(previousContent)) {
for (const block of previousContent) {
if (!block || typeof block !== "object") {
continue;
}
const typedBlock = block as { type?: unknown; id?: unknown };
if (typedBlock.type !== "toolUse" || typeof typedBlock.id !== "string") {
continue;
}
const trimmedId = typedBlock.id.trim();
if (trimmedId) {
validToolUseIds.add(trimmedId);
}
}
}
}
const nextContent = message.content.filter((block) => {
if (!block || typeof block !== "object") {
return true;
}
const typedBlock = block as AnthropicToolResultContentBlock;
if (typedBlock.type !== "toolResult" || typeof typedBlock.toolUseId !== "string") {
return true;
}
return validToolUseIds.size > 0 && validToolUseIds.has(typedBlock.toolUseId);
});
if (nextContent.length === message.content.length) {
out.push(message);
continue;
}
changed = true;
if (nextContent.length > 0) {
out.push({ ...message, content: nextContent });
continue;
}
out.push({
...message,
content: [{ type: "text", text: "[tool results omitted]" }],
} as AgentMessage);
}
return changed ? out : messages;
}
function normalizeToolCallIdsInMessage(message: unknown): void {
if (!message || typeof message !== "object") {
return;
@ -797,6 +993,43 @@ export function wrapStreamFnTrimToolCallNames(
};
}
/**
 * Wraps a StreamFn so that, before each provider call, the outbound message
 * transcript is sanitized of malformed replayed tool calls.
 *
 * Pipeline (only runs when sanitization actually changed the messages):
 * 1. sanitizeReplayToolCallInputs drops/repairs malformed tool-call blocks;
 * 2. sanitizeToolUseResultPairing re-pairs tool calls with their results,
 *    preserving real results attached to errored assistant turns;
 * 3. with validateAnthropicTurns: drop orphaned user tool_result blocks;
 * 4. when assistant turns were dropped (or Anthropic validation is on),
 *    re-run the provider-specific turn validators requested by the policy.
 *
 * The original `context` object and messages array are never mutated; a new
 * context with the sanitized messages is forwarded to `baseFn`.
 */
export function wrapStreamFnSanitizeMalformedToolCalls(
baseFn: StreamFn,
allowedToolNames?: Set<string>,
transcriptPolicy?: Pick<TranscriptPolicy, "validateGeminiTurns" | "validateAnthropicTurns">,
): StreamFn {
return (model, context, options) => {
const ctx = context as unknown as { messages?: unknown };
const messages = ctx?.messages;
// No replayable message list: pass through untouched.
if (!Array.isArray(messages)) {
return baseFn(model, context, options);
}
const sanitized = sanitizeReplayToolCallInputs(messages as AgentMessage[], allowedToolNames);
// Copy-on-write: identical reference means nothing needed sanitizing.
if (sanitized.messages === messages) {
return baseFn(model, context, options);
}
let nextMessages = sanitizeToolUseResultPairing(sanitized.messages, {
preserveErroredAssistantResults: true,
});
if (transcriptPolicy?.validateAnthropicTurns) {
nextMessages = sanitizeAnthropicReplayToolResults(nextMessages);
}
// Dropping whole assistant turns can break strict turn alternation, so
// re-validate per the transcript policy when that may have happened.
if (sanitized.droppedAssistantMessages > 0 || transcriptPolicy?.validateAnthropicTurns) {
if (transcriptPolicy?.validateGeminiTurns) {
nextMessages = validateGeminiTurns(nextMessages);
}
if (transcriptPolicy?.validateAnthropicTurns) {
nextMessages = validateAnthropicTurns(nextMessages);
}
}
const nextContext = {
...(context as unknown as Record<string, unknown>),
messages: nextMessages,
} as unknown;
return baseFn(model, nextContext as typeof context, options);
};
}
function extractBalancedJsonPrefix(raw: string): string | null {
let start = 0;
while (start < raw.length && /\s/.test(raw[start] ?? "")) {
@ -1989,6 +2222,10 @@ export async function runEmbeddedAttempt(
log.warn(`[ws-stream] no API key for provider=${params.provider}; using HTTP transport`);
activeSession.agent.streamFn = streamSimple;
}
} else if (params.model.provider === "anthropic-vertex") {
// Anthropic Vertex AI: inject AnthropicVertex client into pi-ai's
// streamAnthropic for GCP IAM auth instead of Anthropic API keys.
activeSession.agent.streamFn = createAnthropicVertexStreamFnForModel(params.model);
} else {
// Force a stable streamFn reference so vitest can reliably mock @mariozechner/pi-ai.
activeSession.agent.streamFn = streamSimple;
@ -2125,6 +2362,11 @@ export async function runEmbeddedAttempt(
// Some models emit tool names with surrounding whitespace (e.g. " read ").
// pi-agent-core dispatches tool calls with exact string matching, so normalize
// names on the live response stream before tool execution.
activeSession.agent.streamFn = wrapStreamFnSanitizeMalformedToolCalls(
activeSession.agent.streamFn,
allowedToolNames,
transcriptPolicy,
);
activeSession.agent.streamFn = wrapStreamFnTrimToolCallNames(
activeSession.agent.streamFn,
allowedToolNames,

View File

@ -69,6 +69,18 @@ describe("resolveProviderCapabilities", () => {
geminiThoughtSignatureModelHints: [],
dropThinkingBlockModelHints: ["claude"],
});
expect(resolveProviderCapabilities("anthropic-vertex")).toEqual({
anthropicToolSchemaMode: "native",
anthropicToolChoiceMode: "native",
providerFamily: "anthropic",
preserveAnthropicThinkingSignatures: true,
openAiCompatTurnValidation: true,
geminiThoughtSignatureSanitization: false,
transcriptToolCallIdMode: "default",
transcriptToolCallIdModelHints: [],
geminiThoughtSignatureModelHints: [],
dropThinkingBlockModelHints: ["claude"],
});
expect(resolveProviderCapabilities("amazon-bedrock")).toEqual({
anthropicToolSchemaMode: "native",
anthropicToolChoiceMode: "native",
@ -136,6 +148,7 @@ describe("resolveProviderCapabilities", () => {
it("tracks provider families and model-specific transcript quirks in the registry", () => {
expect(isOpenAiProviderFamily("openai")).toBe(true);
expect(isAnthropicProviderFamily("anthropic-vertex")).toBe(true);
expect(isAnthropicProviderFamily("amazon-bedrock")).toBe(true);
expect(
shouldDropThinkingBlocksForModel({
@ -143,6 +156,12 @@ describe("resolveProviderCapabilities", () => {
modelId: "claude-opus-4-6",
}),
).toBe(true);
expect(
shouldDropThinkingBlocksForModel({
provider: "anthropic-vertex",
modelId: "claude-sonnet-4-6",
}),
).toBe(true);
expect(
shouldDropThinkingBlocksForModel({
provider: "amazon-bedrock",

View File

@ -35,6 +35,10 @@ const DEFAULT_PROVIDER_CAPABILITIES: ProviderCapabilities = {
};
const CORE_PROVIDER_CAPABILITIES: Record<string, Partial<ProviderCapabilities>> = {
"anthropic-vertex": {
providerFamily: "anthropic",
dropThinkingBlockModelHints: ["claude"],
},
"amazon-bedrock": {
providerFamily: "anthropic",
dropThinkingBlockModelHints: ["claude"],

View File

@ -195,6 +195,10 @@ export type ToolCallInputRepairOptions = {
allowedToolNames?: Iterable<string>;
};
export type ToolUseResultPairingOptions = {
preserveErroredAssistantResults?: boolean;
};
export function stripToolResultDetails(messages: AgentMessage[]): AgentMessage[] {
let touched = false;
const out: AgentMessage[] = [];
@ -327,8 +331,11 @@ export function sanitizeToolCallInputs(
return repairToolCallInputs(messages, options).messages;
}
export function sanitizeToolUseResultPairing(messages: AgentMessage[]): AgentMessage[] {
return repairToolUseResultPairing(messages).messages;
export function sanitizeToolUseResultPairing(
messages: AgentMessage[],
options?: ToolUseResultPairingOptions,
): AgentMessage[] {
return repairToolUseResultPairing(messages, options).messages;
}
export type ToolUseRepairReport = {
@ -339,7 +346,10 @@ export type ToolUseRepairReport = {
moved: boolean;
};
export function repairToolUseResultPairing(messages: AgentMessage[]): ToolUseRepairReport {
export function repairToolUseResultPairing(
messages: AgentMessage[],
options?: ToolUseResultPairingOptions,
): ToolUseRepairReport {
// Anthropic (and Cloud Code Assist) reject transcripts where assistant tool calls are not
// immediately followed by matching tool results. Session files can end up with results
// displaced (e.g. after user turns) or duplicated. Repair by:
@ -390,18 +400,6 @@ export function repairToolUseResultPairing(messages: AgentMessage[]): ToolUseRep
const assistant = msg as Extract<AgentMessage, { role: "assistant" }>;
// Skip tool call extraction for aborted or errored assistant messages.
// When stopReason is "error" or "aborted", the tool_use blocks may be incomplete
// (e.g., partialJson: true) and should not have synthetic tool_results created.
// Creating synthetic results for incomplete tool calls causes API 400 errors:
// "unexpected tool_use_id found in tool_result blocks"
// See: https://github.com/openclaw/openclaw/issues/4597
const stopReason = (assistant as { stopReason?: string }).stopReason;
if (stopReason === "error" || stopReason === "aborted") {
out.push(msg);
continue;
}
const toolCalls = extractToolCallsFromAssistant(assistant);
if (toolCalls.length === 0) {
out.push(msg);
@ -459,6 +457,28 @@ export function repairToolUseResultPairing(messages: AgentMessage[]): ToolUseRep
}
}
// Aborted/errored assistant turns should never synthesize missing tool results, but
// the replay sanitizer can still legitimately retain real tool results for surviving
// tool calls in the same turn after malformed siblings are dropped.
const stopReason = (assistant as { stopReason?: string }).stopReason;
if (stopReason === "error" || stopReason === "aborted") {
out.push(msg);
if (options?.preserveErroredAssistantResults) {
for (const toolCall of toolCalls) {
const result = spanResultsById.get(toolCall.id);
if (!result) {
continue;
}
pushToolResult(result);
}
}
for (const rem of remainder) {
out.push(rem);
}
i = j - 1;
continue;
}
out.push(msg);
if (spanResultsById.size > 0 && remainder.length > 0) {

View File

@ -0,0 +1,61 @@
import bravePlugin from "../extensions/brave/index.js";
import firecrawlPlugin from "../extensions/firecrawl/index.js";
import googlePlugin from "../extensions/google/index.js";
import moonshotPlugin from "../extensions/moonshot/index.js";
import perplexityPlugin from "../extensions/perplexity/index.js";
import tavilyPlugin from "../extensions/tavily/index.js";
import xaiPlugin from "../extensions/xai/index.js";
import type { OpenClawPluginApi } from "./plugins/types.js";
// Minimal shape of a bundled plugin that can be registered against the
// OpenClaw plugin API: a stable id plus a register hook.
type RegistrablePlugin = {
  id: string;
  register: (api: OpenClawPluginApi) => void;
};
// Bundled web-search provider plugins paired with a placeholder credential
// value (e.g. "BSA-test", "fc-test") — presumably consumed by onboarding /
// configure test flows; confirm against callers.
//
// Each `plugin` is exposed through a getter so the imported module's default
// export is read lazily on property access rather than captured once when this
// array literal is evaluated.
export const bundledWebSearchPluginRegistrations: ReadonlyArray<{
  readonly plugin: RegistrablePlugin;
  credentialValue: unknown;
}> = [
  {
    get plugin() {
      return bravePlugin;
    },
    credentialValue: "BSA-test",
  },
  {
    get plugin() {
      return firecrawlPlugin;
    },
    credentialValue: "fc-test",
  },
  {
    get plugin() {
      return googlePlugin;
    },
    credentialValue: "AIza-test",
  },
  {
    get plugin() {
      return moonshotPlugin;
    },
    credentialValue: "sk-test",
  },
  {
    get plugin() {
      return perplexityPlugin;
    },
    credentialValue: "pplx-test",
  },
  {
    get plugin() {
      return tavilyPlugin;
    },
    credentialValue: "tvly-test",
  },
  {
    get plugin() {
      return xaiPlugin;
    },
    credentialValue: "xai-test",
  },
];

View File

@ -1,4 +1,4 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { afterEach, beforeEach, describe, expect, it } from "vitest";
import {
clearFastTestEnv,
loadRunCronIsolatedAgentTurn,
@ -8,11 +8,7 @@ import {
runWithModelFallbackMock,
} from "./run.test-harness.js";
type RunModule = typeof import("./run.js");
type SandboxConfigModule = typeof import("../../agents/sandbox/config.js");
let runCronIsolatedAgentTurn: RunModule["runCronIsolatedAgentTurn"];
let resolveSandboxConfigForAgent: SandboxConfigModule["resolveSandboxConfigForAgent"];
const runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn();
function makeJob(overrides?: Record<string, unknown>) {
return {
@ -85,10 +81,7 @@ function expectDefaultSandboxPreserved(
describe("runCronIsolatedAgentTurn sandbox config preserved", () => {
let previousFastTestEnv: string | undefined;
beforeEach(async () => {
vi.resetModules();
runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn();
({ resolveSandboxConfigForAgent } = await import("../../agents/sandbox/config.js"));
beforeEach(() => {
previousFastTestEnv = clearFastTestEnv();
resetRunCronIsolatedAgentTurnHarness();
});
@ -132,6 +125,7 @@ describe("runCronIsolatedAgentTurn sandbox config preserved", () => {
expect(runWithModelFallbackMock).toHaveBeenCalledTimes(1);
const runCfg = runWithModelFallbackMock.mock.calls[0]?.[0]?.cfg;
const { resolveSandboxConfigForAgent } = await import("../../agents/sandbox/config.js");
const resolvedSandbox = resolveSandboxConfigForAgent(runCfg, "specialist");
expectDefaultSandboxPreserved(runCfg);

View File

@ -58,6 +58,13 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProviderPlu
throw new Error("OpenAI API key missing");
}
const controller = new AbortController();
const timeoutMs = req.timeoutMs;
const timeout =
typeof timeoutMs === "number" && Number.isFinite(timeoutMs) && timeoutMs > 0
? setTimeout(() => controller.abort(), timeoutMs)
: undefined;
const response = await fetch(`${resolveOpenAIBaseUrl(req.cfg)}/images/generations`, {
method: "POST",
headers: {
@ -70,6 +77,9 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProviderPlu
n: req.count ?? 1,
size: req.size ?? DEFAULT_SIZE,
}),
signal: controller.signal,
}).finally(() => {
clearTimeout(timeout);
});
if (!response.ok) {

View File

@ -25,6 +25,7 @@ export type ImageGenerationRequest = {
cfg: OpenClawConfig;
agentDir?: string;
authStore?: AuthProfileStore;
timeoutMs?: number;
count?: number;
size?: string;
aspectRatio?: string;

View File

@ -56,7 +56,23 @@
"OPENSSL_ENGINES",
"PYTHONSTARTUP",
"WGETRC",
"CURL_HOME"
"CURL_HOME",
"CLASSPATH",
"CGO_CFLAGS",
"CGO_LDFLAGS",
"GOFLAGS",
"CORECLR_PROFILER_PATH",
"PHPRC",
"PHP_INI_SCAN_DIR",
"DENO_DIR",
"BUN_CONFIG_REGISTRY",
"LUA_PATH",
"LUA_CPATH",
"GEM_HOME",
"GEM_PATH",
"BUNDLE_GEMFILE",
"COMPOSER_HOME",
"XDG_CONFIG_HOME"
],
"blockedOverridePrefixes": ["GIT_CONFIG_", "NPM_CONFIG_"],
"blockedPrefixes": ["DYLD_", "LD_", "BASH_FUNC_"]

View File

@ -8,6 +8,7 @@ import {
isDangerousHostEnvVarName,
normalizeEnvVarKey,
sanitizeHostExecEnv,
sanitizeHostExecEnvWithDiagnostics,
sanitizeSystemRunEnvOverrides,
} from "./host-env-security.js";
import { OPENCLAW_CLI_ENV_VALUE } from "./openclaw-exec-env.js";
@ -114,6 +115,10 @@ describe("sanitizeHostExecEnv", () => {
GIT_CONFIG_GLOBAL: "/tmp/gitconfig",
SHELLOPTS: "xtrace",
PS4: "$(touch /tmp/pwned)",
CLASSPATH: "/tmp/evil-classpath",
GOFLAGS: "-mod=mod",
PHPRC: "/tmp/evil-php.ini",
XDG_CONFIG_HOME: "/tmp/evil-config",
SAFE: "ok",
},
});
@ -128,6 +133,10 @@ describe("sanitizeHostExecEnv", () => {
expect(env.GIT_CONFIG_GLOBAL).toBeUndefined();
expect(env.SHELLOPTS).toBeUndefined();
expect(env.PS4).toBeUndefined();
expect(env.CLASSPATH).toBeUndefined();
expect(env.GOFLAGS).toBeUndefined();
expect(env.PHPRC).toBeUndefined();
expect(env.XDG_CONFIG_HOME).toBeUndefined();
expect(env.SAFE).toBe("ok");
expect(env.HOME).toBe("/tmp/trusted-home");
expect(env.ZDOTDIR).toBe("/tmp/trusted-zdotdir");
@ -183,7 +192,7 @@ describe("sanitizeHostExecEnv", () => {
expect(env.OPENCLAW_CLI).toBe(OPENCLAW_CLI_ENV_VALUE);
});
it("drops non-string inherited values and non-portable inherited keys", () => {
it("drops non-string inherited values while preserving non-portable inherited keys", () => {
const env = sanitizeHostExecEnv({
baseEnv: {
PATH: "/usr/bin:/bin",
@ -191,6 +200,7 @@ describe("sanitizeHostExecEnv", () => {
// oxlint-disable-next-line typescript/no-explicit-any
BAD_NUMBER: 1 as any,
"NOT-PORTABLE": "x",
"ProgramFiles(x86)": "C:\\Program Files (x86)",
},
});
@ -198,6 +208,8 @@ describe("sanitizeHostExecEnv", () => {
OPENCLAW_CLI: OPENCLAW_CLI_ENV_VALUE,
PATH: "/usr/bin:/bin",
GOOD: "1",
"NOT-PORTABLE": "x",
"ProgramFiles(x86)": "C:\\Program Files (x86)",
});
});
});
@ -212,11 +224,58 @@ describe("isDangerousHostEnvOverrideVarName", () => {
expect(isDangerousHostEnvOverrideVarName("git_config_global")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("GRADLE_USER_HOME")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("gradle_user_home")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("CLASSPATH")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("classpath")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("GOFLAGS")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("goflags")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("CORECLR_PROFILER_PATH")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("coreclr_profiler_path")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("XDG_CONFIG_HOME")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("xdg_config_home")).toBe(true);
expect(isDangerousHostEnvOverrideVarName("BASH_ENV")).toBe(false);
expect(isDangerousHostEnvOverrideVarName("FOO")).toBe(false);
});
});
describe("sanitizeHostExecEnvWithDiagnostics", () => {
  // Blocked keys (PATH, CLASSPATH) and syntactically invalid keys ("BAD-KEY")
  // must be reported in the diagnostics and excluded from the resulting env,
  // while safe overrides pass through and the base PATH is preserved.
  it("reports blocked and invalid requested overrides", () => {
    const result = sanitizeHostExecEnvWithDiagnostics({
      baseEnv: {
        PATH: "/usr/bin:/bin",
      },
      overrides: {
        PATH: "/tmp/evil",
        CLASSPATH: "/tmp/evil-classpath",
        SAFE_KEY: "ok",
        "BAD-KEY": "bad",
      },
    });
    // Rejected-key lists are expected sorted and de-duplicated.
    expect(result.rejectedOverrideBlockedKeys).toEqual(["CLASSPATH", "PATH"]);
    expect(result.rejectedOverrideInvalidKeys).toEqual(["BAD-KEY"]);
    expect(result.env.SAFE_KEY).toBe("ok");
    expect(result.env.PATH).toBe("/usr/bin:/bin");
    expect(result.env.CLASSPATH).toBeUndefined();
  });
  // Windows-compatible names containing parentheses (e.g. "ProgramFiles(x86)")
  // are valid override keys even though they are not portable POSIX names.
  it("allows Windows-style override names while still rejecting invalid keys", () => {
    const result = sanitizeHostExecEnvWithDiagnostics({
      baseEnv: {
        PATH: "/usr/bin:/bin",
        "ProgramFiles(x86)": "C:\\Program Files (x86)",
      },
      overrides: {
        "ProgramFiles(x86)": "D:\\SDKs",
        "BAD-KEY": "bad",
      },
    });
    expect(result.rejectedOverrideBlockedKeys).toEqual([]);
    expect(result.rejectedOverrideInvalidKeys).toEqual(["BAD-KEY"]);
    expect(result.env["ProgramFiles(x86)"]).toBe("D:\\SDKs");
  });
});
describe("normalizeEnvVarKey", () => {
it("normalizes and validates keys", () => {
expect(normalizeEnvVarKey(" OPENROUTER_API_KEY ")).toBe("OPENROUTER_API_KEY");

View File

@ -2,6 +2,7 @@ import HOST_ENV_SECURITY_POLICY_JSON from "./host-env-security-policy.json" with
import { markOpenClawExecEnv } from "./openclaw-exec-env.js";
const PORTABLE_ENV_VAR_KEY = /^[A-Za-z_][A-Za-z0-9_]*$/;
const WINDOWS_COMPAT_OVERRIDE_ENV_VAR_KEY = /^[A-Za-z_][A-Za-z0-9_()]*$/;
type HostEnvSecurityPolicy = {
blockedKeys: string[];
@ -42,6 +43,17 @@ export const HOST_SHELL_WRAPPER_ALLOWED_OVERRIDE_ENV_KEYS = new Set<string>(
HOST_SHELL_WRAPPER_ALLOWED_OVERRIDE_ENV_KEY_VALUES,
);
export type HostExecEnvSanitizationResult = {
env: Record<string, string>;
rejectedOverrideBlockedKeys: string[];
rejectedOverrideInvalidKeys: string[];
};
export type HostExecEnvOverrideDiagnostics = {
rejectedOverrideBlockedKeys: string[];
rejectedOverrideInvalidKeys: string[];
};
export function normalizeEnvVarKey(
rawKey: string,
options?: { portable?: boolean },
@ -56,6 +68,17 @@ export function normalizeEnvVarKey(
return key;
}
// Normalizes a requested override key and accepts it only when it matches the
// portable env-var grammar or the Windows-compatible override grammar (which
// additionally permits parentheses, e.g. "ProgramFiles(x86)"). Returns null
// for keys that fail normalization or match neither pattern.
function normalizeHostOverrideEnvVarKey(rawKey: string): string | null {
  const normalized = normalizeEnvVarKey(rawKey);
  if (!normalized) {
    return null;
  }
  const acceptable =
    PORTABLE_ENV_VAR_KEY.test(normalized) ||
    WINDOWS_COMPAT_OVERRIDE_ENV_VAR_KEY.test(normalized);
  return acceptable ? normalized : null;
}
export function isDangerousHostEnvVarName(rawKey: string): boolean {
const key = normalizeEnvVarKey(rawKey);
if (!key) {
@ -80,15 +103,16 @@ export function isDangerousHostEnvOverrideVarName(rawKey: string): boolean {
return HOST_DANGEROUS_OVERRIDE_ENV_PREFIXES.some((prefix) => upper.startsWith(prefix));
}
function listNormalizedPortableEnvEntries(
function listNormalizedEnvEntries(
source: Record<string, string | undefined>,
options?: { portable?: boolean },
): Array<[string, string]> {
const entries: Array<[string, string]> = [];
for (const [rawKey, value] of Object.entries(source)) {
if (typeof value !== "string") {
continue;
}
const key = normalizeEnvVarKey(rawKey, { portable: true });
const key = normalizeEnvVarKey(rawKey, options);
if (!key) {
continue;
}
@ -97,41 +121,112 @@ function listNormalizedPortableEnvEntries(
return entries;
}
export function sanitizeHostExecEnv(params?: {
// De-duplicates the input (first occurrence wins for ordering purposes before
// the sort) and returns a new array sorted with locale-aware comparison.
function sortUnique(values: Iterable<string>): string[] {
  const deduped = [...new Set(values)];
  deduped.sort((left, right) => left.localeCompare(right));
  return deduped;
}
// Filters requested host-env overrides into accepted key/value pairs plus
// sorted, de-duplicated lists of rejected keys: "blocked" for keys denied by
// security policy (PATH and dangerous names/prefixes) and "invalid" for keys
// that fail the override key grammar. Non-string values are silently ignored.
function sanitizeHostEnvOverridesWithDiagnostics(params?: {
  overrides?: Record<string, string> | null;
  blockPathOverrides?: boolean;
}): {
  acceptedOverrides?: Record<string, string>;
  rejectedOverrideBlockedKeys: string[];
  rejectedOverrideInvalidKeys: string[];
} {
  const requested = params?.overrides ?? undefined;
  if (!requested) {
    // No overrides requested: nothing accepted, nothing rejected.
    return {
      acceptedOverrides: undefined,
      rejectedOverrideBlockedKeys: [],
      rejectedOverrideInvalidKeys: [],
    };
  }
  const pathBlocked = params?.blockPathOverrides ?? true;
  const accepted: Record<string, string> = {};
  const blocked: string[] = [];
  const invalid: string[] = [];
  for (const [rawKey, value] of Object.entries(requested)) {
    if (typeof value !== "string") {
      continue;
    }
    const key = normalizeHostOverrideEnvVarKey(rawKey);
    if (!key) {
      // Report the trimmed form when non-empty so diagnostics stay readable.
      const trimmed = rawKey.trim();
      invalid.push(trimmed.length > 0 ? trimmed : rawKey);
      continue;
    }
    const upper = key.toUpperCase();
    // PATH is part of the security boundary (command resolution + safe-bin checks). Never allow
    // request-scoped PATH overrides from agents/gateways.
    if (pathBlocked && upper === "PATH") {
      blocked.push(upper);
      continue;
    }
    if (isDangerousHostEnvVarName(upper) || isDangerousHostEnvOverrideVarName(upper)) {
      blocked.push(upper);
      continue;
    }
    accepted[key] = value;
  }
  return {
    acceptedOverrides: accepted,
    rejectedOverrideBlockedKeys: sortUnique(blocked),
    rejectedOverrideInvalidKeys: sortUnique(invalid),
  };
}
export function sanitizeHostExecEnvWithDiagnostics(params?: {
baseEnv?: Record<string, string | undefined>;
overrides?: Record<string, string> | null;
blockPathOverrides?: boolean;
}): Record<string, string> {
}): HostExecEnvSanitizationResult {
const baseEnv = params?.baseEnv ?? process.env;
const overrides = params?.overrides ?? undefined;
const blockPathOverrides = params?.blockPathOverrides ?? true;
const merged: Record<string, string> = {};
for (const [key, value] of listNormalizedPortableEnvEntries(baseEnv)) {
for (const [key, value] of listNormalizedEnvEntries(baseEnv)) {
if (isDangerousHostEnvVarName(key)) {
continue;
}
merged[key] = value;
}
if (!overrides) {
return markOpenClawExecEnv(merged);
const overrideResult = sanitizeHostEnvOverridesWithDiagnostics({
overrides: params?.overrides ?? undefined,
blockPathOverrides: params?.blockPathOverrides ?? true,
});
if (overrideResult.acceptedOverrides) {
for (const [key, value] of Object.entries(overrideResult.acceptedOverrides)) {
merged[key] = value;
}
}
for (const [key, value] of listNormalizedPortableEnvEntries(overrides)) {
const upper = key.toUpperCase();
// PATH is part of the security boundary (command resolution + safe-bin checks). Never allow
// request-scoped PATH overrides from agents/gateways.
if (blockPathOverrides && upper === "PATH") {
continue;
}
if (isDangerousHostEnvVarName(upper) || isDangerousHostEnvOverrideVarName(upper)) {
continue;
}
merged[key] = value;
}
return {
env: markOpenClawExecEnv(merged),
rejectedOverrideBlockedKeys: overrideResult.rejectedOverrideBlockedKeys,
rejectedOverrideInvalidKeys: overrideResult.rejectedOverrideInvalidKeys,
};
}
return markOpenClawExecEnv(merged);
// Dry-run inspection of requested overrides: returns only the rejection
// diagnostics (blocked + invalid keys) without building a merged environment.
export function inspectHostExecEnvOverrides(params?: {
  overrides?: Record<string, string> | null;
  blockPathOverrides?: boolean;
}): HostExecEnvOverrideDiagnostics {
  const { rejectedOverrideBlockedKeys, rejectedOverrideInvalidKeys } =
    sanitizeHostEnvOverridesWithDiagnostics(params);
  return { rejectedOverrideBlockedKeys, rejectedOverrideInvalidKeys };
}
// Convenience wrapper: sanitize the exec environment and discard the
// override-rejection diagnostics, returning only the merged env map.
export function sanitizeHostExecEnv(params?: {
  baseEnv?: Record<string, string | undefined>;
  overrides?: Record<string, string> | null;
  blockPathOverrides?: boolean;
}): Record<string, string> {
  const { env } = sanitizeHostExecEnvWithDiagnostics(params);
  return env;
}
export function sanitizeSystemRunEnvOverrides(params?: {
@ -146,7 +241,7 @@ export function sanitizeSystemRunEnvOverrides(params?: {
return overrides;
}
const filtered: Record<string, string> = {};
for (const [key, value] of listNormalizedPortableEnvEntries(overrides)) {
for (const [key, value] of listNormalizedEnvEntries(overrides, { portable: true })) {
if (!HOST_SHELL_WRAPPER_ALLOWED_OVERRIDE_ENV_KEYS.has(key.toUpperCase())) {
continue;
}

View File

@ -1,58 +1,72 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
const completeMock = vi.fn();
const minimaxUnderstandImageMock = vi.fn();
const ensureOpenClawModelsJsonMock = vi.fn(async () => {});
const getApiKeyForModelMock = vi.fn(async () => ({
apiKey: "oauth-test", // pragma: allowlist secret
source: "test",
mode: "oauth",
const hoisted = vi.hoisted(() => ({
completeMock: vi.fn(),
minimaxUnderstandImageMock: vi.fn(),
ensureOpenClawModelsJsonMock: vi.fn(async () => {}),
getApiKeyForModelMock: vi.fn(async () => ({
apiKey: "oauth-test", // pragma: allowlist secret
source: "test",
mode: "oauth",
})),
resolveApiKeyForProviderMock: vi.fn(async () => ({
apiKey: "oauth-test", // pragma: allowlist secret
source: "test",
mode: "oauth",
})),
requireApiKeyMock: vi.fn((auth: { apiKey?: string }) => auth.apiKey ?? ""),
setRuntimeApiKeyMock: vi.fn(),
discoverModelsMock: vi.fn(),
}));
const resolveApiKeyForProviderMock = vi.fn(async () => ({
apiKey: "oauth-test", // pragma: allowlist secret
source: "test",
mode: "oauth",
}));
const requireApiKeyMock = vi.fn((auth: { apiKey?: string }) => auth.apiKey ?? "");
const setRuntimeApiKeyMock = vi.fn();
const discoverModelsMock = vi.fn();
type ImageModule = typeof import("./image.js");
const {
completeMock,
minimaxUnderstandImageMock,
ensureOpenClawModelsJsonMock,
getApiKeyForModelMock,
resolveApiKeyForProviderMock,
requireApiKeyMock,
setRuntimeApiKeyMock,
discoverModelsMock,
} = hoisted;
let describeImageWithModel: ImageModule["describeImageWithModel"];
vi.mock("@mariozechner/pi-ai", async (importOriginal) => {
const actual = await importOriginal<typeof import("@mariozechner/pi-ai")>();
return {
...actual,
complete: completeMock,
};
});
vi.mock("../../agents/minimax-vlm.js", () => ({
isMinimaxVlmProvider: (provider: string) =>
provider === "minimax" || provider === "minimax-portal",
isMinimaxVlmModel: (provider: string, modelId: string) =>
(provider === "minimax" || provider === "minimax-portal") && modelId === "MiniMax-VL-01",
minimaxUnderstandImage: minimaxUnderstandImageMock,
}));
vi.mock("../../agents/models-config.js", () => ({
ensureOpenClawModelsJson: ensureOpenClawModelsJsonMock,
}));
vi.mock("../../agents/model-auth.js", () => ({
getApiKeyForModel: getApiKeyForModelMock,
resolveApiKeyForProvider: resolveApiKeyForProviderMock,
requireApiKey: requireApiKeyMock,
}));
vi.mock("../../agents/pi-model-discovery-runtime.js", () => ({
discoverAuthStorage: () => ({
setRuntimeApiKey: setRuntimeApiKeyMock,
}),
discoverModels: discoverModelsMock,
}));
const { describeImageWithModel } = await import("./image.js");
describe("describeImageWithModel", () => {
beforeEach(async () => {
vi.resetModules();
beforeEach(() => {
vi.clearAllMocks();
vi.doMock("@mariozechner/pi-ai", async (importOriginal) => {
const actual = await importOriginal<typeof import("@mariozechner/pi-ai")>();
return {
...actual,
complete: completeMock,
};
});
vi.doMock("../../agents/minimax-vlm.js", () => ({
isMinimaxVlmProvider: (provider: string) =>
provider === "minimax" || provider === "minimax-portal",
isMinimaxVlmModel: (provider: string, modelId: string) =>
(provider === "minimax" || provider === "minimax-portal") && modelId === "MiniMax-VL-01",
minimaxUnderstandImage: minimaxUnderstandImageMock,
}));
vi.doMock("../../agents/models-config.js", () => ({
ensureOpenClawModelsJson: ensureOpenClawModelsJsonMock,
}));
vi.doMock("../../agents/model-auth.js", () => ({
getApiKeyForModel: getApiKeyForModelMock,
resolveApiKeyForProvider: resolveApiKeyForProviderMock,
requireApiKey: requireApiKeyMock,
}));
vi.doMock("../../agents/pi-model-discovery-runtime.js", () => ({
discoverAuthStorage: () => ({
setRuntimeApiKey: setRuntimeApiKeyMock,
}),
discoverModels: discoverModelsMock,
}));
({ describeImageWithModel } = await import("./image.js"));
minimaxUnderstandImageMock.mockResolvedValue("portal ok");
discoverModelsMock.mockReturnValue({
find: vi.fn(() => ({

View File

@ -188,9 +188,19 @@ export async function describeImagesWithModel(
}
const context = buildImageContext(prompt, params.images);
const controller = new AbortController();
const timeout =
typeof params.timeoutMs === "number" &&
Number.isFinite(params.timeoutMs) &&
params.timeoutMs > 0
? setTimeout(() => controller.abort(), params.timeoutMs)
: undefined;
const message = await complete(model, context, {
apiKey,
maxTokens: resolveImageToolMaxTokens(model.maxTokens, params.maxTokens ?? 512),
signal: controller.signal,
}).finally(() => {
clearTimeout(timeout);
});
const text = coerceImageAssistantText({
message,

View File

@ -336,6 +336,7 @@ describe("handleSystemRunInvoke mac app exec host routing", () => {
preferMacAppExecHost: boolean;
runViaResponse?: ExecHostResponse | null;
command?: string[];
env?: Record<string, string>;
rawCommand?: string | null;
systemRunPlan?: SystemRunApprovalPlan | null;
cwd?: string;
@ -391,6 +392,7 @@ describe("handleSystemRunInvoke mac app exec host routing", () => {
client: {} as never,
params: {
command: params.command ?? ["echo", "ok"],
env: params.env,
rawCommand: params.rawCommand,
systemRunPlan: params.systemRunPlan,
cwd: params.cwd,
@ -1106,6 +1108,65 @@ describe("handleSystemRunInvoke mac app exec host routing", () => {
expectApprovalRequiredDenied({ sendNodeEvent, sendInvokeResult });
});
it("rejects blocked environment overrides before execution", async () => {
const { runCommand, sendInvokeResult } = await runSystemInvoke({
preferMacAppExecHost: false,
security: "full",
ask: "off",
env: { CLASSPATH: "/tmp/evil-classpath" },
});
expect(runCommand).not.toHaveBeenCalled();
expectInvokeErrorMessage(sendInvokeResult, {
message: "SYSTEM_RUN_DENIED: environment override rejected",
});
expectInvokeErrorMessage(sendInvokeResult, {
message: "CLASSPATH",
});
});
it("rejects blocked environment overrides for shell-wrapper commands", async () => {
const shellCommand =
process.platform === "win32"
? ["cmd.exe", "/d", "/s", "/c", "echo ok"]
: ["/bin/sh", "-lc", "echo ok"];
const { runCommand, sendInvokeResult } = await runSystemInvoke({
preferMacAppExecHost: false,
security: "full",
ask: "off",
command: shellCommand,
env: {
CLASSPATH: "/tmp/evil-classpath",
LANG: "C",
},
});
expect(runCommand).not.toHaveBeenCalled();
expectInvokeErrorMessage(sendInvokeResult, {
message: "SYSTEM_RUN_DENIED: environment override rejected",
});
expectInvokeErrorMessage(sendInvokeResult, {
message: "CLASSPATH",
});
});
it("rejects invalid non-portable environment override keys before execution", async () => {
const { runCommand, sendInvokeResult } = await runSystemInvoke({
preferMacAppExecHost: false,
security: "full",
ask: "off",
env: { "BAD-KEY": "x" },
});
expect(runCommand).not.toHaveBeenCalled();
expectInvokeErrorMessage(sendInvokeResult, {
message: "SYSTEM_RUN_DENIED: environment override rejected",
});
expectInvokeErrorMessage(sendInvokeResult, {
message: "BAD-KEY",
});
});
async function expectNestedEnvShellDenied(params: {
depth: number;
markerName: string;

View File

@ -14,7 +14,10 @@ import {
} from "../infra/exec-approvals.js";
import type { ExecHostRequest, ExecHostResponse, ExecHostRunResult } from "../infra/exec-host.js";
import { resolveExecSafeBinRuntimePolicy } from "../infra/exec-safe-bin-runtime-policy.js";
import { sanitizeSystemRunEnvOverrides } from "../infra/host-env-security.js";
import {
inspectHostExecEnvOverrides,
sanitizeSystemRunEnvOverrides,
} from "../infra/host-env-security.js";
import { normalizeSystemRunApprovalPlan } from "../infra/system-run-approval-binding.js";
import { resolveSystemRunCommandRequest } from "../infra/system-run-command.js";
import { logWarn } from "../logger.js";
@ -244,6 +247,34 @@ async function parseSystemRunPhase(
const sessionKey = opts.params.sessionKey?.trim() || "node";
const runId = opts.params.runId?.trim() || crypto.randomUUID();
const suppressNotifyOnExit = opts.params.suppressNotifyOnExit === true;
const envOverrideDiagnostics = inspectHostExecEnvOverrides({
overrides: opts.params.env ?? undefined,
blockPathOverrides: true,
});
if (
envOverrideDiagnostics.rejectedOverrideBlockedKeys.length > 0 ||
envOverrideDiagnostics.rejectedOverrideInvalidKeys.length > 0
) {
const details: string[] = [];
if (envOverrideDiagnostics.rejectedOverrideBlockedKeys.length > 0) {
details.push(
`blocked override keys: ${envOverrideDiagnostics.rejectedOverrideBlockedKeys.join(", ")}`,
);
}
if (envOverrideDiagnostics.rejectedOverrideInvalidKeys.length > 0) {
details.push(
`invalid non-portable override keys: ${envOverrideDiagnostics.rejectedOverrideInvalidKeys.join(", ")}`,
);
}
await opts.sendInvokeResult({
ok: false,
error: {
code: "INVALID_REQUEST",
message: `SYSTEM_RUN_DENIED: environment override rejected (${details.join("; ")})`,
},
});
return null;
}
const envOverrides = sanitizeSystemRunEnvOverrides({
overrides: opts.params.env ?? undefined,
shellWrapper: shellPayload !== null,

View File

@ -3,6 +3,19 @@ import { withEnv } from "../test-utils/env.js";
import { decodeCapturedOutputBuffer, parseWindowsCodePage, sanitizeEnv } from "./invoke.js";
import { buildNodeInvokeResultParams } from "./runner.js";
// Looks up an env value by key, preferring an exact-key hit and falling back
// to a case-insensitive match (Windows env keys are case-insensitive).
// Returns undefined when no key matches.
function getEnvValueCaseInsensitive(
  env: Record<string, string>,
  expectedKey: string,
): string | undefined {
  const exact = env[expectedKey];
  if (exact !== undefined) {
    return exact;
  }
  const target = expectedKey.toUpperCase();
  for (const key of Object.keys(env)) {
    if (key.toUpperCase() === target) {
      return env[key];
    }
  }
  return undefined;
}
describe("node-host sanitizeEnv", () => {
it("ignores PATH overrides", () => {
withEnv({ PATH: "/usr/bin" }, () => {
@ -51,6 +64,13 @@ describe("node-host sanitizeEnv", () => {
expect(env.BASH_ENV).toBeUndefined();
});
});
it("preserves inherited non-portable Windows-style env keys", () => {
withEnv({ "ProgramFiles(x86)": "C:\\Program Files (x86)" }, () => {
const env = sanitizeEnv(undefined);
expect(getEnvValueCaseInsensitive(env, "ProgramFiles(x86)")).toBe("C:\\Program Files (x86)");
});
});
});
describe("node-host output decoding", () => {

View File

@ -0,0 +1,7 @@
// Matrix runtime helpers that are needed internally by the bundled extension
// but are too heavy for the light external runtime-api surface.
// ACP persistent-binding lifecycle and record-resolution helpers.
export { ensureConfiguredAcpBindingReady } from "../acp/persistent-bindings.lifecycle.js";
export { resolveConfiguredAcpBindingRecord } from "../acp/persistent-bindings.resolve.js";
// Optional migration-snapshot creation for Matrix data.
export { maybeCreateMatrixMigrationSnapshot } from "../infra/matrix-migration-snapshot.js";
// Inbound reply dispatch using a settled dispatcher resolved from config.
export { dispatchReplyFromConfigWithSettledDispatcher } from "./inbound-reply-dispatch.js";

View File

@ -0,0 +1,11 @@
// Narrow shared Matrix runtime exports for light runtime-api consumers.
// Mostly type-only re-exports so importers avoid pulling heavy runtime modules.
export type {
  ChannelDirectoryEntry,
  ChannelMessageActionContext,
} from "../channels/plugins/types.js";
export type { OpenClawConfig } from "../config/config.js";
// The one value export: timestamp formatting for zoned date/time display.
export { formatZonedTimestamp } from "../infra/format-time/format-datetime.js";
export type { PluginRuntime, RuntimeLogger } from "../plugins/runtime/types.js";
export type { RuntimeEnv } from "../runtime.js";
export type { WizardPrompter } from "../wizard/prompts.js";

View File

@ -27,8 +27,6 @@ export {
patchAllowlistUsersInConfigEntries,
summarizeMapping,
} from "../channels/allowlists/resolve-utils.js";
export { ensureConfiguredAcpBindingReady } from "../acp/persistent-bindings.lifecycle.js";
export { resolveConfiguredAcpBindingRecord } from "../acp/persistent-bindings.resolve.js";
export { resolveControlCommandGate } from "../channels/command-gating.js";
export type { NormalizedLocation } from "../channels/location.js";
export { formatLocationText, toLocationContext } from "../channels/location.js";
@ -112,7 +110,6 @@ export { ToolPolicySchema } from "../config/zod-schema.agent-runtime.js";
export { MarkdownConfigSchema } from "../config/zod-schema.core.js";
export { formatZonedTimestamp } from "../infra/format-time/format-datetime.js";
export { fetchWithSsrFGuard } from "../infra/net/fetch-guard.js";
export { maybeCreateMatrixMigrationSnapshot } from "../infra/matrix-migration-snapshot.js";
export {
getSessionBindingService,
registerSessionBindingAdapter,
@ -150,7 +147,6 @@ export { readJsonFileWithFallback, writeJsonFileAtomically } from "./json-store.
export { formatResolvedUnresolvedNote } from "./resolution-notes.js";
export { runPluginCommandWithTimeout } from "./run-command.js";
export { createLoggerBackedRuntime, resolveRuntimeEnv } from "./runtime.js";
export { dispatchReplyFromConfigWithSettledDispatcher } from "./inbound-reply-dispatch.js";
export {
buildProbeChannelStatusSummary,
collectStatusIssuesFromLastError,

View File

@ -41,6 +41,7 @@ export {
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF,
resolveCloudflareAiGatewayBaseUrl,
} from "../agents/cloudflare-ai-gateway.js";
export { resolveAnthropicVertexRegion } from "../agents/anthropic-vertex-provider.js";
export {
discoverHuggingfaceModels,
HUGGINGFACE_BASE_URL,

View File

@ -38,7 +38,7 @@ const RUNTIME_API_EXPORT_GUARDS: Record<string, readonly string[]> = {
"extensions/matrix/runtime-api.ts": [
'export * from "./src/auth-precedence.js";',
'export * from "./helper-api.js";',
'export { assertHttpUrlTargetsPrivateNetwork, closeDispatcher, createPinnedDispatcher, resolvePinnedHostnameWithPolicy, ssrfPolicyFromAllowPrivateNetwork, type LookupFn, type SsrFPolicy } from "openclaw/plugin-sdk/infra-runtime";',
'export { assertHttpUrlTargetsPrivateNetwork, closeDispatcher, createPinnedDispatcher, resolvePinnedHostnameWithPolicy, ssrfPolicyFromAllowPrivateNetwork, type LookupFn, type SsrFPolicy } from "openclaw/plugin-sdk/ssrf-runtime";',
'export { setMatrixThreadBindingIdleTimeoutBySessionKey, setMatrixThreadBindingMaxAgeBySessionKey } from "./thread-bindings-runtime.js";',
'export { writeJsonFileAtomically } from "../../src/plugin-sdk/json-store.js";',
'export type { ChannelDirectoryEntry, ChannelMessageActionContext, OpenClawConfig, PluginRuntime, RuntimeLogger, RuntimeEnv, WizardPrompter } from "../../src/plugin-sdk/matrix.js";',

View File

@ -0,0 +1,14 @@
// Narrow SSRF helpers for extensions that need pinned-dispatcher and policy
// utilities without loading the full infra-runtime surface.
// Low-level pinned-DNS dispatcher utilities and their types.
export {
  closeDispatcher,
  createPinnedDispatcher,
  resolvePinnedHostnameWithPolicy,
  type LookupFn,
  type SsrFPolicy,
} from "../infra/net/ssrf.js";
// Policy-level guards built on top of the low-level SSRF utilities.
export {
  assertHttpUrlTargetsPrivateNetwork,
  ssrfPolicyFromAllowPrivateNetwork,
} from "./ssrf-policy.js";

View File

@ -36,6 +36,7 @@ import type {
import * as directoryRuntimeSdk from "openclaw/plugin-sdk/directory-runtime";
import * as infraRuntimeSdk from "openclaw/plugin-sdk/infra-runtime";
import * as lazyRuntimeSdk from "openclaw/plugin-sdk/lazy-runtime";
import * as matrixRuntimeSharedSdk from "openclaw/plugin-sdk/matrix-runtime-shared";
import * as mediaRuntimeSdk from "openclaw/plugin-sdk/media-runtime";
import * as ollamaSetupSdk from "openclaw/plugin-sdk/ollama-setup";
import * as providerAuthSdk from "openclaw/plugin-sdk/provider-auth";
@ -50,7 +51,9 @@ import * as sandboxSdk from "openclaw/plugin-sdk/sandbox";
import * as secretInputSdk from "openclaw/plugin-sdk/secret-input";
import * as selfHostedProviderSetupSdk from "openclaw/plugin-sdk/self-hosted-provider-setup";
import * as setupSdk from "openclaw/plugin-sdk/setup";
import * as ssrfRuntimeSdk from "openclaw/plugin-sdk/ssrf-runtime";
import * as testingSdk from "openclaw/plugin-sdk/testing";
import * as threadBindingsRuntimeSdk from "openclaw/plugin-sdk/thread-bindings-runtime";
import * as webhookIngressSdk from "openclaw/plugin-sdk/webhook-ingress";
import { describe, expect, expectTypeOf, it } from "vitest";
import type { ChannelMessageActionContext } from "../channels/plugins/types.js";
@ -523,6 +526,22 @@ describe("plugin-sdk subpath exports", () => {
expect(typeof conversationRuntimeSdk.createTopLevelChannelReplyToModeResolver).toBe("function");
});
it("exports narrow binding lifecycle helpers from the dedicated subpath", () => {
expect(typeof threadBindingsRuntimeSdk.resolveThreadBindingLifecycle).toBe("function");
});
it("exports narrow matrix runtime helpers from the dedicated subpath", () => {
expect(typeof matrixRuntimeSharedSdk.formatZonedTimestamp).toBe("function");
});
it("exports narrow ssrf helpers from the dedicated subpath", () => {
expect(typeof ssrfRuntimeSdk.closeDispatcher).toBe("function");
expect(typeof ssrfRuntimeSdk.createPinnedDispatcher).toBe("function");
expect(typeof ssrfRuntimeSdk.resolvePinnedHostnameWithPolicy).toBe("function");
expect(typeof ssrfRuntimeSdk.assertHttpUrlTargetsPrivateNetwork).toBe("function");
expect(typeof ssrfRuntimeSdk.ssrfPolicyFromAllowPrivateNetwork).toBe("function");
});
it("exports provider setup helpers from the dedicated subpath", () => {
expect(typeof providerSetupSdk.buildVllmProvider).toBe("function");
expect(typeof providerSetupSdk.discoverOpenAICompatibleSelfHostedProvider).toBe("function");

View File

@ -0,0 +1,9 @@
// Narrow thread-binding lifecycle helpers for extensions that need binding
// expiry and session-binding record types without loading the full
// conversation-runtime surface.
export { resolveThreadBindingLifecycle } from "../channels/thread-bindings-policy.js";
export type {
BindingTargetKind,
SessionBindingRecord,
} from "../infra/outbound/session-binding-service.js";

View File

@ -1,26 +0,0 @@
import bravePlugin from "../../extensions/brave/index.js";
import firecrawlPlugin from "../../extensions/firecrawl/index.js";
import googlePlugin from "../../extensions/google/index.js";
import moonshotPlugin from "../../extensions/moonshot/index.js";
import perplexityPlugin from "../../extensions/perplexity/index.js";
import tavilyPlugin from "../../extensions/tavily/index.js";
import xaiPlugin from "../../extensions/xai/index.js";
import type { OpenClawPluginApi } from "./types.js";
type RegistrablePlugin = {
id: string;
register: (api: OpenClawPluginApi) => void;
};
export const bundledWebSearchPluginRegistrations: ReadonlyArray<{
plugin: RegistrablePlugin;
credentialValue: unknown;
}> = [
{ plugin: bravePlugin, credentialValue: "BSA-test" },
{ plugin: firecrawlPlugin, credentialValue: "fc-test" },
{ plugin: googlePlugin, credentialValue: "AIza-test" },
{ plugin: moonshotPlugin, credentialValue: "sk-test" },
{ plugin: perplexityPlugin, credentialValue: "pplx-test" },
{ plugin: tavilyPlugin, credentialValue: "tvly-test" },
{ plugin: xaiPlugin, credentialValue: "xai-test" },
];

View File

@ -1,26 +1,61 @@
import { bundledWebSearchPluginRegistrations } from "./bundled-web-search-registry.js";
import { bundledWebSearchPluginRegistrations } from "../bundled-web-search-registry.js";
import { capturePluginRegistration } from "./captured-registration.js";
import type { PluginLoadOptions } from "./loader.js";
import { loadPluginManifestRegistry } from "./manifest-registry.js";
import type { PluginWebSearchProviderEntry } from "./types.js";
export const BUNDLED_WEB_SEARCH_PLUGIN_IDS = bundledWebSearchPluginRegistrations
.map((entry) => entry.plugin.id)
.toSorted((left, right) => left.localeCompare(right));
const bundledWebSearchPluginIdSet = new Set<string>(BUNDLED_WEB_SEARCH_PLUGIN_IDS);
type BundledWebSearchProviderEntry = PluginWebSearchProviderEntry & { pluginId: string };
type BundledWebSearchPluginRegistration = (typeof bundledWebSearchPluginRegistrations)[number];
let bundledWebSearchProvidersCache: BundledWebSearchProviderEntry[] | null = null;
let bundledWebSearchPluginIdsCache: string[] | null = null;
function resolveBundledWebSearchPlugin(
entry: BundledWebSearchPluginRegistration,
): BundledWebSearchPluginRegistration["plugin"] | null {
try {
return entry.plugin;
} catch {
return null;
}
}
function listBundledWebSearchPluginRegistrations() {
return bundledWebSearchPluginRegistrations
.map((entry) => {
const plugin = resolveBundledWebSearchPlugin(entry);
return plugin ? { ...entry, plugin } : null;
})
.filter(
(
entry,
): entry is BundledWebSearchPluginRegistration & {
plugin: BundledWebSearchPluginRegistration["plugin"];
} => Boolean(entry),
);
}
function loadBundledWebSearchPluginIds(): string[] {
if (!bundledWebSearchPluginIdsCache) {
bundledWebSearchPluginIdsCache = listBundledWebSearchPluginRegistrations()
.map(({ plugin }) => plugin.id)
.toSorted((left, right) => left.localeCompare(right));
}
return bundledWebSearchPluginIdsCache;
}
export function listBundledWebSearchPluginIds(): string[] {
return loadBundledWebSearchPluginIds();
}
function loadBundledWebSearchProviders(): BundledWebSearchProviderEntry[] {
if (!bundledWebSearchProvidersCache) {
bundledWebSearchProvidersCache = bundledWebSearchPluginRegistrations.flatMap(({ plugin }) =>
capturePluginRegistration(plugin).webSearchProviders.map((provider) => ({
...provider,
pluginId: plugin.id,
})),
bundledWebSearchProvidersCache = listBundledWebSearchPluginRegistrations().flatMap(
({ plugin }) =>
capturePluginRegistration(plugin).webSearchProviders.map((provider) => ({
...provider,
pluginId: plugin.id,
})),
);
}
return bundledWebSearchProvidersCache;
@ -36,6 +71,7 @@ export function resolveBundledWebSearchPluginIds(params: {
workspaceDir: params.workspaceDir,
env: params.env,
});
const bundledWebSearchPluginIdSet = new Set<string>(loadBundledWebSearchPluginIds());
return registry.plugins
.filter((plugin) => plugin.origin === "bundled" && bundledWebSearchPluginIdSet.has(plugin.id))
.map((plugin) => plugin.id)

View File

@ -34,7 +34,7 @@ import volcenginePlugin from "../../../extensions/volcengine/index.js";
import xaiPlugin from "../../../extensions/xai/index.js";
import xiaomiPlugin from "../../../extensions/xiaomi/index.js";
import zaiPlugin from "../../../extensions/zai/index.js";
import { bundledWebSearchPluginRegistrations } from "../bundled-web-search-registry.js";
import { bundledWebSearchPluginRegistrations } from "../../bundled-web-search-registry.js";
import { createCapturedPluginRegistration } from "../captured-registration.js";
import { resolvePluginProviders } from "../providers.js";
import type {

View File

@ -1,7 +1,7 @@
import type { OpenClawConfig } from "../config/config.js";
import { resolveSecretInputRef } from "../config/types.secrets.js";
import {
BUNDLED_WEB_SEARCH_PLUGIN_IDS,
listBundledWebSearchPluginIds,
resolveBundledWebSearchPluginId,
} from "../plugins/bundled-web-search.js";
import type {
@ -82,7 +82,7 @@ function hasCustomWebSearchPluginRisk(config: OpenClawConfig): boolean {
return true;
}
const bundledPluginIds = new Set<string>(BUNDLED_WEB_SEARCH_PLUGIN_IDS);
const bundledPluginIds = new Set<string>(listBundledWebSearchPluginIds());
const hasNonBundledPluginId = (pluginId: string) => !bundledPluginIds.has(pluginId.trim());
if (Array.isArray(plugins.allow) && plugins.allow.some(hasNonBundledPluginId)) {
return true;

View File

@ -1100,29 +1100,29 @@ description: test skill
},
] as const;
for (const testCase of cases) {
if (!testCase.supported) {
continue;
}
await Promise.all(
cases
.filter((testCase) => testCase.supported)
.map(async (testCase) => {
const fixture = await testCase.setup();
const configPath = path.join(fixture.stateDir, "openclaw.json");
await fs.writeFile(configPath, "{}\n", "utf-8");
if (!isWindows) {
await fs.chmod(configPath, 0o600);
}
const fixture = await testCase.setup();
const configPath = path.join(fixture.stateDir, "openclaw.json");
await fs.writeFile(configPath, "{}\n", "utf-8");
if (!isWindows) {
await fs.chmod(configPath, 0o600);
}
const res = await runSecurityAudit({
config: { agents: { defaults: { workspace: fixture.workspaceDir } } },
includeFilesystem: true,
includeChannelSecurity: false,
stateDir: fixture.stateDir,
configPath,
execDockerRawFn: execDockerRawUnavailable,
});
const res = await runSecurityAudit({
config: { agents: { defaults: { workspace: fixture.workspaceDir } } },
includeFilesystem: true,
includeChannelSecurity: false,
stateDir: fixture.stateDir,
configPath,
execDockerRawFn: execDockerRawUnavailable,
});
testCase.assert(res, fixture);
}
testCase.assert(res, fixture);
}),
);
});
it("scores small-model risk by tool/sandbox exposure", async () => {
@ -1554,20 +1554,24 @@ description: test skill
},
] as const;
for (const testCase of cases) {
const res = await audit(testCase.cfg);
if ("expectedFinding" in testCase) {
expect(res.findings, testCase.name).toEqual(
expect.arrayContaining([expect.objectContaining(testCase.expectedFinding)]),
await Promise.all(
cases.map(async (testCase) => {
const res = await audit(testCase.cfg);
if ("expectedFinding" in testCase) {
expect(res.findings, testCase.name).toEqual(
expect.arrayContaining([expect.objectContaining(testCase.expectedFinding)]),
);
}
const finding = res.findings.find(
(f) => f.checkId === "config.insecure_or_dangerous_flags",
);
}
const finding = res.findings.find((f) => f.checkId === "config.insecure_or_dangerous_flags");
expect(finding, testCase.name).toBeTruthy();
expect(finding?.severity, testCase.name).toBe("warn");
for (const detail of testCase.expectedDangerousDetails) {
expect(finding?.detail, `${testCase.name}:${detail}`).toContain(detail);
}
}
expect(finding, testCase.name).toBeTruthy();
expect(finding?.severity, testCase.name).toBe("warn");
for (const detail of testCase.expectedDangerousDetails) {
expect(finding?.detail, `${testCase.name}:${detail}`).toContain(detail);
}
}),
);
});
it.each([
@ -3116,17 +3120,19 @@ description: test skill
},
] as const;
for (const testCase of cases) {
const res = await testCase.run();
const expectedPresent = "expectedPresent" in testCase ? testCase.expectedPresent : [];
for (const checkId of expectedPresent) {
expect(hasFinding(res, checkId, "warn"), `${testCase.name}:${checkId}`).toBe(true);
}
const expectedAbsent = "expectedAbsent" in testCase ? testCase.expectedAbsent : [];
for (const checkId of expectedAbsent) {
expect(hasFinding(res, checkId), `${testCase.name}:${checkId}`).toBe(false);
}
}
await Promise.all(
cases.map(async (testCase) => {
const res = await testCase.run();
const expectedPresent = "expectedPresent" in testCase ? testCase.expectedPresent : [];
for (const checkId of expectedPresent) {
expect(hasFinding(res, checkId, "warn"), `${testCase.name}:${checkId}`).toBe(true);
}
const expectedAbsent = "expectedAbsent" in testCase ? testCase.expectedAbsent : [];
for (const checkId of expectedAbsent) {
expect(hasFinding(res, checkId), `${testCase.name}:${checkId}`).toBe(false);
}
}),
);
});
it("evaluates extension tool reachability findings", async () => {
@ -3339,9 +3345,17 @@ description: test skill
},
] as const;
for (const testCase of cases) {
const result = await testCase.run();
testCase.assert(result as never);
await Promise.all(
cases.slice(0, -1).map(async (testCase) => {
const result = await testCase.run();
testCase.assert(result as never);
}),
);
const scanFailureCase = cases.at(-1);
if (scanFailureCase) {
const result = await scanFailureCase.run();
scanFailureCase.assert(result as never);
}
});

File diff suppressed because it is too large Load Diff