fix: match provider-prefixed model IDs against original alias and resolve lint errors
Match config model IDs using the original (pre-normalization) provider name (e.g. nvidia-api/meta-llama) so the image input fallback works for aliased providers. Remove unnecessary type assertion in image.ts and rename unused variable in image-tool.test.ts to satisfy oxlint. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
86231b5dbf
commit
35a29f71f2
@@ -982,7 +982,7 @@ describe("image tool custom provider fallback (#33185)", () => {
|
||||
)
|
||||
.mockResolvedValue({ text: "Image 1:\nfirst\n\nImage 2:\nsecond", model: "Qwen3.5" });
|
||||
|
||||
const res = await tool.execute("t1", {
|
||||
const _res = await tool.execute("t1", {
|
||||
prompt: "Compare these images.",
|
||||
images: [`data:image/png;base64,${pngB64}`, `data:image/png;base64,${pngB64}`],
|
||||
});
|
||||
|
||||
@@ -294,6 +294,71 @@ describe("describeImageWithModel", () => {
|
||||
expect(completeMock).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("matches provider-prefixed model IDs against the original provider alias (#33185)", async () => {
|
||||
// When provider is "nvidia-api", resolvedRef.provider becomes "nvidia" after
|
||||
// normalization, but the user's config stores "nvidia-api/meta-llama". The
|
||||
// lookup must also try the original params.provider prefix.
|
||||
resolveModelWithRegistryMock.mockReturnValue({
|
||||
provider: "nvidia",
|
||||
id: "meta-llama",
|
||||
api: "openai-completions",
|
||||
baseUrl: "https://integrate.api.nvidia.com/v1",
|
||||
input: ["text"],
|
||||
contextWindow: 128000,
|
||||
maxTokens: 4096,
|
||||
});
|
||||
completeMock.mockResolvedValue({
|
||||
role: "assistant",
|
||||
api: "openai-completions",
|
||||
provider: "nvidia",
|
||||
model: "meta-llama",
|
||||
stopReason: "stop",
|
||||
timestamp: Date.now(),
|
||||
content: [{ type: "text", text: "nvidia vision ok" }],
|
||||
});
|
||||
|
||||
const cfg = {
|
||||
models: {
|
||||
providers: {
|
||||
"nvidia-api": {
|
||||
baseUrl: "https://integrate.api.nvidia.com/v1",
|
||||
apiKey: "nvidia-key", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [
|
||||
{
|
||||
id: "nvidia-api/meta-llama",
|
||||
name: "meta-llama",
|
||||
reasoning: false,
|
||||
input: ["image", "text"] as Array<"text" | "image">,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 4096,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = await describeImageWithModel({
|
||||
cfg,
|
||||
agentDir: "/tmp/openclaw-agent",
|
||||
provider: "nvidia-api",
|
||||
model: "meta-llama",
|
||||
buffer: Buffer.from("png-bytes"),
|
||||
fileName: "image.png",
|
||||
mime: "image/png",
|
||||
prompt: "Describe the image.",
|
||||
timeoutMs: 1000,
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
text: "nvidia vision ok",
|
||||
model: "meta-llama",
|
||||
});
|
||||
expect(completeMock).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("throws Unknown model when custom provider model is not resolvable at all (#33185)", async () => {
|
||||
resolveModelWithRegistryMock.mockReturnValue(undefined);
|
||||
|
||||
|
||||
@@ -54,13 +54,14 @@ async function resolveImageRuntime(params: {
|
||||
// Use the full model resolution stack (registry → inline config → plugin →
|
||||
// ad-hoc provider config) instead of bare modelRegistry.find(), which misses
|
||||
// user-configured custom provider models (e.g. vllm, nvidia-api, iflow).
|
||||
let model = (resolveModelWithRegistry({
|
||||
provider: resolvedRef.provider,
|
||||
modelId: resolvedRef.model,
|
||||
modelRegistry,
|
||||
cfg: params.cfg,
|
||||
agentDir: params.agentDir,
|
||||
}) ?? null) as Model<Api> | null;
|
||||
let model: Model<Api> | null =
|
||||
resolveModelWithRegistry({
|
||||
provider: resolvedRef.provider,
|
||||
modelId: resolvedRef.model,
|
||||
modelRegistry,
|
||||
cfg: params.cfg,
|
||||
agentDir: params.agentDir,
|
||||
}) ?? null;
|
||||
|
||||
if (!model) {
|
||||
throw new Error(`Unknown model: ${resolvedRef.provider}/${resolvedRef.model}`);
|
||||
@ -71,13 +72,19 @@ async function resolveImageRuntime(params: {
|
||||
// ID matching which can miss provider-prefixed IDs (e.g. "vllm/Qwen3.5" in
|
||||
// config vs "Qwen3.5" after model ref parsing). Check the user's configured
|
||||
// model definition for explicit image support so the tool works correctly.
|
||||
// We also match against the original params.provider (pre-normalization) since
|
||||
// configs may use aliases like "nvidia-api/meta/..." while resolvedRef.provider
|
||||
// is normalized to "nvidia".
|
||||
if (!model.input?.includes("image")) {
|
||||
const providerConfig = findNormalizedProviderValue(
|
||||
params.cfg?.models?.providers,
|
||||
resolvedRef.provider,
|
||||
);
|
||||
const configuredModel = providerConfig?.models?.find(
|
||||
(m) => m.id === resolvedRef.model || m.id === `${resolvedRef.provider}/${resolvedRef.model}`,
|
||||
(m) =>
|
||||
m.id === resolvedRef.model ||
|
||||
m.id === `${resolvedRef.provider}/${resolvedRef.model}` ||
|
||||
m.id === `${params.provider}/${resolvedRef.model}`,
|
||||
);
|
||||
if (configuredModel?.input?.includes("image")) {
|
||||
model = {
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user