fix: wire modelstudio env discovery (#40634) (thanks @pomelo-nwu)

This commit is contained in:
Peter Steinberger 2026-03-10 19:58:11 +00:00
parent 95eaa08781
commit 6d4241cbd9
9 changed files with 168 additions and 0 deletions

View File

@ -74,6 +74,7 @@ Docs: https://docs.openclaw.ai
- Discord/Telegram outbound runtime config: thread runtime-resolved config through Discord and Telegram send paths so SecretRef-based credentials stay resolved during message delivery. (#42352) Thanks @joshavant.
- Secrets/SecretRef: reject exec SecretRef traversal ids across schema, runtime, and gateway. (#42370) Thanks @joshavant.
- Telegram/docs: clarify that `channels.telegram.groups` allowlists chats while `groupAllowFrom` allowlists users inside those chats, and point invalid negative chat IDs at the right config key. (#42451) Thanks @altaywtf.
- Models/Alibaba Cloud Model Studio: wire `MODELSTUDIO_API_KEY` through shared env auth, implicit provider discovery, and shell-env fallback so onboarding works outside the wizard too. (#40634) Thanks @pomelo-nwu.
## 2026.3.8

View File

@ -32,6 +32,7 @@ export const PROVIDER_ENV_API_KEY_CANDIDATES: Record<string, string[]> = {
mistral: ["MISTRAL_API_KEY"],
together: ["TOGETHER_API_KEY"],
qianfan: ["QIANFAN_API_KEY"],
modelstudio: ["MODELSTUDIO_API_KEY"],
ollama: ["OLLAMA_API_KEY"],
vllm: ["VLLM_API_KEY"],
kilocode: ["KILOCODE_API_KEY"],

View File

@ -230,6 +230,21 @@ describe("getApiKeyForModel", () => {
});
});
it("resolves Model Studio API key from env", async () => {
  // The pragma must sit on the same line as the secret literal, otherwise
  // secret scanners still flag it; it previously floated inside the callback.
  await withEnvAsync(
    { [envVar("MODELSTUDIO", "API", "KEY")]: "modelstudio-test-key" }, // pragma: allowlist secret
    async () => {
      const resolved = await resolveApiKeyForProvider({
        provider: "modelstudio",
        store: { version: 1, profiles: {} },
      });
      // Value comes straight from the env var; source names where it was found.
      expect(resolved.apiKey).toBe("modelstudio-test-key");
      expect(resolved.source).toContain("MODELSTUDIO_API_KEY");
    },
  );
});
it("resolves synthetic local auth key for configured ollama provider without apiKey", async () => {
await withEnvAsync({ OLLAMA_API_KEY: undefined }, async () => {
const resolved = await resolveApiKeyForProvider({

View File

@ -101,6 +101,7 @@ export const MODELS_CONFIG_IMPLICIT_ENV_VARS = [
"OPENROUTER_API_KEY",
"PI_CODING_AGENT_DIR",
"QIANFAN_API_KEY",
"MODELSTUDIO_API_KEY",
"QWEN_OAUTH_TOKEN",
"QWEN_PORTAL_API_KEY",
"SYNTHETIC_API_KEY",

View File

@ -0,0 +1,32 @@
import { mkdtempSync, rmSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { describe, expect, it } from "vitest";
import { withEnvAsync } from "../test-utils/env.js";
import { resolveImplicitProvidersForTest } from "./models-config.e2e-harness.js";
import { buildModelStudioProvider } from "./models-config.providers.js";
const modelStudioApiKeyEnv = ["MODELSTUDIO_API", "KEY"].join("_");
describe("Model Studio implicit provider", () => {
it("should include modelstudio when MODELSTUDIO_API_KEY is configured", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
const modelStudioApiKey = "test-key"; // pragma: allowlist secret
await withEnvAsync({ [modelStudioApiKeyEnv]: modelStudioApiKey }, async () => {
const providers = await resolveImplicitProvidersForTest({ agentDir });
expect(providers?.modelstudio).toBeDefined();
expect(providers?.modelstudio?.apiKey).toBe("MODELSTUDIO_API_KEY");
expect(providers?.modelstudio?.baseUrl).toBe("https://coding-intl.dashscope.aliyuncs.com/v1");
});
});
it("should build the static Model Studio provider catalog", () => {
const provider = buildModelStudioProvider();
const modelIds = provider.models.map((model) => model.id);
expect(provider.api).toBe("openai-completions");
expect(provider.baseUrl).toBe("https://coding-intl.dashscope.aliyuncs.com/v1");
expect(modelIds).toContain("qwen3.5-plus");
expect(modelIds).toContain("qwen3-coder-plus");
expect(modelIds).toContain("kimi-k2.5");
});
});

View File

@ -137,6 +137,90 @@ const QIANFAN_DEFAULT_COST = {
cacheWrite: 0,
};
export const MODELSTUDIO_BASE_URL = "https://coding-intl.dashscope.aliyuncs.com/v1";
export const MODELSTUDIO_DEFAULT_MODEL_ID = "qwen3.5-plus";
// Zero-cost placeholder shared (by reference) by every Model Studio catalog entry.
const MODELSTUDIO_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
// Compact catalog rows: [model id, accepted inputs, context window, max output tokens].
// Every entry's display name mirrors its id and none of them enable reasoning,
// so those fields are filled in uniformly when the rows are expanded below.
const MODELSTUDIO_CATALOG_ROWS = [
  ["qwen3.5-plus", ["text", "image"], 1_000_000, 65_536],
  ["qwen3-max-2026-01-23", ["text"], 262_144, 65_536],
  ["qwen3-coder-next", ["text"], 262_144, 65_536],
  ["qwen3-coder-plus", ["text"], 1_000_000, 65_536],
  ["MiniMax-M2.5", ["text"], 1_000_000, 65_536],
  ["glm-5", ["text"], 202_752, 16_384],
  ["glm-4.7", ["text"], 202_752, 16_384],
  ["kimi-k2.5", ["text", "image"], 262_144, 32_768],
] as const;
const MODELSTUDIO_MODEL_CATALOG: ReadonlyArray<ProviderModelConfig> = MODELSTUDIO_CATALOG_ROWS.map(
  ([id, input, contextWindow, maxTokens]) => ({
    id,
    name: id,
    reasoning: false,
    // Fresh array per entry (the rows are readonly tuples under `as const`).
    input: [...input],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow,
    maxTokens,
  }),
);
// Defaults for the NVIDIA provider catalog (endpoint, model id, context window).
const NVIDIA_BASE_URL = "https://integrate.api.nvidia.com/v1";
const NVIDIA_DEFAULT_MODEL_ID = "nvidia/llama-3.1-nemotron-70b-instruct";
const NVIDIA_DEFAULT_CONTEXT_WINDOW = 131072;
@ -384,6 +468,14 @@ export function buildQianfanProvider(): ProviderConfig {
};
}
/**
 * Builds the static Alibaba Cloud Model Studio provider config.
 *
 * Each catalog model is shallow-copied so callers may tweak their own entry
 * without mutating the shared MODELSTUDIO_MODEL_CATALOG constant (the nested
 * cost object is still shared by reference).
 */
export function buildModelStudioProvider(): ProviderConfig {
  const models = MODELSTUDIO_MODEL_CATALOG.map((model) => ({ ...model }));
  return {
    baseUrl: MODELSTUDIO_BASE_URL,
    api: "openai-completions",
    models,
  };
}
export function buildNvidiaProvider(): ProviderConfig {
return {
baseUrl: NVIDIA_BASE_URL,

View File

@ -29,6 +29,7 @@ import {
buildKilocodeProvider,
buildMinimaxPortalProvider,
buildMinimaxProvider,
buildModelStudioProvider,
buildMoonshotProvider,
buildNvidiaProvider,
buildOpenAICodexProvider,
@ -46,8 +47,11 @@ export {
buildKimiCodingProvider,
buildKilocodeProvider,
buildNvidiaProvider,
buildModelStudioProvider,
buildQianfanProvider,
buildXiaomiProvider,
MODELSTUDIO_BASE_URL,
MODELSTUDIO_DEFAULT_MODEL_ID,
QIANFAN_BASE_URL,
QIANFAN_DEFAULT_MODEL_ID,
XIAOMI_DEFAULT_MODEL_ID,
@ -512,6 +516,7 @@ const SIMPLE_IMPLICIT_PROVIDER_LOADERS: ImplicitProviderLoader[] = [
apiKey,
})),
withApiKey("qianfan", async ({ apiKey }) => ({ ...buildQianfanProvider(), apiKey })),
withApiKey("modelstudio", async ({ apiKey }) => ({ ...buildModelStudioProvider(), apiKey })),
withApiKey("openrouter", async ({ apiKey }) => ({ ...buildOpenrouterProvider(), apiKey })),
withApiKey("nvidia", async ({ apiKey }) => ({ ...buildNvidiaProvider(), apiKey })),
withApiKey("kilocode", async ({ apiKey }) => ({

View File

@ -611,6 +611,26 @@ describe("onboard (non-interactive): provider auth", () => {
});
});
it("infers Model Studio auth choice from --modelstudio-api-key and sets default model", async () => {
  await withOnboardEnv("openclaw-onboard-modelstudio-infer-", async (env) => {
    const apiKey = "modelstudio-test-key"; // pragma: allowlist secret
    const profileId = "modelstudio:default";
    const cfg = await runOnboardingAndReadConfig(env, { modelstudioApiKey: apiKey });
    // Supplying only the API key flag should select the provider and api_key mode.
    const profile = cfg.auth?.profiles?.[profileId];
    expect(profile?.provider).toBe("modelstudio");
    expect(profile?.mode).toBe("api_key");
    // Provider wiring: Model Studio base URL plus the default primary model.
    expect(cfg.models?.providers?.modelstudio?.baseUrl).toBe(
      "https://coding-intl.dashscope.aliyuncs.com/v1",
    );
    expect(cfg.agents?.defaults?.model?.primary).toBe("modelstudio/qwen3.5-plus");
    await expectApiKeyProfile({ profileId, provider: "modelstudio", key: apiKey });
  });
});
it("configures a custom provider from non-interactive flags", async () => {
await withOnboardEnv("openclaw-onboard-custom-provider-", async ({ configPath, runtime }) => {
await runNonInteractiveOnboardingWithDefaults(runtime, {

View File

@ -68,6 +68,7 @@ const SHELL_ENV_EXPECTED_KEYS = [
"OPENROUTER_API_KEY",
"AI_GATEWAY_API_KEY",
"MINIMAX_API_KEY",
"MODELSTUDIO_API_KEY",
"SYNTHETIC_API_KEY",
"KILOCODE_API_KEY",
"ELEVENLABS_API_KEY",