* Onboard: add Mistral auth choice and CLI flags * Onboard/Auth: add Mistral provider config defaults * Auth choice: wire Mistral API-key flow * Onboard non-interactive: support --mistral-api-key * Media understanding: add Mistral Voxtral audio provider * Changelog: note Mistral onboarding and media support * Docs: add Mistral provider and onboarding/media references * Tests: cover Mistral media registry/defaults and auth mapping * Memory: add Mistral embeddings provider support * Onboarding: refresh Mistral model metadata * Docs: document Mistral embeddings and endpoints * Memory: persist Mistral embedding client state in managers * Memory: add regressions for mistral provider wiring * Gateway: add live tool probe retry helper * Gateway: cover live tool probe retry helper * Gateway: retry malformed live tool-read probe responses * Memory: support plain-text batch error bodies * Tests: add Mistral Voxtral live transcription smoke * Docs: add Mistral live audio test command * Revert: remove Mistral live voice test and docs entry * Onboard: re-export Mistral default model ref from models * Changelog: credit joeVenner for Mistral work * fix: include Mistral in auto audio key fallback * Update CHANGELOG.md * Update CHANGELOG.md --------- Co-authored-by: Shakker <shakkerdroid@gmail.com>
61 lines
1.6 KiB
TypeScript
import type { MediaUnderstandingCapability } from "./types.js";
|
|
|
|
const MB = 1024 * 1024;
|
|
|
|
export const DEFAULT_MAX_CHARS = 500;
|
|
export const DEFAULT_MAX_CHARS_BY_CAPABILITY: Record<
|
|
MediaUnderstandingCapability,
|
|
number | undefined
|
|
> = {
|
|
image: DEFAULT_MAX_CHARS,
|
|
audio: undefined,
|
|
video: DEFAULT_MAX_CHARS,
|
|
};
|
|
export const DEFAULT_MAX_BYTES: Record<MediaUnderstandingCapability, number> = {
|
|
image: 10 * MB,
|
|
audio: 20 * MB,
|
|
video: 50 * MB,
|
|
};
|
|
export const DEFAULT_TIMEOUT_SECONDS: Record<MediaUnderstandingCapability, number> = {
|
|
image: 60,
|
|
audio: 60,
|
|
video: 120,
|
|
};
|
|
export const DEFAULT_PROMPT: Record<MediaUnderstandingCapability, string> = {
|
|
image: "Describe the image.",
|
|
audio: "Transcribe the audio.",
|
|
video: "Describe the video.",
|
|
};
|
|
export const DEFAULT_VIDEO_MAX_BASE64_BYTES = 70 * MB;
|
|
export const DEFAULT_AUDIO_MODELS: Record<string, string> = {
|
|
groq: "whisper-large-v3-turbo",
|
|
openai: "gpt-4o-mini-transcribe",
|
|
deepgram: "nova-3",
|
|
mistral: "voxtral-mini-latest",
|
|
};
|
|
|
|
export const AUTO_AUDIO_KEY_PROVIDERS = [
|
|
"openai",
|
|
"groq",
|
|
"deepgram",
|
|
"google",
|
|
"mistral",
|
|
] as const;
|
|
export const AUTO_IMAGE_KEY_PROVIDERS = [
|
|
"openai",
|
|
"anthropic",
|
|
"google",
|
|
"minimax",
|
|
"zai",
|
|
] as const;
|
|
export const AUTO_VIDEO_KEY_PROVIDERS = ["google"] as const;
|
|
export const DEFAULT_IMAGE_MODELS: Record<string, string> = {
|
|
openai: "gpt-5-mini",
|
|
anthropic: "claude-opus-4-6",
|
|
google: "gemini-3-flash-preview",
|
|
minimax: "MiniMax-VL-01",
|
|
zai: "glm-4.6v",
|
|
};
|
|
export const CLI_OUTPUT_MAX_BUFFER = 5 * MB;
|
|
export const DEFAULT_MEDIA_CONCURRENCY = 2;
|