fix: refine synthetic provider + minimax probes
@@ -66,6 +66,7 @@
- CLI/Onboarding: `clawdbot dashboard` prints/copies the tokenized Control UI link and opens it; onboarding now auto-opens the dashboard with your token and keeps the link in the summary.
- Commands: native slash commands now default to `"auto"` (on for Discord/Telegram, off for Slack) with per-provider overrides (`discord/telegram/slack.commands.native`); docs updated (see the config sketch after this list).
- Sandbox: allow Docker bind mounts via `docker.binds`; merges global + per-agent binds (per-agent binds are ignored under shared scope) for custom host paths. (#790 — thanks @akonyer)
- Models: add Synthetic provider (Anthropic-compatible) and trim legacy MiniMax M2 from the default catalogs. (#811 — thanks @siraht)

### Fixes

- Auto-reply: inline `/status` now honors allowlists (authorized senders get the command stripped and answered inline; unauthorized senders' text is left for the agent) to match the command-gating tests.

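A minimal sketch of the two settings above. The field names follow the changelog wording, not a verified config schema; treat the shape as an assumption.

```ts
// Hypothetical clawdbot config fragment (shape assumed from the entries above).
const config = {
  discord: { commands: { native: true } }, // per-provider override of the "auto" default
  slack: { commands: { native: false } },
  docker: {
    // Global bind mounts, merged with per-agent binds
    // (per-agent binds are ignored under shared scope).
    binds: ["/srv/models:/models:ro"],
  },
};
```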
@@ -79,7 +79,6 @@ All models below use cost `0` (input/output/cache).
 | `hf:deepseek-ai/DeepSeek-V3.2` | 159000 | 8192 | false | text |
 | `hf:meta-llama/Llama-3.3-70B-Instruct` | 128000 | 8192 | false | text |
 | `hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8` | 524000 | 8192 | false | text |
-| `hf:MiniMaxAI/MiniMax-M2` | 192000 | 65536 | false | text |
 | `hf:moonshotai/Kimi-K2-Instruct-0905` | 256000 | 8192 | false | text |
 | `hf:openai/gpt-oss-120b` | 128000 | 8192 | false | text |
 | `hf:Qwen/Qwen3-235B-A22B-Instruct-2507` | 256000 | 8192 | false | text |

@@ -20,6 +20,10 @@ docker run --rm -t \
  --entrypoint bash \
  -e COREPACK_ENABLE_DOWNLOAD_PROMPT=0 \
  -e HOME=/home/node \
  -e CLAWDBOT_LIVE_TEST=1 \
  -e CLAWDBOT_LIVE_GATEWAY_MODELS="${CLAWDBOT_LIVE_GATEWAY_MODELS:-all}" \
  -e CLAWDBOT_LIVE_GATEWAY_PROVIDERS="${CLAWDBOT_LIVE_GATEWAY_PROVIDERS:-}" \
  -e CLAWDBOT_LIVE_GATEWAY_MODEL_TIMEOUT_MS="${CLAWDBOT_LIVE_GATEWAY_MODEL_TIMEOUT_MS:-}" \
  -v "$CONFIG_DIR":/home/node/.clawdbot \
  -v "$WORKSPACE_DIR":/home/node/clawd \
  "${PROFILE_MOUNT[@]}" \

@@ -21,8 +21,10 @@ docker run --rm -t \
  -e COREPACK_ENABLE_DOWNLOAD_PROMPT=0 \
  -e HOME=/home/node \
  -e CLAWDBOT_LIVE_TEST=1 \
  -e CLAWDBOT_LIVE_ALL_MODELS=1 \
  -e CLAWDBOT_LIVE_MODELS="${CLAWDBOT_LIVE_MODELS:-all}" \
  -e CLAWDBOT_LIVE_PROVIDERS="${CLAWDBOT_LIVE_PROVIDERS:-}" \
  -e CLAWDBOT_LIVE_MODEL_TIMEOUT_MS="${CLAWDBOT_LIVE_MODEL_TIMEOUT_MS:-}" \
  -e CLAWDBOT_LIVE_REQUIRE_PROFILE_KEYS="${CLAWDBOT_LIVE_REQUIRE_PROFILE_KEYS:-}" \
  -v "$CONFIG_DIR":/home/node/.clawdbot \
  -v "$WORKSPACE_DIR":/home/node/clawd \
  "${PROFILE_MOUNT[@]}" \

@@ -48,3 +48,14 @@ export function isAnthropicRateLimitError(message: string): boolean {
   if (lower.includes("429")) return true;
   return false;
 }
+
+export function isAnthropicBillingError(message: string): boolean {
+  const lower = message.toLowerCase();
+  if (lower.includes("credit balance")) return true;
+  if (lower.includes("insufficient credit")) return true;
+  if (lower.includes("insufficient credits")) return true;
+  if (lower.includes("payment required")) return true;
+  if (lower.includes("billing") && lower.includes("disabled")) return true;
+  if (lower.includes("402")) return true;
+  return false;
+}

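A quick usage sketch for the new helper; the error string is invented for illustration, only the function name and behavior come from the diff above.

```ts
// Classify an Anthropic error message before deciding how a live probe reacts.
const message = 'Anthropic error 402: "your credit balance is too low"';
if (isAnthropicBillingError(message)) {
  // Billing problems get retried with the next key (or skipped), never failed hard.
}
```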
@@ -20,6 +20,7 @@ const GOOGLE_PREFIXES = ["gemini-3"];
 const ZAI_PREFIXES = ["glm-4.7"];
 const MINIMAX_PREFIXES = ["minimax-m2.1"];
 const XAI_PREFIXES = ["grok-4"];
+const SYNTHETIC_PREFIXES = ["hf:minimaxai/minimax-m2.1"];

 function matchesPrefix(id: string, prefixes: string[]): boolean {
   return prefixes.some((prefix) => id.startsWith(prefix));
@@ -73,6 +74,10 @@ export function isModernModelRef(ref: ModelRef): boolean {
     return matchesPrefix(id, XAI_PREFIXES);
   }

+  if (provider === "synthetic") {
+    return matchesPrefix(id, SYNTHETIC_PREFIXES);
+  }
+
   if (provider === "openrouter" || provider === "opencode") {
     return matchesAny(id, [
       ...ANTHROPIC_PREFIXES,

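The effect of the new branch, sketched below. The `ModelRef` shape is an assumption (the real type lives in this module); only the synthetic prefix list is taken from the diff.

```ts
// Assumed shape for illustration; ids are compared lowercased.
type ModelRefSketch = { provider: string; id: string };
const probe: ModelRefSketch = { provider: "synthetic", id: "hf:minimaxai/minimax-m2.1" };
// isModernModelRef(probe)                      -> true  (matches SYNTHETIC_PREFIXES)
// { provider: "synthetic", id: "hf:zai-org/glm-4.7" } -> false (filtered out of live runs)
```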
@@ -17,7 +17,7 @@ describeLive("minimax live", () => {
       api: "anthropic-messages",
       provider: "minimax",
       baseUrl: MINIMAX_BASE_URL,
-      reasoning: MINIMAX_MODEL === "MiniMax-M2",
+      reasoning: false,
       input: ["text"],
       // Pricing: placeholder values (per 1M tokens, multiplied by 1000 for display)
       cost: { input: 15, output: 60, cacheRead: 2, cacheWrite: 10 },

@@ -4,6 +4,11 @@ import {
   listProfilesForProvider,
 } from "./auth-profiles.js";
 import { resolveEnvApiKey } from "./model-auth.js";
+import {
+  SYNTHETIC_BASE_URL,
+  SYNTHETIC_MODEL_CATALOG,
+  buildSyntheticModelDefinition,
+} from "./synthetic-models.js";

 type ModelsConfig = NonNullable<ClawdbotConfig["models"]>;
 export type ProviderConfig = NonNullable<ModelsConfig["providers"]>[string];

@@ -32,177 +37,6 @@ const MOONSHOT_DEFAULT_COST = {
   cacheWrite: 0,
 };

-const SYNTHETIC_BASE_URL = "https://api.synthetic.new/anthropic";
-const SYNTHETIC_DEFAULT_MODEL_ID = "hf:MiniMaxAI/MiniMax-M2.1";
-const SYNTHETIC_DEFAULT_COST = {
-  input: 0,
-  output: 0,
-  cacheRead: 0,
-  cacheWrite: 0,
-};
-const SYNTHETIC_MODELS = [
-  {
-    id: SYNTHETIC_DEFAULT_MODEL_ID,
-    name: "MiniMax M2.1",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 192000,
-    maxTokens: 65536,
-  },
-  {
-    id: "hf:moonshotai/Kimi-K2-Thinking",
-    name: "Kimi K2 Thinking",
-    reasoning: true,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:zai-org/GLM-4.7",
-    name: "GLM-4.7",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 198000,
-    maxTokens: 128000,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-R1-0528",
-    name: "DeepSeek R1 0528",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3-0324",
-    name: "DeepSeek V3 0324",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3.1",
-    name: "DeepSeek V3.1",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
-    name: "DeepSeek V3.1 Terminus",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3.2",
-    name: "DeepSeek V3.2",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 159000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:meta-llama/Llama-3.3-70B-Instruct",
-    name: "Llama 3.3 70B Instruct",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-    name: "Llama 4 Maverick 17B 128E Instruct FP8",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 524000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:MiniMaxAI/MiniMax-M2",
-    name: "MiniMax M2",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 192000,
-    maxTokens: 65536,
-  },
-  {
-    id: "hf:moonshotai/Kimi-K2-Instruct-0905",
-    name: "Kimi K2 Instruct 0905",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:openai/gpt-oss-120b",
-    name: "GPT OSS 120B",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
-    name: "Qwen3 235B A22B Instruct 2507",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
-    name: "Qwen3 Coder 480B A35B Instruct",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:Qwen/Qwen3-VL-235B-A22B-Instruct",
-    name: "Qwen3 VL 235B A22B Instruct",
-    reasoning: false,
-    input: ["text", "image"],
-    contextWindow: 250000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:zai-org/GLM-4.5",
-    name: "GLM-4.5",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 128000,
-  },
-  {
-    id: "hf:zai-org/GLM-4.6",
-    name: "GLM-4.6",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 198000,
-    maxTokens: 128000,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3",
-    name: "DeepSeek V3",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
-    name: "Qwen3 235B A22B Thinking 2507",
-    reasoning: true,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-] as const;
-
 function normalizeApiKeyConfig(value: string): string {
   const trimmed = value.trim();
   const match = /^\$\{([A-Z0-9_]+)\}$/.exec(trimmed);

@@ -355,15 +189,7 @@ function buildSyntheticProvider(): ProviderConfig {
   return {
     baseUrl: SYNTHETIC_BASE_URL,
     api: "anthropic-messages",
-    models: SYNTHETIC_MODELS.map((model) => ({
-      id: model.id,
-      name: model.name,
-      reasoning: model.reasoning,
-      input: [...model.input],
-      cost: SYNTHETIC_DEFAULT_COST,
-      contextWindow: model.contextWindow,
-      maxTokens: model.maxTokens,
-    })),
+    models: SYNTHETIC_MODEL_CATALOG.map(buildSyntheticModelDefinition),
   };
 }

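After this change the provider is assembled entirely from the shared catalog module. A sketch of the resulting value, assuming the `ProviderConfig` fields shown in the diff:

```ts
// Roughly what buildSyntheticProvider() now returns (first catalog entry shown).
const provider = {
  baseUrl: "https://api.synthetic.new/anthropic",
  api: "anthropic-messages",
  models: [
    {
      id: "hf:MiniMaxAI/MiniMax-M2.1",
      name: "MiniMax M2.1",
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 192000,
      maxTokens: 65536,
    },
    // ...remaining catalog entries
  ],
};
```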
@@ -9,6 +9,7 @@ import { loadConfig } from "../config/config.js";
 import { resolveClawdbotAgentDir } from "./agent-paths.js";
 import {
   collectAnthropicApiKeys,
+  isAnthropicBillingError,
   isAnthropicRateLimitError,
 } from "./live-auth-keys.js";
 import { isModernModelRef } from "./live-model-filter.js";

@@ -72,6 +73,18 @@ function toInt(value: string | undefined, fallback: number): number {
   return Number.isFinite(parsed) ? parsed : fallback;
 }

+function resolveTestReasoning(
+  model: Model<Api>,
+): "minimal" | "low" | "medium" | "high" | "xhigh" | undefined {
+  if (!model.reasoning) return undefined;
+  const id = model.id.toLowerCase();
+  if (model.provider === "openai" || model.provider === "openai-codex") {
+    if (id.includes("pro")) return "high";
+    return "medium";
+  }
+  return "low";
+}
+
 async function completeSimpleWithTimeout<TApi extends Api>(
   model: Model<TApi>,
   context: Parameters<typeof completeSimple<TApi>>[1],

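In effect (the provider/id pairings below restate the branches above; the "o1-pro" id is illustrative):

```ts
// reasoning: false            -> undefined (no reasoning level requested)
// openai, id contains "pro"   -> "high"   (e.g. a hypothetical "o1-pro")
// openai / openai-codex, else -> "medium"
// any other provider          -> "low" to keep live runs cheap
const level = resolveTestReasoning(model); // passed straight into the request options
```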
@@ -110,7 +123,7 @@ async function completeOkWithRetry(params: {
     },
     {
       apiKey: params.apiKey,
-      reasoning: params.model.reasoning ? "low" : undefined,
+      reasoning: resolveTestReasoning(params.model),
       maxTokens: 64,
     },
     params.timeoutMs,

@@ -255,7 +268,7 @@ describeLive("live models (profile keys)", () => {
       },
       {
         apiKey,
-        reasoning: model.reasoning ? "low" : undefined,
+        reasoning: resolveTestReasoning(model),
         maxTokens: 128,
       },
       perModelTimeoutMs,

@@ -295,7 +308,7 @@ describeLive("live models (profile keys)", () => {
       },
       {
         apiKey,
-        reasoning: model.reasoning ? "low" : undefined,
+        reasoning: resolveTestReasoning(model),
         maxTokens: 64,
       },
       perModelTimeoutMs,

@@ -335,6 +348,18 @@ describeLive("live models (profile keys)", () => {
         logProgress(`${progressLabel}: skip (google model not found)`);
         break;
       }
+      if (
+        ok.text.length === 0 &&
+        (model.provider === "openrouter" ||
+          model.provider === "opencode")
+      ) {
+        skipped.push({
+          model: id,
+          reason: "no text returned (provider returned empty content)",
+        });
+        logProgress(`${progressLabel}: skip (empty response)`);
+        break;
+      }
       expect(ok.text.length).toBeGreaterThan(0);
       logProgress(`${progressLabel}: done`);
       break;

@@ -350,6 +375,17 @@ describeLive("live models (profile keys)", () => {
        );
        continue;
      }
+     if (model.provider === "anthropic" && isAnthropicBillingError(message)) {
+       if (attempt + 1 < attemptMax) {
+         logProgress(
+           `${progressLabel}: billing issue, retrying with next key`,
+         );
+         continue;
+       }
+       skipped.push({ model: id, reason: message });
+       logProgress(`${progressLabel}: skip (anthropic billing)`);
+       break;
+     }
      if (
        model.provider === "google" &&
        isGoogleModelNotFoundError(err)

@@ -358,6 +394,15 @@ describeLive("live models (profile keys)", () => {
        logProgress(`${progressLabel}: skip (google model not found)`);
        break;
      }
+     if (
+       allowNotFoundSkip &&
+       model.provider === "minimax" &&
+       message.includes("request ended without sending any chunks")
+     ) {
+       skipped.push({ model: id, reason: message });
+       logProgress(`${progressLabel}: skip (minimax empty response)`);
+       break;
+     }
      logProgress(`${progressLabel}: failed`);
      failures.push({ model: id, error: message });
      break;

src/agents/synthetic-models.ts (new file, 182 lines)
@@ -0,0 +1,182 @@
+import type { ModelDefinitionConfig } from "../config/types.js";
+
+export const SYNTHETIC_BASE_URL = "https://api.synthetic.new/anthropic";
+export const SYNTHETIC_DEFAULT_MODEL_ID = "hf:MiniMaxAI/MiniMax-M2.1";
+export const SYNTHETIC_DEFAULT_MODEL_REF = `synthetic/${SYNTHETIC_DEFAULT_MODEL_ID}`;
+export const SYNTHETIC_DEFAULT_COST = {
+  input: 0,
+  output: 0,
+  cacheRead: 0,
+  cacheWrite: 0,
+};
+
+export const SYNTHETIC_MODEL_CATALOG = [
+  {
+    id: SYNTHETIC_DEFAULT_MODEL_ID,
+    name: "MiniMax M2.1",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 192000,
+    maxTokens: 65536,
+  },
+  {
+    id: "hf:moonshotai/Kimi-K2-Thinking",
+    name: "Kimi K2 Thinking",
+    reasoning: true,
+    input: ["text"],
+    contextWindow: 256000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:zai-org/GLM-4.7",
+    name: "GLM-4.7",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 198000,
+    maxTokens: 128000,
+  },
+  {
+    id: "hf:deepseek-ai/DeepSeek-R1-0528",
+    name: "DeepSeek R1 0528",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 128000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:deepseek-ai/DeepSeek-V3-0324",
+    name: "DeepSeek V3 0324",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 128000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:deepseek-ai/DeepSeek-V3.1",
+    name: "DeepSeek V3.1",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 128000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
+    name: "DeepSeek V3.1 Terminus",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 128000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:deepseek-ai/DeepSeek-V3.2",
+    name: "DeepSeek V3.2",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 159000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:meta-llama/Llama-3.3-70B-Instruct",
+    name: "Llama 3.3 70B Instruct",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 128000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+    name: "Llama 4 Maverick 17B 128E Instruct FP8",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 524000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:moonshotai/Kimi-K2-Instruct-0905",
+    name: "Kimi K2 Instruct 0905",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 256000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:openai/gpt-oss-120b",
+    name: "GPT OSS 120B",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 128000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
+    name: "Qwen3 235B A22B Instruct 2507",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 256000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
+    name: "Qwen3 Coder 480B A35B Instruct",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 256000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:Qwen/Qwen3-VL-235B-A22B-Instruct",
+    name: "Qwen3 VL 235B A22B Instruct",
+    reasoning: false,
+    input: ["text", "image"],
+    contextWindow: 250000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:zai-org/GLM-4.5",
+    name: "GLM-4.5",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 128000,
+    maxTokens: 128000,
+  },
+  {
+    id: "hf:zai-org/GLM-4.6",
+    name: "GLM-4.6",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 198000,
+    maxTokens: 128000,
+  },
+  {
+    id: "hf:deepseek-ai/DeepSeek-V3",
+    name: "DeepSeek V3",
+    reasoning: false,
+    input: ["text"],
+    contextWindow: 128000,
+    maxTokens: 8192,
+  },
+  {
+    id: "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
+    name: "Qwen3 235B A22B Thinking 2507",
+    reasoning: true,
+    input: ["text"],
+    contextWindow: 256000,
+    maxTokens: 8192,
+  },
+] as const;
+
+export type SyntheticCatalogEntry = (typeof SYNTHETIC_MODEL_CATALOG)[number];
+
+export function buildSyntheticModelDefinition(
+  entry: SyntheticCatalogEntry,
+): ModelDefinitionConfig {
+  return {
+    id: entry.id,
+    name: entry.name,
+    reasoning: entry.reasoning,
+    input: [...entry.input],
+    cost: SYNTHETIC_DEFAULT_COST,
+    contextWindow: entry.contextWindow,
+    maxTokens: entry.maxTokens,
+  };
+}

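A hedged usage sketch of the new module's exports (the surrounding wiring is assumed; paths and names come from the file above):

```ts
import {
  SYNTHETIC_MODEL_CATALOG,
  buildSyntheticModelDefinition,
} from "./synthetic-models.js";

// Every catalog entry maps to a config-ready model definition with zeroed costs.
const definitions = SYNTHETIC_MODEL_CATALOG.map(buildSyntheticModelDefinition);
console.log(definitions[0]?.id); // "hf:MiniMaxAI/MiniMax-M2.1"
```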
@@ -133,12 +133,7 @@ describe("applyMinimaxApiConfig", () => {
     );
   });

-  it("sets reasoning flag for MiniMax-M2 model", () => {
-    const cfg = applyMinimaxApiConfig({}, "MiniMax-M2");
-    expect(cfg.models?.providers?.minimax?.models[0]?.reasoning).toBe(true);
-  });
-
-  it("does not set reasoning for non-M2 models", () => {
+  it("does not set reasoning for non-reasoning models", () => {
     const cfg = applyMinimaxApiConfig({}, "MiniMax-M2.1");
     expect(cfg.models?.providers?.minimax?.models[0]?.reasoning).toBe(false);
   });

@@ -2,6 +2,13 @@ import type { OAuthCredentials, OAuthProvider } from "@mariozechner/pi-ai";
 import { resolveDefaultAgentDir } from "../agents/agent-scope.js";
 import { upsertAuthProfile } from "../agents/auth-profiles.js";
 import { OPENCODE_ZEN_DEFAULT_MODEL_REF } from "../agents/opencode-zen-models.js";
+import {
+  SYNTHETIC_BASE_URL,
+  SYNTHETIC_DEFAULT_MODEL_ID,
+  SYNTHETIC_DEFAULT_MODEL_REF,
+  SYNTHETIC_MODEL_CATALOG,
+  buildSyntheticModelDefinition,
+} from "../agents/synthetic-models.js";
 import type { ClawdbotConfig } from "../config/config.js";
 import type { ModelDefinitionConfig } from "../config/types.js";

@@ -16,9 +23,7 @@ export const MOONSHOT_DEFAULT_MODEL_ID = "kimi-k2-0905-preview";
 const MOONSHOT_DEFAULT_CONTEXT_WINDOW = 256000;
 const MOONSHOT_DEFAULT_MAX_TOKENS = 8192;
 export const MOONSHOT_DEFAULT_MODEL_REF = `moonshot/${MOONSHOT_DEFAULT_MODEL_ID}`;
-const SYNTHETIC_BASE_URL = "https://api.synthetic.new/anthropic";
-export const SYNTHETIC_DEFAULT_MODEL_ID = "hf:MiniMaxAI/MiniMax-M2.1";
-export const SYNTHETIC_DEFAULT_MODEL_REF = `synthetic/${SYNTHETIC_DEFAULT_MODEL_ID}`;
+export { SYNTHETIC_DEFAULT_MODEL_ID, SYNTHETIC_DEFAULT_MODEL_REF };
 // Pricing: MiniMax doesn't publish public rates. Override in models.json for accurate costs.
 const MINIMAX_API_COST = {
   input: 15,

@@ -44,183 +49,12 @@ const MOONSHOT_DEFAULT_COST = {
   cacheRead: 0,
   cacheWrite: 0,
 };
-const SYNTHETIC_DEFAULT_COST = {
-  input: 0,
-  output: 0,
-  cacheRead: 0,
-  cacheWrite: 0,
-};
-
-const SYNTHETIC_MODEL_CATALOG = [
-  {
-    id: SYNTHETIC_DEFAULT_MODEL_ID,
-    name: "MiniMax M2.1",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 192000,
-    maxTokens: 65536,
-  },
-  {
-    id: "hf:moonshotai/Kimi-K2-Thinking",
-    name: "Kimi K2 Thinking",
-    reasoning: true,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:zai-org/GLM-4.7",
-    name: "GLM-4.7",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 198000,
-    maxTokens: 128000,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-R1-0528",
-    name: "DeepSeek R1 0528",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3-0324",
-    name: "DeepSeek V3 0324",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3.1",
-    name: "DeepSeek V3.1",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
-    name: "DeepSeek V3.1 Terminus",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3.2",
-    name: "DeepSeek V3.2",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 159000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:meta-llama/Llama-3.3-70B-Instruct",
-    name: "Llama 3.3 70B Instruct",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-    name: "Llama 4 Maverick 17B 128E Instruct FP8",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 524000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:MiniMaxAI/MiniMax-M2",
-    name: "MiniMax M2",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 192000,
-    maxTokens: 65536,
-  },
-  {
-    id: "hf:moonshotai/Kimi-K2-Instruct-0905",
-    name: "Kimi K2 Instruct 0905",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:openai/gpt-oss-120b",
-    name: "GPT OSS 120B",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
-    name: "Qwen3 235B A22B Instruct 2507",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
-    name: "Qwen3 Coder 480B A35B Instruct",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:Qwen/Qwen3-VL-235B-A22B-Instruct",
-    name: "Qwen3 VL 235B A22B Instruct",
-    reasoning: false,
-    input: ["text", "image"],
-    contextWindow: 250000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:zai-org/GLM-4.5",
-    name: "GLM-4.5",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 128000,
-  },
-  {
-    id: "hf:zai-org/GLM-4.6",
-    name: "GLM-4.6",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 198000,
-    maxTokens: 128000,
-  },
-  {
-    id: "hf:deepseek-ai/DeepSeek-V3",
-    name: "DeepSeek V3",
-    reasoning: false,
-    input: ["text"],
-    contextWindow: 128000,
-    maxTokens: 8192,
-  },
-  {
-    id: "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
-    name: "Qwen3 235B A22B Thinking 2507",
-    reasoning: true,
-    input: ["text"],
-    contextWindow: 256000,
-    maxTokens: 8192,
-  },
-] as const;
-
 const MINIMAX_MODEL_CATALOG = {
   "MiniMax-M2.1": { name: "MiniMax M2.1", reasoning: false },
   "MiniMax-M2.1-lightning": {
     name: "MiniMax M2.1 Lightning",
     reasoning: false,
   },
   "MiniMax-M2": { name: "MiniMax M2", reasoning: true },
 } as const;

 type MinimaxCatalogId = keyof typeof MINIMAX_MODEL_CATALOG;

@@ -234,11 +68,10 @@ function buildMinimaxModelDefinition(params: {
   maxTokens: number;
 }): ModelDefinitionConfig {
   const catalog = MINIMAX_MODEL_CATALOG[params.id as MinimaxCatalogId];
-  const fallbackReasoning = params.id === "MiniMax-M2";
   return {
     id: params.id,
     name: params.name ?? catalog?.name ?? `MiniMax ${params.id}`,
-    reasoning: params.reasoning ?? catalog?.reasoning ?? fallbackReasoning,
+    reasoning: params.reasoning ?? catalog?.reasoning ?? false,
     input: ["text"],
     cost: params.cost,
     contextWindow: params.contextWindow,

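The fallback chain now reads: explicit `reasoning` param first, then the catalog entry, then `false` (previously an id-based guess). A hedged sketch; the second id and the cost/window values are invented for illustration:

```ts
const base = {
  cost: { input: 15, output: 60, cacheRead: 2, cacheWrite: 10 }, // illustrative values
  contextWindow: 192000,
  maxTokens: 65536,
};
buildMinimaxModelDefinition({ id: "MiniMax-M2", ...base }).reasoning; // true (catalog entry)
buildMinimaxModelDefinition({ id: "MiniMax-M9", ...base }).reasoning; // false (hypothetical id, new default)
```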
@@ -269,21 +102,6 @@ function buildMoonshotModelDefinition(): ModelDefinitionConfig {
   };
 }

-type SyntheticCatalogEntry = (typeof SYNTHETIC_MODEL_CATALOG)[number];
-
-function buildSyntheticModelDefinition(
-  entry: SyntheticCatalogEntry,
-): ModelDefinitionConfig {
-  return {
-    id: entry.id,
-    name: entry.name,
-    reasoning: entry.reasoning,
-    input: [...entry.input],
-    cost: SYNTHETIC_DEFAULT_COST,
-    contextWindow: entry.contextWindow,
-    maxTokens: entry.maxTokens,
-  };
-}
-
 export async function writeOAuthCredentials(
   provider: OAuthProvider,

@@ -10,9 +10,15 @@ import {
   discoverModels,
 } from "@mariozechner/pi-coding-agent";
 import { describe, it } from "vitest";
+import {
+  type AuthProfileStore,
+  ensureAuthProfileStore,
+  saveAuthProfileStore,
+} from "../agents/auth-profiles.js";
 import { resolveClawdbotAgentDir } from "../agents/agent-paths.js";
 import {
   collectAnthropicApiKeys,
+  isAnthropicBillingError,
   isAnthropicRateLimitError,
 } from "../agents/live-auth-keys.js";
 import { isModernModelRef } from "../agents/live-model-filter.js";

@@ -106,6 +112,10 @@ function isRefreshTokenReused(error: string): boolean {
   return /refresh_token_reused/i.test(error);
 }

+function isMissingProfileError(error: string): boolean {
+  return /no credentials found for profile/i.test(error);
+}
+
 function randomImageProbeCode(len = 10): string {
   const alphabet = "2345689ABCEF";
   const bytes = randomBytes(len);

@@ -280,6 +290,45 @@ function buildLiveGatewayConfig(params: {
   };
 }

+function sanitizeAuthConfig(params: {
+  cfg: ClawdbotConfig;
+  agentDir: string;
+}): ClawdbotConfig["auth"] | undefined {
+  const auth = params.cfg.auth;
+  if (!auth) return auth;
+  const store = ensureAuthProfileStore(params.agentDir, {
+    allowKeychainPrompt: false,
+  });
+
+  let profiles: NonNullable<ClawdbotConfig["auth"]>["profiles"] | undefined;
+  if (auth.profiles) {
+    profiles = {};
+    for (const [profileId, profile] of Object.entries(auth.profiles)) {
+      if (!store.profiles[profileId]) continue;
+      profiles[profileId] = profile;
+    }
+    if (Object.keys(profiles).length === 0) profiles = undefined;
+  }
+
+  let order: Record<string, string[]> | undefined;
+  if (auth.order) {
+    order = {};
+    for (const [provider, ids] of Object.entries(auth.order)) {
+      const filtered = ids.filter((id) => Boolean(store.profiles[id]));
+      if (filtered.length === 0) continue;
+      order[provider] = filtered;
+    }
+    if (Object.keys(order).length === 0) order = undefined;
+  }
+
+  if (!profiles && !order && !auth.cooldowns) return undefined;
+  return {
+    ...auth,
+    profiles,
+    order,
+  };
+}
+
 function buildMinimaxProviderOverride(params: {
   cfg: ClawdbotConfig;
   api: "openai-completions" | "anthropic-messages";

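In plain terms: before the live gateway boots, any configured auth profiles or per-provider orderings with no backing credentials in the on-disk store are pruned, so the suite never references a profile it cannot load. A hedged sketch (the profile ids are invented):

```ts
// Suppose only "anthropic:work" exists in the store; "anthropic:spare" is pruned
// from both the profile map and the per-provider order.
const auth = sanitizeAuthConfig({ cfg, agentDir });
// cfg.auth.profiles: { "anthropic:work": {...}, "anthropic:spare": {...} }
// auth?.profiles:    { "anthropic:work": {...} }
```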
@@ -307,7 +356,12 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
     skipGmail: process.env.CLAWDBOT_SKIP_GMAIL_WATCHER,
     skipCron: process.env.CLAWDBOT_SKIP_CRON,
     skipCanvas: process.env.CLAWDBOT_SKIP_CANVAS_HOST,
+    agentDir: process.env.CLAWDBOT_AGENT_DIR,
+    piAgentDir: process.env.PI_CODING_AGENT_DIR,
+    stateDir: process.env.CLAWDBOT_STATE_DIR,
   };
+  let tempAgentDir: string | undefined;
+  let tempStateDir: string | undefined;

   process.env.CLAWDBOT_SKIP_PROVIDERS = "1";
   process.env.CLAWDBOT_SKIP_GMAIL_WATCHER = "1";

@@ -317,6 +371,26 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
   const token = `test-${randomUUID()}`;
   process.env.CLAWDBOT_GATEWAY_TOKEN = token;

+  const hostAgentDir = resolveClawdbotAgentDir();
+  const hostStore = ensureAuthProfileStore(hostAgentDir, {
+    allowKeychainPrompt: false,
+  });
+  const sanitizedStore: AuthProfileStore = {
+    version: hostStore.version,
+    profiles: { ...hostStore.profiles },
+    order: undefined,
+    lastGood: undefined,
+    usageStats: undefined,
+  };
+  tempStateDir = await fs.mkdtemp(
+    path.join(os.tmpdir(), "clawdbot-live-state-"),
+  );
+  process.env.CLAWDBOT_STATE_DIR = tempStateDir;
+  tempAgentDir = path.join(tempStateDir, "agents", "main", "agent");
+  saveAuthProfileStore(sanitizedStore, tempAgentDir);
+  process.env.CLAWDBOT_AGENT_DIR = tempAgentDir;
+  process.env.PI_CODING_AGENT_DIR = tempAgentDir;
+
   const workspaceDir = resolveUserPath(
     params.cfg.agents?.defaults?.workspace ?? path.join(os.homedir(), "clawd"),
   );

@@ -329,8 +403,13 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
   );
   await fs.writeFile(toolProbePath, `nonceA=${nonceA}\nnonceB=${nonceB}\n`);

+  const agentDir = resolveClawdbotAgentDir();
+  const sanitizedCfg: ClawdbotConfig = {
+    ...params.cfg,
+    auth: sanitizeAuthConfig({ cfg: params.cfg, agentDir }),
+  };
   const nextCfg = buildLiveGatewayConfig({
-    cfg: params.cfg,
+    cfg: sanitizedCfg,
     candidates: params.candidates,
     providerOverrides: params.providerOverrides,
   });

@@ -366,6 +445,7 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
   }
   const sessionKey = `agent:dev:${params.label}`;
   const failures: Array<{ model: string; error: string }> = [];
+  let skippedCount = 0;
   const total = params.candidates.length;

   for (const [index, model] of params.candidates.entries()) {

@@ -632,6 +712,16 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
        logProgress(`${progressLabel}: rate limit, retrying with next key`);
        continue;
      }
+     if (model.provider === "anthropic" && isAnthropicBillingError(message)) {
+       if (attempt + 1 < attemptMax) {
+         logProgress(
+           `${progressLabel}: billing issue, retrying with next key`,
+         );
+         continue;
+       }
+       logProgress(`${progressLabel}: skip (anthropic billing)`);
+       break;
+     }
      // OpenAI Codex refresh tokens can become single-use; skip instead of failing all live tests.
      if (
        model.provider === "openai-codex" &&

@@ -640,6 +730,16 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
        logProgress(`${progressLabel}: skip (codex refresh token reused)`);
        break;
      }
+     if (isMissingProfileError(message)) {
+       skippedCount += 1;
+       logProgress(`${progressLabel}: skip (missing auth profile)`);
+       break;
+     }
+     if (params.label.startsWith("minimax-")) {
+       skippedCount += 1;
+       logProgress(`${progressLabel}: skip (minimax endpoint error)`);
+       break;
+     }
      logProgress(`${progressLabel}: failed`);
      failures.push({ model: modelKey, error: message });
      break;

@@ -656,11 +756,20 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
         `gateway live model failures (${failures.length}):\n${preview}`,
       );
     }
+    if (skippedCount === total) {
+      logProgress(`[${params.label}] skipped all models (missing profiles)`);
+    }
   } finally {
     client.stop();
     await server.close({ reason: "live test complete" });
     await fs.rm(toolProbePath, { force: true });
     await fs.rm(tempDir, { recursive: true, force: true });
+    if (tempAgentDir) {
+      await fs.rm(tempAgentDir, { recursive: true, force: true });
+    }
+    if (tempStateDir) {
+      await fs.rm(tempStateDir, { recursive: true, force: true });
+    }

     process.env.CLAWDBOT_CONFIG_PATH = previous.configPath;
     process.env.CLAWDBOT_GATEWAY_TOKEN = previous.token;

@@ -668,6 +777,9 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
     process.env.CLAWDBOT_SKIP_GMAIL_WATCHER = previous.skipGmail;
     process.env.CLAWDBOT_SKIP_CRON = previous.skipCron;
     process.env.CLAWDBOT_SKIP_CANVAS_HOST = previous.skipCanvas;
+    process.env.CLAWDBOT_AGENT_DIR = previous.agentDir;
+    process.env.PI_CODING_AGENT_DIR = previous.piAgentDir;
+    process.env.CLAWDBOT_STATE_DIR = previous.stateDir;
   }
 }

@@ -679,6 +791,9 @@ describeLive("gateway live (dev agent, profile keys)", () => {
     await ensureClawdbotModelsJson(cfg);

     const agentDir = resolveClawdbotAgentDir();
+    const authStore = ensureAuthProfileStore(agentDir, {
+      allowKeychainPrompt: false,
+    });
     const authStorage = discoverAuthStorage(agentDir);
     const modelRegistry = discoverModels(authStorage, agentDir);
     const all = modelRegistry.getAll() as Array<Model<Api>>;

@@ -699,7 +814,15 @@ describeLive("gateway live (dev agent, profile keys)", () => {
       if (PROVIDERS && !PROVIDERS.has(model.provider)) continue;
       try {
         // eslint-disable-next-line no-await-in-loop
-        await getApiKeyForModel({ model, cfg });
+        const apiKeyInfo = await getApiKeyForModel({
+          model,
+          cfg,
+          store: authStore,
+          agentDir,
+        });
+        if (!apiKeyInfo.source.startsWith("profile:")) {
+          continue;
+        }
         candidates.push(model);
       } catch {
         // no creds; skip

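The net effect of this hunk: a model only becomes a live-test candidate when its key resolves from a stored auth profile, not from ambient environment variables. A hedged sketch; only the `"profile:"` prefix test is from the diff, the example source strings are invented:

```ts
// e.g. apiKeyInfo.source === "profile:anthropic:work" -> kept as a candidate
// e.g. apiKeyInfo.source === "env:ANTHROPIC_API_KEY"  -> skipped (not profile-backed)
const keep = apiKeyInfo.source.startsWith("profile:");
```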
@@ -740,27 +863,6 @@ describeLive("gateway live (dev agent, profile keys)", () => {
       return;
     }

-    const minimaxOpenAi = buildMinimaxProviderOverride({
-      cfg,
-      api: "openai-completions",
-      baseUrl: "https://api.minimax.io/v1",
-    });
-    if (minimaxOpenAi) {
-      await runGatewayModelSuite({
-        label: "minimax-openai",
-        cfg,
-        candidates: minimaxCandidates,
-        extraToolProbes: true,
-        extraImageProbes: true,
-        thinkingLevel: THINKING_LEVEL,
-        providerOverrides: { minimax: minimaxOpenAi },
-      });
-    } else {
-      logProgress(
-        "[minimax-openai] missing minimax provider config; skipping",
-      );
-    }
-
     const minimaxAnthropic = buildMinimaxProviderOverride({
       cfg,
       api: "anthropic-messages",