chore: speed up tests and update opencode models
@@ -66,6 +66,10 @@ export function isModernModelRef(ref: ModelRef): boolean {
     return matchesPrefix(id, XAI_PREFIXES);
   }
 
+  if (provider === "opencode" && id.endsWith("-free")) {
+    return false;
+  }
+
   if (provider === "openrouter" || provider === "opencode") {
     return matchesAny(id, [
       ...ANTHROPIC_PREFIXES,
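
A minimal sketch of the new guard's effect, assuming ModelRef carries provider and id fields (the prefix tables and the rest of isModernModelRef are not shown in this hunk):

// Illustrative only: trimmed-down ModelRef plus the new "-free" short-circuit.
type ModelRef = { provider: string; id: string };

function isOpencodeFreeVariant(ref: ModelRef): boolean {
  return ref.provider === "opencode" && ref.id.endsWith("-free");
}

// After this change, an opencode "-free" id is never considered modern:
console.log(isOpencodeFreeVariant({ provider: "opencode", id: "glm-4.7-free" })); // true  -> isModernModelRef returns false
console.log(isOpencodeFreeVariant({ provider: "opencode", id: "glm-4.7" }));      // false -> falls through to the prefix checks
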
@@ -41,12 +41,11 @@ describe("resolveOpencodeZenAlias", () => {
 describe("resolveOpencodeZenModelApi", () => {
   it("maps APIs by model family", () => {
     expect(resolveOpencodeZenModelApi("claude-opus-4-5")).toBe("anthropic-messages");
-    expect(resolveOpencodeZenModelApi("minimax-m2.1-free")).toBe("anthropic-messages");
     expect(resolveOpencodeZenModelApi("gemini-3-pro")).toBe("google-generative-ai");
     expect(resolveOpencodeZenModelApi("gpt-5.2")).toBe("openai-responses");
     expect(resolveOpencodeZenModelApi("alpha-gd4")).toBe("openai-completions");
     expect(resolveOpencodeZenModelApi("big-pickle")).toBe("openai-completions");
-    expect(resolveOpencodeZenModelApi("glm-4.7-free")).toBe("openai-completions");
+    expect(resolveOpencodeZenModelApi("glm-4.7")).toBe("openai-completions");
     expect(resolveOpencodeZenModelApi("some-unknown-model")).toBe("openai-completions");
   });
 });
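
The updated expectations imply a family-based routing roughly like the sketch below; the real resolveOpencodeZenModelApi is not part of this diff and may differ in detail:

// Sketch only, reconstructed from the test expectations above.
type OpencodeZenApi =
  | "anthropic-messages"
  | "google-generative-ai"
  | "openai-responses"
  | "openai-completions";

function sketchResolveModelApi(id: string): OpencodeZenApi {
  if (id.startsWith("claude-")) return "anthropic-messages";
  if (id.startsWith("gemini-")) return "google-generative-ai";
  if (id.startsWith("gpt-")) return "openai-responses";
  return "openai-completions"; // glm-*, alpha-*, and unknown ids fall back here
}
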
@@ -55,10 +54,10 @@ describe("getOpencodeZenStaticFallbackModels", () => {
   it("returns an array of models", () => {
     const models = getOpencodeZenStaticFallbackModels();
     expect(Array.isArray(models)).toBe(true);
-    expect(models.length).toBe(11);
+    expect(models.length).toBe(10);
   });
 
-  it("includes Claude, GPT, Gemini, GLM, and MiniMax models", () => {
+  it("includes Claude, GPT, Gemini, and GLM models", () => {
     const models = getOpencodeZenStaticFallbackModels();
     const ids = models.map((m) => m.id);
 
@@ -66,8 +65,7 @@ describe("getOpencodeZenStaticFallbackModels", () => {
     expect(ids).toContain("gpt-5.2");
     expect(ids).toContain("gpt-5.1-codex");
     expect(ids).toContain("gemini-3-pro");
-    expect(ids).toContain("glm-4.7-free");
-    expect(ids).toContain("minimax-m2.1-free");
+    expect(ids).toContain("glm-4.7");
   });
 
   it("returns valid ModelDefinitionConfig objects", () => {
@@ -90,8 +88,7 @@ describe("OPENCODE_ZEN_MODEL_ALIASES", () => {
     expect(OPENCODE_ZEN_MODEL_ALIASES.codex).toBe("gpt-5.1-codex");
     expect(OPENCODE_ZEN_MODEL_ALIASES.gpt5).toBe("gpt-5.2");
     expect(OPENCODE_ZEN_MODEL_ALIASES.gemini).toBe("gemini-3-pro");
-    expect(OPENCODE_ZEN_MODEL_ALIASES.glm).toBe("glm-4.7-free");
-    expect(OPENCODE_ZEN_MODEL_ALIASES.minimax).toBe("minimax-m2.1-free");
+    expect(OPENCODE_ZEN_MODEL_ALIASES.glm).toBe("glm-4.7");
 
     // Legacy aliases (kept for backward compatibility).
     expect(OPENCODE_ZEN_MODEL_ALIASES.sonnet).toBe("claude-opus-4-5");
@@ -68,13 +68,9 @@ export const OPENCODE_ZEN_MODEL_ALIASES: Record<string, string> = {
   "gemini-2.5-flash": "gemini-3-flash",
 
   // GLM (free + alpha)
-  glm: "glm-4.7-free",
-  "glm-free": "glm-4.7-free",
+  glm: "glm-4.7",
+  "glm-free": "glm-4.7",
   "alpha-glm": "alpha-glm-4.7",
-
-  // MiniMax
-  minimax: "minimax-m2.1-free",
-  "minimax-free": "minimax-m2.1-free",
 };
 
 /**
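
The usual way a table like this is consumed is a lookup with pass-through; resolveOpencodeZenAlias itself is outside this diff, so the helper below is only a sketch of that pattern:

// Sketch only: alias lookup with pass-through for ids that are not aliased.
declare const OPENCODE_ZEN_MODEL_ALIASES: Record<string, string>;

function sketchResolveAlias(idOrAlias: string): string {
  return OPENCODE_ZEN_MODEL_ALIASES[idOrAlias] ?? idOrAlias;
}

// After this change, "glm" and "glm-free" both resolve to "glm-4.7", while
// "minimax" and "minimax-free" are no longer aliases and pass through unchanged.
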
@@ -134,7 +130,7 @@ const MODEL_COSTS: Record<
     cacheWrite: 0,
   },
   "gpt-5.1": { input: 1.07, output: 8.5, cacheRead: 0.107, cacheWrite: 0 },
-  "glm-4.7-free": { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+  "glm-4.7": { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
   "gemini-3-flash": { input: 0.5, output: 3, cacheRead: 0.05, cacheWrite: 0 },
   "gpt-5.1-codex-max": {
     input: 1.25,
@@ -142,7 +138,6 @@ const MODEL_COSTS: Record<
     cacheRead: 0.125,
     cacheWrite: 0,
   },
-  "minimax-m2.1-free": { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
   "gpt-5.2": { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
 };
 
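
Reading the MODEL_COSTS values as USD per million tokens (an assumption; the unit is not stated in this diff), a worked example for the gpt-5.2 entry:

// Assumed unit: USD per 1,000,000 tokens.
const gpt52 = { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 };
const usage = { input: 200_000, cacheRead: 50_000, output: 10_000 }; // hypothetical request

const costUsd =
  (usage.input / 1_000_000) * gpt52.input +         // 0.35
  (usage.cacheRead / 1_000_000) * gpt52.cacheRead + // 0.00875
  (usage.output / 1_000_000) * gpt52.output;        // 0.14

console.log(costUsd.toFixed(5)); // "0.49875"
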
@@ -155,10 +150,9 @@ const MODEL_CONTEXT_WINDOWS: Record<string, number> = {
   "alpha-glm-4.7": 204800,
   "gpt-5.1-codex-mini": 400000,
   "gpt-5.1": 400000,
-  "glm-4.7-free": 204800,
+  "glm-4.7": 204800,
   "gemini-3-flash": 1048576,
   "gpt-5.1-codex-max": 400000,
-  "minimax-m2.1-free": 204800,
   "gpt-5.2": 400000,
 };
 
@@ -173,10 +167,9 @@ const MODEL_MAX_TOKENS: Record<string, number> = {
   "alpha-glm-4.7": 131072,
   "gpt-5.1-codex-mini": 128000,
   "gpt-5.1": 128000,
-  "glm-4.7-free": 131072,
+  "glm-4.7": 131072,
   "gemini-3-flash": 65536,
   "gpt-5.1-codex-max": 128000,
-  "minimax-m2.1-free": 131072,
   "gpt-5.2": 128000,
 };
 
@@ -211,10 +204,9 @@ const MODEL_NAMES: Record<string, string> = {
   "alpha-glm-4.7": "Alpha GLM-4.7",
   "gpt-5.1-codex-mini": "GPT-5.1 Codex Mini",
   "gpt-5.1": "GPT-5.1",
-  "glm-4.7-free": "GLM-4.7",
+  "glm-4.7": "GLM-4.7",
   "gemini-3-flash": "Gemini 3 Flash",
   "gpt-5.1-codex-max": "GPT-5.1 Codex Max",
-  "minimax-m2.1-free": "MiniMax M2.1",
   "gpt-5.2": "GPT-5.2",
 };
 
@@ -240,10 +232,9 @@ export function getOpencodeZenStaticFallbackModels(): ModelDefinitionConfig[] {
     "alpha-glm-4.7",
     "gpt-5.1-codex-mini",
     "gpt-5.1",
-    "glm-4.7-free",
+    "glm-4.7",
     "gemini-3-flash",
     "gpt-5.1-codex-max",
-    "minimax-m2.1-free",
     "gpt-5.2",
   ];
 
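
The id list above is presumably zipped against the name/cost/context/max-token records to build the fallback ModelDefinitionConfig objects; the exact fields of that type are not visible in this diff, so the shape below is a placeholder that only illustrates the assembly pattern:

// Hypothetical field names; only the zip-by-id pattern is the point.
interface SketchModelDefinition {
  id: string;
  name: string;
  cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
  contextWindow: number;
  maxTokens: number;
}

declare const MODEL_COSTS: Record<string, SketchModelDefinition["cost"]>;
declare const MODEL_CONTEXT_WINDOWS: Record<string, number>;
declare const MODEL_MAX_TOKENS: Record<string, number>;
declare const MODEL_NAMES: Record<string, string>;

function sketchBuildFallbackModels(ids: string[]): SketchModelDefinition[] {
  return ids.map((id) => ({
    id,
    name: MODEL_NAMES[id] ?? id,
    cost: MODEL_COSTS[id],
    contextWindow: MODEL_CONTEXT_WINDOWS[id],
    maxTokens: MODEL_MAX_TOKENS[id],
  }));
}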