fix: default custom provider model fields

This commit is contained in:
Peter Steinberger
2026-01-25 00:01:33 +00:00
parent 3b53213b41
commit 5ad203e47b
5 changed files with 148 additions and 16 deletions

View File

@@ -23,6 +23,7 @@ Docs: https://docs.clawd.bot
- Exec: keep approvals for elevated ask unless full mode. (#1616) Thanks @ivancasco.
- Agents: auto-compact on context overflow prompt errors before failing. (#1627) Thanks @rodrigouroz.
- Agents: use the active auth profile for auto-compaction recovery.
- Models: default missing custom provider fields so minimal configs are accepted.
- Gateway: reduce log noise for late invokes + remote node probes; debounce skills refresh. (#1607) Thanks @petter-b.
- macOS: default direct-transport `ws://` URLs to port 18789; document `gateway.remote.transport`. (#1603) Thanks @ngutman.
- Voice Call: return stream TwiML for outbound conversation calls on initial Twilio webhook. (#1634)

View File

@@ -295,6 +295,16 @@ Example (OpenAI-compatible):
}
```
Notes:
- For custom providers, `reasoning`, `input`, `cost`, `contextWindow`, and `maxTokens` are optional.
When omitted, Clawdbot defaults to:
- `reasoning: false`
- `input: ["text"]`
- `cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }`
- `contextWindow: 200000`
- `maxTokens: 8192`
- Recommended: set explicit values that match your proxy/model limits.
## CLI examples
```bash

View File

@@ -1,7 +1,9 @@
import { DEFAULT_CONTEXT_TOKENS } from "../agents/defaults.js";
import { parseModelRef } from "../agents/model-selection.js";
import { resolveTalkApiKey } from "./talk.js";
import type { ClawdbotConfig } from "./types.js";
import { DEFAULT_AGENT_MAX_CONCURRENT, DEFAULT_SUBAGENT_MAX_CONCURRENT } from "./agent-limits.js";
import type { ModelDefinitionConfig } from "./types.models.js";
type WarnState = { warned: boolean };
@@ -23,6 +25,34 @@ const DEFAULT_MODEL_ALIASES: Readonly<Record<string, string>> = {
"gemini-flash": "google/gemini-3-flash-preview",
};
// Zero-cost fallback applied when a custom provider model omits `cost`.
// Unknown proxy pricing is treated as free rather than guessed at.
const DEFAULT_MODEL_COST: ModelDefinitionConfig["cost"] = {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
};
// Default input modalities when omitted: text only (no image support assumed).
const DEFAULT_MODEL_INPUT: ModelDefinitionConfig["input"] = ["text"];
// Default per-response token cap when a model definition omits `maxTokens`.
const DEFAULT_MODEL_MAX_TOKENS = 8192;
// A model entry as it may appear in user config: only `id` and `name` are
// required; every other ModelDefinitionConfig field may be missing.
type ModelDefinitionLike = Partial<ModelDefinitionConfig> &
Pick<ModelDefinitionConfig, "id" | "name">;
/** Type guard: true only for finite numbers strictly greater than zero. */
function isPositiveNumber(value: unknown): value is number {
  if (typeof value !== "number") return false;
  return Number.isFinite(value) && value > 0;
}
/**
 * Merge a possibly-partial cost block with DEFAULT_MODEL_COST.
 * Each field is kept only when it is an actual number; anything else
 * (missing, null, wrong type) falls back to the zero-cost default.
 */
function resolveModelCost(
  raw?: Partial<ModelDefinitionConfig["cost"]>,
): ModelDefinitionConfig["cost"] {
  const resolved = { ...DEFAULT_MODEL_COST };
  for (const field of ["input", "output", "cacheRead", "cacheWrite"] as const) {
    const value = raw?.[field];
    if (typeof value === "number") resolved[field] = value;
  }
  return resolved;
}
function resolveAnthropicDefaultAuthMode(cfg: ClawdbotConfig): AnthropicAuthDefaultsMode | null {
const profiles = cfg.auth?.profiles ?? {};
const anthropicProfiles = Object.entries(profiles).filter(
@@ -114,12 +144,77 @@ export function applyTalkApiKey(config: ClawdbotConfig): ClawdbotConfig {
}
export function applyModelDefaults(cfg: ClawdbotConfig): ClawdbotConfig {
const existingAgent = cfg.agents?.defaults;
if (!existingAgent) return cfg;
const existingModels = existingAgent.models ?? {};
if (Object.keys(existingModels).length === 0) return cfg;
let mutated = false;
let nextCfg = cfg;
const providerConfig = nextCfg.models?.providers;
if (providerConfig) {
const nextProviders = { ...providerConfig };
for (const [providerId, provider] of Object.entries(providerConfig)) {
const models = provider.models;
if (!Array.isArray(models) || models.length === 0) continue;
let providerMutated = false;
const nextModels = models.map((model) => {
const raw = model as ModelDefinitionLike;
let modelMutated = false;
const reasoning = typeof raw.reasoning === "boolean" ? raw.reasoning : false;
if (raw.reasoning !== reasoning) modelMutated = true;
const input = raw.input ?? [...DEFAULT_MODEL_INPUT];
if (raw.input === undefined) modelMutated = true;
const cost = resolveModelCost(raw.cost);
const costMutated =
!raw.cost ||
raw.cost.input !== cost.input ||
raw.cost.output !== cost.output ||
raw.cost.cacheRead !== cost.cacheRead ||
raw.cost.cacheWrite !== cost.cacheWrite;
if (costMutated) modelMutated = true;
const contextWindow = isPositiveNumber(raw.contextWindow)
? raw.contextWindow
: DEFAULT_CONTEXT_TOKENS;
if (raw.contextWindow !== contextWindow) modelMutated = true;
const defaultMaxTokens = Math.min(DEFAULT_MODEL_MAX_TOKENS, contextWindow);
const maxTokens = isPositiveNumber(raw.maxTokens) ? raw.maxTokens : defaultMaxTokens;
if (raw.maxTokens !== maxTokens) modelMutated = true;
if (!modelMutated) return model;
providerMutated = true;
return {
...raw,
reasoning,
input,
cost,
contextWindow,
maxTokens,
} as ModelDefinitionConfig;
});
if (!providerMutated) continue;
nextProviders[providerId] = { ...provider, models: nextModels };
mutated = true;
}
if (mutated) {
nextCfg = {
...nextCfg,
models: {
...nextCfg.models,
providers: nextProviders,
},
};
}
}
const existingAgent = nextCfg.agents?.defaults;
if (!existingAgent) return mutated ? nextCfg : cfg;
const existingModels = existingAgent.models ?? {};
if (Object.keys(existingModels).length === 0) return mutated ? nextCfg : cfg;
const nextModels: Record<string, { alias?: string }> = {
...existingModels,
};
@@ -135,9 +230,9 @@ export function applyModelDefaults(cfg: ClawdbotConfig): ClawdbotConfig {
if (!mutated) return cfg;
return {
...cfg,
...nextCfg,
agents: {
...cfg.agents,
...nextCfg.agents,
defaults: { ...existingAgent, models: nextModels },
},
};

View File

@@ -1,4 +1,5 @@
import { describe, expect, it } from "vitest";
import { DEFAULT_CONTEXT_TOKENS } from "../agents/defaults.js";
import { applyModelDefaults } from "./defaults.js";
import type { ClawdbotConfig } from "./types.js";
@@ -55,4 +56,28 @@ describe("applyModelDefaults", () => {
"gemini-flash",
);
});
it("fills missing model provider defaults", () => {
  // Minimal custom provider entry: each model supplies only the required
  // `id`/`name` pair, so every other field must be defaulted.
  const config = {
    models: {
      providers: {
        myproxy: {
          baseUrl: "https://proxy.example/v1",
          apiKey: "sk-test",
          api: "openai-completions",
          models: [{ id: "gpt-5.2", name: "GPT-5.2" }],
        },
      },
    },
  } satisfies ClawdbotConfig;

  const result = applyModelDefaults(config);
  const resolved = result.models?.providers?.myproxy?.models?.[0];

  // Defaults documented for custom providers: no reasoning, text-only input,
  // zero cost, DEFAULT_CONTEXT_TOKENS window, 8192 max output tokens.
  expect(resolved?.reasoning).toBe(false);
  expect(resolved?.input).toEqual(["text"]);
  expect(resolved?.cost).toEqual({ input: 0, output: 0, cacheRead: 0, cacheWrite: 0 });
  expect(resolved?.contextWindow).toBe(DEFAULT_CONTEXT_TOKENS);
  expect(resolved?.maxTokens).toBe(8192);
});
});

View File

@@ -28,18 +28,19 @@ export const ModelDefinitionSchema = z
id: z.string().min(1),
name: z.string().min(1),
api: ModelApiSchema.optional(),
reasoning: z.boolean(),
input: z.array(z.union([z.literal("text"), z.literal("image")])),
reasoning: z.boolean().optional(),
input: z.array(z.union([z.literal("text"), z.literal("image")])).optional(),
cost: z
.object({
input: z.number(),
output: z.number(),
cacheRead: z.number(),
cacheWrite: z.number(),
input: z.number().optional(),
output: z.number().optional(),
cacheRead: z.number().optional(),
cacheWrite: z.number().optional(),
})
.strict(),
contextWindow: z.number().positive(),
maxTokens: z.number().positive(),
.strict()
.optional(),
contextWindow: z.number().positive().optional(),
maxTokens: z.number().positive().optional(),
headers: z.record(z.string(), z.string()).optional(),
compat: ModelCompatSchema,
})