feat: add dynamic template variables to messages.responsePrefix (#923)

Adds support for template variables in `messages.responsePrefix` that
resolve at runtime to the model actually used, including after
fallback.

Supported variables (case-insensitive):
- {model} - short model name (e.g., "claude-opus-4-5", "gpt-4o")
- {modelFull} - full model identifier (e.g., "anthropic/claude-opus-4-5")
- {provider} - provider name (e.g., "anthropic", "openai")
- {thinkingLevel} or {think} - thinking level ("high", "low", "off")
- {identity.name} or {identityName} - agent identity name

Example: "[{model} | think:{thinkingLevel}]" → "[claude-opus-4-5 | think:high]"

Variables show the actual model used after fallback, not the intended
model. Unresolved variables remain as literal text.
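
To make the resolution rules above concrete, here is a minimal TypeScript
sketch. It is not the shipped implementation: only the `ResponsePrefixContext`
field names and the variable aliases come from this commit; the helper name
and regex are assumptions.

```ts
// Sketch only; the real helper lives in
// src/auto-reply/reply/response-prefix-template.ts and may differ.
export type ResponsePrefixContext = {
  provider?: string;
  model?: string;
  modelFull?: string;
  thinkingLevel?: string;
  identityName?: string;
};

// Replaces {model}, {modelFull}, {provider}, {thinkingLevel}/{think}, and
// {identity.name}/{identityName} case-insensitively; anything unresolved is
// left in the output as literal text.
export function applyResponsePrefixTemplate( // hypothetical name
  template: string,
  ctx: ResponsePrefixContext,
): string {
  const aliases: Record<string, keyof ResponsePrefixContext> = {
    model: "model",
    modelfull: "modelFull",
    provider: "provider",
    thinkinglevel: "thinkingLevel",
    think: "thinkingLevel",
    "identity.name": "identityName",
    identityname: "identityName",
  };
  return template.replace(/\{([a-z.]+)\}/gi, (match, name: string) => {
    const key = aliases[name.toLowerCase()];
    const value = key ? ctx[key] : undefined;
    return value ?? match; // unresolved -> keep literal
  });
}
```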

Implementation:
- New module: src/auto-reply/reply/response-prefix-template.ts (see the
  sketch after this list)
- Template interpolation in normalize-reply.ts via context provider
- onModelSelected callback in agent-runner-execution.ts
- Updated all 6 provider message handlers (web, signal, discord,
  telegram, slack, imessage)
- 27 unit tests covering all variables and edge cases
- Documentation in docs/gateway/configuration.md and JSDoc
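
The diff below imports `extractShortModelName` from the new module. Its body
is not part of this excerpt; the sketch below only illustrates the mapping
implied by the {model}/{modelFull} examples above and may not match the real
code.

```ts
// Assumed behavior, inferred from the examples in this message:
//   "anthropic/claude-opus-4-5" -> "claude-opus-4-5"
//   "claude-opus-4-5"           -> "claude-opus-4-5" (no provider prefix)
export function extractShortModelName(model: string): string {
  const slash = model.lastIndexOf("/");
  return slash >= 0 ? model.slice(slash + 1) : model;
}
```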

Fixes #923
Author: Sebastian
Date: 2026-01-14 23:05:08 -05:00
parent 6f5fc2276a
commit d0a4cce41e
15 changed files with 500 additions and 9 deletions


@@ -1,4 +1,8 @@
-import { resolveEffectiveMessagesConfig } from "../../../agents/identity.js";
+import { resolveEffectiveMessagesConfig, resolveIdentityName } from "../../../agents/identity.js";
+import {
+  extractShortModelName,
+  type ResponsePrefixContext,
+} from "../../../auto-reply/reply/response-prefix-template.js";
 import { resolveTextChunkLimit } from "../../../auto-reply/chunk.js";
 import { formatAgentEnvelope } from "../../../auto-reply/envelope.js";
 import { buildHistoryContext } from "../../../auto-reply/reply/history.js";
@@ -173,6 +177,11 @@ export async function processMessage(params: {
     params.route.agentId,
   ).responsePrefix;
+  // Create mutable context for response prefix template interpolation
+  let prefixContext: ResponsePrefixContext = {
+    identityName: resolveIdentityName(params.cfg, params.route.agentId),
+  };
   const { queuedFinal } = await dispatchReplyWithBufferedBlockDispatcher({
     ctx: {
       Body: combinedBody,
@@ -210,6 +219,7 @@ export async function processMessage(params: {
     replyResolver: params.replyResolver,
     dispatcherOptions: {
       responsePrefix,
+      responsePrefixContextProvider: () => prefixContext,
       onHeartbeatStrip: () => {
         if (!didLogHeartbeatStrip) {
           didLogHeartbeatStrip = true;
@@ -267,6 +277,15 @@ export async function processMessage(params: {
         typeof params.cfg.channels?.whatsapp?.blockStreaming === "boolean"
           ? !params.cfg.channels.whatsapp.blockStreaming
           : undefined,
+      onModelSelected: (ctx) => {
+        prefixContext = {
+          ...prefixContext,
+          provider: ctx.provider,
+          model: extractShortModelName(ctx.model),
+          modelFull: `${ctx.provider}/${ctx.model}`,
+          thinkingLevel: ctx.thinkLevel ?? "off",
+        };
+      },
     },
   });
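
Design note: `prefixContext` is declared with `let` and exposed through a
closure (`responsePrefixContextProvider: () => prefixContext`), so when
`onModelSelected` fires later, possibly after a fallback, the dispatcher reads
the updated values at the moment it renders the prefix rather than whatever
was known when dispatch started. A hedged sketch of what the consuming side in
normalize-reply.ts could look like; that file is not shown here, and every
name except the two option keys is an assumption:

```ts
// Sketch only; normalize-reply.ts is not part of this diff.
// applyResponsePrefixTemplate is the hypothetical helper sketched earlier.
function renderResponsePrefix(options: {
  responsePrefix?: string;
  responsePrefixContextProvider?: () => ResponsePrefixContext;
}): string | undefined {
  if (!options.responsePrefix) return undefined;
  // Read the context lazily so a model selected after fallback is reflected.
  const ctx = options.responsePrefixContextProvider?.() ?? {};
  return applyResponsePrefixTemplate(options.responsePrefix, ctx);
}
```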