feat: add dynamic template variables to messages.responsePrefix (#923)

Adds support for template variables in `messages.responsePrefix` that
resolve dynamically at runtime to the actual model used (including
after fallback).

Supported variables (case-insensitive):
- {model} - short model name (e.g., "claude-opus-4-5", "gpt-4o")
- {modelFull} - full model identifier (e.g., "anthropic/claude-opus-4-5")
- {provider} - provider name (e.g., "anthropic", "openai")
- {thinkingLevel} or {think} - thinking level ("high", "low", "off")
- {identity.name} or {identityName} - agent identity name

Example: "[{model} | think:{thinkingLevel}]" → "[claude-opus-4-5 | think:high]"

Variables show the actual model used after fallback, not the intended
model. Unresolved variables remain as literal text.

Implementation:
- New module: src/auto-reply/reply/response-prefix-template.ts
- Template interpolation in normalize-reply.ts via context provider
- onModelSelected callback in agent-runner-execution.ts
- Updated all 6 provider message handlers (web, signal, discord,
  telegram, slack, imessage)
- 27 unit tests covering all variables and edge cases
- Documentation in docs/gateway/configuration.md and JSDoc

Fixes #923
This commit is contained in:
Sebastian
2026-01-14 23:05:08 -05:00
parent 6f5fc2276a
commit d0a4cce41e
15 changed files with 500 additions and 9 deletions

View File

@@ -1,5 +1,9 @@
// @ts-nocheck
import { resolveEffectiveMessagesConfig } from "../agents/identity.js";
import { resolveEffectiveMessagesConfig, resolveIdentityName } from "../agents/identity.js";
import {
extractShortModelName,
type ResponsePrefixContext,
} from "../auto-reply/reply/response-prefix-template.js";
import { EmbeddedBlockChunker } from "../agents/pi-embedded-block-chunker.js";
import { clearHistoryEntries } from "../auto-reply/reply/history.js";
import { dispatchReplyWithBufferedBlockDispatcher } from "../auto-reply/reply/provider-dispatcher.js";
@@ -114,12 +118,18 @@ export const dispatchTelegramMessage = async ({
Boolean(draftStream) ||
(typeof telegramCfg.blockStreaming === "boolean" ? !telegramCfg.blockStreaming : undefined);
// Create mutable context for response prefix template interpolation
let prefixContext: ResponsePrefixContext = {
identityName: resolveIdentityName(cfg, route.agentId),
};
let didSendReply = false;
const { queuedFinal } = await dispatchReplyWithBufferedBlockDispatcher({
ctx: ctxPayload,
cfg,
dispatcherOptions: {
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
deliver: async (payload, info) => {
if (info.kind === "final") {
await flushDraft();
@@ -151,6 +161,15 @@ export const dispatchTelegramMessage = async ({
}
: undefined,
disableBlockStreaming,
onModelSelected: (ctx) => {
prefixContext = {
...prefixContext,
provider: ctx.provider,
model: extractShortModelName(ctx.model),
modelFull: `${ctx.provider}/${ctx.model}`,
thinkingLevel: ctx.thinkLevel ?? "off",
};
},
},
});
draftStream?.stop();