feat: add dynamic template variables to messages.responsePrefix (#923)

Adds support for template variables in `messages.responsePrefix` that
resolve dynamically at runtime with the actual model used (including
after fallback).

Supported variables (case-insensitive):
- {model} - short model name (e.g., "claude-opus-4-5", "gpt-4o")
- {modelFull} - full model identifier (e.g., "anthropic/claude-opus-4-5")
- {provider} - provider name (e.g., "anthropic", "openai")
- {thinkingLevel} or {think} - thinking level ("high", "low", "off")
- {identity.name} or {identityName} - agent identity name

Example: "[{model} | think:{thinkingLevel}]" → "[claude-opus-4-5 | think:high]"

Variables show the actual model used after fallback, not the intended
model. Unresolved variables remain as literal text.

Implementation:
- New module: src/auto-reply/reply/response-prefix-template.ts
- Template interpolation in normalize-reply.ts via context provider
- onModelSelected callback in agent-runner-execution.ts
- Updated all 6 provider message handlers (web, signal, discord,
  telegram, slack, imessage)
- 27 unit tests covering all variables and edge cases
- Documentation in docs/gateway/configuration.md and JSDoc

Fixes #923
This commit is contained in:
Sebastian
2026-01-14 23:05:08 -05:00
parent 6f5fc2276a
commit d0a4cce41e
15 changed files with 500 additions and 9 deletions

View File

@@ -1184,6 +1184,31 @@ streaming, final replies) across channels unless already present.
If `messages.responsePrefix` is unset, no prefix is applied by default.
Set it to `"auto"` to derive `[{identity.name}]` for the routed agent (when set).
#### Template variables
The `responsePrefix` string can include template variables that resolve dynamically:
| Variable | Description | Example |
|----------|-------------|---------|
| `{model}` | Short model name | `claude-opus-4-5`, `gpt-4o` |
| `{modelFull}` | Full model identifier | `anthropic/claude-opus-4-5` |
| `{provider}` | Provider name | `anthropic`, `openai` |
| `{thinkingLevel}` | Current thinking level | `high`, `low`, `off` |
| `{identity.name}` | Agent identity name | (same as `"auto"` mode) |
Variables are case-insensitive (`{MODEL}` = `{model}`). `{think}` is an alias for `{thinkingLevel}`; `{identityName}` is an alias for `{identity.name}`.
Unresolved variables remain as literal text.
```json5
{
messages: {
responsePrefix: "[{model} | think:{thinkingLevel}]"
}
}
```
Example output: `[claude-opus-4-5 | think:high] Here's my response...`
WhatsApp inbound prefix is configured via `channels.whatsapp.messagePrefix` (deprecated:
`messages.messagePrefix`). Default stays **unchanged**: `"[clawdbot]"` when
`channels.whatsapp.allowFrom` is empty, otherwise `""` (no prefix). When using

View File

@@ -26,6 +26,11 @@ export function resolveIdentityNamePrefix(
return `[${name}]`;
}
/** Returns just the identity name (without brackets) for template context. */
export function resolveIdentityName(cfg: ClawdbotConfig, agentId: string): string | undefined {
  // Blank or whitespace-only names collapse to undefined so callers can treat
  // "no identity" and "empty identity" uniformly.
  const trimmed = resolveAgentIdentity(cfg, agentId)?.name?.trim();
  return trimmed || undefined;
}
export function resolveMessagePrefix(
cfg: ClawdbotConfig,
agentId: string,

View File

@@ -125,6 +125,14 @@ export async function runAgentTurnWithFallback(params: {
resolveAgentIdFromSessionKey(params.followupRun.run.sessionKey),
),
run: (provider, model) => {
// Notify that model selection is complete (including after fallback).
// This allows responsePrefix template interpolation with the actual model.
params.opts?.onModelSelected?.({
provider,
model,
thinkLevel: params.followupRun.run.thinkLevel,
});
if (isCliProvider(provider, params.followupRun.run.config)) {
const startedAt = Date.now();
emitAgentEvent({

View File

@@ -1,9 +1,15 @@
import { stripHeartbeatToken } from "../heartbeat.js";
import { HEARTBEAT_TOKEN, isSilentReplyText, SILENT_REPLY_TOKEN } from "../tokens.js";
import type { ReplyPayload } from "../types.js";
import {
resolveResponsePrefixTemplate,
type ResponsePrefixContext,
} from "./response-prefix-template.js";
export type NormalizeReplyOptions = {
responsePrefix?: string;
/** Context for template variable interpolation in responsePrefix */
responsePrefixContext?: ResponsePrefixContext;
onHeartbeatStrip?: () => void;
stripHeartbeat?: boolean;
silentToken?: string;
@@ -36,13 +42,18 @@ export function normalizeReplyPayload(
text = stripped.text;
}
// Resolve template variables in responsePrefix if context is provided
const effectivePrefix = opts.responsePrefixContext
? resolveResponsePrefixTemplate(opts.responsePrefix, opts.responsePrefixContext)
: opts.responsePrefix;
if (
opts.responsePrefix &&
effectivePrefix &&
text &&
text.trim() !== HEARTBEAT_TOKEN &&
!text.startsWith(opts.responsePrefix)
!text.startsWith(effectivePrefix)
) {
text = `${opts.responsePrefix} ${text}`;
text = `${effectivePrefix} ${text}`;
}
return { ...payload, text };

View File

@@ -1,6 +1,7 @@
import type { HumanDelayConfig } from "../../config/types.js";
import type { GetReplyOptions, ReplyPayload } from "../types.js";
import { normalizeReplyPayload } from "./normalize-reply.js";
import type { ResponsePrefixContext } from "./response-prefix-template.js";
import type { TypingController } from "./typing.js";
export type ReplyDispatchKind = "tool" | "block" | "final";
@@ -33,6 +34,11 @@ const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
export type ReplyDispatcherOptions = {
deliver: ReplyDispatchDeliverer;
responsePrefix?: string;
/** Static context for response prefix template interpolation. */
responsePrefixContext?: ResponsePrefixContext;
/** Dynamic context provider for response prefix template interpolation.
* Called at normalization time, after model selection is complete. */
responsePrefixContextProvider?: () => ResponsePrefixContext;
onHeartbeatStrip?: () => void;
onIdle?: () => void;
onError?: ReplyDispatchErrorHandler;
@@ -61,10 +67,17 @@ export type ReplyDispatcher = {
function normalizeReplyPayloadInternal(
payload: ReplyPayload,
opts: Pick<ReplyDispatcherOptions, "responsePrefix" | "onHeartbeatStrip">,
opts: Pick<
ReplyDispatcherOptions,
"responsePrefix" | "responsePrefixContext" | "responsePrefixContextProvider" | "onHeartbeatStrip"
>,
): ReplyPayload | null {
// Prefer dynamic context provider over static context
const prefixContext = opts.responsePrefixContextProvider?.() ?? opts.responsePrefixContext;
return normalizeReplyPayload(payload, {
responsePrefix: opts.responsePrefix,
responsePrefixContext: prefixContext,
onHeartbeatStrip: opts.onHeartbeatStrip,
});
}

View File

@@ -0,0 +1,181 @@
import { describe, expect, it } from "vitest";
import {
extractShortModelName,
hasTemplateVariables,
resolveResponsePrefixTemplate,
} from "./response-prefix-template.js";
// Covers placeholder resolution: every supported variable, both aliases,
// case-insensitivity, and pass-through of unresolved/unrecognized placeholders.
describe("resolveResponsePrefixTemplate", () => {
  it("returns undefined for undefined template", () => {
    expect(resolveResponsePrefixTemplate(undefined, {})).toBeUndefined();
  });
  it("returns template as-is when no variables present", () => {
    expect(resolveResponsePrefixTemplate("[Claude]", {})).toBe("[Claude]");
  });
  it("resolves {model} variable", () => {
    const result = resolveResponsePrefixTemplate("[{model}]", {
      model: "gpt-5.2",
    });
    expect(result).toBe("[gpt-5.2]");
  });
  it("resolves {modelFull} variable", () => {
    const result = resolveResponsePrefixTemplate("[{modelFull}]", {
      modelFull: "openai-codex/gpt-5.2",
    });
    expect(result).toBe("[openai-codex/gpt-5.2]");
  });
  it("resolves {provider} variable", () => {
    const result = resolveResponsePrefixTemplate("[{provider}]", {
      provider: "anthropic",
    });
    expect(result).toBe("[anthropic]");
  });
  it("resolves {thinkingLevel} variable", () => {
    const result = resolveResponsePrefixTemplate("think:{thinkingLevel}", {
      thinkingLevel: "high",
    });
    expect(result).toBe("think:high");
  });
  // Alias: {think} reads the same context field as {thinkingLevel}.
  it("resolves {think} as alias for thinkingLevel", () => {
    const result = resolveResponsePrefixTemplate("think:{think}", {
      thinkingLevel: "low",
    });
    expect(result).toBe("think:low");
  });
  it("resolves {identity.name} variable", () => {
    const result = resolveResponsePrefixTemplate("[{identity.name}]", {
      identityName: "Clawdbot",
    });
    expect(result).toBe("[Clawdbot]");
  });
  // Alias: {identityName} reads the same context field as {identity.name}.
  it("resolves {identityName} as alias", () => {
    const result = resolveResponsePrefixTemplate("[{identityName}]", {
      identityName: "Clawdbot",
    });
    expect(result).toBe("[Clawdbot]");
  });
  it("resolves multiple variables", () => {
    const result = resolveResponsePrefixTemplate("[{model} | think:{thinkingLevel}]", {
      model: "claude-opus-4-5",
      thinkingLevel: "high",
    });
    expect(result).toBe("[claude-opus-4-5 | think:high]");
  });
  // A recognized variable with no context value stays literal (not stripped).
  it("leaves unresolved variables as-is", () => {
    const result = resolveResponsePrefixTemplate("[{model}]", {});
    expect(result).toBe("[{model}]");
  });
  it("leaves unrecognized variables as-is", () => {
    const result = resolveResponsePrefixTemplate("[{unknownVar}]", {
      model: "gpt-5.2",
    });
    expect(result).toBe("[{unknownVar}]");
  });
  it("handles case insensitivity", () => {
    const result = resolveResponsePrefixTemplate("[{MODEL} | {ThinkingLevel}]", {
      model: "gpt-5.2",
      thinkingLevel: "low",
    });
    expect(result).toBe("[gpt-5.2 | low]");
  });
  it("handles mixed resolved and unresolved variables", () => {
    const result = resolveResponsePrefixTemplate("[{model} | {provider}]", {
      model: "gpt-5.2",
      // provider not provided
    });
    expect(result).toBe("[gpt-5.2 | {provider}]");
  });
  it("handles complex template with all variables", () => {
    const result = resolveResponsePrefixTemplate(
      "[{identity.name}] {provider}/{model} (think:{thinkingLevel})",
      {
        identityName: "Clawdbot",
        provider: "anthropic",
        model: "claude-opus-4-5",
        thinkingLevel: "high",
      },
    );
    expect(result).toBe("[Clawdbot] anthropic/claude-opus-4-5 (think:high)");
  });
});
// Covers normalization of model identifiers: provider prefix, 8-digit date
// suffix, and "-latest" suffix stripping, plus non-matching near-misses.
describe("extractShortModelName", () => {
  it("strips provider prefix", () => {
    expect(extractShortModelName("openai/gpt-5.2")).toBe("gpt-5.2");
    expect(extractShortModelName("anthropic/claude-opus-4-5")).toBe("claude-opus-4-5");
    expect(extractShortModelName("openai-codex/gpt-5.2-codex")).toBe("gpt-5.2-codex");
  });
  it("strips date suffix", () => {
    expect(extractShortModelName("claude-opus-4-5-20251101")).toBe("claude-opus-4-5");
    expect(extractShortModelName("gpt-5.2-20250115")).toBe("gpt-5.2");
  });
  it("strips -latest suffix", () => {
    expect(extractShortModelName("gpt-5.2-latest")).toBe("gpt-5.2");
    expect(extractShortModelName("claude-sonnet-latest")).toBe("claude-sonnet");
  });
  it("handles model without provider", () => {
    expect(extractShortModelName("gpt-5.2")).toBe("gpt-5.2");
    expect(extractShortModelName("claude-opus-4-5")).toBe("claude-opus-4-5");
  });
  it("handles full path with provider and date suffix", () => {
    expect(extractShortModelName("anthropic/claude-opus-4-5-20251101")).toBe("claude-opus-4-5");
  });
  it("preserves version numbers that look like dates but are not", () => {
    // Date suffix must be exactly 8 digits at the end
    expect(extractShortModelName("model-v1234567")).toBe("model-v1234567");
    expect(extractShortModelName("model-123456789")).toBe("model-123456789");
  });
});
// Covers variable detection, including the regression case where a shared
// global regex's lastIndex state could make repeated calls disagree.
describe("hasTemplateVariables", () => {
  it("returns false for undefined", () => {
    expect(hasTemplateVariables(undefined)).toBe(false);
  });
  it("returns false for empty string", () => {
    expect(hasTemplateVariables("")).toBe(false);
  });
  it("returns false for static prefix", () => {
    expect(hasTemplateVariables("[Claude]")).toBe(false);
  });
  it("returns true when template variables present", () => {
    expect(hasTemplateVariables("[{model}]")).toBe(true);
    expect(hasTemplateVariables("{provider}")).toBe(true);
    expect(hasTemplateVariables("prefix {thinkingLevel} suffix")).toBe(true);
  });
  it("returns true for multiple variables", () => {
    expect(hasTemplateVariables("[{model} | {provider}]")).toBe(true);
  });
  it("handles consecutive calls correctly (regex lastIndex reset)", () => {
    // First call
    expect(hasTemplateVariables("[{model}]")).toBe(true);
    // Second call should still work
    expect(hasTemplateVariables("[{model}]")).toBe(true);
    // Static string should return false
    expect(hasTemplateVariables("[Claude]")).toBe(false);
  });
});

View File

@@ -0,0 +1,97 @@
/**
* Template interpolation for response prefix.
*
* Supports variables like `{model}`, `{provider}`, `{thinkingLevel}`, etc.
* Variables are case-insensitive and unresolved ones remain as literal text.
*/
export type ResponsePrefixContext = {
  /** Short model name (e.g., "gpt-5.2", "claude-opus-4-5") */
  model?: string;
  /** Full model ID including provider (e.g., "openai-codex/gpt-5.2") */
  modelFull?: string;
  /** Provider name (e.g., "openai-codex", "anthropic") */
  provider?: string;
  /** Current thinking level (e.g., "high", "low", "off") */
  thinkingLevel?: string;
  /** Agent identity name */
  identityName?: string;
};

// Regex pattern for template variables: {variableName} or {variable.name}
const TEMPLATE_VAR_PATTERN = /\{([a-zA-Z][a-zA-Z0-9.]*)\}/g;

// Lowercased placeholder name -> context property supplying its value.
// Aliases ({think}, {identityname}) map to the same property.
const VARIABLE_TO_CONTEXT_KEY: Record<string, keyof ResponsePrefixContext> = {
  model: "model",
  modelfull: "modelFull",
  provider: "provider",
  thinkinglevel: "thinkingLevel",
  think: "thinkingLevel",
  "identity.name": "identityName",
  identityname: "identityName",
};

/**
 * Interpolate template variables in a response prefix string.
 *
 * Variable names are case-insensitive. Placeholders that are unrecognized,
 * or recognized but missing a value in `context`, are left as literal text.
 *
 * @param template - The template string with `{variable}` placeholders
 * @param context - Context object with values for interpolation
 * @returns The interpolated string, or undefined if template is undefined
 *
 * @example
 * resolveResponsePrefixTemplate("[{model} | think:{thinkingLevel}]", {
 *   model: "gpt-5.2",
 *   thinkingLevel: "high"
 * })
 * // Returns: "[gpt-5.2 | think:high]"
 */
export function resolveResponsePrefixTemplate(
  template: string | undefined,
  context: ResponsePrefixContext,
): string | undefined {
  if (!template) return undefined;
  return template.replace(TEMPLATE_VAR_PATTERN, (placeholder, varName: string) => {
    const contextKey = VARIABLE_TO_CONTEXT_KEY[varName.toLowerCase()];
    // Unknown variable or no value in context: keep the literal placeholder.
    if (!contextKey) return placeholder;
    return context[contextKey] ?? placeholder;
  });
}
/**
 * Extract short model name from a full model string.
 *
 * Strips:
 * - Provider prefix (e.g., "openai/" from "openai/gpt-5.2")
 * - Date suffixes (e.g., "-20251101" from "claude-opus-4-5-20251101")
 * - Common version suffixes (e.g., "-latest")
 *
 * @example
 * extractShortModelName("openai-codex/gpt-5.2") // "gpt-5.2"
 * extractShortModelName("claude-opus-4-5-20251101") // "claude-opus-4-5"
 * extractShortModelName("gpt-5.2-latest") // "gpt-5.2"
 */
export function extractShortModelName(fullModel: string): string {
  // Keep only the segment after the last "/" (drops the provider prefix).
  const modelPart = fullModel.split("/").pop() ?? fullModel;
  // Drop a trailing 8-digit date stamp (-YYYYMMDD), then a trailing "-latest".
  const withoutDate = modelPart.replace(/-\d{8}$/, "");
  return withoutDate.replace(/-latest$/, "");
}
/**
 * Check if a template string contains any template variables.
 *
 * @param template - Candidate prefix string (may be undefined/empty)
 * @returns true when at least one `{variable}` placeholder is present
 */
export function hasTemplateVariables(template: string | undefined): boolean {
  if (!template) return false;
  // Use a non-global regex: `.test()` on a /g regex is stateful (it advances
  // `lastIndex`), which previously required a manual reset of the shared
  // TEMPLATE_VAR_PATTERN and would silently misbehave if any caller forgot
  // it. A non-global regex carries no state between calls.
  return /\{[a-zA-Z][a-zA-Z0-9.]*\}/.test(template);
}

View File

@@ -5,6 +5,13 @@ export type BlockReplyContext = {
timeoutMs?: number;
};
/** Context passed to onModelSelected callback with actual model used. */
export type ModelSelectedContext = {
  // Provider that actually served the turn (e.g., "anthropic", "openai").
  provider: string;
  // Model identifier actually used — after fallback, this may differ from
  // the model originally requested.
  model: string;
  // Thinking level for the run; undefined when no level was set.
  thinkLevel: string | undefined;
};
export type GetReplyOptions = {
onReplyStart?: () => Promise<void> | void;
onTypingController?: (typing: TypingController) => void;
@@ -13,6 +20,9 @@ export type GetReplyOptions = {
onReasoningStream?: (payload: ReplyPayload) => Promise<void> | void;
onBlockReply?: (payload: ReplyPayload, context?: BlockReplyContext) => Promise<void> | void;
onToolResult?: (payload: ReplyPayload) => Promise<void> | void;
/** Called when the actual model is selected (including after fallback).
* Use this to get model/provider/thinkLevel for responsePrefix template interpolation. */
onModelSelected?: (ctx: ModelSelectedContext) => void;
disableBlockStreaming?: boolean;
/** Timeout for block reply delivery (ms). */
blockReplyTimeoutMs?: number;

View File

@@ -44,8 +44,21 @@ export type MessagesConfig = {
messagePrefix?: string;
/**
* Prefix auto-added to all outbound replies.
* - string: explicit prefix
*
* - string: explicit prefix (may include template variables)
* - special value: `"auto"` derives `[{agents.list[].identity.name}]` for the routed agent (when set)
*
* Supported template variables (case-insensitive):
* - `{model}` - short model name (e.g., `claude-opus-4-5`, `gpt-4o`)
* - `{modelFull}` - full model identifier (e.g., `anthropic/claude-opus-4-5`)
* - `{provider}` - provider name (e.g., `anthropic`, `openai`)
* - `{thinkingLevel}` or `{think}` - current thinking level (`high`, `low`, `off`)
* - `{identity.name}` or `{identityName}` - agent identity name
*
* Example: `"[{model} | think:{thinkingLevel}]"` → `"[claude-opus-4-5 | think:high]"`
*
* Unresolved variables remain as literal text (e.g., `{model}` if context unavailable).
*
* Default: none
*/
responsePrefix?: string;

View File

@@ -2,7 +2,12 @@ import {
resolveAckReaction,
resolveEffectiveMessagesConfig,
resolveHumanDelayConfig,
resolveIdentityName,
} from "../../agents/identity.js";
import {
extractShortModelName,
type ResponsePrefixContext,
} from "../../auto-reply/reply/response-prefix-template.js";
import { formatAgentEnvelope, formatThreadStarterEnvelope } from "../../auto-reply/envelope.js";
import { dispatchReplyFromConfig } from "../../auto-reply/reply/dispatch-from-config.js";
import { buildHistoryContextFromMap, clearHistoryEntries } from "../../auto-reply/reply/history.js";
@@ -280,8 +285,15 @@ export async function processDiscordMessage(ctx: DiscordMessagePreflightContext)
const typingChannelId = deliverTarget.startsWith("channel:")
? deliverTarget.slice("channel:".length)
: message.channelId;
// Create mutable context for response prefix template interpolation
let prefixContext: ResponsePrefixContext = {
identityName: resolveIdentityName(cfg, route.agentId),
};
const { dispatcher, replyOptions, markDispatchIdle } = createReplyDispatcherWithTyping({
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
deliver: async (payload: ReplyPayload) => {
const replyToId = replyReference.use();
@@ -316,6 +328,15 @@ export async function processDiscordMessage(ctx: DiscordMessagePreflightContext)
typeof discordConfig?.blockStreaming === "boolean"
? !discordConfig.blockStreaming
: undefined,
onModelSelected: (ctx) => {
prefixContext = {
...prefixContext,
provider: ctx.provider,
model: extractShortModelName(ctx.model),
modelFull: `${ctx.provider}/${ctx.model}`,
thinkingLevel: ctx.thinkLevel ?? "off",
};
},
},
});
markDispatchIdle();

View File

@@ -1,4 +1,12 @@
import { resolveEffectiveMessagesConfig, resolveHumanDelayConfig } from "../../agents/identity.js";
import {
resolveEffectiveMessagesConfig,
resolveHumanDelayConfig,
resolveIdentityName,
} from "../../agents/identity.js";
import {
extractShortModelName,
type ResponsePrefixContext,
} from "../../auto-reply/reply/response-prefix-template.js";
import { resolveTextChunkLimit } from "../../auto-reply/chunk.js";
import { hasControlCommand } from "../../auto-reply/command-detection.js";
import { formatAgentEnvelope } from "../../auto-reply/envelope.js";
@@ -341,8 +349,15 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P
}
let didSendReply = false;
// Create mutable context for response prefix template interpolation
let prefixContext: ResponsePrefixContext = {
identityName: resolveIdentityName(cfg, route.agentId),
};
const dispatcher = createReplyDispatcher({
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
deliver: async (payload) => {
await deliverReplies({
@@ -370,6 +385,15 @@ export async function monitorIMessageProvider(opts: MonitorIMessageOpts = {}): P
typeof accountInfo.config.blockStreaming === "boolean"
? !accountInfo.config.blockStreaming
: undefined,
onModelSelected: (ctx) => {
prefixContext = {
...prefixContext,
provider: ctx.provider,
model: extractShortModelName(ctx.model),
modelFull: `${ctx.provider}/${ctx.model}`,
thinkingLevel: ctx.thinkLevel ?? "off",
};
},
},
});
if (!queuedFinal) {

View File

@@ -1,4 +1,12 @@
import { resolveEffectiveMessagesConfig, resolveHumanDelayConfig } from "../../agents/identity.js";
import {
resolveEffectiveMessagesConfig,
resolveHumanDelayConfig,
resolveIdentityName,
} from "../../agents/identity.js";
import {
extractShortModelName,
type ResponsePrefixContext,
} from "../../auto-reply/reply/response-prefix-template.js";
import { formatAgentEnvelope } from "../../auto-reply/envelope.js";
import { dispatchReplyFromConfig } from "../../auto-reply/reply/dispatch-from-config.js";
import { buildHistoryContextFromMap, clearHistoryEntries } from "../../auto-reply/reply/history.js";
@@ -310,8 +318,15 @@ export function createSignalEventHandler(deps: SignalEventHandlerDeps) {
}
let didSendReply = false;
// Create mutable context for response prefix template interpolation
let prefixContext: ResponsePrefixContext = {
identityName: resolveIdentityName(deps.cfg, route.agentId),
};
const dispatcher = createReplyDispatcher({
responsePrefix: resolveEffectiveMessagesConfig(deps.cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
humanDelay: resolveHumanDelayConfig(deps.cfg, route.agentId),
deliver: async (payload) => {
await deps.deliverReplies({
@@ -338,6 +353,15 @@ export function createSignalEventHandler(deps: SignalEventHandlerDeps) {
replyOptions: {
disableBlockStreaming:
typeof deps.blockStreaming === "boolean" ? !deps.blockStreaming : undefined,
onModelSelected: (ctx) => {
prefixContext = {
...prefixContext,
provider: ctx.provider,
model: extractShortModelName(ctx.model),
modelFull: `${ctx.provider}/${ctx.model}`,
thinkingLevel: ctx.thinkLevel ?? "off",
};
},
},
});
if (!queuedFinal) {

View File

@@ -1,7 +1,12 @@
import {
resolveEffectiveMessagesConfig,
resolveHumanDelayConfig,
resolveIdentityName,
} from "../../../agents/identity.js";
import {
extractShortModelName,
type ResponsePrefixContext,
} from "../../../auto-reply/reply/response-prefix-template.js";
import { dispatchReplyFromConfig } from "../../../auto-reply/reply/dispatch-from-config.js";
import { clearHistoryEntries } from "../../../auto-reply/reply/history.js";
import { createReplyDispatcherWithTyping } from "../../../auto-reply/reply/reply-dispatcher.js";
@@ -62,8 +67,15 @@ export async function dispatchPreparedSlackMessage(prepared: PreparedSlackMessag
};
let didSendReply = false;
// Create mutable context for response prefix template interpolation
let prefixContext: ResponsePrefixContext = {
identityName: resolveIdentityName(cfg, route.agentId),
};
const { dispatcher, replyOptions, markDispatchIdle } = createReplyDispatcherWithTyping({
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
deliver: async (payload) => {
const replyThreadTs = replyPlan.nextThreadTs();
@@ -104,6 +116,15 @@ export async function dispatchPreparedSlackMessage(prepared: PreparedSlackMessag
typeof account.config.blockStreaming === "boolean"
? !account.config.blockStreaming
: undefined,
onModelSelected: (ctx) => {
prefixContext = {
...prefixContext,
provider: ctx.provider,
model: extractShortModelName(ctx.model),
modelFull: `${ctx.provider}/${ctx.model}`,
thinkingLevel: ctx.thinkLevel ?? "off",
};
},
},
});
markDispatchIdle();

View File

@@ -1,5 +1,9 @@
// @ts-nocheck
import { resolveEffectiveMessagesConfig } from "../agents/identity.js";
import { resolveEffectiveMessagesConfig, resolveIdentityName } from "../agents/identity.js";
import {
extractShortModelName,
type ResponsePrefixContext,
} from "../auto-reply/reply/response-prefix-template.js";
import { EmbeddedBlockChunker } from "../agents/pi-embedded-block-chunker.js";
import { clearHistoryEntries } from "../auto-reply/reply/history.js";
import { dispatchReplyWithBufferedBlockDispatcher } from "../auto-reply/reply/provider-dispatcher.js";
@@ -114,12 +118,18 @@ export const dispatchTelegramMessage = async ({
Boolean(draftStream) ||
(typeof telegramCfg.blockStreaming === "boolean" ? !telegramCfg.blockStreaming : undefined);
// Create mutable context for response prefix template interpolation
let prefixContext: ResponsePrefixContext = {
identityName: resolveIdentityName(cfg, route.agentId),
};
let didSendReply = false;
const { queuedFinal } = await dispatchReplyWithBufferedBlockDispatcher({
ctx: ctxPayload,
cfg,
dispatcherOptions: {
responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId).responsePrefix,
responsePrefixContextProvider: () => prefixContext,
deliver: async (payload, info) => {
if (info.kind === "final") {
await flushDraft();
@@ -151,6 +161,15 @@ export const dispatchTelegramMessage = async ({
}
: undefined,
disableBlockStreaming,
onModelSelected: (ctx) => {
prefixContext = {
...prefixContext,
provider: ctx.provider,
model: extractShortModelName(ctx.model),
modelFull: `${ctx.provider}/${ctx.model}`,
thinkingLevel: ctx.thinkLevel ?? "off",
};
},
},
});
draftStream?.stop();

View File

@@ -1,4 +1,8 @@
import { resolveEffectiveMessagesConfig } from "../../../agents/identity.js";
import { resolveEffectiveMessagesConfig, resolveIdentityName } from "../../../agents/identity.js";
import {
extractShortModelName,
type ResponsePrefixContext,
} from "../../../auto-reply/reply/response-prefix-template.js";
import { resolveTextChunkLimit } from "../../../auto-reply/chunk.js";
import { formatAgentEnvelope } from "../../../auto-reply/envelope.js";
import { buildHistoryContext } from "../../../auto-reply/reply/history.js";
@@ -173,6 +177,11 @@ export async function processMessage(params: {
params.route.agentId,
).responsePrefix;
// Create mutable context for response prefix template interpolation
let prefixContext: ResponsePrefixContext = {
identityName: resolveIdentityName(params.cfg, params.route.agentId),
};
const { queuedFinal } = await dispatchReplyWithBufferedBlockDispatcher({
ctx: {
Body: combinedBody,
@@ -210,6 +219,7 @@ export async function processMessage(params: {
replyResolver: params.replyResolver,
dispatcherOptions: {
responsePrefix,
responsePrefixContextProvider: () => prefixContext,
onHeartbeatStrip: () => {
if (!didLogHeartbeatStrip) {
didLogHeartbeatStrip = true;
@@ -267,6 +277,15 @@ export async function processMessage(params: {
typeof params.cfg.channels?.whatsapp?.blockStreaming === "boolean"
? !params.cfg.channels.whatsapp.blockStreaming
: undefined,
onModelSelected: (ctx) => {
prefixContext = {
...prefixContext,
provider: ctx.provider,
model: extractShortModelName(ctx.model),
modelFull: `${ctx.provider}/${ctx.model}`,
thinkingLevel: ctx.thinkLevel ?? "off",
};
},
},
});