fix: normalize routed replies

Peter Steinberger
2026-01-09 13:47:03 +01:00
parent bfadc8f4ee
commit c2d185aab7
5 changed files with 104 additions and 48 deletions

View File

@@ -7,7 +7,6 @@
- Commands: accept /models as an alias for /model.
- Debugging: add raw model stream logging flags and document gateway watch mode.
- Agent: add claude-cli/opus-4.5 runner via Claude CLI with resume support (tools disabled).
- Auth: respect cooldown tracking even with explicit `auth.order` (avoid repeatedly trying known-bad keys). — thanks @steipete
- CLI: move `clawdbot message` to subcommands (`message send|poll|…`), fold Discord/Slack/Telegram/WhatsApp tools into `message`, and require `--provider` unless only one provider is configured.
- CLI: improve `logs` output (pretty/plain/JSONL), add gateway unreachable hint, and document logging.
- WhatsApp: route queued replies to the original sender instead of the bot's own number. (#534) — thanks @mcinteerj
@@ -15,7 +14,6 @@
- Deps: bump Pi to 0.40.0 and drop pi-ai patch (upstream 429 fix). (#543) — thanks @mcinteerj
- Security: per-agent mention patterns and group elevated directives now require explicit mention to avoid cross-agent toggles.
- Config: support inline env vars in config (`env.*` / `env.vars`) and document env precedence.
- Config: migrate routing/agent config into agents.list/agents.defaults and messages/tools/audio with default agent selection and per-agent identity config.
- Agent: enable adaptive context pruning by default for tool-result trimming.
- Doctor: check config/state permissions and offer to tighten them. — thanks @steipete
- Doctor/Daemon: audit supervisor configs, add --repair/--force flows, surface service config audits in daemon status, and document user vs system services. — thanks @steipete
@@ -58,7 +56,7 @@
- Onboarding: QuickStart jumps straight into provider selection with Telegram preselected when unset.
- Onboarding: QuickStart auto-installs the Gateway daemon with Node (no runtime picker).
- Onboarding: clarify WhatsApp owner number prompt and label pairing phone number.
- Onboarding: add hosted MiniMax M2.1 API key flow + config. (#495) — thanks @tobiasbischoff
- Auto-reply: normalize routed replies to drop NO_REPLY and apply response prefixes.
- Daemon runtime: remove Bun from selection options.
- CLI: restore hidden `gateway-daemon` alias for legacy launchd configs.
- Onboarding/Configure: add OpenAI API key flow that stores in shared `~/.clawdbot/.env` for launchd; simplify Anthropic token prompt order.
@@ -111,8 +109,8 @@
- New default: DM pairing (`dmPolicy="pairing"` / `discord.dm.policy="pairing"` / `slack.dm.policy="pairing"`).
- To keep old “open to everyone” behavior: set `dmPolicy="open"` and include `"*"` in the relevant `allowFrom` (Discord/Slack: `discord.dm.allowFrom` / `slack.dm.allowFrom`).
- Approve requests via `clawdbot pairing list --provider <provider>` + `clawdbot pairing approve --provider <provider> <code>`.
- Sandbox: default `agents.defaults.sandbox.scope` to `"agent"` (one container/workspace per agent). Use `"session"` for per-session isolation; `"shared"` disables cross-session isolation.
- Timestamps in agent envelopes are now UTC (compact `YYYY-MM-DDTHH:mmZ`); removed `messages.timestampPrefix`. Add `agents.defaults.userTimezone` to tell the model the user's local time (system prompt only).
- Sandbox: default `agent.sandbox.scope` to `"agent"` (one container/workspace per agent). Use `"session"` for per-session isolation; `"shared"` disables cross-session isolation.
- Timestamps in agent envelopes are now UTC (compact `YYYY-MM-DDTHH:mmZ`); removed `messages.timestampPrefix`. Add `agent.userTimezone` to tell the model the user's local time (system prompt only).
- Model config schema changes (auth profiles + model lists); doctor auto-migrates and the gateway rewrites legacy configs on startup.
- Commands: gate all slash commands to authorized senders; add `/compact` to manually compact session context.
- Groups: `whatsapp.groups`, `telegram.groups`, and `imessage.groups` now act as allowlists when set. Add `"*"` to keep allow-all behavior.
@@ -138,7 +136,7 @@
## 2026.1.5
### Highlights
- Models: add image-specific model config (`agents.defaults.imageModel` + fallbacks) and scan support.
- Models: add image-specific model config (`agent.imageModel` + fallbacks) and scan support.
- Agent tools: new `image` tool routed to the image model (when configured).
- Config: default model shorthands (`opus`, `sonnet`, `gpt`, `gpt-mini`, `gemini`, `gemini-flash`).
- Docs: document built-in model shorthands + precedence (user config wins).
@@ -163,7 +161,7 @@
- Agent tools: OpenAI-compatible tool JSON Schemas (fix `browser`, normalize union schemas).
- Onboarding: when running from source, auto-build missing Control UI assets (`bun run ui:build`).
- Discord/Slack: route reaction + system notifications to the correct session (no main-session bleed).
- Agent tools: honor `tools.allow` / `tools.deny` policy even when sandbox is off.
- Agent tools: honor `agent.tools` allow/deny policy even when sandbox is off.
- Discord: avoid duplicate replies when OpenAI emits repeated `message_end` events.
- Commands: unify /status (inline) and command auth across providers; group bypass for authorized control commands; remove Discord /clawd slash handler.
- CLI: run `clawdbot agent` via the Gateway by default; use `--local` to force embedded mode.

View File

@@ -0,0 +1,49 @@
import { stripHeartbeatToken } from "../heartbeat.js";
import { HEARTBEAT_TOKEN, SILENT_REPLY_TOKEN } from "../tokens.js";
import type { ReplyPayload } from "../types.js";
export type NormalizeReplyOptions = {
  responsePrefix?: string;
  onHeartbeatStrip?: () => void;
  stripHeartbeat?: boolean;
  silentToken?: string;
};

export function normalizeReplyPayload(
  payload: ReplyPayload,
  opts: NormalizeReplyOptions = {},
): ReplyPayload | null {
  const hasMedia = Boolean(
    payload.mediaUrl || (payload.mediaUrls?.length ?? 0) > 0,
  );
  const trimmed = payload.text?.trim() ?? "";
  if (!trimmed && !hasMedia) return null;
  // Drop replies that are only the explicit silent token when no media is attached.
  const silentToken = opts.silentToken ?? SILENT_REPLY_TOKEN;
  if (trimmed === silentToken && !hasMedia) return null;
  let text = payload.text ?? undefined;
  if (text && !trimmed) {
    // Keep empty text when media exists so media-only replies still send.
    text = "";
  }
  const shouldStripHeartbeat = opts.stripHeartbeat ?? true;
  if (shouldStripHeartbeat && text?.includes(HEARTBEAT_TOKEN)) {
    const stripped = stripHeartbeatToken(text, { mode: "message" });
    if (stripped.didStrip) opts.onHeartbeatStrip?.();
    if (stripped.shouldSkip && !hasMedia) return null;
    text = stripped.text;
  }
  // Apply the response prefix unless it is already present or the text is just the heartbeat token.
  if (
    opts.responsePrefix &&
    text &&
    text.trim() !== HEARTBEAT_TOKEN &&
    !text.startsWith(opts.responsePrefix)
  ) {
    text = `${opts.responsePrefix} ${text}`;
  }
  return { ...payload, text };
}
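
A minimal usage sketch of the new helper, for orientation. The payload values and the relative import paths are illustrative (they assume a caller sitting next to this module); the expected results follow from the implementation above.

import { SILENT_REPLY_TOKEN } from "../tokens.js";
import { normalizeReplyPayload } from "./normalize-reply.js";

// A reply that is only the silent token is dropped outright.
normalizeReplyPayload({ text: SILENT_REPLY_TOKEN });
// => null

// With media attached, the payload is kept so the media still sends.
normalizeReplyPayload({ text: "", mediaUrl: "https://example.com/pic.png" });
// => { text: "", mediaUrl: "https://example.com/pic.png" }

// The response prefix is prepended once and never doubled.
normalizeReplyPayload({ text: "hi" }, { responsePrefix: "[clawdbot]" });
// => { text: "[clawdbot] hi" }
normalizeReplyPayload({ text: "[clawdbot] hi" }, { responsePrefix: "[clawdbot]" });
// => { text: "[clawdbot] hi" }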

View File

@@ -1,5 +1,4 @@
import { stripHeartbeatToken } from "../heartbeat.js";
import { HEARTBEAT_TOKEN, SILENT_REPLY_TOKEN } from "../tokens.js";
import { normalizeReplyPayload } from "./normalize-reply.js";
import type { GetReplyOptions, ReplyPayload } from "../types.js";
import type { TypingController } from "./typing.js";
@@ -45,41 +44,14 @@ export type ReplyDispatcher = {
  getQueuedCounts: () => Record<ReplyDispatchKind, number>;
};

function normalizeReplyPayload(
function normalizeReplyPayloadInternal(
  payload: ReplyPayload,
  opts: Pick<ReplyDispatcherOptions, "responsePrefix" | "onHeartbeatStrip">,
): ReplyPayload | null {
  const hasMedia = Boolean(
    payload.mediaUrl || (payload.mediaUrls?.length ?? 0) > 0,
  );
  const trimmed = payload.text?.trim() ?? "";
  if (!trimmed && !hasMedia) return null;
  // Avoid sending the explicit silent token when no media is attached.
  if (trimmed === SILENT_REPLY_TOKEN && !hasMedia) return null;
  let text = payload.text ?? undefined;
  if (text && !trimmed) {
    // Keep empty text when media exists so media-only replies still send.
    text = "";
  }
  if (text?.includes(HEARTBEAT_TOKEN)) {
    const stripped = stripHeartbeatToken(text, { mode: "message" });
    if (stripped.didStrip) opts.onHeartbeatStrip?.();
    if (stripped.shouldSkip && !hasMedia) return null;
    text = stripped.text;
  }
  if (
    opts.responsePrefix &&
    text &&
    text.trim() !== HEARTBEAT_TOKEN &&
    !text.startsWith(opts.responsePrefix)
  ) {
    text = `${opts.responsePrefix} ${text}`;
  }
  return { ...payload, text };
  return normalizeReplyPayload(payload, {
    responsePrefix: opts.responsePrefix,
    onHeartbeatStrip: opts.onHeartbeatStrip,
  });
}
export function createReplyDispatcher(
@@ -96,7 +68,7 @@ export function createReplyDispatcher(
  };

  const enqueue = (kind: ReplyDispatchKind, payload: ReplyPayload) => {
    const normalized = normalizeReplyPayload(payload, options);
    const normalized = normalizeReplyPayloadInternal(payload, options);
    if (!normalized) return false;
    queuedCounts[kind] += 1;
    pending += 1;

View File

@@ -1,6 +1,7 @@
import { describe, expect, it, vi } from "vitest";
import type { ClawdbotConfig } from "../../config/config.js";
import { SILENT_REPLY_TOKEN } from "../tokens.js";
const mocks = vi.hoisted(() => ({
  sendMessageDiscord: vi.fn(async () => ({ messageId: "m1", channelId: "c1" })),
@@ -68,6 +69,36 @@ describe("routeReply", () => {
    expect(mocks.sendMessageSlack).not.toHaveBeenCalled();
  });

  it("drops silent token payloads", async () => {
    mocks.sendMessageSlack.mockClear();
    const res = await routeReply({
      payload: { text: SILENT_REPLY_TOKEN },
      channel: "slack",
      to: "channel:C123",
      cfg: {} as never,
    });
    expect(res.ok).toBe(true);
    expect(mocks.sendMessageSlack).not.toHaveBeenCalled();
  });

  it("applies responsePrefix when routing", async () => {
    mocks.sendMessageSlack.mockClear();
    const cfg = {
      messages: { responsePrefix: "[clawdbot]" },
    } as unknown as ClawdbotConfig;
    await routeReply({
      payload: { text: "hi" },
      channel: "slack",
      to: "channel:C123",
      cfg,
    });
    expect(mocks.sendMessageSlack).toHaveBeenCalledWith(
      "channel:C123",
      "[clawdbot] hi",
      expect.any(Object),
    );
  });

  it("passes thread id to Telegram sends", async () => {
    mocks.sendMessageTelegram.mockClear();
    await routeReply({

View File

@@ -17,6 +17,7 @@ import { sendMessageTelegram } from "../../telegram/send.js";
import { sendMessageWhatsApp } from "../../web/outbound.js";
import type { OriginatingChannelType } from "../templating.js";
import type { ReplyPayload } from "../types.js";
import { normalizeReplyPayload } from "./normalize-reply.js";
export type RouteReplyParams = {
  /** The reply payload to send. */
@@ -59,13 +60,18 @@ export async function routeReply(
    params;
  // Debug: `pnpm test src/auto-reply/reply/route-reply.test.ts`
  const text = payload.text ?? "";
  const mediaUrls = (payload.mediaUrls?.filter(Boolean) ?? []).length
    ? (payload.mediaUrls?.filter(Boolean) as string[])
    : payload.mediaUrl
      ? [payload.mediaUrl]
  const normalized = normalizeReplyPayload(payload, {
    responsePrefix: cfg.messages?.responsePrefix,
  });
  if (!normalized) return { ok: true };
  const text = normalized.text ?? "";
  const mediaUrls = (normalized.mediaUrls?.filter(Boolean) ?? []).length
    ? (normalized.mediaUrls?.filter(Boolean) as string[])
    : normalized.mediaUrl
      ? [normalized.mediaUrl]
      : [];
  const replyToId = payload.replyToId;
  const replyToId = normalized.replyToId;
  // Skip empty replies.
  if (!text.trim() && mediaUrls.length === 0) {