From ab994d2c635effac05ab1caecf7cd2833132018d Mon Sep 17 00:00:00 2001 From: Lloyd Date: Wed, 7 Jan 2026 22:56:46 -0500 Subject: [PATCH] feat(agent): add human-like delay between block replies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds `agents.defaults.humanDelay` config option to create natural rhythm between streamed message bubbles. When set to `natural` or `custom` mode, introduces a random delay (default 800-2500ms) between block replies, making multi-message responses feel more like natural human texting. Config example: ```json { "agents": { "defaults": { "blockStreamingDefault": "on", "humanDelay": { "mode": "custom", "minMs": 800, "maxMs": 2500 } } } } ``` - First message sends immediately - Subsequent messages wait a random delay before sending - Works with the iMessage, Signal, Discord, Slack, and MS Teams providers 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- CHANGELOG.md | 1 + docs/concepts/messages.md | 1 + docs/concepts/streaming.md | 10 +++ docs/gateway/configuration-examples.md | 3 + docs/gateway/configuration.md | 9 +++ src/agents/agent-scope.ts | 2 + src/agents/identity.test.ts | 71 +++++-------------- src/agents/identity.ts | 20 +++++- src/auto-reply/reply/reply-dispatcher.test.ts | 51 +++++++++++++ src/auto-reply/reply/reply-dispatcher.ts | 41 ++++++++++- src/config/schema.ts | 9 +++ src/config/types.ts | 14 ++++ src/config/zod-schema.ts | 11 ++- src/discord/monitor.ts | 3 + src/imessage/monitor.ts | 6 +- src/msteams/reply-dispatcher.ts | 6 +- src/signal/monitor.ts | 6 +- src/slack/monitor.ts | 2 + 18 files changed, 206 insertions(+), 60 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c1703bb9..eed112cf0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ### New Features and Changes - Onboarding/Models: add first-class Z.AI (GLM) auth choice (`zai-api-key`) + `--zai-api-key` flag. +- Agents: add human-delay pacing between block replies (per-agent configurable). 
(#446) — thanks @tony-freedomology. ### Fixes - Agents/OpenAI: fix Responses tool-only → follow-up turn handling (avoid standalone `reasoning` items that trigger 400 “required following item”). diff --git a/docs/concepts/messages.md b/docs/concepts/messages.md index 531e5b635..8ea4e92c7 100644 --- a/docs/concepts/messages.md +++ b/docs/concepts/messages.md @@ -61,6 +61,7 @@ Key settings: - `agents.defaults.blockStreamingBreak` (`text_end|message_end`) - `agents.defaults.blockStreamingChunk` (`minChars|maxChars|breakPreference`) - `agents.defaults.blockStreamingCoalesce` (idle-based batching) +- `agents.defaults.humanDelay` (human-like pause between block replies) - Provider overrides: `*.blockStreaming` and `*.blockStreamingCoalesce` (non-Telegram providers require explicit `*.blockStreaming: true`) Details: [Streaming + chunking](/concepts/streaming). diff --git a/docs/concepts/streaming.md b/docs/concepts/streaming.md index 1093140bd..755071561 100644 --- a/docs/concepts/streaming.md +++ b/docs/concepts/streaming.md @@ -71,6 +71,16 @@ progressive output. - Provider overrides are available via `*.blockStreamingCoalesce` (including per-account configs). - Default coalesce `minChars` is bumped to 1500 for Signal/Slack/Discord unless overridden. +## Human-like pacing between blocks + +When block streaming is enabled, you can add a **randomized pause** between +block replies (after the first block). This makes multi-bubble responses feel +more natural. + +- Config: `agents.defaults.humanDelay` (override per agent via `agents.list[].humanDelay`). +- Modes: `off` (default), `natural` (800–2500ms), `custom` (`minMs`/`maxMs`). +- Applies only to **block replies**, not final replies or tool summaries. 
+ ## “Stream chunks or everything” This maps to: diff --git a/docs/gateway/configuration-examples.md b/docs/gateway/configuration-examples.md index a0020af69..76149cb5a 100644 --- a/docs/gateway/configuration-examples.md +++ b/docs/gateway/configuration-examples.md @@ -218,6 +218,9 @@ Save to `~/.clawdbot/clawdbot.json` and you can DM the bot from that number. blockStreamingCoalesce: { idleMs: 1000 }, + humanDelay: { + mode: "natural" + }, timeoutSeconds: 600, mediaMaxMb: 5, typingIntervalSeconds: 5, diff --git a/docs/gateway/configuration.md b/docs/gateway/configuration.md index 8ce128e13..274b3fbb6 100644 --- a/docs/gateway/configuration.md +++ b/docs/gateway/configuration.md @@ -1186,6 +1186,15 @@ Block streaming: Provider overrides: `whatsapp.blockStreamingCoalesce`, `telegram.blockStreamingCoalesce`, `discord.blockStreamingCoalesce`, `slack.blockStreamingCoalesce`, `signal.blockStreamingCoalesce`, `imessage.blockStreamingCoalesce`, `msteams.blockStreamingCoalesce` (and per-account variants). +- `agents.defaults.humanDelay`: randomized pause between **block replies** after the first. + Modes: `off` (default), `natural` (800–2500ms), `custom` (use `minMs`/`maxMs`). + Per-agent override: `agents.list[].humanDelay`. + Example: + ```json5 + { + agents: { defaults: { humanDelay: { mode: "natural" } } } + } + ``` See [/concepts/streaming](/concepts/streaming) for behavior + chunking details. Typing indicators: diff --git a/src/agents/agent-scope.ts b/src/agents/agent-scope.ts index b252345ec..9ad92d830 100644 --- a/src/agents/agent-scope.ts +++ b/src/agents/agent-scope.ts @@ -22,6 +22,7 @@ type ResolvedAgentConfig = { workspace?: string; agentDir?: string; model?: string; + humanDelay?: AgentEntry["humanDelay"]; identity?: AgentEntry["identity"]; groupChat?: AgentEntry["groupChat"]; subagents?: AgentEntry["subagents"]; @@ -94,6 +95,7 @@ export function resolveAgentConfig( typeof entry.workspace === "string" ? 
entry.workspace : undefined, agentDir: typeof entry.agentDir === "string" ? entry.agentDir : undefined, model: typeof entry.model === "string" ? entry.model : undefined, + humanDelay: entry.humanDelay, identity: entry.identity, groupChat: entry.groupChat, subagents: diff --git a/src/agents/identity.test.ts b/src/agents/identity.test.ts index 2efe04cfb..fd163db67 100644 --- a/src/agents/identity.test.ts +++ b/src/agents/identity.test.ts @@ -1,65 +1,28 @@ import { describe, expect, it } from "vitest"; + import type { ClawdbotConfig } from "../config/config.js"; -import { resolveMessagePrefix, resolveResponsePrefix } from "./identity.js"; +import { resolveHumanDelayConfig } from "./identity.js"; -describe("message prefix resolution", () => { - it("returns configured messagePrefix override", () => { +describe("resolveHumanDelayConfig", () => { + it("returns undefined when no humanDelay config is set", () => { const cfg: ClawdbotConfig = {}; - expect( - resolveMessagePrefix(cfg, "main", { - configured: "[x]", - hasAllowFrom: true, - }), - ).toBe("[x]"); - expect( - resolveMessagePrefix(cfg, "main", { - configured: "", - hasAllowFrom: false, - }), - ).toBe(""); + expect(resolveHumanDelayConfig(cfg, "main")).toBeUndefined(); }); - it("defaults messagePrefix based on allowFrom + identity", () => { + it("merges defaults with per-agent overrides", () => { const cfg: ClawdbotConfig = { - agents: { list: [{ id: "main", identity: { name: "Richbot" } }] }, + agents: { + defaults: { + humanDelay: { mode: "natural", minMs: 800, maxMs: 1800 }, + }, + list: [{ id: "main", humanDelay: { mode: "custom", minMs: 400 } }], + }, }; - expect(resolveMessagePrefix(cfg, "main", { hasAllowFrom: true })).toBe(""); - expect(resolveMessagePrefix(cfg, "main", { hasAllowFrom: false })).toBe( - "[Richbot]", - ); - }); - it("falls back to [clawdbot] when identity is missing", () => { - const cfg: ClawdbotConfig = {}; - expect(resolveMessagePrefix(cfg, "main", { hasAllowFrom: false })).toBe( - 
"[clawdbot]", - ); - }); -}); - -describe("response prefix resolution", () => { - it("does not apply any default when unset", () => { - const cfg: ClawdbotConfig = { - agents: { list: [{ id: "main", identity: { name: "Richbot" } }] }, - }; - expect(resolveResponsePrefix(cfg, "main")).toBeUndefined(); - }); - - it("returns explicit responsePrefix when set", () => { - const cfg: ClawdbotConfig = { messages: { responsePrefix: "PFX" } }; - expect(resolveResponsePrefix(cfg, "main")).toBe("PFX"); - }); - - it("supports responsePrefix: auto (identity-derived opt-in)", () => { - const withIdentity: ClawdbotConfig = { - agents: { list: [{ id: "main", identity: { name: "Richbot" } }] }, - messages: { responsePrefix: "auto" }, - }; - expect(resolveResponsePrefix(withIdentity, "main")).toBe("[Richbot]"); - - const withoutIdentity: ClawdbotConfig = { - messages: { responsePrefix: "auto" }, - }; - expect(resolveResponsePrefix(withoutIdentity, "main")).toBeUndefined(); + expect(resolveHumanDelayConfig(cfg, "main")).toEqual({ + mode: "custom", + minMs: 400, + maxMs: 1800, + }); }); }); diff --git a/src/agents/identity.ts b/src/agents/identity.ts index 8efb8605b..fd634378c 100644 --- a/src/agents/identity.ts +++ b/src/agents/identity.ts @@ -1,4 +1,8 @@ -import type { ClawdbotConfig, IdentityConfig } from "../config/config.js"; +import type { + ClawdbotConfig, + HumanDelayConfig, + IdentityConfig, +} from "../config/config.js"; import { resolveAgentConfig } from "./agent-scope.js"; const DEFAULT_ACK_REACTION = "👀"; @@ -72,3 +76,17 @@ export function resolveEffectiveMessagesConfig( responsePrefix: resolveResponsePrefix(cfg, agentId), }; } + +export function resolveHumanDelayConfig( + cfg: ClawdbotConfig, + agentId: string, +): HumanDelayConfig | undefined { + const defaults = cfg.agents?.defaults?.humanDelay; + const overrides = resolveAgentConfig(cfg, agentId)?.humanDelay; + if (!defaults && !overrides) return undefined; + return { + mode: overrides?.mode ?? 
defaults?.mode, + minMs: overrides?.minMs ?? defaults?.minMs, + maxMs: overrides?.maxMs ?? defaults?.maxMs, + }; +} diff --git a/src/auto-reply/reply/reply-dispatcher.test.ts b/src/auto-reply/reply/reply-dispatcher.test.ts index 9493dd5a1..37456c6b0 100644 --- a/src/auto-reply/reply/reply-dispatcher.test.ts +++ b/src/auto-reply/reply/reply-dispatcher.test.ts @@ -103,4 +103,55 @@ describe("createReplyDispatcher", () => { await dispatcher.waitForIdle(); expect(onIdle).toHaveBeenCalledTimes(1); }); + + it("delays block replies after the first when humanDelay is natural", async () => { + vi.useFakeTimers(); + const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0); + const deliver = vi.fn().mockResolvedValue(undefined); + const dispatcher = createReplyDispatcher({ + deliver, + humanDelay: { mode: "natural" }, + }); + + dispatcher.sendBlockReply({ text: "first" }); + await Promise.resolve(); + expect(deliver).toHaveBeenCalledTimes(1); + + dispatcher.sendBlockReply({ text: "second" }); + await Promise.resolve(); + expect(deliver).toHaveBeenCalledTimes(1); + + await vi.advanceTimersByTimeAsync(799); + expect(deliver).toHaveBeenCalledTimes(1); + + await vi.advanceTimersByTimeAsync(1); + await dispatcher.waitForIdle(); + expect(deliver).toHaveBeenCalledTimes(2); + + randomSpy.mockRestore(); + vi.useRealTimers(); + }); + + it("uses custom bounds for humanDelay and clamps when max <= min", async () => { + vi.useFakeTimers(); + const deliver = vi.fn().mockResolvedValue(undefined); + const dispatcher = createReplyDispatcher({ + deliver, + humanDelay: { mode: "custom", minMs: 1200, maxMs: 400 }, + }); + + dispatcher.sendBlockReply({ text: "first" }); + await Promise.resolve(); + expect(deliver).toHaveBeenCalledTimes(1); + + dispatcher.sendBlockReply({ text: "second" }); + await vi.advanceTimersByTimeAsync(1199); + expect(deliver).toHaveBeenCalledTimes(1); + + await vi.advanceTimersByTimeAsync(1); + await dispatcher.waitForIdle(); + expect(deliver).toHaveBeenCalledTimes(2); 
+ + vi.useRealTimers(); + }); }); diff --git a/src/auto-reply/reply/reply-dispatcher.ts b/src/auto-reply/reply/reply-dispatcher.ts index 02861af3d..2bda83a9e 100644 --- a/src/auto-reply/reply/reply-dispatcher.ts +++ b/src/auto-reply/reply/reply-dispatcher.ts @@ -1,3 +1,4 @@ +import type { HumanDelayConfig } from "../../config/types.js"; import type { GetReplyOptions, ReplyPayload } from "../types.js"; import { normalizeReplyPayload } from "./normalize-reply.js"; import type { TypingController } from "./typing.js"; @@ -14,12 +15,36 @@ type ReplyDispatchDeliverer = ( info: { kind: ReplyDispatchKind }, ) => Promise; +const DEFAULT_HUMAN_DELAY_MIN_MS = 800; +const DEFAULT_HUMAN_DELAY_MAX_MS = 2500; + +/** Generate a random delay within the configured range. */ +function getHumanDelay(config: HumanDelayConfig | undefined): number { + const mode = config?.mode ?? "off"; + if (mode === "off") return 0; + const min = + mode === "custom" + ? (config?.minMs ?? DEFAULT_HUMAN_DELAY_MIN_MS) + : DEFAULT_HUMAN_DELAY_MIN_MS; + const max = + mode === "custom" + ? (config?.maxMs ?? DEFAULT_HUMAN_DELAY_MAX_MS) + : DEFAULT_HUMAN_DELAY_MAX_MS; + if (max <= min) return min; + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +/** Sleep for a given number of milliseconds. */ +const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); + export type ReplyDispatcherOptions = { deliver: ReplyDispatchDeliverer; responsePrefix?: string; onHeartbeatStrip?: () => void; onIdle?: () => void; onError?: ReplyDispatchErrorHandler; + /** Human-like delay between block replies for natural rhythm. */ + humanDelay?: HumanDelayConfig; }; export type ReplyDispatcherWithTypingOptions = Omit< @@ -60,6 +85,8 @@ export function createReplyDispatcher( let sendChain: Promise = Promise.resolve(); // Track in-flight deliveries so we can emit a reliable "idle" signal. let pending = 0; + // Track whether we've sent a block reply (for human delay - skip delay on first block). 
+ let sentFirstBlock = false; // Serialize outbound replies to preserve tool/block/final order. const queuedCounts: Record = { tool: 0, @@ -72,8 +99,20 @@ export function createReplyDispatcher( if (!normalized) return false; queuedCounts[kind] += 1; pending += 1; + + // Determine if we should add human-like delay (only for block replies after the first). + const shouldDelay = kind === "block" && sentFirstBlock; + if (kind === "block") sentFirstBlock = true; + sendChain = sendChain - .then(() => options.deliver(normalized, { kind })) + .then(async () => { + // Add human-like delay between block replies for natural rhythm. + if (shouldDelay) { + const delayMs = getHumanDelay(options.humanDelay); + if (delayMs > 0) await sleep(delayMs); + } + await options.deliver(normalized, { kind }); + }) .catch((err) => { options.onError?.(err, { kind }); }) diff --git a/src/config/schema.ts b/src/config/schema.ts index f257012f5..5778968e8 100644 --- a/src/config/schema.ts +++ b/src/config/schema.ts @@ -104,6 +104,9 @@ const FIELD_LABELS: Record = { "agents.defaults.model.fallbacks": "Model Fallbacks", "agents.defaults.imageModel.primary": "Image Model", "agents.defaults.imageModel.fallbacks": "Image Model Fallbacks", + "agents.defaults.humanDelay.mode": "Human Delay Mode", + "agents.defaults.humanDelay.minMs": "Human Delay Min (ms)", + "agents.defaults.humanDelay.maxMs": "Human Delay Max (ms)", "commands.native": "Native Commands", "commands.text": "Text Commands", "commands.restart": "Allow Restart", @@ -177,6 +180,12 @@ const FIELD_HELP: Record = { "Optional image model (provider/model) used when the primary model lacks image input.", "agents.defaults.imageModel.fallbacks": "Ordered fallback image models (provider/model).", + "agents.defaults.humanDelay.mode": + 'Delay style for block replies ("off", "natural", "custom").', + "agents.defaults.humanDelay.minMs": + "Minimum delay in ms for custom humanDelay (default: 800).", + "agents.defaults.humanDelay.maxMs": + "Maximum delay 
in ms for custom humanDelay (default: 2500).", "commands.native": "Register native commands with connectors that support it (Discord/Slack/Telegram).", "commands.text": "Allow text command parsing (slash commands only).", diff --git a/src/config/types.ts b/src/config/types.ts index dbac18a92..506771655 100644 --- a/src/config/types.ts +++ b/src/config/types.ts @@ -22,6 +22,15 @@ export type BlockStreamingCoalesceConfig = { idleMs?: number; }; +export type HumanDelayConfig = { + /** Delay style for block replies (off|natural|custom). */ + mode?: "off" | "natural" | "custom"; + /** Minimum delay in milliseconds (default: 800). */ + minMs?: number; + /** Maximum delay in milliseconds (default: 2500). */ + maxMs?: number; +}; + export type SessionSendPolicyAction = "allow" | "deny"; export type SessionSendPolicyMatch = { provider?: string; @@ -922,6 +931,8 @@ export type AgentConfig = { workspace?: string; agentDir?: string; model?: string; + /** Human-like delay between block replies for this agent. */ + humanDelay?: HumanDelayConfig; identity?: IdentityConfig; groupChat?: GroupChatConfig; subagents?: { @@ -1317,6 +1328,8 @@ export type AgentDefaultsConfig = { * idleMs: wait time before flushing when idle. */ blockStreamingCoalesce?: BlockStreamingCoalesceConfig; + /** Human-like delay between block replies. */ + humanDelay?: HumanDelayConfig; timeoutSeconds?: number; /** Max inbound media size in MB for agent-visible attachments (text note or future image attach). 
*/ mediaMaxMb?: number; @@ -1426,6 +1439,7 @@ export type ClawdbotConfig = { bindings?: AgentBinding[]; broadcast?: BroadcastConfig; audio?: AudioConfig; + routing?: RoutingConfig; messages?: MessagesConfig; commands?: CommandsConfig; session?: SessionConfig; diff --git a/src/config/zod-schema.ts b/src/config/zod-schema.ts index 39164e69f..c4314efc1 100644 --- a/src/config/zod-schema.ts +++ b/src/config/zod-schema.ts @@ -103,6 +103,14 @@ const BlockStreamingCoalesceSchema = z.object({ idleMs: z.number().int().nonnegative().optional(), }); +const HumanDelaySchema = z.object({ + mode: z + .union([z.literal("off"), z.literal("natural"), z.literal("custom")]) + .optional(), + minMs: z.number().int().nonnegative().optional(), + maxMs: z.number().int().nonnegative().optional(), +}); + const normalizeAllowFrom = (values?: Array): string[] => (values ?? []).map((v) => String(v).trim()).filter(Boolean); @@ -775,6 +783,7 @@ const AgentEntrySchema = z.object({ workspace: z.string().optional(), agentDir: z.string().optional(), model: z.string().optional(), + humanDelay: HumanDelaySchema.optional(), identity: IdentitySchema, groupChat: GroupChatSchema, subagents: z @@ -1043,6 +1052,7 @@ const AgentDefaultsSchema = z }) .optional(), blockStreamingCoalesce: BlockStreamingCoalesceSchema.optional(), + humanDelay: HumanDelaySchema.optional(), timeoutSeconds: z.number().int().positive().optional(), mediaMaxMb: z.number().positive().optional(), typingIntervalSeconds: z.number().int().positive().optional(), @@ -1089,7 +1099,6 @@ const AgentDefaultsSchema = z .optional(), }) .optional(); - export const ClawdbotSchema = z .object({ env: z diff --git a/src/discord/monitor.ts b/src/discord/monitor.ts index b58495768..dc418c71f 100644 --- a/src/discord/monitor.ts +++ b/src/discord/monitor.ts @@ -20,6 +20,7 @@ import { ApplicationCommandOptionType, Routes } from "discord-api-types/v10"; import { resolveAckReaction, resolveEffectiveMessagesConfig, + resolveHumanDelayConfig, } from 
"../agents/identity.js"; import { resolveTextChunkLimit } from "../auto-reply/chunk.js"; import { hasControlCommand } from "../auto-reply/command-detection.js"; @@ -1156,6 +1157,7 @@ export function createDiscordMessageHandler(params: { createReplyDispatcherWithTyping({ responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId) .responsePrefix, + humanDelay: resolveHumanDelayConfig(cfg, route.agentId), deliver: async (payload) => { await deliverDiscordReply({ replies: [payload], @@ -1662,6 +1664,7 @@ function createDiscordNativeCommand(params: { const dispatcher = createReplyDispatcher({ responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId) .responsePrefix, + humanDelay: resolveHumanDelayConfig(cfg, route.agentId), deliver: async (payload, _info) => { await deliverDiscordInteractionReply({ interaction, diff --git a/src/imessage/monitor.ts b/src/imessage/monitor.ts index 67af17e55..72e4989e7 100644 --- a/src/imessage/monitor.ts +++ b/src/imessage/monitor.ts @@ -1,4 +1,7 @@ -import { resolveEffectiveMessagesConfig } from "../agents/identity.js"; +import { + resolveEffectiveMessagesConfig, + resolveHumanDelayConfig, +} from "../agents/identity.js"; import { chunkText, resolveTextChunkLimit } from "../auto-reply/chunk.js"; import { hasControlCommand } from "../auto-reply/command-detection.js"; import { formatAgentEnvelope } from "../auto-reply/envelope.js"; @@ -444,6 +447,7 @@ export async function monitorIMessageProvider( const dispatcher = createReplyDispatcher({ responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId) .responsePrefix, + humanDelay: resolveHumanDelayConfig(cfg, route.agentId), deliver: async (payload) => { await deliverReplies({ replies: [payload], diff --git a/src/msteams/reply-dispatcher.ts b/src/msteams/reply-dispatcher.ts index 28d7a8030..982fc5aa8 100644 --- a/src/msteams/reply-dispatcher.ts +++ b/src/msteams/reply-dispatcher.ts @@ -1,4 +1,7 @@ -import { resolveEffectiveMessagesConfig } from 
"../agents/identity.js"; +import { + resolveEffectiveMessagesConfig, + resolveHumanDelayConfig, +} from "../agents/identity.js"; import { createReplyDispatcherWithTyping } from "../auto-reply/reply/reply-dispatcher.js"; import type { ClawdbotConfig, MSTeamsReplyStyle } from "../config/types.js"; import { danger } from "../globals.js"; @@ -40,6 +43,7 @@ export function createMSTeamsReplyDispatcher(params: { return createReplyDispatcherWithTyping({ responsePrefix: resolveEffectiveMessagesConfig(params.cfg, params.agentId) .responsePrefix, + humanDelay: resolveHumanDelayConfig(params.cfg, params.agentId), deliver: async (payload) => { const messages = renderReplyPayloadsToMessages([payload], { textChunkLimit: params.textLimit, diff --git a/src/signal/monitor.ts b/src/signal/monitor.ts index 30e34964a..022219ed9 100644 --- a/src/signal/monitor.ts +++ b/src/signal/monitor.ts @@ -1,4 +1,7 @@ -import { resolveEffectiveMessagesConfig } from "../agents/identity.js"; +import { + resolveEffectiveMessagesConfig, + resolveHumanDelayConfig, +} from "../agents/identity.js"; import { chunkText, resolveTextChunkLimit } from "../auto-reply/chunk.js"; import { formatAgentEnvelope } from "../auto-reply/envelope.js"; import { dispatchReplyFromConfig } from "../auto-reply/reply/dispatch-from-config.js"; @@ -655,6 +658,7 @@ export async function monitorSignalProvider( const dispatcher = createReplyDispatcher({ responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId) .responsePrefix, + humanDelay: resolveHumanDelayConfig(cfg, route.agentId), deliver: async (payload) => { await deliverReplies({ replies: [payload], diff --git a/src/slack/monitor.ts b/src/slack/monitor.ts index 46413e119..9bc3d0035 100644 --- a/src/slack/monitor.ts +++ b/src/slack/monitor.ts @@ -7,6 +7,7 @@ import type { WebClient as SlackWebClient } from "@slack/web-api"; import { resolveAckReaction, resolveEffectiveMessagesConfig, + resolveHumanDelayConfig, } from "../agents/identity.js"; import { 
chunkMarkdownText, @@ -1109,6 +1110,7 @@ export async function monitorSlackProvider(opts: MonitorSlackOpts = {}) { createReplyDispatcherWithTyping({ responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId) .responsePrefix, + humanDelay: resolveHumanDelayConfig(cfg, route.agentId), deliver: async (payload) => { const effectiveThreadTs = resolveSlackThreadTs({ replyToMode,