feat(agent): add human-like delay between block replies
Adds an `agent.humanDelay` config option to create a natural rhythm between
streamed message bubbles. When enabled, it introduces a random delay
(default 800–2500ms) between block replies, making multi-message
responses feel more like natural human texting.
Config example:
```json
{
  "agent": {
    "blockStreamingDefault": "on",
    "humanDelay": {
      "enabled": true,
      "minMs": 800,
      "maxMs": 2500
    }
  }
}
```
- First message sends immediately
- Subsequent messages wait a random delay before sending
- Works with iMessage, Signal, and Discord providers
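
Rough sketch of the pacing behavior (illustrative only; `pickDelayMs` and `sendBlocks` are hypothetical names, not the dispatcher's real API):

```ts
// Illustrative sketch of the pacing described above; not the actual dispatcher code.
const pickDelayMs = (minMs = 800, maxMs = 2500): number =>
  Math.floor(Math.random() * (maxMs - minMs + 1)) + minMs;

async function sendBlocks(
  blocks: string[],
  deliver: (text: string) => Promise<void>,
): Promise<void> {
  for (let i = 0; i < blocks.length; i++) {
    // First block goes out immediately; each later block waits a random pause.
    if (i > 0) await new Promise((r) => setTimeout(r, pickDelayMs()));
    await deliver(blocks[i]);
  }
}
```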
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
@@ -4,6 +4,7 @@
 
 ### New Features and Changes
 - Onboarding/Models: add first-class Z.AI (GLM) auth choice (`zai-api-key`) + `--zai-api-key` flag.
+- Agents: add human-delay pacing between block replies (per-agent configurable). (#446) — thanks @tony-freedomology.
 
 ### Fixes
 - Agents/OpenAI: fix Responses tool-only → follow-up turn handling (avoid standalone `reasoning` items that trigger 400 “required following item”).
@@ -61,6 +61,7 @@ Key settings:
 - `agents.defaults.blockStreamingBreak` (`text_end|message_end`)
 - `agents.defaults.blockStreamingChunk` (`minChars|maxChars|breakPreference`)
 - `agents.defaults.blockStreamingCoalesce` (idle-based batching)
+- `agents.defaults.humanDelay` (human-like pause between block replies)
 - Provider overrides: `*.blockStreaming` and `*.blockStreamingCoalesce` (non-Telegram providers require explicit `*.blockStreaming: true`)
 
 Details: [Streaming + chunking](/concepts/streaming).
@@ -71,6 +71,16 @@ progressive output.
 - Provider overrides are available via `*.blockStreamingCoalesce` (including per-account configs).
 - Default coalesce `minChars` is bumped to 1500 for Signal/Slack/Discord unless overridden.
 
+## Human-like pacing between blocks
+
+When block streaming is enabled, you can add a **randomized pause** between
+block replies (after the first block). This makes multi-bubble responses feel
+more natural.
+
+- Config: `agents.defaults.humanDelay` (override per agent via `agents.list[].humanDelay`).
+- Modes: `off` (default), `natural` (800–2500ms), `custom` (`minMs`/`maxMs`).
+- Applies only to **block replies**, not final replies or tool summaries.
+
 ## “Stream chunks or everything”
 
 This maps to:
@@ -218,6 +218,9 @@ Save to `~/.clawdbot/clawdbot.json` and you can DM the bot from that number.
     blockStreamingCoalesce: {
       idleMs: 1000
     },
+    humanDelay: {
+      mode: "natural"
+    },
     timeoutSeconds: 600,
     mediaMaxMb: 5,
     typingIntervalSeconds: 5,
@@ -1186,6 +1186,15 @@ Block streaming:
   Provider overrides: `whatsapp.blockStreamingCoalesce`, `telegram.blockStreamingCoalesce`,
   `discord.blockStreamingCoalesce`, `slack.blockStreamingCoalesce`, `signal.blockStreamingCoalesce`,
   `imessage.blockStreamingCoalesce`, `msteams.blockStreamingCoalesce` (and per-account variants).
+- `agents.defaults.humanDelay`: randomized pause between **block replies** after the first.
+  Modes: `off` (default), `natural` (800–2500ms), `custom` (use `minMs`/`maxMs`).
+  Per-agent override: `agents.list[].humanDelay`.
+  Example:
+  ```json5
+  {
+    agents: { defaults: { humanDelay: { mode: "natural" } } }
+  }
+  ```
 See [/concepts/streaming](/concepts/streaming) for behavior + chunking details.
 
 Typing indicators:
@@ -22,6 +22,7 @@ type ResolvedAgentConfig = {
   workspace?: string;
   agentDir?: string;
   model?: string;
+  humanDelay?: AgentEntry["humanDelay"];
  identity?: AgentEntry["identity"];
  groupChat?: AgentEntry["groupChat"];
  subagents?: AgentEntry["subagents"];
@@ -94,6 +95,7 @@ export function resolveAgentConfig(
       typeof entry.workspace === "string" ? entry.workspace : undefined,
     agentDir: typeof entry.agentDir === "string" ? entry.agentDir : undefined,
     model: typeof entry.model === "string" ? entry.model : undefined,
+    humanDelay: entry.humanDelay,
     identity: entry.identity,
     groupChat: entry.groupChat,
     subagents:
@@ -1,65 +1,28 @@
 import { describe, expect, it } from "vitest";
 
 import type { ClawdbotConfig } from "../config/config.js";
-import { resolveMessagePrefix, resolveResponsePrefix } from "./identity.js";
+import { resolveHumanDelayConfig } from "./identity.js";
 
-describe("message prefix resolution", () => {
-  it("returns configured messagePrefix override", () => {
+describe("resolveHumanDelayConfig", () => {
+  it("returns undefined when no humanDelay config is set", () => {
     const cfg: ClawdbotConfig = {};
-    expect(
-      resolveMessagePrefix(cfg, "main", {
-        configured: "[x]",
-        hasAllowFrom: true,
-      }),
-    ).toBe("[x]");
-    expect(
-      resolveMessagePrefix(cfg, "main", {
-        configured: "",
-        hasAllowFrom: false,
-      }),
-    ).toBe("");
+    expect(resolveHumanDelayConfig(cfg, "main")).toBeUndefined();
   });
 
-  it("defaults messagePrefix based on allowFrom + identity", () => {
+  it("merges defaults with per-agent overrides", () => {
     const cfg: ClawdbotConfig = {
-      agents: { list: [{ id: "main", identity: { name: "Richbot" } }] },
+      agents: {
+        defaults: {
+          humanDelay: { mode: "natural", minMs: 800, maxMs: 1800 },
+        },
+        list: [{ id: "main", humanDelay: { mode: "custom", minMs: 400 } }],
+      },
     };
-    expect(resolveMessagePrefix(cfg, "main", { hasAllowFrom: true })).toBe("");
-    expect(resolveMessagePrefix(cfg, "main", { hasAllowFrom: false })).toBe(
-      "[Richbot]",
-    );
-  });
-
-  it("falls back to [clawdbot] when identity is missing", () => {
-    const cfg: ClawdbotConfig = {};
-    expect(resolveMessagePrefix(cfg, "main", { hasAllowFrom: false })).toBe(
-      "[clawdbot]",
-    );
-  });
-});
-
-describe("response prefix resolution", () => {
-  it("does not apply any default when unset", () => {
-    const cfg: ClawdbotConfig = {
-      agents: { list: [{ id: "main", identity: { name: "Richbot" } }] },
-    };
-    expect(resolveResponsePrefix(cfg, "main")).toBeUndefined();
-  });
-
-  it("returns explicit responsePrefix when set", () => {
-    const cfg: ClawdbotConfig = { messages: { responsePrefix: "PFX" } };
-    expect(resolveResponsePrefix(cfg, "main")).toBe("PFX");
-  });
-
-  it("supports responsePrefix: auto (identity-derived opt-in)", () => {
-    const withIdentity: ClawdbotConfig = {
-      agents: { list: [{ id: "main", identity: { name: "Richbot" } }] },
-      messages: { responsePrefix: "auto" },
-    };
-    expect(resolveResponsePrefix(withIdentity, "main")).toBe("[Richbot]");
-
-    const withoutIdentity: ClawdbotConfig = {
-      messages: { responsePrefix: "auto" },
-    };
-    expect(resolveResponsePrefix(withoutIdentity, "main")).toBeUndefined();
+    expect(resolveHumanDelayConfig(cfg, "main")).toEqual({
+      mode: "custom",
+      minMs: 400,
+      maxMs: 1800,
+    });
   });
 });
@@ -1,4 +1,8 @@
-import type { ClawdbotConfig, IdentityConfig } from "../config/config.js";
+import type {
+  ClawdbotConfig,
+  HumanDelayConfig,
+  IdentityConfig,
+} from "../config/config.js";
 import { resolveAgentConfig } from "./agent-scope.js";
 
 const DEFAULT_ACK_REACTION = "👀";
@@ -72,3 +76,17 @@ export function resolveEffectiveMessagesConfig(
     responsePrefix: resolveResponsePrefix(cfg, agentId),
   };
 }
+
+export function resolveHumanDelayConfig(
+  cfg: ClawdbotConfig,
+  agentId: string,
+): HumanDelayConfig | undefined {
+  const defaults = cfg.agents?.defaults?.humanDelay;
+  const overrides = resolveAgentConfig(cfg, agentId)?.humanDelay;
+  if (!defaults && !overrides) return undefined;
+  return {
+    mode: overrides?.mode ?? defaults?.mode,
+    minMs: overrides?.minMs ?? defaults?.minMs,
+    maxMs: overrides?.maxMs ?? defaults?.maxMs,
+  };
+}
@@ -103,4 +103,55 @@ describe("createReplyDispatcher", () => {
     await dispatcher.waitForIdle();
     expect(onIdle).toHaveBeenCalledTimes(1);
   });
+
+  it("delays block replies after the first when humanDelay is natural", async () => {
+    vi.useFakeTimers();
+    const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0);
+    const deliver = vi.fn().mockResolvedValue(undefined);
+    const dispatcher = createReplyDispatcher({
+      deliver,
+      humanDelay: { mode: "natural" },
+    });
+
+    dispatcher.sendBlockReply({ text: "first" });
+    await Promise.resolve();
+    expect(deliver).toHaveBeenCalledTimes(1);
+
+    dispatcher.sendBlockReply({ text: "second" });
+    await Promise.resolve();
+    expect(deliver).toHaveBeenCalledTimes(1);
+
+    await vi.advanceTimersByTimeAsync(799);
+    expect(deliver).toHaveBeenCalledTimes(1);
+
+    await vi.advanceTimersByTimeAsync(1);
+    await dispatcher.waitForIdle();
+    expect(deliver).toHaveBeenCalledTimes(2);
+
+    randomSpy.mockRestore();
+    vi.useRealTimers();
+  });
+
+  it("uses custom bounds for humanDelay and clamps when max <= min", async () => {
+    vi.useFakeTimers();
+    const deliver = vi.fn().mockResolvedValue(undefined);
+    const dispatcher = createReplyDispatcher({
+      deliver,
+      humanDelay: { mode: "custom", minMs: 1200, maxMs: 400 },
+    });
+
+    dispatcher.sendBlockReply({ text: "first" });
+    await Promise.resolve();
+    expect(deliver).toHaveBeenCalledTimes(1);
+
+    dispatcher.sendBlockReply({ text: "second" });
+    await vi.advanceTimersByTimeAsync(1199);
+    expect(deliver).toHaveBeenCalledTimes(1);
+
+    await vi.advanceTimersByTimeAsync(1);
+    await dispatcher.waitForIdle();
+    expect(deliver).toHaveBeenCalledTimes(2);
+
+    vi.useRealTimers();
+  });
 });
@@ -1,3 +1,4 @@
+import type { HumanDelayConfig } from "../../config/types.js";
 import type { GetReplyOptions, ReplyPayload } from "../types.js";
 import { normalizeReplyPayload } from "./normalize-reply.js";
 import type { TypingController } from "./typing.js";
@@ -14,12 +15,36 @@ type ReplyDispatchDeliverer = (
   info: { kind: ReplyDispatchKind },
 ) => Promise<void>;
 
+const DEFAULT_HUMAN_DELAY_MIN_MS = 800;
+const DEFAULT_HUMAN_DELAY_MAX_MS = 2500;
+
+/** Generate a random delay within the configured range. */
+function getHumanDelay(config: HumanDelayConfig | undefined): number {
+  const mode = config?.mode ?? "off";
+  if (mode === "off") return 0;
+  const min =
+    mode === "custom"
+      ? (config?.minMs ?? DEFAULT_HUMAN_DELAY_MIN_MS)
+      : DEFAULT_HUMAN_DELAY_MIN_MS;
+  const max =
+    mode === "custom"
+      ? (config?.maxMs ?? DEFAULT_HUMAN_DELAY_MAX_MS)
+      : DEFAULT_HUMAN_DELAY_MAX_MS;
+  if (max <= min) return min;
+  return Math.floor(Math.random() * (max - min + 1)) + min;
+}
+
+/** Sleep for a given number of milliseconds. */
+const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
+
 export type ReplyDispatcherOptions = {
   deliver: ReplyDispatchDeliverer;
   responsePrefix?: string;
   onHeartbeatStrip?: () => void;
   onIdle?: () => void;
   onError?: ReplyDispatchErrorHandler;
+  /** Human-like delay between block replies for natural rhythm. */
+  humanDelay?: HumanDelayConfig;
 };
 
 export type ReplyDispatcherWithTypingOptions = Omit<
@@ -60,6 +85,8 @@ export function createReplyDispatcher(
   let sendChain: Promise<void> = Promise.resolve();
   // Track in-flight deliveries so we can emit a reliable "idle" signal.
   let pending = 0;
+  // Track whether we've sent a block reply (for human delay - skip delay on first block).
+  let sentFirstBlock = false;
   // Serialize outbound replies to preserve tool/block/final order.
   const queuedCounts: Record<ReplyDispatchKind, number> = {
     tool: 0,
@@ -72,8 +99,20 @@
     if (!normalized) return false;
     queuedCounts[kind] += 1;
     pending += 1;
+
+    // Determine if we should add human-like delay (only for block replies after the first).
+    const shouldDelay = kind === "block" && sentFirstBlock;
+    if (kind === "block") sentFirstBlock = true;
+
     sendChain = sendChain
-      .then(() => options.deliver(normalized, { kind }))
+      .then(async () => {
+        // Add human-like delay between block replies for natural rhythm.
+        if (shouldDelay) {
+          const delayMs = getHumanDelay(options.humanDelay);
+          if (delayMs > 0) await sleep(delayMs);
+        }
+        await options.deliver(normalized, { kind });
+      })
       .catch((err) => {
        options.onError?.(err, { kind });
      })
@@ -104,6 +104,9 @@ const FIELD_LABELS: Record<string, string> = {
   "agents.defaults.model.fallbacks": "Model Fallbacks",
   "agents.defaults.imageModel.primary": "Image Model",
   "agents.defaults.imageModel.fallbacks": "Image Model Fallbacks",
+  "agents.defaults.humanDelay.mode": "Human Delay Mode",
+  "agents.defaults.humanDelay.minMs": "Human Delay Min (ms)",
+  "agents.defaults.humanDelay.maxMs": "Human Delay Max (ms)",
   "commands.native": "Native Commands",
   "commands.text": "Text Commands",
   "commands.restart": "Allow Restart",
@@ -177,6 +180,12 @@ const FIELD_HELP: Record<string, string> = {
     "Optional image model (provider/model) used when the primary model lacks image input.",
   "agents.defaults.imageModel.fallbacks":
     "Ordered fallback image models (provider/model).",
+  "agents.defaults.humanDelay.mode":
+    'Delay style for block replies ("off", "natural", "custom").',
+  "agents.defaults.humanDelay.minMs":
+    "Minimum delay in ms for custom humanDelay (default: 800).",
+  "agents.defaults.humanDelay.maxMs":
+    "Maximum delay in ms for custom humanDelay (default: 2500).",
   "commands.native":
     "Register native commands with connectors that support it (Discord/Slack/Telegram).",
   "commands.text": "Allow text command parsing (slash commands only).",
@@ -22,6 +22,15 @@ export type BlockStreamingCoalesceConfig = {
   idleMs?: number;
 };
 
+export type HumanDelayConfig = {
+  /** Delay style for block replies (off|natural|custom). */
+  mode?: "off" | "natural" | "custom";
+  /** Minimum delay in milliseconds (default: 800). */
+  minMs?: number;
+  /** Maximum delay in milliseconds (default: 2500). */
+  maxMs?: number;
+};
+
 export type SessionSendPolicyAction = "allow" | "deny";
 export type SessionSendPolicyMatch = {
   provider?: string;
@@ -922,6 +931,8 @@ export type AgentConfig = {
   workspace?: string;
   agentDir?: string;
   model?: string;
+  /** Human-like delay between block replies for this agent. */
+  humanDelay?: HumanDelayConfig;
   identity?: IdentityConfig;
   groupChat?: GroupChatConfig;
   subagents?: {
@@ -1317,6 +1328,8 @@ export type AgentDefaultsConfig = {
   * idleMs: wait time before flushing when idle.
   */
  blockStreamingCoalesce?: BlockStreamingCoalesceConfig;
+  /** Human-like delay between block replies. */
+  humanDelay?: HumanDelayConfig;
  timeoutSeconds?: number;
  /** Max inbound media size in MB for agent-visible attachments (text note or future image attach). */
  mediaMaxMb?: number;
@@ -1426,6 +1439,7 @@ export type ClawdbotConfig = {
   bindings?: AgentBinding[];
   broadcast?: BroadcastConfig;
   audio?: AudioConfig;
   routing?: RoutingConfig;
   messages?: MessagesConfig;
   commands?: CommandsConfig;
   session?: SessionConfig;
@@ -103,6 +103,14 @@ const BlockStreamingCoalesceSchema = z.object({
   idleMs: z.number().int().nonnegative().optional(),
 });
 
+const HumanDelaySchema = z.object({
+  mode: z
+    .union([z.literal("off"), z.literal("natural"), z.literal("custom")])
+    .optional(),
+  minMs: z.number().int().nonnegative().optional(),
+  maxMs: z.number().int().nonnegative().optional(),
+});
+
 const normalizeAllowFrom = (values?: Array<string | number>): string[] =>
   (values ?? []).map((v) => String(v).trim()).filter(Boolean);
 
@@ -775,6 +783,7 @@ const AgentEntrySchema = z.object({
   workspace: z.string().optional(),
   agentDir: z.string().optional(),
   model: z.string().optional(),
+  humanDelay: HumanDelaySchema.optional(),
   identity: IdentitySchema,
   groupChat: GroupChatSchema,
   subagents: z
@@ -1043,6 +1052,7 @@ const AgentDefaultsSchema = z
       })
       .optional(),
     blockStreamingCoalesce: BlockStreamingCoalesceSchema.optional(),
+    humanDelay: HumanDelaySchema.optional(),
     timeoutSeconds: z.number().int().positive().optional(),
     mediaMaxMb: z.number().positive().optional(),
     typingIntervalSeconds: z.number().int().positive().optional(),
@@ -1089,7 +1099,6 @@ const AgentDefaultsSchema = z
       .optional(),
   })
   .optional();
 
 export const ClawdbotSchema = z
   .object({
     env: z
@@ -20,6 +20,7 @@ import { ApplicationCommandOptionType, Routes } from "discord-api-types/v10";
 import {
   resolveAckReaction,
   resolveEffectiveMessagesConfig,
+  resolveHumanDelayConfig,
 } from "../agents/identity.js";
 import { resolveTextChunkLimit } from "../auto-reply/chunk.js";
 import { hasControlCommand } from "../auto-reply/command-detection.js";
@@ -1156,6 +1157,7 @@ export function createDiscordMessageHandler(params: {
       createReplyDispatcherWithTyping({
         responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId)
           .responsePrefix,
+        humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
         deliver: async (payload) => {
           await deliverDiscordReply({
             replies: [payload],
@@ -1662,6 +1664,7 @@ function createDiscordNativeCommand(params: {
   const dispatcher = createReplyDispatcher({
     responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId)
       .responsePrefix,
+    humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
     deliver: async (payload, _info) => {
       await deliverDiscordInteractionReply({
         interaction,
@@ -1,4 +1,7 @@
-import { resolveEffectiveMessagesConfig } from "../agents/identity.js";
+import {
+  resolveEffectiveMessagesConfig,
+  resolveHumanDelayConfig,
+} from "../agents/identity.js";
 import { chunkText, resolveTextChunkLimit } from "../auto-reply/chunk.js";
 import { hasControlCommand } from "../auto-reply/command-detection.js";
 import { formatAgentEnvelope } from "../auto-reply/envelope.js";
@@ -444,6 +447,7 @@ export async function monitorIMessageProvider(
       const dispatcher = createReplyDispatcher({
         responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId)
           .responsePrefix,
+        humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
         deliver: async (payload) => {
           await deliverReplies({
             replies: [payload],
@@ -1,4 +1,7 @@
-import { resolveEffectiveMessagesConfig } from "../agents/identity.js";
+import {
+  resolveEffectiveMessagesConfig,
+  resolveHumanDelayConfig,
+} from "../agents/identity.js";
 import { createReplyDispatcherWithTyping } from "../auto-reply/reply/reply-dispatcher.js";
 import type { ClawdbotConfig, MSTeamsReplyStyle } from "../config/types.js";
 import { danger } from "../globals.js";
@@ -40,6 +43,7 @@ export function createMSTeamsReplyDispatcher(params: {
   return createReplyDispatcherWithTyping({
     responsePrefix: resolveEffectiveMessagesConfig(params.cfg, params.agentId)
       .responsePrefix,
+    humanDelay: resolveHumanDelayConfig(params.cfg, params.agentId),
     deliver: async (payload) => {
       const messages = renderReplyPayloadsToMessages([payload], {
         textChunkLimit: params.textLimit,
@@ -1,4 +1,7 @@
-import { resolveEffectiveMessagesConfig } from "../agents/identity.js";
+import {
+  resolveEffectiveMessagesConfig,
+  resolveHumanDelayConfig,
+} from "../agents/identity.js";
 import { chunkText, resolveTextChunkLimit } from "../auto-reply/chunk.js";
 import { formatAgentEnvelope } from "../auto-reply/envelope.js";
 import { dispatchReplyFromConfig } from "../auto-reply/reply/dispatch-from-config.js";
@@ -655,6 +658,7 @@ export async function monitorSignalProvider(
       const dispatcher = createReplyDispatcher({
         responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId)
           .responsePrefix,
+        humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
         deliver: async (payload) => {
           await deliverReplies({
             replies: [payload],
@@ -7,6 +7,7 @@ import type { WebClient as SlackWebClient } from "@slack/web-api";
 import {
   resolveAckReaction,
   resolveEffectiveMessagesConfig,
+  resolveHumanDelayConfig,
 } from "../agents/identity.js";
 import {
   chunkMarkdownText,
@@ -1109,6 +1110,7 @@ export async function monitorSlackProvider(opts: MonitorSlackOpts = {}) {
       createReplyDispatcherWithTyping({
         responsePrefix: resolveEffectiveMessagesConfig(cfg, route.agentId)
           .responsePrefix,
+        humanDelay: resolveHumanDelayConfig(cfg, route.agentId),
         deliver: async (payload) => {
           const effectiveThreadTs = resolveSlackThreadTs({
             replyToMode,