feat(slash-commands): usage footer modes

This commit is contained in:
Peter Steinberger
2026-01-18 05:35:22 +00:00
parent e7a4931932
commit 2dabce59ce
38 changed files with 370 additions and 303 deletions

View File

@@ -8,6 +8,7 @@ Docs: https://docs.clawd.bot
- Exec: add host/security/ask routing for gateway + node exec. - Exec: add host/security/ask routing for gateway + node exec.
- macOS: migrate exec approvals to `~/.clawdbot/exec-approvals.json` with per-agent allowlists and skill auto-allow toggle. - macOS: migrate exec approvals to `~/.clawdbot/exec-approvals.json` with per-agent allowlists and skill auto-allow toggle.
- macOS: add approvals socket UI server + node exec lifecycle events. - macOS: add approvals socket UI server + node exec lifecycle events.
- Slash commands: replace `/cost` with `/usage off|tokens|full` to control per-response usage footer; `/usage` no longer aliases `/status`. (Supersedes #1140) — thanks @Nachx639.
- Docs: refresh exec/elevated/exec-approvals docs for the new flow. https://docs.clawd.bot/tools/exec-approvals - Docs: refresh exec/elevated/exec-approvals docs for the new flow. https://docs.clawd.bot/tools/exec-approvals
### Fixes ### Fixes

View File

@@ -249,7 +249,7 @@ Send these in WhatsApp/Telegram/Slack/Microsoft Teams/WebChat (group commands ar
- `/compact` — compact session context (summary) - `/compact` — compact session context (summary)
- `/think <level>` — off|minimal|low|medium|high|xhigh (GPT-5.2 + Codex models only) - `/think <level>` — off|minimal|low|medium|high|xhigh (GPT-5.2 + Codex models only)
- `/verbose on|off` - `/verbose on|off`
- `/cost on|off` — append per-response token/cost usage lines - `/usage off|tokens|full` — per-response usage footer
- `/restart` — restart the gateway (owner-only in groups) - `/restart` — restart the gateway (owner-only in groups)
- `/activation mention|always` — group activation toggle (groups only) - `/activation mention|always` — group activation toggle (groups only)

View File

@@ -522,7 +522,7 @@ Options:
Clawdbot can surface provider usage/quota when OAuth/API creds are available. Clawdbot can surface provider usage/quota when OAuth/API creds are available.
Surfaces: Surfaces:
- `/status` (alias: `/usage`; adds a short usage line when available) - `/status` (adds a short provider usage line when available)
- `clawdbot status --usage` (prints full provider breakdown) - `clawdbot status --usage` (prints full provider breakdown)
- macOS menu bar (Usage section under Context) - macOS menu bar (Usage section under Context)

View File

@@ -21,7 +21,7 @@ Context is *not the same thing* as “memory”: memory can be stored on disk an
- `/status` → quick “how full is my window?” view + session settings. - `/status` → quick “how full is my window?” view + session settings.
- `/context list` → what's injected + rough sizes (per file + totals). - `/context list` → what's injected + rough sizes (per file + totals).
- `/context detail` → deeper breakdown: per-file, per-tool schema sizes, per-skill entry sizes, and system prompt size. - `/context detail` → deeper breakdown: per-file, per-tool schema sizes, per-skill entry sizes, and system prompt size.
- `/cost on` → append per-reply usage line to normal replies. - `/usage tokens` → append per-reply usage footer to normal replies.
- `/compact` → summarize older history into a compact entry to free window space. - `/compact` → summarize older history into a compact entry to free window space.
See also: [Slash commands](/tools/slash-commands), [Token use & costs](/token-use), [Compaction](/concepts/compaction). See also: [Slash commands](/tools/slash-commands), [Token use & costs](/token-use), [Compaction](/concepts/compaction).
@@ -149,4 +149,3 @@ Docs: [Session](/concepts/session), [Compaction](/concepts/compaction), [Session
- `System prompt (estimate)` = computed on the fly when no run report exists (or when running via a CLI backend that doesn't generate the report). - `System prompt (estimate)` = computed on the fly when no run report exists (or when running via a CLI backend that doesn't generate the report).
Either way, it reports sizes and top contributors; it does **not** dump the full system prompt or tool schemas. Either way, it reports sizes and top contributors; it does **not** dump the full system prompt or tool schemas.

View File

@@ -12,7 +12,7 @@ read_when:
## Where it shows up ## Where it shows up
- `/status` in chats: emojirich status card with session tokens + estimated cost (API key only). Provider usage shows for the **current model provider** when available. - `/status` in chats: emojirich status card with session tokens + estimated cost (API key only). Provider usage shows for the **current model provider** when available.
- `/cost on|off` in chats: toggles perresponse usage lines (OAuth shows tokens only). - `/usage off|tokens|full` in chats: per-response usage footer (OAuth shows tokens only).
- CLI: `clawdbot status --usage` prints a full per-provider breakdown. - CLI: `clawdbot status --usage` prints a full per-provider breakdown.
- CLI: `clawdbot channels list` prints the same usage snapshot alongside provider config (use `--no-usage` to skip). - CLI: `clawdbot channels list` prints the same usage snapshot alongside provider config (use `--no-usage` to skip).
- macOS menu bar: “Usage” section under Context (only if available). - macOS menu bar: “Usage” section under Context (only if available).

View File

@@ -42,13 +42,13 @@ Use these in chat:
- `/status`**emojirich status card** with the session model, context usage, - `/status`**emojirich status card** with the session model, context usage,
last response input/output tokens, and **estimated cost** (API key only). last response input/output tokens, and **estimated cost** (API key only).
- `/cost on|off` → appends a **per-response usage line** to every reply. - `/usage off|tokens|full` → appends a **per-response usage footer** to every reply.
- Persists per session (stored as `responseUsage`). - Persists per session (stored as `responseUsage`).
- OAuth auth **hides cost** (tokens only). - OAuth auth **hides cost** (tokens only).
Other surfaces: Other surfaces:
- **TUI/Web TUI:** `/status` + `/cost` are supported. - **TUI/Web TUI:** `/status` + `/usage` are supported.
- **CLI:** `clawdbot status --usage` and `clawdbot channels list` show - **CLI:** `clawdbot status --usage` and `clawdbot channels list` show
provider quota windows (not per-response costs). provider quota windows (not per-response costs).

View File

@@ -17,7 +17,7 @@ There are two related systems:
- In normal chat messages (not directive-only), they are treated as “inline hints” and do **not** persist session settings. - In normal chat messages (not directive-only), they are treated as “inline hints” and do **not** persist session settings.
- In directive-only messages (the message contains only directives), they persist to the session and reply with an acknowledgement. - In directive-only messages (the message contains only directives), they persist to the session and reply with an acknowledgement.
There are also a few **inline shortcuts** (allowlisted/authorized senders only): `/help`, `/commands`, `/status` (`/usage`), `/whoami` (`/id`). There are also a few **inline shortcuts** (allowlisted/authorized senders only): `/help`, `/commands`, `/status`, `/whoami` (`/id`).
They run immediately, are stripped before the model sees the message, and the remaining text continues through the normal flow. They run immediately, are stripped before the model sees the message, and the remaining text continues through the normal flow.
## Config ## Config
@@ -60,12 +60,11 @@ Text + native (when enabled):
- `/commands` - `/commands`
- `/status` (show current status; includes provider usage/quota for the current model provider when available) - `/status` (show current status; includes provider usage/quota for the current model provider when available)
- `/context [list|detail|json]` (explain “context”; `detail` shows per-file + per-tool + per-skill + system prompt size) - `/context [list|detail|json]` (explain “context”; `detail` shows per-file + per-tool + per-skill + system prompt size)
- `/usage` (alias: `/status`)
- `/whoami` (show your sender id; alias: `/id`) - `/whoami` (show your sender id; alias: `/id`)
- `/subagents list|stop|log|info|send` (inspect, stop, log, or message sub-agent runs for the current session) - `/subagents list|stop|log|info|send` (inspect, stop, log, or message sub-agent runs for the current session)
- `/config show|get|set|unset` (persist config to disk, owner-only; requires `commands.config: true`) - `/config show|get|set|unset` (persist config to disk, owner-only; requires `commands.config: true`)
- `/debug show|set|unset|reset` (runtime overrides, owner-only; requires `commands.debug: true`) - `/debug show|set|unset|reset` (runtime overrides, owner-only; requires `commands.debug: true`)
- `/cost on|off` (toggle per-response usage line) - `/usage off|tokens|full` (per-response usage footer)
- `/stop` - `/stop`
- `/restart` - `/restart`
- `/dock-telegram` (alias: `/dock_telegram`) (switch replies to Telegram) - `/dock-telegram` (alias: `/dock_telegram`) (switch replies to Telegram)
@@ -90,8 +89,8 @@ Text-only:
Notes: Notes:
- Commands accept an optional `:` between the command and args (e.g. `/think: high`, `/send: on`, `/help:`). - Commands accept an optional `:` between the command and args (e.g. `/think: high`, `/send: on`, `/help:`).
- `/status` and `/usage` show the same status output; for full provider usage breakdown, use `clawdbot status --usage`. - For full provider usage breakdown, use `clawdbot status --usage`.
- `/cost` appends per-response token usage; it only shows dollar cost when the model uses an API key (OAuth hides cost). - `/usage` controls the per-response usage footer. It only shows dollar cost when the model uses an API key (OAuth hides cost).
- `/restart` is disabled by default; set `commands.restart: true` to enable it. - `/restart` is disabled by default; set `commands.restart: true` to enable it.
- `/verbose` is meant for debugging and extra visibility; keep it **off** in normal use. - `/verbose` is meant for debugging and extra visibility; keep it **off** in normal use.
- `/reasoning` (and `/verbose`) are risky in group settings: they may reveal internal reasoning or tool output you did not intend to expose. Prefer leaving them off, especially in group chats. - `/reasoning` (and `/verbose`) are risky in group settings: they may reveal internal reasoning or tool output you did not intend to expose. Prefer leaving them off, especially in group chats.
@@ -99,15 +98,15 @@ Notes:
- **Group mention gating:** command-only messages from allowlisted senders bypass mention requirements. - **Group mention gating:** command-only messages from allowlisted senders bypass mention requirements.
- **Inline shortcuts (allowlisted senders only):** certain commands also work when embedded in a normal message and are stripped before the model sees the remaining text. - **Inline shortcuts (allowlisted senders only):** certain commands also work when embedded in a normal message and are stripped before the model sees the remaining text.
- Example: `hey /status` triggers a status reply, and the remaining text continues through the normal flow. - Example: `hey /status` triggers a status reply, and the remaining text continues through the normal flow.
- Currently: `/help`, `/commands`, `/status` (`/usage`), `/whoami` (`/id`). - Currently: `/help`, `/commands`, `/status`, `/whoami` (`/id`).
- Unauthorized command-only messages are silently ignored, and inline `/...` tokens are treated as plain text. - Unauthorized command-only messages are silently ignored, and inline `/...` tokens are treated as plain text.
- **Skill commands:** `user-invocable` skills are exposed as slash commands. Names are sanitized to `a-z0-9_` (max 32 chars); collisions get numeric suffixes (e.g. `_2`). - **Skill commands:** `user-invocable` skills are exposed as slash commands. Names are sanitized to `a-z0-9_` (max 32 chars); collisions get numeric suffixes (e.g. `_2`).
- **Native command arguments:** Discord uses autocomplete for dynamic options (and button menus when you omit required args). Telegram and Slack show a button menu when a command supports choices and you omit the arg. - **Native command arguments:** Discord uses autocomplete for dynamic options (and button menus when you omit required args). Telegram and Slack show a button menu when a command supports choices and you omit the arg.
## Usage vs cost (what shows where) ## Usage surfaces (what shows where)
- **Provider usage/quota** (example: “Claude 80% left”) shows up in `/status` for the current model provider when usage tracking is enabled. - **Provider usage/quota** (example: “Claude 80% left”) shows up in `/status` for the current model provider when usage tracking is enabled.
- **Per-response tokens/cost** is controlled by `/cost on|off` (appended to normal replies). - **Per-response tokens/cost** is controlled by `/usage off|tokens|full` (appended to normal replies).
- `/model status` is about **models/auth/endpoints**, not usage. - `/model status` is about **models/auth/endpoints**, not usage.
## Model selection (`/model`) ## Model selection (`/model`)

View File

@@ -77,7 +77,7 @@ Session controls:
- `/think <off|minimal|low|medium|high>` - `/think <off|minimal|low|medium|high>`
- `/verbose <on|full|off>` - `/verbose <on|full|off>`
- `/reasoning <on|off|stream>` - `/reasoning <on|off|stream>`
- `/cost <on|off>` - `/usage <off|tokens|full>`
- `/elevated <on|off>` (alias: `/elev`) - `/elevated <on|off>` (alias: `/elev`)
- `/activation <mention|always>` - `/activation <mention|always>`
- `/deliver <on|off>` - `/deliver <on|off>`

View File

@@ -146,78 +146,83 @@ const readSessionMessages = async (sessionFile: string) => {
}; };
describe("runEmbeddedPiAgent", () => { describe("runEmbeddedPiAgent", () => {
it("appends new user + assistant after existing transcript entries", { timeout: 90_000 }, async () => { it(
const { SessionManager } = await import("@mariozechner/pi-coding-agent"); "appends new user + assistant after existing transcript entries",
{ timeout: 90_000 },
async () => {
const { SessionManager } = await import("@mariozechner/pi-coding-agent");
const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-agent-")); const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-agent-"));
const workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-workspace-")); const workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-workspace-"));
const sessionFile = path.join(workspaceDir, "session.jsonl"); const sessionFile = path.join(workspaceDir, "session.jsonl");
const sessionManager = SessionManager.open(sessionFile); const sessionManager = SessionManager.open(sessionFile);
sessionManager.appendMessage({ sessionManager.appendMessage({
role: "user", role: "user",
content: [{ type: "text", text: "seed user" }], content: [{ type: "text", text: "seed user" }],
}); });
sessionManager.appendMessage({ sessionManager.appendMessage({
role: "assistant", role: "assistant",
content: [{ type: "text", text: "seed assistant" }], content: [{ type: "text", text: "seed assistant" }],
stopReason: "stop", stopReason: "stop",
api: "openai-responses", api: "openai-responses",
provider: "openai", provider: "openai",
model: "mock-1", model: "mock-1",
usage: { usage: {
input: 1, input: 1,
output: 1, output: 1,
cacheRead: 0,
cacheWrite: 0,
totalTokens: 2,
cost: {
input: 0,
output: 0,
cacheRead: 0, cacheRead: 0,
cacheWrite: 0, cacheWrite: 0,
total: 0, totalTokens: 2,
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
total: 0,
},
}, },
}, timestamp: Date.now(),
timestamp: Date.now(), });
});
const cfg = makeOpenAiConfig(["mock-1"]); const cfg = makeOpenAiConfig(["mock-1"]);
await ensureModels(cfg, agentDir); await ensureModels(cfg, agentDir);
await runEmbeddedPiAgent({ await runEmbeddedPiAgent({
sessionId: "session:test", sessionId: "session:test",
sessionKey: testSessionKey, sessionKey: testSessionKey,
sessionFile, sessionFile,
workspaceDir, workspaceDir,
config: cfg, config: cfg,
prompt: "hello", prompt: "hello",
provider: "openai", provider: "openai",
model: "mock-1", model: "mock-1",
timeoutMs: 5_000, timeoutMs: 5_000,
agentDir, agentDir,
enqueue: immediateEnqueue, enqueue: immediateEnqueue,
}); });
const messages = await readSessionMessages(sessionFile); const messages = await readSessionMessages(sessionFile);
const seedUserIndex = messages.findIndex( const seedUserIndex = messages.findIndex(
(message) => message?.role === "user" && textFromContent(message.content) === "seed user", (message) => message?.role === "user" && textFromContent(message.content) === "seed user",
); );
const seedAssistantIndex = messages.findIndex( const seedAssistantIndex = messages.findIndex(
(message) => (message) =>
message?.role === "assistant" && textFromContent(message.content) === "seed assistant", message?.role === "assistant" && textFromContent(message.content) === "seed assistant",
); );
const newUserIndex = messages.findIndex( const newUserIndex = messages.findIndex(
(message) => message?.role === "user" && textFromContent(message.content) === "hello", (message) => message?.role === "user" && textFromContent(message.content) === "hello",
); );
const newAssistantIndex = messages.findIndex( const newAssistantIndex = messages.findIndex(
(message, index) => index > newUserIndex && message?.role === "assistant", (message, index) => index > newUserIndex && message?.role === "assistant",
); );
expect(seedUserIndex).toBeGreaterThanOrEqual(0); expect(seedUserIndex).toBeGreaterThanOrEqual(0);
expect(seedAssistantIndex).toBeGreaterThan(seedUserIndex); expect(seedAssistantIndex).toBeGreaterThan(seedUserIndex);
expect(newUserIndex).toBeGreaterThan(seedAssistantIndex); expect(newUserIndex).toBeGreaterThan(seedAssistantIndex);
expect(newAssistantIndex).toBeGreaterThan(newUserIndex); expect(newAssistantIndex).toBeGreaterThan(newUserIndex);
}, 45_000); },
45_000,
);
it("persists multi-turn user/assistant ordering across runs", async () => { it("persists multi-turn user/assistant ordering across runs", async () => {
const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-agent-")); const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-agent-"));
const workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-workspace-")); const workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-workspace-"));

View File

@@ -183,7 +183,7 @@ export function buildAgentSystemPrompt(params: {
sessions_send: "Send a message to another session/sub-agent", sessions_send: "Send a message to another session/sub-agent",
sessions_spawn: "Spawn a sub-agent session", sessions_spawn: "Spawn a sub-agent session",
session_status: session_status:
"Show a /status-equivalent status card (usage/cost + Reasoning/Verbose/Elevated); optional per-session model override", "Show a /status-equivalent status card (usage + Reasoning/Verbose/Elevated); optional per-session model override",
image: "Analyze an image with the configured image model", image: "Analyze an image with the configured image model",
}; };

View File

@@ -48,8 +48,8 @@ describe("commands registry args", () => {
it("resolves auto arg menus when missing a choice arg", () => { it("resolves auto arg menus when missing a choice arg", () => {
const command: ChatCommandDefinition = { const command: ChatCommandDefinition = {
key: "cost", key: "usage",
description: "cost", description: "usage",
textAliases: [], textAliases: [],
scope: "both", scope: "both",
argsMenu: "auto", argsMenu: "auto",
@@ -59,20 +59,20 @@ describe("commands registry args", () => {
name: "mode", name: "mode",
description: "mode", description: "mode",
type: "string", type: "string",
choices: ["on", "off"], choices: ["off", "tokens", "full"],
}, },
], ],
}; };
const menu = resolveCommandArgMenu({ command, args: undefined, cfg: {} as never }); const menu = resolveCommandArgMenu({ command, args: undefined, cfg: {} as never });
expect(menu?.arg.name).toBe("mode"); expect(menu?.arg.name).toBe("mode");
expect(menu?.choices).toEqual(["on", "off"]); expect(menu?.choices).toEqual(["off", "tokens", "full"]);
}); });
it("does not show menus when arg already provided", () => { it("does not show menus when arg already provided", () => {
const command: ChatCommandDefinition = { const command: ChatCommandDefinition = {
key: "cost", key: "usage",
description: "cost", description: "usage",
textAliases: [], textAliases: [],
scope: "both", scope: "both",
argsMenu: "auto", argsMenu: "auto",
@@ -82,14 +82,14 @@ describe("commands registry args", () => {
name: "mode", name: "mode",
description: "mode", description: "mode",
type: "string", type: "string",
choices: ["on", "off"], choices: ["off", "tokens", "full"],
}, },
], ],
}; };
const menu = resolveCommandArgMenu({ const menu = resolveCommandArgMenu({
command, command,
args: { values: { mode: "on" } }, args: { values: { mode: "tokens" } },
cfg: {} as never, cfg: {} as never,
}); });
expect(menu).toBeNull(); expect(menu).toBeNull();
@@ -130,8 +130,8 @@ describe("commands registry args", () => {
it("does not show menus when args were provided as raw text only", () => { it("does not show menus when args were provided as raw text only", () => {
const command: ChatCommandDefinition = { const command: ChatCommandDefinition = {
key: "cost", key: "usage",
description: "cost", description: "usage",
textAliases: [], textAliases: [],
scope: "both", scope: "both",
argsMenu: "auto", argsMenu: "auto",
@@ -141,7 +141,7 @@ describe("commands registry args", () => {
name: "mode", name: "mode",
description: "on or off", description: "off, tokens, or full",
type: "string", type: "string",
choices: ["on", "off"], choices: ["off", "tokens", "full"],
}, },
], ],
}; };

View File

@@ -225,16 +225,16 @@ export const CHAT_COMMANDS: ChatCommandDefinition[] = (() => {
formatArgs: COMMAND_ARG_FORMATTERS.debug, formatArgs: COMMAND_ARG_FORMATTERS.debug,
}), }),
defineChatCommand({ defineChatCommand({
key: "cost", key: "usage",
nativeName: "cost", nativeName: "usage",
description: "Toggle per-response usage line.", description: "Set the per-response usage footer (off, tokens, or full).",
textAlias: "/cost", textAlias: "/usage",
args: [ args: [
{ {
name: "mode", name: "mode",
description: "on or off", description: "off, tokens, or full",
type: "string", type: "string",
choices: ["on", "off"], choices: ["off", "tokens", "full"],
}, },
], ],
argsMenu: "auto", argsMenu: "auto",
@@ -431,7 +431,6 @@ export const CHAT_COMMANDS: ChatCommandDefinition[] = (() => {
.map((dock) => defineDockCommand(dock)), .map((dock) => defineDockCommand(dock)),
]; ];
registerAlias(commands, "status", "/usage");
registerAlias(commands, "whoami", "/id"); registerAlias(commands, "whoami", "/id");
registerAlias(commands, "think", "/thinking", "/t"); registerAlias(commands, "think", "/thinking", "/t");
registerAlias(commands, "verbose", "/v"); registerAlias(commands, "verbose", "/v");

View File

@@ -144,10 +144,10 @@ describe("directive parsing", () => {
expect(res.cleaned).toBe("thats not /tmp/hello"); expect(res.cleaned).toBe("thats not /tmp/hello");
}); });
it("preserves spacing when stripping usage directives before paths", () => { it("does not treat /usage as a status directive", () => {
const res = extractStatusDirective("thats not /usage:/tmp/hello"); const res = extractStatusDirective("thats not /usage:/tmp/hello");
expect(res.hasDirective).toBe(true); expect(res.hasDirective).toBe(false);
expect(res.cleaned).toBe("thats not /tmp/hello"); expect(res.cleaned).toBe("thats not /usage:/tmp/hello");
}); });
it("parses queue options and modes", () => { it("parses queue options and modes", () => {

View File

@@ -159,12 +159,12 @@ describe("trigger handling", () => {
expect(String(replies[0]?.text ?? "")).toContain("Model:"); expect(String(replies[0]?.text ?? "")).toContain("Model:");
}); });
}); });
it("emits /usage once (alias of /status)", async () => { it("sets per-response usage footer via /usage", async () => {
await withTempHome(async (home) => { await withTempHome(async (home) => {
const blockReplies: Array<{ text?: string }> = []; const blockReplies: Array<{ text?: string }> = [];
const res = await getReplyFromConfig( const res = await getReplyFromConfig(
{ {
Body: "/usage", Body: "/usage tokens",
From: "+1000", From: "+1000",
To: "+2000", To: "+2000",
Provider: "whatsapp", Provider: "whatsapp",
@@ -181,7 +181,8 @@ describe("trigger handling", () => {
const replies = res ? (Array.isArray(res) ? res : [res]) : []; const replies = res ? (Array.isArray(res) ? res : [res]) : [];
expect(blockReplies.length).toBe(0); expect(blockReplies.length).toBe(0);
expect(replies.length).toBe(1); expect(replies.length).toBe(1);
expect(String(replies[0]?.text ?? "")).toContain("Model:"); expect(String(replies[0]?.text ?? "")).toContain("Usage footer: tokens");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
}); });
}); });
it("sends one inline status and still returns agent reply for mixed text", async () => { it("sends one inline status and still returns agent reply for mixed text", async () => {

View File

@@ -203,21 +203,4 @@ describe("trigger handling", () => {
expect(runEmbeddedPiAgent).not.toHaveBeenCalled(); expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
}); });
}); });
it("reports status via /usage without invoking the agent", async () => {
await withTempHome(async (home) => {
const res = await getReplyFromConfig(
{
Body: "/usage",
From: "+1002",
To: "+2000",
CommandAuthorized: true,
},
{},
makeCfg(home),
);
const text = Array.isArray(res) ? res[0]?.text : res?.text;
expect(text).toContain("Clawdbot");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
});
});
}); });

View File

@@ -457,10 +457,16 @@ export async function runReplyAgent(params: {
} }
} }
const responseUsageEnabled = const responseUsageRaw =
(activeSessionEntry?.responseUsage ?? activeSessionEntry?.responseUsage ??
(sessionKey ? activeSessionStore?.[sessionKey]?.responseUsage : undefined)) === "on"; (sessionKey ? activeSessionStore?.[sessionKey]?.responseUsage : undefined);
if (responseUsageEnabled && hasNonzeroUsage(usage)) { const responseUsageMode =
responseUsageRaw === "full"
? "full"
: responseUsageRaw === "tokens" || responseUsageRaw === "on"
? "tokens"
: "off";
if (responseUsageMode !== "off" && hasNonzeroUsage(usage)) {
const authMode = resolveModelAuthMode(providerUsed, cfg); const authMode = resolveModelAuthMode(providerUsed, cfg);
const showCost = authMode === "api-key"; const showCost = authMode === "api-key";
const costConfig = showCost const costConfig = showCost
@@ -470,11 +476,14 @@ export async function runReplyAgent(params: {
config: cfg, config: cfg,
}) })
: undefined; : undefined;
const formatted = formatResponseUsageLine({ let formatted = formatResponseUsageLine({
usage, usage,
showCost, showCost,
costConfig, costConfig,
}); });
if (formatted && responseUsageMode === "full" && sessionKey) {
formatted = `${formatted} · session ${sessionKey}`;
}
if (formatted) responseUsageLine = formatted; if (formatted) responseUsageLine = formatted;
} }

View File

@@ -20,6 +20,7 @@ import {
handleRestartCommand, handleRestartCommand,
handleSendPolicyCommand, handleSendPolicyCommand,
handleStopCommand, handleStopCommand,
handleUsageCommand,
} from "./commands-session.js"; } from "./commands-session.js";
import type { import type {
CommandHandler, CommandHandler,
@@ -31,6 +32,7 @@ const HANDLERS: CommandHandler[] = [
handleBashCommand, handleBashCommand,
handleActivationCommand, handleActivationCommand,
handleSendPolicyCommand, handleSendPolicyCommand,
handleUsageCommand,
handleRestartCommand, handleRestartCommand,
handleHelpCommand, handleHelpCommand,
handleCommandsListCommand, handleCommandsListCommand,

View File

@@ -6,6 +6,7 @@ import { createInternalHookEvent, triggerInternalHook } from "../../hooks/intern
import { scheduleGatewaySigusr1Restart, triggerClawdbotRestart } from "../../infra/restart.js"; import { scheduleGatewaySigusr1Restart, triggerClawdbotRestart } from "../../infra/restart.js";
import { parseActivationCommand } from "../group-activation.js"; import { parseActivationCommand } from "../group-activation.js";
import { parseSendPolicyCommand } from "../send-policy.js"; import { parseSendPolicyCommand } from "../send-policy.js";
import { normalizeUsageDisplay } from "../thinking.js";
import { import {
formatAbortReplyText, formatAbortReplyText,
isAbortTrigger, isAbortTrigger,
@@ -127,6 +128,57 @@ export const handleSendPolicyCommand: CommandHandler = async (params, allowTextC
}; };
}; };
/**
 * Handles the `/usage` text command: shows or sets the per-response usage
 * footer mode for the current session (`off` | `tokens` | `full`).
 *
 * - `/usage <mode>` sets the mode explicitly (invalid args get a usage hint).
 * - Bare `/usage` cycles the mode: off → tokens → full → off.
 *
 * Returns `null` when the command does not apply (text commands disabled, or
 * the message is not a `/usage` command), otherwise a `{ shouldContinue: false }`
 * result that stops further command processing.
 */
export const handleUsageCommand: CommandHandler = async (params, allowTextCommands) => {
  if (!allowTextCommands) return null;
  const normalized = params.command.commandBodyNormalized;
  // Match "/usage" exactly or "/usage <args>"; avoids matching e.g. "/usagex".
  if (normalized !== "/usage" && !normalized.startsWith("/usage ")) return null;
  if (!params.command.isAuthorizedSender) {
    // Unauthorized senders are silently ignored (logged only) — no reply leaks
    // the command's existence.
    logVerbose(
      `Ignoring /usage from unauthorized sender: ${params.command.senderId || "<unknown>"}`,
    );
    return { shouldContinue: false };
  }
  const rawArgs = normalized === "/usage" ? "" : normalized.slice("/usage".length).trim();
  // normalizeUsageDisplay presumably maps arg text to a canonical mode and
  // returns undefined for unrecognized input — TODO confirm against thinking.js.
  const requested = rawArgs ? normalizeUsageDisplay(rawArgs) : undefined;
  if (rawArgs && !requested) {
    // Arg was given but not recognized: reply with the accepted modes.
    return {
      shouldContinue: false,
      reply: { text: "⚙️ Usage: /usage off|tokens|full" },
    };
  }
  // Current persisted value: prefer the in-memory session entry, then the
  // store keyed by sessionKey.
  const currentRaw =
    params.sessionEntry?.responseUsage ??
    (params.sessionKey ? params.sessionStore?.[params.sessionKey]?.responseUsage : undefined);
  // Legacy value "on" (from the old /cost on|off toggle) maps to "tokens";
  // anything unrecognized falls back to "off".
  const current =
    currentRaw === "full"
      ? "full"
      : currentRaw === "tokens" || currentRaw === "on"
        ? "tokens"
        : "off";
  // Explicit arg wins; bare /usage cycles off → tokens → full → off.
  const next = requested ?? (current === "off" ? "tokens" : current === "tokens" ? "full" : "off");
  if (params.sessionEntry && params.sessionStore && params.sessionKey) {
    // "off" is represented by absence of the field rather than a stored value.
    if (next === "off") delete params.sessionEntry.responseUsage;
    else params.sessionEntry.responseUsage = next;
    params.sessionEntry.updatedAt = Date.now();
    params.sessionStore[params.sessionKey] = params.sessionEntry;
    if (params.storePath) {
      // Persist to disk as well as the in-memory store.
      await updateSessionStore(params.storePath, (store) => {
        store[params.sessionKey] = params.sessionEntry as SessionEntry;
      });
    }
  }
  // NOTE(review): when there is no active session entry/store/key, the mode is
  // acknowledged below but not persisted anywhere — confirm this is intended.
  return {
    shouldContinue: false,
    reply: {
      text: `⚙️ Usage footer: ${next}.`,
    },
  };
};
export const handleRestartCommand: CommandHandler = async (params, allowTextCommands) => { export const handleRestartCommand: CommandHandler = async (params, allowTextCommands) => {
if (!allowTextCommands) return null; if (!allowTextCommands) return null;
if (params.command.commandBodyNormalized !== "/restart") return null; if (params.command.commandBodyNormalized !== "/restart") return null;

View File

@@ -10,7 +10,10 @@ import {
resolveAuthProfileOrder, resolveAuthProfileOrder,
} from "../../agents/auth-profiles.js"; } from "../../agents/auth-profiles.js";
import { getCustomProviderApiKey, resolveEnvApiKey } from "../../agents/model-auth.js"; import { getCustomProviderApiKey, resolveEnvApiKey } from "../../agents/model-auth.js";
import { resolveInternalSessionKey, resolveMainSessionAlias } from "../../agents/tools/sessions-helpers.js"; import {
resolveInternalSessionKey,
resolveMainSessionAlias,
} from "../../agents/tools/sessions-helpers.js";
import { normalizeProviderId } from "../../agents/model-selection.js"; import { normalizeProviderId } from "../../agents/model-selection.js";
import type { ClawdbotConfig } from "../../config/config.js"; import type { ClawdbotConfig } from "../../config/config.js";
import type { SessionEntry, SessionScope } from "../../config/sessions.js"; import type { SessionEntry, SessionScope } from "../../config/sessions.js";

View File

@@ -180,10 +180,7 @@ export const handleSubagentsCommand: CommandHandler = async (params, allowTextCo
const sorted = sortSubagentRuns(runs); const sorted = sortSubagentRuns(runs);
const active = sorted.filter((entry) => !entry.endedAt); const active = sorted.filter((entry) => !entry.endedAt);
const done = sorted.length - active.length; const done = sorted.length - active.length;
const lines = [ const lines = ["🧭 Subagents (current session)", `Active: ${active.length} · Done: ${done}`];
"🧭 Subagents (current session)",
`Active: ${active.length} · Done: ${done}`,
];
sorted.forEach((entry, index) => { sorted.forEach((entry, index) => {
const status = formatRunStatus(entry); const status = formatRunStatus(entry);
const label = formatRunLabel(entry); const label = formatRunLabel(entry);
@@ -396,8 +393,7 @@ export const handleSubagentsCommand: CommandHandler = async (params, allowTextCo
shouldContinue: false, shouldContinue: false,
reply: { reply: {
text: text:
replyText ?? replyText ?? `✅ Sent to ${formatRunLabel(resolved.entry)} (run ${runId.slice(0, 8)}).`,
`✅ Sent to ${formatRunLabel(resolved.entry)} (run ${runId.slice(0, 8)}).`,
}, },
}; };
} }

View File

@@ -1,6 +1,9 @@
import { describe, expect, it, vi } from "vitest"; import { describe, expect, it, vi } from "vitest";
import { addSubagentRunForTests, resetSubagentRegistryForTests } from "../../agents/subagent-registry.js"; import {
addSubagentRunForTests,
resetSubagentRegistryForTests,
} from "../../agents/subagent-registry.js";
import type { ClawdbotConfig } from "../../config/config.js"; import type { ClawdbotConfig } from "../../config/config.js";
import * as internalHooks from "../../hooks/internal-hooks.js"; import * as internalHooks from "../../hooks/internal-hooks.js";
import type { MsgContext } from "../templating.js"; import type { MsgContext } from "../templating.js";

View File

@@ -149,7 +149,7 @@ export function extractStatusDirective(body?: string): {
hasDirective: boolean; hasDirective: boolean;
} { } {
if (!body) return { cleaned: "", hasDirective: false }; if (!body) return { cleaned: "", hasDirective: false };
return extractSimpleDirective(body, ["status", "usage"]); return extractSimpleDirective(body, ["status"]);
} }
export type { ElevatedLevel, ReasoningLevel, ThinkLevel, VerboseLevel }; export type { ElevatedLevel, ReasoningLevel, ThinkLevel, VerboseLevel };

View File

@@ -6,7 +6,7 @@ const INLINE_SIMPLE_COMMAND_ALIASES = new Map<string, string>([
]); ]);
const INLINE_SIMPLE_COMMAND_RE = /(?:^|\s)\/(help|commands|whoami|id)(?=$|\s|:)/i; const INLINE_SIMPLE_COMMAND_RE = /(?:^|\s)\/(help|commands|whoami|id)(?=$|\s|:)/i;
const INLINE_STATUS_RE = /(?:^|\s)\/(?:status|usage)(?=$|\s|:)(?:\s*:\s*)?/gi; const INLINE_STATUS_RE = /(?:^|\s)\/status(?=$|\s|:)(?:\s*:\s*)?/gi;
export function extractInlineSimpleCommand(body?: string): { export function extractInlineSimpleCommand(body?: string): {
command: string; command: string;

View File

@@ -49,12 +49,10 @@ describe("subagents utils", () => {
it("formats run status from outcome and timestamps", () => { it("formats run status from outcome and timestamps", () => {
expect(formatRunStatus({ ...baseRun })).toBe("running"); expect(formatRunStatus({ ...baseRun })).toBe("running");
expect(formatRunStatus({ ...baseRun, endedAt: 2000, outcome: { status: "ok" } })).toBe( expect(formatRunStatus({ ...baseRun, endedAt: 2000, outcome: { status: "ok" } })).toBe("done");
"done", expect(formatRunStatus({ ...baseRun, endedAt: 2000, outcome: { status: "timeout" } })).toBe(
"timeout",
); );
expect(
formatRunStatus({ ...baseRun, endedAt: 2000, outcome: { status: "timeout" } }),
).toBe("timeout");
}); });
it("formats duration short for seconds and minutes", () => { it("formats duration short for seconds and minutes", () => {

View File

@@ -28,10 +28,7 @@ export function resolveSubagentLabel(entry: SubagentRunRecord, fallback = "subag
return raw || fallback; return raw || fallback;
} }
export function formatRunLabel( export function formatRunLabel(entry: SubagentRunRecord, options?: { maxLength?: number }) {
entry: SubagentRunRecord,
options?: { maxLength?: number },
) {
const raw = resolveSubagentLabel(entry); const raw = resolveSubagentLabel(entry);
const maxLength = options?.maxLength ?? 72; const maxLength = options?.maxLength ?? 72;
if (!Number.isFinite(maxLength) || maxLength <= 0) return raw; if (!Number.isFinite(maxLength) || maxLength <= 0) return raw;

View File

@@ -383,7 +383,7 @@ export function buildHelpMessage(cfg?: ClawdbotConfig): string {
"/reasoning on|off", "/reasoning on|off",
"/elevated on|off", "/elevated on|off",
"/model <id>", "/model <id>",
"/cost on|off", "/usage off|tokens|full",
]; ];
if (cfg?.commands?.config === true) options.push("/config show"); if (cfg?.commands?.config === true) options.push("/config show");
if (cfg?.commands?.debug === true) options.push("/debug show"); if (cfg?.commands?.debug === true) options.push("/debug show");

View File

@@ -2,7 +2,7 @@ export type ThinkLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh"
export type VerboseLevel = "off" | "on" | "full"; export type VerboseLevel = "off" | "on" | "full";
export type ElevatedLevel = "off" | "on"; export type ElevatedLevel = "off" | "on";
export type ReasoningLevel = "off" | "on" | "stream"; export type ReasoningLevel = "off" | "on" | "stream";
export type UsageDisplayLevel = "off" | "on"; export type UsageDisplayLevel = "off" | "tokens" | "full";
function normalizeProviderId(provider?: string | null): string { function normalizeProviderId(provider?: string | null): string {
if (!provider) return ""; if (!provider) return "";
@@ -92,12 +92,14 @@ export function normalizeVerboseLevel(raw?: string | null): VerboseLevel | undef
return undefined; return undefined;
} }
// Normalize response-usage display flags used to toggle cost/token lines. // Normalize response-usage display modes used to toggle per-response usage footers.
export function normalizeUsageDisplay(raw?: string | null): UsageDisplayLevel | undefined { export function normalizeUsageDisplay(raw?: string | null): UsageDisplayLevel | undefined {
if (!raw) return undefined; if (!raw) return undefined;
const key = raw.toLowerCase(); const key = raw.toLowerCase();
if (["off", "false", "no", "0", "disable", "disabled"].includes(key)) return "off"; if (["off", "false", "no", "0", "disable", "disabled"].includes(key)) return "off";
if (["on", "true", "yes", "1", "enable", "enabled"].includes(key)) return "on"; if (["on", "true", "yes", "1", "enable", "enabled"].includes(key)) return "tokens";
if (["tokens", "token", "tok", "minimal", "min"].includes(key)) return "tokens";
if (["full", "session"].includes(key)) return "full";
return undefined; return undefined;
} }

View File

@@ -42,7 +42,7 @@ export type SessionEntry = {
verboseLevel?: string; verboseLevel?: string;
reasoningLevel?: string; reasoningLevel?: string;
elevatedLevel?: string; elevatedLevel?: string;
responseUsage?: "on" | "off"; responseUsage?: "on" | "off" | "tokens" | "full";
providerOverride?: string; providerOverride?: string;
modelOverride?: string; modelOverride?: string;
authProfileOverride?: string; authProfileOverride?: string;

View File

@@ -252,153 +252,158 @@ async function connectClient(params: { url: string; token: string }) {
} }
describe("gateway (mock openai): tool calling", () => { describe("gateway (mock openai): tool calling", () => {
it("runs a Read tool call end-to-end via gateway agent loop", { timeout: 90_000 }, async () => { it(
const prev = { "runs a Read tool call end-to-end via gateway agent loop",
home: process.env.HOME, { timeout: 90_000 },
configPath: process.env.CLAWDBOT_CONFIG_PATH, async () => {
token: process.env.CLAWDBOT_GATEWAY_TOKEN, const prev = {
skipChannels: process.env.CLAWDBOT_SKIP_CHANNELS, home: process.env.HOME,
skipGmail: process.env.CLAWDBOT_SKIP_GMAIL_WATCHER, configPath: process.env.CLAWDBOT_CONFIG_PATH,
skipCron: process.env.CLAWDBOT_SKIP_CRON, token: process.env.CLAWDBOT_GATEWAY_TOKEN,
skipCanvas: process.env.CLAWDBOT_SKIP_CANVAS_HOST, skipChannels: process.env.CLAWDBOT_SKIP_CHANNELS,
}; skipGmail: process.env.CLAWDBOT_SKIP_GMAIL_WATCHER,
skipCron: process.env.CLAWDBOT_SKIP_CRON,
skipCanvas: process.env.CLAWDBOT_SKIP_CANVAS_HOST,
};
const originalFetch = globalThis.fetch; const originalFetch = globalThis.fetch;
const openaiResponsesUrl = "https://api.openai.com/v1/responses"; const openaiResponsesUrl = "https://api.openai.com/v1/responses";
const isOpenAIResponsesRequest = (url: string) => const isOpenAIResponsesRequest = (url: string) =>
url === openaiResponsesUrl || url === openaiResponsesUrl ||
url.startsWith(`${openaiResponsesUrl}/`) || url.startsWith(`${openaiResponsesUrl}/`) ||
url.startsWith(`${openaiResponsesUrl}?`); url.startsWith(`${openaiResponsesUrl}?`);
const fetchImpl = async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => { const fetchImpl = async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
const url = const url =
typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url; typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
if (isOpenAIResponsesRequest(url)) { if (isOpenAIResponsesRequest(url)) {
const bodyText = const bodyText =
typeof (init as { body?: unknown } | undefined)?.body !== "undefined" typeof (init as { body?: unknown } | undefined)?.body !== "undefined"
? decodeBodyText((init as { body?: unknown }).body) ? decodeBodyText((init as { body?: unknown }).body)
: input instanceof Request : input instanceof Request
? await input.clone().text() ? await input.clone().text()
: ""; : "";
const parsed = bodyText ? (JSON.parse(bodyText) as Record<string, unknown>) : {}; const parsed = bodyText ? (JSON.parse(bodyText) as Record<string, unknown>) : {};
const inputItems = Array.isArray(parsed.input) ? parsed.input : []; const inputItems = Array.isArray(parsed.input) ? parsed.input : [];
return await buildOpenAIResponsesSse({ input: inputItems }); return await buildOpenAIResponsesSse({ input: inputItems });
} }
if (!originalFetch) { if (!originalFetch) {
throw new Error(`fetch is not available (url=${url})`); throw new Error(`fetch is not available (url=${url})`);
} }
return await originalFetch(input, init); return await originalFetch(input, init);
}; };
// TypeScript: Bun's fetch typing includes extra properties; keep this test portable. // TypeScript: Bun's fetch typing includes extra properties; keep this test portable.
(globalThis as unknown as { fetch: unknown }).fetch = fetchImpl; (globalThis as unknown as { fetch: unknown }).fetch = fetchImpl;
const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-gw-mock-home-")); const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-gw-mock-home-"));
process.env.HOME = tempHome; process.env.HOME = tempHome;
process.env.CLAWDBOT_SKIP_CHANNELS = "1"; process.env.CLAWDBOT_SKIP_CHANNELS = "1";
process.env.CLAWDBOT_SKIP_GMAIL_WATCHER = "1"; process.env.CLAWDBOT_SKIP_GMAIL_WATCHER = "1";
process.env.CLAWDBOT_SKIP_CRON = "1"; process.env.CLAWDBOT_SKIP_CRON = "1";
process.env.CLAWDBOT_SKIP_CANVAS_HOST = "1"; process.env.CLAWDBOT_SKIP_CANVAS_HOST = "1";
const token = `test-${randomUUID()}`; const token = `test-${randomUUID()}`;
process.env.CLAWDBOT_GATEWAY_TOKEN = token; process.env.CLAWDBOT_GATEWAY_TOKEN = token;
const workspaceDir = path.join(tempHome, "clawd"); const workspaceDir = path.join(tempHome, "clawd");
await fs.mkdir(workspaceDir, { recursive: true }); await fs.mkdir(workspaceDir, { recursive: true });
const nonceA = randomUUID(); const nonceA = randomUUID();
const nonceB = randomUUID(); const nonceB = randomUUID();
const toolProbePath = path.join(workspaceDir, `.clawdbot-tool-probe.${nonceA}.txt`); const toolProbePath = path.join(workspaceDir, `.clawdbot-tool-probe.${nonceA}.txt`);
await fs.writeFile(toolProbePath, `nonceA=${nonceA}\nnonceB=${nonceB}\n`); await fs.writeFile(toolProbePath, `nonceA=${nonceA}\nnonceB=${nonceB}\n`);
const configDir = path.join(tempHome, ".clawdbot"); const configDir = path.join(tempHome, ".clawdbot");
await fs.mkdir(configDir, { recursive: true }); await fs.mkdir(configDir, { recursive: true });
const configPath = path.join(configDir, "clawdbot.json"); const configPath = path.join(configDir, "clawdbot.json");
const cfg = { const cfg = {
agents: { defaults: { workspace: workspaceDir } }, agents: { defaults: { workspace: workspaceDir } },
models: { models: {
mode: "replace", mode: "replace",
providers: { providers: {
openai: { openai: {
baseUrl: "https://api.openai.com/v1", baseUrl: "https://api.openai.com/v1",
apiKey: "test", apiKey: "test",
api: "openai-responses", api: "openai-responses",
models: [ models: [
{ {
id: "gpt-5.2", id: "gpt-5.2",
name: "gpt-5.2", name: "gpt-5.2",
api: "openai-responses", api: "openai-responses",
reasoning: false, reasoning: false,
input: ["text"], input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128_000, contextWindow: 128_000,
maxTokens: 4096, maxTokens: 4096,
}, },
], ],
},
}, },
}, },
}, gateway: { auth: { token } },
gateway: { auth: { token } }, };
};
await fs.writeFile(configPath, `${JSON.stringify(cfg, null, 2)}\n`); await fs.writeFile(configPath, `${JSON.stringify(cfg, null, 2)}\n`);
process.env.CLAWDBOT_CONFIG_PATH = configPath; process.env.CLAWDBOT_CONFIG_PATH = configPath;
const port = await getFreeGatewayPort(); const port = await getFreeGatewayPort();
const server = await startGatewayServer(port, { const server = await startGatewayServer(port, {
bind: "loopback", bind: "loopback",
auth: { mode: "token", token }, auth: { mode: "token", token },
controlUiEnabled: false, controlUiEnabled: false,
});
const client = await connectClient({
url: `ws://127.0.0.1:${port}`,
token,
});
try {
const sessionKey = "agent:dev:mock-openai";
await client.request<Record<string, unknown>>("sessions.patch", {
key: sessionKey,
model: "openai/gpt-5.2",
}); });
const runId = randomUUID(); const client = await connectClient({
const payload = await client.request<{ url: `ws://127.0.0.1:${port}`,
status?: unknown; token,
result?: unknown; });
}>(
"agent",
{
sessionKey,
idempotencyKey: `idem-${runId}`,
message:
`Call the read tool on "${toolProbePath}". ` +
`Then reply with exactly: ${nonceA} ${nonceB}. No extra text.`,
deliver: false,
},
{ expectFinal: true },
);
expect(payload?.status).toBe("ok"); try {
const text = extractPayloadText(payload?.result); const sessionKey = "agent:dev:mock-openai";
expect(text).toContain(nonceA);
expect(text).toContain(nonceB); await client.request<Record<string, unknown>>("sessions.patch", {
} finally { key: sessionKey,
client.stop(); model: "openai/gpt-5.2",
await server.close({ reason: "mock openai test complete" }); });
await fs.rm(tempHome, { recursive: true, force: true });
(globalThis as unknown as { fetch: unknown }).fetch = originalFetch; const runId = randomUUID();
process.env.HOME = prev.home; const payload = await client.request<{
process.env.CLAWDBOT_CONFIG_PATH = prev.configPath; status?: unknown;
process.env.CLAWDBOT_GATEWAY_TOKEN = prev.token; result?: unknown;
process.env.CLAWDBOT_SKIP_CHANNELS = prev.skipChannels; }>(
process.env.CLAWDBOT_SKIP_GMAIL_WATCHER = prev.skipGmail; "agent",
process.env.CLAWDBOT_SKIP_CRON = prev.skipCron; {
process.env.CLAWDBOT_SKIP_CANVAS_HOST = prev.skipCanvas; sessionKey,
} idempotencyKey: `idem-${runId}`,
}, 30_000); message:
`Call the read tool on "${toolProbePath}". ` +
`Then reply with exactly: ${nonceA} ${nonceB}. No extra text.`,
deliver: false,
},
{ expectFinal: true },
);
expect(payload?.status).toBe("ok");
const text = extractPayloadText(payload?.result);
expect(text).toContain(nonceA);
expect(text).toContain(nonceB);
} finally {
client.stop();
await server.close({ reason: "mock openai test complete" });
await fs.rm(tempHome, { recursive: true, force: true });
(globalThis as unknown as { fetch: unknown }).fetch = originalFetch;
process.env.HOME = prev.home;
process.env.CLAWDBOT_CONFIG_PATH = prev.configPath;
process.env.CLAWDBOT_GATEWAY_TOKEN = prev.token;
process.env.CLAWDBOT_SKIP_CHANNELS = prev.skipChannels;
process.env.CLAWDBOT_SKIP_GMAIL_WATCHER = prev.skipGmail;
process.env.CLAWDBOT_SKIP_CRON = prev.skipCron;
process.env.CLAWDBOT_SKIP_CANVAS_HOST = prev.skipCanvas;
}
},
30_000,
);
}); });

View File

@@ -35,7 +35,14 @@ export const SessionsPatchParamsSchema = Type.Object(
verboseLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), verboseLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
reasoningLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), reasoningLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
responseUsage: Type.Optional( responseUsage: Type.Optional(
Type.Union([Type.Literal("on"), Type.Literal("off"), Type.Null()]), Type.Union([
Type.Literal("off"),
Type.Literal("tokens"),
Type.Literal("full"),
// Backward compat with older clients/stores.
Type.Literal("on"),
Type.Null(),
]),
), ),
elevatedLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), elevatedLevel: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),
model: Type.Optional(Type.Union([NonEmptyString, Type.Null()])), model: Type.Optional(Type.Union([NonEmptyString, Type.Null()])),

View File

@@ -196,7 +196,6 @@ export const handleBridgeEvent = async (
? obj.exitCode ? obj.exitCode
: undefined; : undefined;
const timedOut = obj.timedOut === true; const timedOut = obj.timedOut === true;
const success = obj.success === true;
const output = typeof obj.output === "string" ? obj.output.trim() : ""; const output = typeof obj.output === "string" ? obj.output.trim() : "";
const reason = typeof obj.reason === "string" ? obj.reason.trim() : ""; const reason = typeof obj.reason === "string" ? obj.reason.trim() : "";

View File

@@ -31,7 +31,7 @@ export type GatewaySessionRow = {
inputTokens?: number; inputTokens?: number;
outputTokens?: number; outputTokens?: number;
totalTokens?: number; totalTokens?: number;
responseUsage?: "on" | "off"; responseUsage?: "on" | "off" | "tokens" | "full";
modelProvider?: string; modelProvider?: string;
model?: string; model?: string;
contextTokens?: number; contextTokens?: number;

View File

@@ -132,7 +132,7 @@ export async function applySessionsPatchToStore(params: {
delete next.responseUsage; delete next.responseUsage;
} else if (raw !== undefined) { } else if (raw !== undefined) {
const normalized = normalizeUsageDisplay(String(raw)); const normalized = normalizeUsageDisplay(String(raw));
if (!normalized) return invalid('invalid responseUsage (use "on"|"off")'); if (!normalized) return invalid('invalid responseUsage (use "off"|"tokens"|"full")');
if (normalized === "off") delete next.responseUsage; if (normalized === "off") delete next.responseUsage;
else next.responseUsage = normalized; else next.responseUsage = normalized;
} }

View File

@@ -97,8 +97,8 @@ describe("Slack native command argument menus", () => {
const { commands, ctx, account } = createHarness(); const { commands, ctx, account } = createHarness();
registerSlackMonitorSlashCommands({ ctx: ctx as never, account: account as never }); registerSlackMonitorSlashCommands({ ctx: ctx as never, account: account as never });
const handler = commands.get("/cost"); const handler = commands.get("/usage");
if (!handler) throw new Error("Missing /cost handler"); if (!handler) throw new Error("Missing /usage handler");
const respond = vi.fn().mockResolvedValue(undefined); const respond = vi.fn().mockResolvedValue(undefined);
const ack = vi.fn().mockResolvedValue(undefined); const ack = vi.fn().mockResolvedValue(undefined);
@@ -133,7 +133,7 @@ describe("Slack native command argument menus", () => {
await handler({ await handler({
ack: vi.fn().mockResolvedValue(undefined), ack: vi.fn().mockResolvedValue(undefined),
action: { action: {
value: encodeValue({ command: "cost", arg: "mode", value: "on", userId: "U1" }), value: encodeValue({ command: "usage", arg: "mode", value: "tokens", userId: "U1" }),
}, },
body: { body: {
user: { id: "U1", name: "Ada" }, user: { id: "U1", name: "Ada" },
@@ -145,7 +145,7 @@ describe("Slack native command argument menus", () => {
expect(dispatchMock).toHaveBeenCalledTimes(1); expect(dispatchMock).toHaveBeenCalledTimes(1);
const call = dispatchMock.mock.calls[0]?.[0] as { ctx?: { Body?: string } }; const call = dispatchMock.mock.calls[0]?.[0] as { ctx?: { Body?: string } };
expect(call.ctx?.Body).toBe("/cost on"); expect(call.ctx?.Body).toBe("/usage tokens");
}); });
it("rejects menu clicks from other users", async () => { it("rejects menu clicks from other users", async () => {
@@ -159,7 +159,7 @@ describe("Slack native command argument menus", () => {
await handler({ await handler({
ack: vi.fn().mockResolvedValue(undefined), ack: vi.fn().mockResolvedValue(undefined),
action: { action: {
value: encodeValue({ command: "cost", arg: "mode", value: "on", userId: "U1" }), value: encodeValue({ command: "usage", arg: "mode", value: "tokens", userId: "U1" }),
}, },
body: { body: {
user: { id: "U2", name: "Eve" }, user: { id: "U2", name: "Eve" },

View File

@@ -5,7 +5,7 @@ const VERBOSE_LEVELS = ["on", "off"];
const REASONING_LEVELS = ["on", "off"]; const REASONING_LEVELS = ["on", "off"];
const ELEVATED_LEVELS = ["on", "off"]; const ELEVATED_LEVELS = ["on", "off"];
const ACTIVATION_LEVELS = ["mention", "always"]; const ACTIVATION_LEVELS = ["mention", "always"];
const TOGGLE = ["on", "off"]; const USAGE_FOOTER_LEVELS = ["off", "tokens", "full"];
export type ParsedCommand = { export type ParsedCommand = {
name: string; name: string;
@@ -73,10 +73,10 @@ export function getSlashCommands(options: SlashCommandOptions = {}): SlashComman
})), })),
}, },
{ {
name: "cost", name: "usage",
description: "Toggle per-response usage line", description: "Toggle per-response usage line",
getArgumentCompletions: (prefix) => getArgumentCompletions: (prefix) =>
TOGGLE.filter((v) => v.startsWith(prefix.toLowerCase())).map((value) => ({ USAGE_FOOTER_LEVELS.filter((v) => v.startsWith(prefix.toLowerCase())).map((value) => ({
value, value,
label: value, label: value,
})), })),
@@ -129,7 +129,7 @@ export function helpText(options: SlashCommandOptions = {}): string {
`/think <${thinkLevels}>`, `/think <${thinkLevels}>`,
"/verbose <on|off>", "/verbose <on|off>",
"/reasoning <on|off>", "/reasoning <on|off>",
"/cost <on|off>", "/usage <off|tokens|full>",
"/elevated <on|off>", "/elevated <on|off>",
"/elev <on|off>", "/elev <on|off>",
"/activation <mention|always>", "/activation <mention|always>",

View File

@@ -52,7 +52,7 @@ export type GatewaySessionList = {
inputTokens?: number | null; inputTokens?: number | null;
outputTokens?: number | null; outputTokens?: number | null;
totalTokens?: number | null; totalTokens?: number | null;
responseUsage?: "on" | "off"; responseUsage?: "on" | "off" | "tokens" | "full";
modelProvider?: string; modelProvider?: string;
label?: string; label?: string;
displayName?: string; displayName?: string;

View File

@@ -317,23 +317,30 @@ export function createCommandHandlers(context: CommandHandlerContext) {
chatLog.addSystem(`reasoning failed: ${String(err)}`); chatLog.addSystem(`reasoning failed: ${String(err)}`);
} }
break; break;
case "cost": { case "usage": {
const normalized = args ? normalizeUsageDisplay(args) : undefined; const normalized = args ? normalizeUsageDisplay(args) : undefined;
if (args && !normalized) { if (args && !normalized) {
chatLog.addSystem("usage: /cost <on|off>"); chatLog.addSystem("usage: /usage <off|tokens|full>");
break; break;
} }
const current = state.sessionInfo.responseUsage === "on" ? "on" : "off"; const currentRaw = state.sessionInfo.responseUsage;
const next = normalized ?? (current === "on" ? "off" : "on"); const current =
currentRaw === "full"
? "full"
: currentRaw === "tokens" || currentRaw === "on"
? "tokens"
: "off";
const next =
normalized ?? (current === "off" ? "tokens" : current === "tokens" ? "full" : "off");
try { try {
await client.patchSession({ await client.patchSession({
key: state.currentSessionKey, key: state.currentSessionKey,
responseUsage: next === "off" ? null : next, responseUsage: next === "off" ? null : next,
}); });
chatLog.addSystem(next === "on" ? "usage line enabled" : "usage line disabled"); chatLog.addSystem(`usage footer: ${next}`);
await refreshSessionInfo(); await refreshSessionInfo();
} catch (err) { } catch (err) {
chatLog.addSystem(`cost failed: ${String(err)}`); chatLog.addSystem(`usage failed: ${String(err)}`);
} }
break; break;
} }

View File

@@ -34,7 +34,7 @@ export type SessionInfo = {
inputTokens?: number | null; inputTokens?: number | null;
outputTokens?: number | null; outputTokens?: number | null;
totalTokens?: number | null; totalTokens?: number | null;
responseUsage?: "on" | "off"; responseUsage?: "on" | "off" | "tokens" | "full";
updatedAt?: number | null; updatedAt?: number | null;
displayName?: string; displayName?: string;
}; };