clawdbot/src/auto-reply/status.ts

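// Builders for the auto-reply chat surfaces: the /status summary line plus the
// /help and /commands reply texts.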
import fs from "node:fs";
import { lookupContextTokens } from "../agents/context.js";
import {
  DEFAULT_CONTEXT_TOKENS,
  DEFAULT_MODEL,
  DEFAULT_PROVIDER,
} from "../agents/defaults.js";
import { resolveModelAuthMode } from "../agents/model-auth.js";
import { resolveConfiguredModelRef } from "../agents/model-selection.js";
import {
  derivePromptTokens,
  normalizeUsage,
  type UsageLike,
} from "../agents/usage.js";
import type { ClawdbotConfig } from "../config/config.js";
import {
  resolveSessionFilePath,
  type SessionEntry,
  type SessionScope,
} from "../config/sessions.js";
import { resolveCommitHash } from "../infra/git-commit.js";
import {
  estimateUsageCost,
  formatTokenCount as formatTokenCountShared,
  formatUsd,
  resolveModelCostConfig,
} from "../utils/usage-format.js";
import { VERSION } from "../version.js";
import { listChatCommands } from "./commands-registry.js";
import type {
  ElevatedLevel,
  ReasoningLevel,
  ThinkLevel,
  VerboseLevel,
} from "./thinking.js";

type AgentConfig = NonNullable<ClawdbotConfig["agent"]>;

export const formatTokenCount = formatTokenCountShared;

type QueueStatus = {
  mode?: string;
  depth?: number;
  debounceMs?: number;
  cap?: number;
  dropPolicy?: string;
  showDetails?: boolean;
};

type StatusArgs = {
  config?: ClawdbotConfig;
  agent: AgentConfig;
  sessionEntry?: SessionEntry;
  sessionKey?: string;
  sessionScope?: SessionScope;
  groupActivation?: "mention" | "always";
  resolvedThink?: ThinkLevel;
  resolvedVerbose?: VerboseLevel;
  resolvedReasoning?: ReasoningLevel;
  resolvedElevated?: ElevatedLevel;
  modelAuth?: string;
  usageLine?: string;
  queue?: QueueStatus;
  includeTranscriptUsage?: boolean;
};
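
// Renders context usage as "used/context" plus a percentage when both sides are
// known. Hedged example (assumes formatTokenCount abbreviates 12000 as "12k"):
//   formatTokens(12000, 200000)     -> "12k/200k (6%)"
//   formatTokens(undefined, 200000) -> "?/200k"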
const formatTokens = (
  total: number | null | undefined,
  contextTokens: number | null,
) => {
  const ctx = contextTokens ?? null;
  if (total == null) {
    const ctxLabel = ctx ? formatTokenCount(ctx) : "?";
    return `?/${ctxLabel}`;
  }
  const pct = ctx ? Math.min(999, Math.round((total / ctx) * 100)) : null;
  const totalLabel = formatTokenCount(total);
  const ctxLabel = ctx ? formatTokenCount(ctx) : "?";
  return `${totalLabel}/${ctxLabel}${pct !== null ? ` (${pct}%)` : ""}`;
};

export const formatContextUsageShort = (
  total: number | null | undefined,
  contextTokens: number | null | undefined,
) => `Context ${formatTokens(total, contextTokens ?? null)}`;
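
// Summarizes queue settings for the status line. Illustrative shapes (values are
// examples, not defaults):
//   { depth: 2 }                         -> " (depth 2)"
//   { showDetails: true, depth: 2, debounceMs: 1500, cap: 10, dropPolicy: "oldest" }
//                                        -> " (depth 2 · debounce 1.5s · cap 10 · drop oldest)"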
const formatQueueDetails = (queue?: QueueStatus) => {
  if (!queue) return "";
  const depth = typeof queue.depth === "number" ? `depth ${queue.depth}` : null;
  if (!queue.showDetails) {
    return depth ? ` (${depth})` : "";
  }
  const detailParts: string[] = [];
  if (depth) detailParts.push(depth);
  if (typeof queue.debounceMs === "number") {
    const ms = Math.max(0, Math.round(queue.debounceMs));
    const label =
      ms >= 1000
        ? `${ms % 1000 === 0 ? ms / 1000 : (ms / 1000).toFixed(1)}s`
        : `${ms}ms`;
    detailParts.push(`debounce ${label}`);
  }
  if (typeof queue.cap === "number") detailParts.push(`cap ${queue.cap}`);
  if (queue.dropPolicy) detailParts.push(`drop ${queue.dropPolicy}`);
  return detailParts.length ? ` (${detailParts.join(" · ")})` : "";
};
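
// Pulls the most recent usage entry out of the session transcript (JSONL, one JSON
// object per line). A line the parser below accepts looks roughly like either of
// these (the usage payload itself is whatever normalizeUsage understands):
//   {"message":{"model":"...","usage":{...}}}
//   {"usage":{...},"model":"..."}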
const readUsageFromSessionLog = (
  sessionId?: string,
  sessionEntry?: SessionEntry,
):
  | {
      input: number;
      output: number;
      promptTokens: number;
      total: number;
      model?: string;
    }
  | undefined => {
  // Transcripts are stored at the session file path (fallback: ~/.clawdbot/sessions/<SessionId>.jsonl)
  if (!sessionId) return undefined;
  const logPath = resolveSessionFilePath(sessionId, sessionEntry);
  if (!fs.existsSync(logPath)) return undefined;
  try {
    const lines = fs.readFileSync(logPath, "utf-8").split(/\n+/);
    let input = 0;
    let output = 0;
    let promptTokens = 0;
    let model: string | undefined;
    let lastUsage: ReturnType<typeof normalizeUsage> | undefined;
    for (const line of lines) {
      if (!line.trim()) continue;
      try {
        const parsed = JSON.parse(line) as {
          message?: {
            usage?: UsageLike;
            model?: string;
          };
          usage?: UsageLike;
          model?: string;
        };
        const usageRaw = parsed.message?.usage ?? parsed.usage;
        const usage = normalizeUsage(usageRaw);
        if (usage) lastUsage = usage;
        model = parsed.message?.model ?? parsed.model ?? model;
      } catch {
        // ignore bad lines
      }
    }
    if (!lastUsage) return undefined;
    input = lastUsage.input ?? 0;
    output = lastUsage.output ?? 0;
    promptTokens =
      derivePromptTokens(lastUsage) ?? lastUsage.total ?? input + output;
    const total = lastUsage.total ?? promptTokens + output;
    if (promptTokens === 0 && total === 0) return undefined;
    return { input, output, promptTokens, total, model };
  } catch {
    return undefined;
  }
};
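
// Formats the input/output token pair, e.g. "usage 1.2k in / 300 out" (exact number
// rendering depends on the shared formatTokenCount helper); returns null when
// neither side is known.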
const formatUsagePair = (input?: number | null, output?: number | null) => {
  if (input == null && output == null) return null;
  const inputLabel = typeof input === "number" ? formatTokenCount(input) : "?";
  const outputLabel =
    typeof output === "number" ? formatTokenCount(output) : "?";
  return `usage ${inputLabel} in / ${outputLabel} out`;
};
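
// Builds the single "·"-separated /status line. Illustrative shape (values are
// placeholders; the usage, cost, activation, queue-detail and trailing usage-line
// segments appear only when available):
//   status <sessionKey> · model <provider>/<model> (<auth>) · usage <in> in / <out> out
//   · cost <usd> · Context <used>/<ctx> (<pct>%) · compactions <n> · think <level>
//   · verbose <level> · reasoning <level> · elevated <level> · queue <mode>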
export function buildStatusMessage(args: StatusArgs): string {
  const entry = args.sessionEntry;
  const resolved = resolveConfiguredModelRef({
    cfg: { agent: args.agent ?? {} },
    defaultProvider: DEFAULT_PROVIDER,
    defaultModel: DEFAULT_MODEL,
  });
  const provider =
    entry?.providerOverride ?? resolved.provider ?? DEFAULT_PROVIDER;
  let model = entry?.modelOverride ?? resolved.model ?? DEFAULT_MODEL;
  let contextTokens =
    entry?.contextTokens ??
    args.agent?.contextTokens ??
    lookupContextTokens(model) ??
    DEFAULT_CONTEXT_TOKENS;
  let inputTokens = entry?.inputTokens;
  let outputTokens = entry?.outputTokens;
  let totalTokens =
    entry?.totalTokens ??
    (entry?.inputTokens ?? 0) + (entry?.outputTokens ?? 0);
  // Prefer prompt-size tokens from the session transcript when it looks larger
  // (cached prompt tokens are often missing from agent meta/store).
  if (args.includeTranscriptUsage) {
    const logUsage = readUsageFromSessionLog(entry?.sessionId, entry);
    if (logUsage) {
      const candidate = logUsage.promptTokens || logUsage.total;
      if (!totalTokens || totalTokens === 0 || candidate > totalTokens) {
        totalTokens = candidate;
      }
      if (!model) model = logUsage.model ?? model;
      if (!contextTokens && logUsage.model) {
        contextTokens = lookupContextTokens(logUsage.model) ?? contextTokens;
      }
      if (!inputTokens || inputTokens === 0) inputTokens = logUsage.input;
      if (!outputTokens || outputTokens === 0) outputTokens = logUsage.output;
    }
  }
  const thinkLevel = args.resolvedThink ?? args.agent?.thinkingDefault ?? "off";
  const verboseLevel =
    args.resolvedVerbose ?? args.agent?.verboseDefault ?? "off";
  const reasoningLevel = args.resolvedReasoning ?? "off";
  const elevatedLevel =
    args.resolvedElevated ??
    args.sessionEntry?.elevatedLevel ??
    args.agent?.elevatedDefault ??
    "on";
  const isGroupSession =
    entry?.chatType === "group" ||
    entry?.chatType === "room" ||
    Boolean(args.sessionKey?.includes(":group:")) ||
    Boolean(args.sessionKey?.includes(":channel:")) ||
    Boolean(args.sessionKey?.startsWith("group:"));
  const groupActivationValue = isGroupSession
    ? (args.groupActivation ?? entry?.groupActivation ?? "mention")
    : undefined;
  const authMode =
    args.modelAuth ?? resolveModelAuthMode(provider, args.config);
  const showCost = authMode === "api-key";
  const costConfig = showCost
    ? resolveModelCostConfig({
        provider,
        model,
        config: args.config,
      })
    : undefined;
  const hasUsage =
    typeof inputTokens === "number" || typeof outputTokens === "number";
  const cost =
    showCost && hasUsage
      ? estimateUsageCost({
          usage: {
            input: inputTokens ?? undefined,
            output: outputTokens ?? undefined,
          },
          cost: costConfig,
        })
      : undefined;
  const costLabel = showCost && hasUsage ? formatUsd(cost) : undefined;
  const parts: Array<string | null> = [];
  parts.push(`status ${args.sessionKey ?? "unknown"}`);
  const modelLabel = model ? `${provider}/${model}` : "unknown";
  const authLabel = authMode && authMode !== "unknown" ? ` (${authMode})` : "";
  parts.push(`model ${modelLabel}${authLabel}`);
  const usagePair = formatUsagePair(inputTokens, outputTokens);
  if (usagePair) parts.push(usagePair);
  if (costLabel) parts.push(`cost ${costLabel}`);
  const contextSummary = formatContextUsageShort(
    totalTokens && totalTokens > 0 ? totalTokens : null,
    contextTokens ?? null,
  );
  parts.push(contextSummary);
  parts.push(`compactions ${entry?.compactionCount ?? 0}`);
  parts.push(`think ${thinkLevel}`);
  parts.push(`verbose ${verboseLevel}`);
  parts.push(`reasoning ${reasoningLevel}`);
  parts.push(`elevated ${elevatedLevel}`);
  if (groupActivationValue) parts.push(`activation ${groupActivationValue}`);
  const queueMode = args.queue?.mode ?? "unknown";
  const queueDetails = formatQueueDetails(args.queue);
  parts.push(`queue ${queueMode}${queueDetails}`);
  if (args.usageLine) parts.push(args.usageLine);
  return parts.filter(Boolean).join(" · ");
}

export function buildHelpMessage(): string {
  return [
    " Help",
    "Shortcuts: /new reset | /compact [instructions] | /restart relink",
    "Options: /think <level> | /verbose on|off | /reasoning on|off | /elevated on|off | /model <id> | /cost on|off",
    "More: /commands for all slash commands",
  ].join("\n");
}
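
// Lists every registered chat command with de-duplicated text aliases. Each output
// line has the shape "/name (aliases: ...) (text-only) - description"; for example
// (the name, alias and description here are illustrative, the real ones come from
// commands-registry):
//   /new (aliases: reset) - Start a fresh session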
export function buildCommandsMessage(): string {
  const lines = [" Slash commands"];
  for (const command of listChatCommands()) {
    const primary = `/${command.nativeName}`;
    const seen = new Set<string>();
    const aliases = command.textAliases
      .map((alias) => alias.trim())
      .filter(Boolean)
      .filter((alias) => alias.toLowerCase() !== primary.toLowerCase())
      .filter((alias) => {
        const key = alias.toLowerCase();
        if (seen.has(key)) return false;
        seen.add(key);
        return true;
      });
    const aliasLabel = aliases.length
      ? ` (aliases: ${aliases.join(", ")})`
      : "";
    const scopeLabel = command.supportsNative === false ? " (text-only)" : "";
    lines.push(`${primary}${aliasLabel}${scopeLabel} - ${command.description}`);
  }
  return lines.join("\n");
}