refactor(src): split oversized modules

Peter Steinberger
2026-01-14 01:08:15 +00:00
parent b2179de839
commit bcbfb357be
675 changed files with 91476 additions and 73453 deletions

View File

@@ -0,0 +1,173 @@
import fs from "node:fs/promises";
import path from "node:path";
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import type { ClawdbotConfig } from "../../config/config.js";
import type { WorkspaceBootstrapFile } from "../workspace.js";
import type { EmbeddedContextFile } from "./types.js";
type ContentBlockWithSignature = {
thought_signature?: unknown;
[key: string]: unknown;
};
/**
* Strips Claude-style thought_signature fields from content blocks.
*
* Gemini expects thought signatures as base64-encoded bytes, but Claude stores message ids
* like "msg_abc123...". We only strip "msg_*" to preserve any provider-valid signatures.
*/
export function stripThoughtSignatures<T>(content: T): T {
if (!Array.isArray(content)) return content;
return content.map((block) => {
if (!block || typeof block !== "object") return block;
const rec = block as ContentBlockWithSignature;
const signature = rec.thought_signature;
if (typeof signature !== "string" || !signature.startsWith("msg_")) {
return block;
}
const { thought_signature: _signature, ...rest } = rec;
return rest;
}) as T;
}
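// Usage sketch (illustrative only, not part of the original module): a Claude-style
// "msg_*" signature is removed, while a base64-looking signature is left untouched.
const exampleStrippedBlocks = stripThoughtSignatures([
  { type: "thinking", thinking: "plan the edit", thought_signature: "msg_abc123" },
  { type: "thinking", thinking: "plan the edit", thought_signature: "ZmFrZXNpZw==" },
]);
// exampleStrippedBlocks[0] has no thought_signature; exampleStrippedBlocks[1] keeps its value.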
export const DEFAULT_BOOTSTRAP_MAX_CHARS = 20_000;
const BOOTSTRAP_HEAD_RATIO = 0.7;
const BOOTSTRAP_TAIL_RATIO = 0.2;
type TrimBootstrapResult = {
content: string;
truncated: boolean;
maxChars: number;
originalLength: number;
};
export function resolveBootstrapMaxChars(cfg?: ClawdbotConfig): number {
const raw = cfg?.agents?.defaults?.bootstrapMaxChars;
if (typeof raw === "number" && Number.isFinite(raw) && raw > 0) {
return Math.floor(raw);
}
return DEFAULT_BOOTSTRAP_MAX_CHARS;
}
function trimBootstrapContent(
content: string,
fileName: string,
maxChars: number,
): TrimBootstrapResult {
const trimmed = content.trimEnd();
if (trimmed.length <= maxChars) {
return {
content: trimmed,
truncated: false,
maxChars,
originalLength: trimmed.length,
};
}
const headChars = Math.floor(maxChars * BOOTSTRAP_HEAD_RATIO);
const tailChars = Math.floor(maxChars * BOOTSTRAP_TAIL_RATIO);
const head = trimmed.slice(0, headChars);
const tail = trimmed.slice(-tailChars);
const marker = [
"",
`[...truncated, read ${fileName} for full content...]`,
`…(truncated ${fileName}: kept ${headChars}+${tailChars} chars of ${trimmed.length})…`,
"",
].join("\n");
const contentWithMarker = [head, marker, tail].join("\n");
return {
content: contentWithMarker,
truncated: true,
maxChars,
originalLength: trimmed.length,
};
}
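// Worked example (illustrative only, not part of the original module; the file name is made up):
// with the default 20_000-char budget, a 50_000-char bootstrap file keeps its first
// 14_000 chars (70%) and its last 4_000 chars (20%); the remaining ~10% of the budget
// absorbs the truncation marker lines.
const exampleTrimmed = trimBootstrapContent(
  "x".repeat(50_000),
  "CONTEXT.md",
  DEFAULT_BOOTSTRAP_MAX_CHARS,
);
// exampleTrimmed.truncated === true and exampleTrimmed.originalLength === 50_000.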
export async function ensureSessionHeader(params: {
sessionFile: string;
sessionId: string;
cwd: string;
}) {
const file = params.sessionFile;
try {
await fs.stat(file);
return;
} catch {
// file does not exist yet; fall through and create it below
}
await fs.mkdir(path.dirname(file), { recursive: true });
const sessionVersion = 2;
const entry = {
type: "session",
version: sessionVersion,
id: params.sessionId,
timestamp: new Date().toISOString(),
cwd: params.cwd,
};
await fs.writeFile(file, `${JSON.stringify(entry)}\n`, "utf-8");
}
export function buildBootstrapContextFiles(
files: WorkspaceBootstrapFile[],
opts?: { warn?: (message: string) => void; maxChars?: number },
): EmbeddedContextFile[] {
const maxChars = opts?.maxChars ?? DEFAULT_BOOTSTRAP_MAX_CHARS;
const result: EmbeddedContextFile[] = [];
for (const file of files) {
if (file.missing) {
result.push({
path: file.name,
content: `[MISSING] Expected at: ${file.path}`,
});
continue;
}
const trimmed = trimBootstrapContent(
file.content ?? "",
file.name,
maxChars,
);
if (!trimmed.content) continue;
if (trimmed.truncated) {
opts?.warn?.(
`workspace bootstrap file ${file.name} is ${trimmed.originalLength} chars (limit ${trimmed.maxChars}); truncating in injected context`,
);
}
result.push({
path: file.name,
content: trimmed.content,
});
}
return result;
}
export function sanitizeGoogleTurnOrdering(
messages: AgentMessage[],
): AgentMessage[] {
const GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT = "(session bootstrap)";
const first = messages[0] as
| { role?: unknown; content?: unknown }
| undefined;
const role = first?.role;
const content = first?.content;
if (
role === "user" &&
typeof content === "string" &&
content.trim() === GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT
) {
return messages;
}
if (role !== "assistant") return messages;
// Cloud Code Assist rejects histories that begin with a model turn (tool call or text).
// Prepend a tiny synthetic user turn so the rest of the transcript can be used.
const bootstrap: AgentMessage = {
role: "user",
content: GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT,
timestamp: Date.now(),
} as AgentMessage;
return [bootstrap, ...messages];
}
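// Usage sketch (illustrative only, not part of the original module): a transcript that opens
// with an assistant turn gains a synthetic "(session bootstrap)" user turn in front of it;
// anything else passes through unchanged.
const exampleReordered = sanitizeGoogleTurnOrdering([
  {
    role: "assistant",
    content: "Resuming earlier work",
    timestamp: Date.now(),
  } as unknown as AgentMessage,
]);
// exampleReordered[0] is the synthetic user turn; exampleReordered[1] is the original assistant turn.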

View File

@@ -0,0 +1,220 @@
import type { AssistantMessage } from "@mariozechner/pi-ai";
import type { ClawdbotConfig } from "../../config/config.js";
import { formatSandboxToolPolicyBlockedMessage } from "../sandbox.js";
import type { FailoverReason } from "./types.js";
export function isContextOverflowError(errorMessage?: string): boolean {
if (!errorMessage) return false;
const lower = errorMessage.toLowerCase();
return (
lower.includes("request_too_large") ||
lower.includes("request exceeds the maximum size") ||
lower.includes("context length exceeded") ||
lower.includes("maximum context length") ||
lower.includes("prompt is too long") ||
lower.includes("context overflow") ||
(lower.includes("413") && lower.includes("too large"))
);
}
export function isCompactionFailureError(errorMessage?: string): boolean {
if (!errorMessage) return false;
if (!isContextOverflowError(errorMessage)) return false;
const lower = errorMessage.toLowerCase();
return (
lower.includes("summarization failed") ||
lower.includes("auto-compaction") ||
lower.includes("compaction failed") ||
lower.includes("compaction")
);
}
export function formatAssistantErrorText(
msg: AssistantMessage,
opts?: { cfg?: ClawdbotConfig; sessionKey?: string },
): string | undefined {
if (msg.stopReason !== "error") return undefined;
const raw = (msg.errorMessage ?? "").trim();
if (!raw) return "LLM request failed with an unknown error.";
const unknownTool =
raw.match(/unknown tool[:\s]+["']?([a-z0-9_-]+)["']?/i) ??
raw.match(
/tool\s+["']?([a-z0-9_-]+)["']?\s+(?:not found|is not available)/i,
);
if (unknownTool?.[1]) {
const rewritten = formatSandboxToolPolicyBlockedMessage({
cfg: opts?.cfg,
sessionKey: opts?.sessionKey,
toolName: unknownTool[1],
});
if (rewritten) return rewritten;
}
if (isContextOverflowError(raw)) {
return (
"Context overflow: prompt too large for the model. " +
"Try again with less input or a larger-context model."
);
}
if (/incorrect role information|roles must alternate/i.test(raw)) {
return (
"Message ordering conflict - please try again. " +
"If this persists, use /new to start a fresh session."
);
}
const invalidRequest = raw.match(
/"type":"invalid_request_error".*?"message":"([^"]+)"/,
);
if (invalidRequest?.[1]) {
return `LLM request rejected: ${invalidRequest[1]}`;
}
if (isOverloadedErrorMessage(raw)) {
return "The AI service is temporarily overloaded. Please try again in a moment.";
}
return raw.length > 600 ? `${raw.slice(0, 600)}…` : raw;
}
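// Usage sketch (illustrative only, not part of the original module; the error text is made up):
// a context-overflow failure is rewritten into the short user-facing hint defined above.
const exampleErrorText = formatAssistantErrorText({
  role: "assistant",
  content: [],
  stopReason: "error",
  errorMessage: "400: prompt is too long for this model",
} as unknown as AssistantMessage);
// exampleErrorText === "Context overflow: prompt too large for the model. Try again with less input or a larger-context model."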
export function isRateLimitAssistantError(
msg: AssistantMessage | undefined,
): boolean {
if (!msg || msg.stopReason !== "error") return false;
return isRateLimitErrorMessage(msg.errorMessage ?? "");
}
type ErrorPattern = RegExp | string;
const ERROR_PATTERNS = {
rateLimit: [
/rate[_ ]limit|too many requests|429/,
"exceeded your current quota",
"resource has been exhausted",
"quota exceeded",
"resource_exhausted",
"usage limit",
],
overloaded: [
/overloaded_error|"type"\s*:\s*"overloaded_error"/i,
"overloaded",
],
timeout: [
"timeout",
"timed out",
"deadline exceeded",
"context deadline exceeded",
],
billing: [
/\b402\b/,
"payment required",
"insufficient credits",
"credit balance",
"plans & billing",
],
auth: [
/invalid[_ ]?api[_ ]?key/,
"incorrect api key",
"invalid token",
"authentication",
"unauthorized",
"forbidden",
"access denied",
"expired",
"token has expired",
/\b401\b/,
/\b403\b/,
"no credentials found",
"no api key found",
],
format: [
"invalid_request_error",
"string should match pattern",
"tool_use.id",
"tool_use_id",
"messages.1.content.1.tool_use.id",
"invalid request format",
],
} as const;
function matchesErrorPatterns(
raw: string,
patterns: readonly ErrorPattern[],
): boolean {
if (!raw) return false;
const value = raw.toLowerCase();
return patterns.some((pattern) =>
pattern instanceof RegExp ? pattern.test(value) : value.includes(pattern),
);
}
export function isRateLimitErrorMessage(raw: string): boolean {
return matchesErrorPatterns(raw, ERROR_PATTERNS.rateLimit);
}
export function isTimeoutErrorMessage(raw: string): boolean {
return matchesErrorPatterns(raw, ERROR_PATTERNS.timeout);
}
export function isBillingErrorMessage(raw: string): boolean {
const value = raw.toLowerCase();
if (!value) return false;
if (matchesErrorPatterns(value, ERROR_PATTERNS.billing)) return true;
return (
value.includes("billing") &&
(value.includes("upgrade") ||
value.includes("credits") ||
value.includes("payment") ||
value.includes("plan"))
);
}
export function isBillingAssistantError(
msg: AssistantMessage | undefined,
): boolean {
if (!msg || msg.stopReason !== "error") return false;
return isBillingErrorMessage(msg.errorMessage ?? "");
}
export function isAuthErrorMessage(raw: string): boolean {
return matchesErrorPatterns(raw, ERROR_PATTERNS.auth);
}
export function isOverloadedErrorMessage(raw: string): boolean {
return matchesErrorPatterns(raw, ERROR_PATTERNS.overloaded);
}
export function isCloudCodeAssistFormatError(raw: string): boolean {
return matchesErrorPatterns(raw, ERROR_PATTERNS.format);
}
export function isAuthAssistantError(
msg: AssistantMessage | undefined,
): boolean {
if (!msg || msg.stopReason !== "error") return false;
return isAuthErrorMessage(msg.errorMessage ?? "");
}
export function classifyFailoverReason(raw: string): FailoverReason | null {
if (isRateLimitErrorMessage(raw)) return "rate_limit";
if (isOverloadedErrorMessage(raw)) return "rate_limit";
if (isCloudCodeAssistFormatError(raw)) return "format";
if (isBillingErrorMessage(raw)) return "billing";
if (isTimeoutErrorMessage(raw)) return "timeout";
if (isAuthErrorMessage(raw)) return "auth";
return null;
}
export function isFailoverErrorMessage(raw: string): boolean {
return classifyFailoverReason(raw) !== null;
}
export function isFailoverAssistantError(
msg: AssistantMessage | undefined,
): boolean {
if (!msg || msg.stopReason !== "error") return false;
return isFailoverErrorMessage(msg.errorMessage ?? "");
}
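// Usage sketch (illustrative only, not part of the original module; the message is made up):
// classification runs in priority order, so a message that mentions both a 429 and an expired
// token is reported as a rate limit rather than an auth failure.
const exampleReason = classifyFailoverReason("429 Too Many Requests: token has expired");
// exampleReason === "rate_limit"; an unrecognized message would yield null instead.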

View File

@@ -0,0 +1,151 @@
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import { sanitizeGoogleTurnOrdering } from "./bootstrap.js";
export function isGoogleModelApi(api?: string | null): boolean {
return (
api === "google-gemini-cli" ||
api === "google-generative-ai" ||
api === "google-antigravity"
);
}
export { sanitizeGoogleTurnOrdering };
type GeminiToolCallBlock = {
  type?: unknown;
  thought_signature?: unknown;
  id?: unknown;
  toolCallId?: unknown;
  name?: unknown;
  toolName?: unknown;
  arguments?: unknown;
  input?: unknown;
};
/**
 * Downgrades tool calls that are missing `thought_signature` (required by Gemini)
 * into text representations, to prevent 400 INVALID_ARGUMENT errors.
 * Also converts the corresponding tool results into user messages.
 */
export function downgradeGeminiHistory(
messages: AgentMessage[],
): AgentMessage[] {
const downgradedIds = new Set<string>();
const out: AgentMessage[] = [];
const resolveToolResultId = (
msg: Extract<AgentMessage, { role: "toolResult" }>,
): string | undefined => {
const toolCallId = (msg as { toolCallId?: unknown }).toolCallId;
if (typeof toolCallId === "string" && toolCallId) return toolCallId;
const toolUseId = (msg as { toolUseId?: unknown }).toolUseId;
if (typeof toolUseId === "string" && toolUseId) return toolUseId;
return undefined;
};
for (const msg of messages) {
if (!msg || typeof msg !== "object") {
out.push(msg);
continue;
}
const role = (msg as { role?: unknown }).role;
if (role === "assistant") {
const assistantMsg = msg as Extract<AgentMessage, { role: "assistant" }>;
if (!Array.isArray(assistantMsg.content)) {
out.push(msg);
continue;
}
let hasDowngraded = false;
const newContent = assistantMsg.content.map((block) => {
if (!block || typeof block !== "object") return block;
const blockRecord = block as GeminiToolCallBlock;
const type = blockRecord.type;
if (
type === "toolCall" ||
type === "functionCall" ||
type === "toolUse"
) {
const hasSignature = Boolean(blockRecord.thought_signature);
if (!hasSignature) {
const id =
typeof blockRecord.id === "string"
? blockRecord.id
: typeof blockRecord.toolCallId === "string"
? blockRecord.toolCallId
: undefined;
const name =
typeof blockRecord.name === "string"
? blockRecord.name
: typeof blockRecord.toolName === "string"
? blockRecord.toolName
: undefined;
const args =
blockRecord.arguments !== undefined
? blockRecord.arguments
: blockRecord.input;
if (id) downgradedIds.add(id);
hasDowngraded = true;
const argsText =
typeof args === "string" ? args : JSON.stringify(args, null, 2);
return {
type: "text",
text: `[Tool Call: ${name ?? "unknown"}${
id ? ` (ID: ${id})` : ""
}]\nArguments: ${argsText}`,
};
}
}
return block;
});
out.push(
hasDowngraded
? ({ ...assistantMsg, content: newContent } as AgentMessage)
: msg,
);
continue;
}
if (role === "toolResult") {
const toolMsg = msg as Extract<AgentMessage, { role: "toolResult" }>;
const toolResultId = resolveToolResultId(toolMsg);
if (toolResultId && downgradedIds.has(toolResultId)) {
let textContent = "";
if (Array.isArray(toolMsg.content)) {
textContent = toolMsg.content
.map((entry) => {
if (entry && typeof entry === "object") {
const text = (entry as { text?: unknown }).text;
if (typeof text === "string") return text;
}
return JSON.stringify(entry);
})
.join("\n");
} else {
textContent = JSON.stringify(toolMsg.content);
}
out.push({
role: "user",
content: [
{
type: "text",
text: `[Tool Result for ID ${toolResultId}]\n${textContent}`,
},
],
} as AgentMessage);
continue;
}
}
out.push(msg);
}
return out;
}
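// Usage sketch (illustrative only, not part of the original module; ids and payloads are made up):
// a tool call without a thought_signature becomes plain text, and its tool result is replayed
// as a user message so Gemini never sees an orphaned call/result pair.
const exampleDowngraded = downgradeGeminiHistory([
  {
    role: "assistant",
    content: [{ type: "toolCall", id: "call_1", name: "read", arguments: { path: "README.md" } }],
  },
  { role: "toolResult", toolCallId: "call_1", content: [{ type: "text", text: "# Hello" }] },
] as unknown as AgentMessage[]);
// exampleDowngraded[0] carries a text block describing the call; exampleDowngraded[1] is a user message.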

View File

@@ -0,0 +1,124 @@
import type {
AgentMessage,
AgentToolResult,
} from "@mariozechner/pi-agent-core";
import { sanitizeToolCallIdsForCloudCodeAssist } from "../tool-call-id.js";
import { sanitizeContentBlocksImages } from "../tool-images.js";
import { stripThoughtSignatures } from "./bootstrap.js";
type ContentBlock = AgentToolResult<unknown>["content"][number];
export function isEmptyAssistantMessageContent(
message: Extract<AgentMessage, { role: "assistant" }>,
): boolean {
const content = message.content;
if (content == null) return true;
if (!Array.isArray(content)) return false;
return content.every((block) => {
if (!block || typeof block !== "object") return true;
const rec = block as { type?: unknown; text?: unknown };
if (rec.type !== "text") return false;
return typeof rec.text !== "string" || rec.text.trim().length === 0;
});
}
function isEmptyAssistantErrorMessage(
message: Extract<AgentMessage, { role: "assistant" }>,
): boolean {
if (message.stopReason !== "error") return false;
return isEmptyAssistantMessageContent(message);
}
export async function sanitizeSessionMessagesImages(
messages: AgentMessage[],
label: string,
options?: { sanitizeToolCallIds?: boolean; enforceToolCallLast?: boolean },
): Promise<AgentMessage[]> {
// We sanitize historical session messages because Anthropic can reject a request
// if the transcript contains oversized base64 images (see MAX_IMAGE_DIMENSION_PX).
const sanitizedIds = options?.sanitizeToolCallIds
? sanitizeToolCallIdsForCloudCodeAssist(messages)
: messages;
const out: AgentMessage[] = [];
for (const msg of sanitizedIds) {
if (!msg || typeof msg !== "object") {
out.push(msg);
continue;
}
const role = (msg as { role?: unknown }).role;
if (role === "toolResult") {
const toolMsg = msg as Extract<AgentMessage, { role: "toolResult" }>;
const content = Array.isArray(toolMsg.content) ? toolMsg.content : [];
const nextContent = (await sanitizeContentBlocksImages(
content as ContentBlock[],
label,
)) as unknown as typeof toolMsg.content;
out.push({ ...toolMsg, content: nextContent });
continue;
}
if (role === "user") {
const userMsg = msg as Extract<AgentMessage, { role: "user" }>;
const content = userMsg.content;
if (Array.isArray(content)) {
const nextContent = (await sanitizeContentBlocksImages(
content as unknown as ContentBlock[],
label,
)) as unknown as typeof userMsg.content;
out.push({ ...userMsg, content: nextContent });
continue;
}
}
if (role === "assistant") {
const assistantMsg = msg as Extract<AgentMessage, { role: "assistant" }>;
if (isEmptyAssistantErrorMessage(assistantMsg)) {
continue;
}
const content = assistantMsg.content;
if (Array.isArray(content)) {
const strippedContent = stripThoughtSignatures(content);
const filteredContent = strippedContent.filter((block) => {
if (!block || typeof block !== "object") return true;
const rec = block as { type?: unknown; text?: unknown };
if (rec.type !== "text" || typeof rec.text !== "string") return true;
return rec.text.trim().length > 0;
});
const normalizedContent = options?.enforceToolCallLast
? (() => {
let lastToolIndex = -1;
for (let i = filteredContent.length - 1; i >= 0; i -= 1) {
const block = filteredContent[i];
if (!block || typeof block !== "object") continue;
const type = (block as { type?: unknown }).type;
if (
type === "functionCall" ||
type === "toolUse" ||
type === "toolCall"
) {
lastToolIndex = i;
break;
}
}
if (lastToolIndex === -1) return filteredContent;
return filteredContent.slice(0, lastToolIndex + 1);
})()
: filteredContent;
const finalContent = (await sanitizeContentBlocksImages(
normalizedContent as unknown as ContentBlock[],
label,
)) as unknown as typeof assistantMsg.content;
if (finalContent.length === 0) {
continue;
}
out.push({ ...assistantMsg, content: finalContent });
continue;
}
}
out.push(msg);
}
return out;
}
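// Usage sketch (illustrative only; this wrapper and the label string are hypothetical and not
// called anywhere): empty assistant error turns are dropped, blank assistant text blocks are
// filtered, and tool/user/assistant content is run through sanitizeContentBlocksImages before
// the history is reused.
async function exampleSanitizeHistory(history: AgentMessage[]): Promise<AgentMessage[]> {
  return sanitizeSessionMessagesImages(history, "session-restore", {
    sanitizeToolCallIds: true,
    enforceToolCallLast: true,
  });
}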

View File

@@ -0,0 +1,47 @@
const MIN_DUPLICATE_TEXT_LENGTH = 10;
/**
* Normalize text for duplicate comparison.
* - Trims whitespace
* - Lowercases
* - Strips emoji (Emoji_Presentation and Extended_Pictographic)
* - Collapses multiple spaces to single space
*/
export function normalizeTextForComparison(text: string): string {
return text
.trim()
.toLowerCase()
.replace(/\p{Emoji_Presentation}|\p{Extended_Pictographic}/gu, "")
.replace(/\s+/g, " ")
.trim();
}
export function isMessagingToolDuplicateNormalized(
normalized: string,
normalizedSentTexts: string[],
): boolean {
if (normalizedSentTexts.length === 0) return false;
if (!normalized || normalized.length < MIN_DUPLICATE_TEXT_LENGTH)
return false;
return normalizedSentTexts.some((normalizedSent) => {
if (!normalizedSent || normalizedSent.length < MIN_DUPLICATE_TEXT_LENGTH)
return false;
return (
normalized.includes(normalizedSent) || normalizedSent.includes(normalized)
);
});
}
export function isMessagingToolDuplicate(
text: string,
sentTexts: string[],
): boolean {
if (sentTexts.length === 0) return false;
const normalized = normalizeTextForComparison(text);
if (!normalized || normalized.length < MIN_DUPLICATE_TEXT_LENGTH)
return false;
return isMessagingToolDuplicateNormalized(
normalized,
sentTexts.map(normalizeTextForComparison),
);
}
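// Usage sketch (illustrative only, not part of the original module): emoji, casing and extra
// whitespace are ignored, and containment in either direction counts as a duplicate once both
// strings clear the 10-character minimum.
const exampleIsDuplicate = isMessagingToolDuplicate(
  "✅ Deployment finished successfully",
  ["deployment finished successfully"],
);
// exampleIsDuplicate === true; short strings such as "ok" never count as duplicates.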

View File

@@ -0,0 +1,39 @@
import {
normalizeThinkLevel,
type ThinkLevel,
} from "../../auto-reply/thinking.js";
function extractSupportedValues(raw: string): string[] {
const match =
raw.match(/supported values are:\s*([^\n.]+)/i) ??
raw.match(/supported values:\s*([^\n.]+)/i);
if (!match?.[1]) return [];
const fragment = match[1];
const quoted = Array.from(fragment.matchAll(/['"]([^'"]+)['"]/g)).map(
(entry) => entry[1]?.trim(),
);
if (quoted.length > 0) {
return quoted.filter((entry): entry is string => Boolean(entry));
}
return fragment
.split(/,|\band\b/gi)
.map((entry) => entry.replace(/^[^a-zA-Z]+|[^a-zA-Z]+$/g, "").trim())
.filter(Boolean);
}
export function pickFallbackThinkingLevel(params: {
message?: string;
attempted: Set<ThinkLevel>;
}): ThinkLevel | undefined {
const raw = params.message?.trim();
if (!raw) return undefined;
const supported = extractSupportedValues(raw);
if (supported.length === 0) return undefined;
for (const entry of supported) {
const normalized = normalizeThinkLevel(entry);
if (!normalized) continue;
if (params.attempted.has(normalized)) continue;
return normalized;
}
return undefined;
}
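// Usage sketch (illustrative only, not part of the original module; the error text is made up):
// the provider's "supported values" hint is parsed and the first level that normalizes and has
// not yet been attempted is returned.
const exampleFallbackLevel = pickFallbackThinkingLevel({
  message: 'unsupported thinking level; supported values are: "low", "medium", "high"',
  attempted: new Set<ThinkLevel>(),
});
// If normalizeThinkLevel recognizes "low", exampleFallbackLevel === "low"; otherwise the next
// recognized entry wins, or undefined if none normalize.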

View File

@@ -0,0 +1,124 @@
import type { AgentMessage } from "@mariozechner/pi-agent-core";
/**
 * Validates and fixes conversation turn sequences for the Gemini API.
 * Gemini requires a strict alternating user→assistant→tool→user pattern.
 * Consecutive assistant messages are merged into a single turn.
 */
export function validateGeminiTurns(messages: AgentMessage[]): AgentMessage[] {
if (!Array.isArray(messages) || messages.length === 0) {
return messages;
}
const result: AgentMessage[] = [];
let lastRole: string | undefined;
for (const msg of messages) {
if (!msg || typeof msg !== "object") {
result.push(msg);
continue;
}
const msgRole = (msg as { role?: unknown }).role as string | undefined;
if (!msgRole) {
result.push(msg);
continue;
}
if (msgRole === lastRole && lastRole === "assistant") {
const lastMsg = result[result.length - 1];
const currentMsg = msg as Extract<AgentMessage, { role: "assistant" }>;
if (lastMsg && typeof lastMsg === "object") {
const lastAsst = lastMsg as Extract<
AgentMessage,
{ role: "assistant" }
>;
const mergedContent = [
...(Array.isArray(lastAsst.content) ? lastAsst.content : []),
...(Array.isArray(currentMsg.content) ? currentMsg.content : []),
];
const merged: Extract<AgentMessage, { role: "assistant" }> = {
...lastAsst,
content: mergedContent,
...(currentMsg.usage && { usage: currentMsg.usage }),
...(currentMsg.stopReason && { stopReason: currentMsg.stopReason }),
...(currentMsg.errorMessage && {
errorMessage: currentMsg.errorMessage,
}),
};
result[result.length - 1] = merged;
continue;
}
}
result.push(msg);
lastRole = msgRole;
}
return result;
}
export function mergeConsecutiveUserTurns(
previous: Extract<AgentMessage, { role: "user" }>,
current: Extract<AgentMessage, { role: "user" }>,
): Extract<AgentMessage, { role: "user" }> {
const mergedContent = [
...(Array.isArray(previous.content) ? previous.content : []),
...(Array.isArray(current.content) ? current.content : []),
];
return {
...current,
content: mergedContent,
timestamp: current.timestamp ?? previous.timestamp,
};
}
/**
 * Validates and fixes conversation turn sequences for the Anthropic API.
 * Anthropic requires a strict alternating user→assistant pattern.
 * Consecutive user messages are merged into a single turn.
 */
export function validateAnthropicTurns(
messages: AgentMessage[],
): AgentMessage[] {
if (!Array.isArray(messages) || messages.length === 0) {
return messages;
}
const result: AgentMessage[] = [];
let lastRole: string | undefined;
for (const msg of messages) {
if (!msg || typeof msg !== "object") {
result.push(msg);
continue;
}
const msgRole = (msg as { role?: unknown }).role as string | undefined;
if (!msgRole) {
result.push(msg);
continue;
}
if (msgRole === lastRole && lastRole === "user") {
const lastMsg = result[result.length - 1];
const currentMsg = msg as Extract<AgentMessage, { role: "user" }>;
if (lastMsg && typeof lastMsg === "object") {
const lastUser = lastMsg as Extract<AgentMessage, { role: "user" }>;
const merged = mergeConsecutiveUserTurns(lastUser, currentMsg);
result[result.length - 1] = merged;
continue;
}
}
result.push(msg);
lastRole = msgRole;
}
return result;
}
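// Usage sketch (illustrative only, not part of the original module): two back-to-back user turns
// collapse into a single turn whose content blocks are concatenated, satisfying Anthropic's
// alternation requirement; validateGeminiTurns does the same for consecutive assistant turns.
const exampleAlternating = validateAnthropicTurns([
  { role: "user", content: [{ type: "text", text: "first request" }] },
  { role: "user", content: [{ type: "text", text: "follow-up detail" }] },
] as unknown as AgentMessage[]);
// exampleAlternating.length === 1, with both text blocks preserved in order.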

View File

@@ -0,0 +1,9 @@
export type EmbeddedContextFile = { path: string; content: string };
export type FailoverReason =
| "auth"
| "format"
| "rate_limit"
| "billing"
| "timeout"
| "unknown";