fix: auto-compact on context overflow promptError before returning error (#1627)

* fix: detect Anthropic 'Request size exceeds model context window' as context overflow

Anthropic now returns 'Request size exceeds model context window' instead of
the previously detected 'prompt is too long' format. This new error message
was not recognized by isContextOverflowError(), causing auto-compaction to
NOT trigger. Users would see the raw error twice without any recovery attempt.

Changes:
- Add 'exceeds model context window' and 'request size exceeds' to
  isContextOverflowError() detection patterns
- Add tests that fail without the fix, verifying both the raw error
  string and the JSON-wrapped format from Anthropic's API
- Add test for formatAssistantErrorText to ensure the friendly
  'Context overflow' message is shown instead of the raw error

Note: The upstream pi-ai package (@mariozechner/pi-ai) also needs a fix
in its OVERFLOW_PATTERNS regex: /exceeds the context window/i should be
changed to /exceeds.*context window/i to match both 'the' and 'model'
variants for triggering auto-compaction retry.

* fix(tests): remove unused imports and helper from test files

Remove WorkspaceBootstrapFile references and _makeFile helper that were
incorrectly copied from another test file. These caused type errors and
were unrelated to the context overflow detection tests.

* fix: trigger auto-compaction on context overflow promptError

When the LLM rejects a request with a context overflow error that surfaces
as a promptError (thrown exception rather than streamed error), the existing
auto-compaction in pi-coding-agent never triggers. This happens because the
error bypasses the agent's message_end → agent_end → _checkCompaction path.

This fix adds a fallback compaction attempt directly in the run loop:
- Detects context overflow in promptError (excluding compaction_failure)
- Calls compactEmbeddedPiSessionDirect (bypassing lane queues since already in-lane)
- Retries the prompt after successful compaction
- Limits to one compaction attempt per run to prevent infinite loops

Fixes: context overflow errors were shown to the user without any auto-compaction attempt

* style: format compact.ts and run.ts with oxfmt

* fix: tighten context overflow match (#1627) (thanks @rodrigouroz)

---------

Co-authored-by: Claude <claude@anthropic.com>
Co-authored-by: Peter Steinberger <steipete@gmail.com>
This commit is contained in:
Rodrigo Uroz
2026-01-24 19:09:24 -03:00
committed by GitHub
parent ac00065727
commit 9ceac415c5
7 changed files with 719 additions and 364 deletions

View File

@@ -16,6 +16,7 @@ Docs: https://docs.clawd.bot
- Heartbeat: normalize target identifiers for consistent routing. - Heartbeat: normalize target identifiers for consistent routing.
- Telegram: use wrapped fetch for long-polling on Node to normalize AbortSignal handling. (#1639) - Telegram: use wrapped fetch for long-polling on Node to normalize AbortSignal handling. (#1639)
- Exec: keep approvals for elevated ask unless full mode. (#1616) Thanks @ivancasco. - Exec: keep approvals for elevated ask unless full mode. (#1616) Thanks @ivancasco.
- Agents: auto-compact on context overflow prompt errors before failing. (#1627) Thanks @rodrigouroz.
- Gateway: reduce log noise for late invokes + remote node probes; debounce skills refresh. (#1607) Thanks @petter-b. - Gateway: reduce log noise for late invokes + remote node probes; debounce skills refresh. (#1607) Thanks @petter-b.
- macOS: default direct-transport `ws://` URLs to port 18789; document `gateway.remote.transport`. (#1603) Thanks @ngutman. - macOS: default direct-transport `ws://` URLs to port 18789; document `gateway.remote.transport`. (#1603) Thanks @ngutman.

View File

@@ -1,15 +1,7 @@
import type { AssistantMessage } from "@mariozechner/pi-ai"; import type { AssistantMessage } from "@mariozechner/pi-ai";
import { describe, expect, it } from "vitest"; import { describe, expect, it } from "vitest";
import { formatAssistantErrorText } from "./pi-embedded-helpers.js"; import { formatAssistantErrorText } from "./pi-embedded-helpers.js";
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
const _makeFile = (overrides: Partial<WorkspaceBootstrapFile>): WorkspaceBootstrapFile => ({
name: DEFAULT_AGENTS_FILENAME,
path: "/tmp/AGENTS.md",
content: "",
missing: false,
...overrides,
});
describe("formatAssistantErrorText", () => { describe("formatAssistantErrorText", () => {
const makeAssistantError = (errorMessage: string): AssistantMessage => const makeAssistantError = (errorMessage: string): AssistantMessage =>
({ ({
@@ -21,6 +13,16 @@ describe("formatAssistantErrorText", () => {
const msg = makeAssistantError("request_too_large"); const msg = makeAssistantError("request_too_large");
expect(formatAssistantErrorText(msg)).toContain("Context overflow"); expect(formatAssistantErrorText(msg)).toContain("Context overflow");
}); });
it("returns context overflow for Anthropic 'Request size exceeds model context window'", () => {
// This is the new Anthropic error format that wasn't being detected.
// Without the fix, this falls through to the invalidRequest regex and returns
// "LLM request rejected: Request size exceeds model context window"
// instead of the context overflow message, preventing auto-compaction.
const msg = makeAssistantError(
'{"type":"error","error":{"type":"invalid_request_error","message":"Request size exceeds model context window"}}',
);
expect(formatAssistantErrorText(msg)).toContain("Context overflow");
});
it("returns a friendly message for Anthropic role ordering", () => { it("returns a friendly message for Anthropic role ordering", () => {
const msg = makeAssistantError('messages: roles must alternate between "user" and "assistant"'); const msg = makeAssistantError('messages: roles must alternate between "user" and "assistant"');
expect(formatAssistantErrorText(msg)).toContain("Message ordering conflict"); expect(formatAssistantErrorText(msg)).toContain("Message ordering conflict");

View File

@@ -1,14 +1,6 @@
import { describe, expect, it } from "vitest"; import { describe, expect, it } from "vitest";
import { isContextOverflowError } from "./pi-embedded-helpers.js"; import { isContextOverflowError } from "./pi-embedded-helpers.js";
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
const _makeFile = (overrides: Partial<WorkspaceBootstrapFile>): WorkspaceBootstrapFile => ({
name: DEFAULT_AGENTS_FILENAME,
path: "/tmp/AGENTS.md",
content: "",
missing: false,
...overrides,
});
describe("isContextOverflowError", () => { describe("isContextOverflowError", () => {
it("matches known overflow hints", () => { it("matches known overflow hints", () => {
const samples = [ const samples = [
@@ -24,7 +16,34 @@ describe("isContextOverflowError", () => {
expect(isContextOverflowError(sample)).toBe(true); expect(isContextOverflowError(sample)).toBe(true);
} }
}); });
it("matches Anthropic 'Request size exceeds model context window' error", () => {
// Anthropic returns this error format when the prompt exceeds the context window.
// Without this fix, auto-compaction is NOT triggered because neither
// isContextOverflowError nor pi-ai's isContextOverflow recognizes this pattern.
// The user sees: "LLM request rejected: Request size exceeds model context window"
// instead of automatic compaction + retry.
const anthropicRawError =
'{"type":"error","error":{"type":"invalid_request_error","message":"Request size exceeds model context window"}}';
expect(isContextOverflowError(anthropicRawError)).toBe(true);
});
it("matches 'exceeds model context window' in various formats", () => {
const samples = [
"Request size exceeds model context window",
"request size exceeds model context window",
'400 {"type":"error","error":{"type":"invalid_request_error","message":"Request size exceeds model context window"}}',
"The request size exceeds model context window limit",
];
for (const sample of samples) {
expect(isContextOverflowError(sample)).toBe(true);
}
});
it("ignores unrelated errors", () => { it("ignores unrelated errors", () => {
expect(isContextOverflowError("rate limit exceeded")).toBe(false); expect(isContextOverflowError("rate limit exceeded")).toBe(false);
expect(isContextOverflowError("request size exceeds upload limit")).toBe(false);
expect(isContextOverflowError("model not found")).toBe(false);
expect(isContextOverflowError("authentication failed")).toBe(false);
}); });
}); });

View File

@@ -7,12 +7,19 @@ import type { FailoverReason } from "./types.js";
export function isContextOverflowError(errorMessage?: string): boolean { export function isContextOverflowError(errorMessage?: string): boolean {
if (!errorMessage) return false; if (!errorMessage) return false;
const lower = errorMessage.toLowerCase(); const lower = errorMessage.toLowerCase();
const hasRequestSizeExceeds = lower.includes("request size exceeds");
const hasContextWindow =
lower.includes("context window") ||
lower.includes("context length") ||
lower.includes("maximum context length");
return ( return (
lower.includes("request_too_large") || lower.includes("request_too_large") ||
lower.includes("request exceeds the maximum size") || lower.includes("request exceeds the maximum size") ||
lower.includes("context length exceeded") || lower.includes("context length exceeded") ||
lower.includes("maximum context length") || lower.includes("maximum context length") ||
lower.includes("prompt is too long") || lower.includes("prompt is too long") ||
lower.includes("exceeds model context window") ||
(hasRequestSizeExceeds && hasContextWindow) ||
lower.includes("context overflow") || lower.includes("context overflow") ||
(lower.includes("413") && lower.includes("too large")) (lower.includes("413") && lower.includes("too large"))
); );

View File

@@ -68,7 +68,7 @@ import { formatUserTime, resolveUserTimeFormat, resolveUserTimezone } from "../d
import { describeUnknownError, mapThinkingLevel, resolveExecToolDefaults } from "./utils.js"; import { describeUnknownError, mapThinkingLevel, resolveExecToolDefaults } from "./utils.js";
import { buildTtsSystemPromptHint } from "../../tts/tts.js"; import { buildTtsSystemPromptHint } from "../../tts/tts.js";
export async function compactEmbeddedPiSession(params: { export type CompactEmbeddedPiSessionParams = {
sessionId: string; sessionId: string;
sessionKey?: string; sessionKey?: string;
messageChannel?: string; messageChannel?: string;
@@ -97,13 +97,15 @@ export async function compactEmbeddedPiSession(params: {
enqueue?: typeof enqueueCommand; enqueue?: typeof enqueueCommand;
extraSystemPrompt?: string; extraSystemPrompt?: string;
ownerNumbers?: string[]; ownerNumbers?: string[];
}): Promise<EmbeddedPiCompactResult> { };
const sessionLane = resolveSessionLane(params.sessionKey?.trim() || params.sessionId);
const globalLane = resolveGlobalLane(params.lane); /**
const enqueueGlobal = * Core compaction logic without lane queueing.
params.enqueue ?? ((task, opts) => enqueueCommandInLane(globalLane, task, opts)); * Use this when already inside a session/global lane to avoid deadlocks.
return enqueueCommandInLane(sessionLane, () => */
enqueueGlobal(async () => { export async function compactEmbeddedPiSessionDirect(
params: CompactEmbeddedPiSessionParams,
): Promise<EmbeddedPiCompactResult> {
const resolvedWorkspace = resolveUserPath(params.workspaceDir); const resolvedWorkspace = resolveUserPath(params.workspaceDir);
const prevCwd = process.cwd(); const prevCwd = process.cwd();
@@ -138,8 +140,7 @@ export async function compactEmbeddedPiSession(params: {
); );
} }
} else if (model.provider === "github-copilot") { } else if (model.provider === "github-copilot") {
const { resolveCopilotApiToken } = const { resolveCopilotApiToken } = await import("../../providers/github-copilot-token.js");
await import("../../providers/github-copilot-token.js");
const copilotToken = await resolveCopilotApiToken({ const copilotToken = await resolveCopilotApiToken({
githubToken: apiKeyInfo.apiKey, githubToken: apiKeyInfo.apiKey,
}); });
@@ -177,8 +178,7 @@ export async function compactEmbeddedPiSession(params: {
let restoreSkillEnv: (() => void) | undefined; let restoreSkillEnv: (() => void) | undefined;
process.chdir(effectiveWorkspace); process.chdir(effectiveWorkspace);
try { try {
const shouldLoadSkillEntries = const shouldLoadSkillEntries = !params.skillsSnapshot || !params.skillsSnapshot.resolvedSkills;
!params.skillsSnapshot || !params.skillsSnapshot.resolvedSkills;
const skillEntries = shouldLoadSkillEntries const skillEntries = shouldLoadSkillEntries
? loadWorkspaceSkillEntries(effectiveWorkspace) ? loadWorkspaceSkillEntries(effectiveWorkspace)
: []; : [];
@@ -231,9 +231,7 @@ export async function compactEmbeddedPiSession(params: {
const tools = sanitizeToolsForGoogle({ tools: toolsRaw, provider }); const tools = sanitizeToolsForGoogle({ tools: toolsRaw, provider });
logToolSchemasForGoogle({ tools, provider }); logToolSchemasForGoogle({ tools, provider });
const machineName = await getMachineDisplayName(); const machineName = await getMachineDisplayName();
const runtimeChannel = normalizeMessageChannel( const runtimeChannel = normalizeMessageChannel(params.messageChannel ?? params.messageProvider);
params.messageChannel ?? params.messageProvider,
);
let runtimeCapabilities = runtimeChannel let runtimeCapabilities = runtimeChannel
? (resolveChannelCapabilities({ ? (resolveChannelCapabilities({
cfg: params.config, cfg: params.config,
@@ -249,9 +247,7 @@ export async function compactEmbeddedPiSession(params: {
if (inlineButtonsScope !== "off") { if (inlineButtonsScope !== "off") {
if (!runtimeCapabilities) runtimeCapabilities = []; if (!runtimeCapabilities) runtimeCapabilities = [];
if ( if (
!runtimeCapabilities.some( !runtimeCapabilities.some((cap) => String(cap).trim().toLowerCase() === "inlinebuttons")
(cap) => String(cap).trim().toLowerCase() === "inlinebuttons",
)
) { ) {
runtimeCapabilities.push("inlineButtons"); runtimeCapabilities.push("inlineButtons");
} }
@@ -445,6 +441,21 @@ export async function compactEmbeddedPiSession(params: {
restoreSkillEnv?.(); restoreSkillEnv?.();
process.chdir(prevCwd); process.chdir(prevCwd);
} }
}), }
/**
* Compacts a session with lane queueing (session lane + global lane).
* Use this from outside a lane context. If already inside a lane, use
* `compactEmbeddedPiSessionDirect` to avoid deadlocks.
*/
export async function compactEmbeddedPiSession(
params: CompactEmbeddedPiSessionParams,
): Promise<EmbeddedPiCompactResult> {
const sessionLane = resolveSessionLane(params.sessionKey?.trim() || params.sessionId);
const globalLane = resolveGlobalLane(params.lane);
const enqueueGlobal =
params.enqueue ?? ((task, opts) => enqueueCommandInLane(globalLane, task, opts));
return enqueueCommandInLane(sessionLane, () =>
enqueueGlobal(async () => compactEmbeddedPiSessionDirect(params)),
); );
} }

View File

@@ -0,0 +1,281 @@
import { describe, expect, it, vi, beforeEach } from "vitest";
vi.mock("./run/attempt.js", () => ({
runEmbeddedAttempt: vi.fn(),
}));
vi.mock("./compact.js", () => ({
compactEmbeddedPiSessionDirect: vi.fn(),
}));
vi.mock("./model.js", () => ({
resolveModel: vi.fn(() => ({
model: {
id: "test-model",
provider: "anthropic",
contextWindow: 200000,
api: "messages",
},
error: null,
authStorage: {
setRuntimeApiKey: vi.fn(),
},
modelRegistry: {},
})),
}));
vi.mock("../model-auth.js", () => ({
ensureAuthProfileStore: vi.fn(() => ({})),
getApiKeyForModel: vi.fn(async () => ({
apiKey: "test-key",
source: "test",
})),
resolveAuthProfileOrder: vi.fn(() => []),
}));
vi.mock("../models-config.js", () => ({
ensureClawdbotModelsJson: vi.fn(async () => {}),
}));
vi.mock("../context-window-guard.js", () => ({
CONTEXT_WINDOW_HARD_MIN_TOKENS: 1000,
CONTEXT_WINDOW_WARN_BELOW_TOKENS: 5000,
evaluateContextWindowGuard: vi.fn(() => ({
shouldWarn: false,
shouldBlock: false,
tokens: 200000,
source: "model",
})),
resolveContextWindowInfo: vi.fn(() => ({
tokens: 200000,
source: "model",
})),
}));
vi.mock("../../process/command-queue.js", () => ({
enqueueCommandInLane: vi.fn((_lane: string, task: () => unknown) => task()),
}));
vi.mock("../../utils.js", () => ({
resolveUserPath: vi.fn((p: string) => p),
}));
vi.mock("../../utils/message-channel.js", () => ({
isMarkdownCapableMessageChannel: vi.fn(() => true),
}));
vi.mock("../agent-paths.js", () => ({
resolveClawdbotAgentDir: vi.fn(() => "/tmp/agent-dir"),
}));
vi.mock("../auth-profiles.js", () => ({
markAuthProfileFailure: vi.fn(async () => {}),
markAuthProfileGood: vi.fn(async () => {}),
markAuthProfileUsed: vi.fn(async () => {}),
}));
vi.mock("../defaults.js", () => ({
DEFAULT_CONTEXT_TOKENS: 200000,
DEFAULT_MODEL: "test-model",
DEFAULT_PROVIDER: "anthropic",
}));
vi.mock("../failover-error.js", () => ({
FailoverError: class extends Error {
constructor(msg: string) {
super(msg);
}
},
resolveFailoverStatus: vi.fn(),
}));
vi.mock("../usage.js", () => ({
normalizeUsage: vi.fn(() => undefined),
}));
vi.mock("./lanes.js", () => ({
resolveSessionLane: vi.fn(() => "session-lane"),
resolveGlobalLane: vi.fn(() => "global-lane"),
}));
vi.mock("./logger.js", () => ({
log: {
debug: vi.fn(),
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
},
}));
vi.mock("./run/payloads.js", () => ({
buildEmbeddedRunPayloads: vi.fn(() => []),
}));
vi.mock("./utils.js", () => ({
describeUnknownError: vi.fn((err: unknown) => {
if (err instanceof Error) return err.message;
return String(err);
}),
}));
vi.mock("../pi-embedded-helpers.js", async () => {
return {
isCompactionFailureError: (msg?: string) => {
if (!msg) return false;
const lower = msg.toLowerCase();
return lower.includes("request_too_large") && lower.includes("summarization failed");
},
isContextOverflowError: (msg?: string) => {
if (!msg) return false;
const lower = msg.toLowerCase();
return lower.includes("request_too_large") || lower.includes("request size exceeds");
},
isFailoverAssistantError: vi.fn(() => false),
isFailoverErrorMessage: vi.fn(() => false),
isAuthAssistantError: vi.fn(() => false),
isRateLimitAssistantError: vi.fn(() => false),
classifyFailoverReason: vi.fn(() => null),
formatAssistantErrorText: vi.fn(() => ""),
pickFallbackThinkingLevel: vi.fn(() => null),
isTimeoutErrorMessage: vi.fn(() => false),
parseImageDimensionError: vi.fn(() => null),
};
});
import { runEmbeddedPiAgent } from "./run.js";
import { runEmbeddedAttempt } from "./run/attempt.js";
import { compactEmbeddedPiSessionDirect } from "./compact.js";
import { log } from "./logger.js";
import type { EmbeddedRunAttemptResult } from "./run/types.js";
const mockedRunEmbeddedAttempt = vi.mocked(runEmbeddedAttempt);
const mockedCompactDirect = vi.mocked(compactEmbeddedPiSessionDirect);
function makeAttemptResult(
overrides: Partial<EmbeddedRunAttemptResult> = {},
): EmbeddedRunAttemptResult {
return {
aborted: false,
timedOut: false,
promptError: null,
sessionIdUsed: "test-session",
assistantTexts: ["Hello!"],
toolMetas: [],
lastAssistant: undefined,
messagesSnapshot: [],
didSendViaMessagingTool: false,
messagingToolSentTexts: [],
messagingToolSentTargets: [],
cloudCodeAssistFormatError: false,
...overrides,
};
}
const baseParams = {
sessionId: "test-session",
sessionKey: "test-key",
sessionFile: "/tmp/session.json",
workspaceDir: "/tmp/workspace",
prompt: "hello",
timeoutMs: 30000,
runId: "run-1",
};
describe("overflow compaction in run loop", () => {
beforeEach(() => {
vi.clearAllMocks();
});
it("retries after successful compaction on context overflow promptError", async () => {
const overflowError = new Error("request_too_large: Request size exceeds model context window");
mockedRunEmbeddedAttempt
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError }))
.mockResolvedValueOnce(makeAttemptResult({ promptError: null }));
mockedCompactDirect.mockResolvedValueOnce({
ok: true,
compacted: true,
result: {
summary: "Compacted session",
firstKeptEntryId: "entry-5",
tokensBefore: 150000,
},
});
const result = await runEmbeddedPiAgent(baseParams);
expect(mockedCompactDirect).toHaveBeenCalledTimes(1);
expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(2);
expect(log.warn).toHaveBeenCalledWith(
expect.stringContaining("context overflow detected; attempting auto-compaction"),
);
expect(log.info).toHaveBeenCalledWith(expect.stringContaining("auto-compaction succeeded"));
// Should not be an error result
expect(result.meta.error).toBeUndefined();
});
it("returns error if compaction fails", async () => {
const overflowError = new Error("request_too_large: Request size exceeds model context window");
mockedRunEmbeddedAttempt.mockResolvedValue(makeAttemptResult({ promptError: overflowError }));
mockedCompactDirect.mockResolvedValueOnce({
ok: false,
compacted: false,
reason: "nothing to compact",
});
const result = await runEmbeddedPiAgent(baseParams);
expect(mockedCompactDirect).toHaveBeenCalledTimes(1);
expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(1);
expect(result.meta.error?.kind).toBe("context_overflow");
expect(result.payloads?.[0]?.isError).toBe(true);
expect(log.warn).toHaveBeenCalledWith(expect.stringContaining("auto-compaction failed"));
});
it("returns error if overflow happens again after compaction", async () => {
const overflowError = new Error("request_too_large: Request size exceeds model context window");
mockedRunEmbeddedAttempt
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError }))
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError }));
mockedCompactDirect.mockResolvedValueOnce({
ok: true,
compacted: true,
result: {
summary: "Compacted",
firstKeptEntryId: "entry-3",
tokensBefore: 180000,
},
});
const result = await runEmbeddedPiAgent(baseParams);
// Compaction attempted only once
expect(mockedCompactDirect).toHaveBeenCalledTimes(1);
// Two attempts: first overflow -> compact -> retry -> second overflow -> return error
expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(2);
expect(result.meta.error?.kind).toBe("context_overflow");
expect(result.payloads?.[0]?.isError).toBe(true);
});
it("does not attempt compaction for compaction_failure errors", async () => {
const compactionFailureError = new Error(
"request_too_large: summarization failed - Request size exceeds model context window",
);
mockedRunEmbeddedAttempt.mockResolvedValue(
makeAttemptResult({ promptError: compactionFailureError }),
);
const result = await runEmbeddedPiAgent(baseParams);
expect(mockedCompactDirect).not.toHaveBeenCalled();
expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(1);
expect(result.meta.error?.kind).toBe("compaction_failure");
});
});

View File

@@ -42,6 +42,7 @@ import {
} from "../pi-embedded-helpers.js"; } from "../pi-embedded-helpers.js";
import { normalizeUsage, type UsageLike } from "../usage.js"; import { normalizeUsage, type UsageLike } from "../usage.js";
import { compactEmbeddedPiSessionDirect } from "./compact.js";
import { resolveGlobalLane, resolveSessionLane } from "./lanes.js"; import { resolveGlobalLane, resolveSessionLane } from "./lanes.js";
import { log } from "./logger.js"; import { log } from "./logger.js";
import { resolveModel } from "./model.js"; import { resolveModel } from "./model.js";
@@ -290,6 +291,7 @@ export async function runEmbeddedPiAgent(
} }
} }
let overflowCompactionAttempted = false;
try { try {
while (true) { while (true) {
attemptedThinking.add(thinkLevel); attemptedThinking.add(thinkLevel);
@@ -358,9 +360,41 @@ export async function runEmbeddedPiAgent(
if (promptError && !aborted) { if (promptError && !aborted) {
const errorText = describeUnknownError(promptError); const errorText = describeUnknownError(promptError);
if (isContextOverflowError(errorText)) { if (isContextOverflowError(errorText)) {
const kind = isCompactionFailureError(errorText) const isCompactionFailure = isCompactionFailureError(errorText);
? "compaction_failure" // Attempt auto-compaction on context overflow (not compaction_failure)
: "context_overflow"; if (!isCompactionFailure && !overflowCompactionAttempted) {
log.warn(
`context overflow detected; attempting auto-compaction for ${provider}/${modelId}`,
);
overflowCompactionAttempted = true;
const compactResult = await compactEmbeddedPiSessionDirect({
sessionId: params.sessionId,
sessionKey: params.sessionKey,
messageChannel: params.messageChannel,
messageProvider: params.messageProvider,
agentAccountId: params.agentAccountId,
sessionFile: params.sessionFile,
workspaceDir: params.workspaceDir,
agentDir,
config: params.config,
skillsSnapshot: params.skillsSnapshot,
provider,
model: modelId,
thinkLevel,
reasoningLevel: params.reasoningLevel,
bashElevated: params.bashElevated,
extraSystemPrompt: params.extraSystemPrompt,
ownerNumbers: params.ownerNumbers,
});
if (compactResult.compacted) {
log.info(`auto-compaction succeeded for ${provider}/${modelId}; retrying prompt`);
continue;
}
log.warn(
`auto-compaction failed for ${provider}/${modelId}: ${compactResult.reason ?? "nothing to compact"}`,
);
}
const kind = isCompactionFailure ? "compaction_failure" : "context_overflow";
return { return {
payloads: [ payloads: [
{ {