fix: sanitize user-facing errors and strip final tags

Co-authored-by: Drake Thomsen <drake.thomsen@example.com>
Peter Steinberger committed 2026-01-16 03:00:40 +00:00
parent d9f9e93dee
commit 23e4ba845c
13 changed files with 239 additions and 31 deletions


@@ -256,6 +256,27 @@ export async function runEmbeddedPiAgent(
       },
     };
   }
+  // Handle role ordering errors with a user-friendly message
+  if (/incorrect role information|roles must alternate/i.test(errorText)) {
+    return {
+      payloads: [
+        {
+          text:
+            "Message ordering conflict - please try again. " +
+            "If this persists, use /new to start a fresh session.",
+          isError: true,
+        },
+      ],
+      meta: {
+        durationMs: Date.now() - started,
+        agentMeta: {
+          sessionId: sessionIdUsed,
+          provider,
+          model: model.id,
+        },
+      },
+    };
+  }
   const promptFailoverReason = classifyFailoverReason(errorText);
   if (promptFailoverReason && promptFailoverReason !== "timeout" && lastProfileId) {
     await markAuthProfileFailure({
@@ -339,14 +360,15 @@ export async function runEmbeddedPiAgent(
     if (rotated) continue;
     if (fallbackConfigured) {
+      // Prefer formatted error message (user-friendly) over raw errorMessage
       const message =
-        lastAssistant?.errorMessage?.trim() ||
         (lastAssistant
           ? formatAssistantErrorText(lastAssistant, {
               cfg: params.config,
               sessionKey: params.sessionKey ?? params.sessionId,
             })
-          : "") ||
+          : undefined) ||
+        lastAssistant?.errorMessage?.trim() ||
         (timedOut
           ? "LLM request timed out."
           : rateLimitFailure

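For reference, the role-ordering branch added above can be read as a standalone classifier over the provider's error text. The sketch below is illustrative only and not part of this commit; describeRoleOrderingError is a hypothetical name, while the regex and the reply copy mirror the hunk above.

// Illustrative sketch, not part of the diff: same regex and copy as the branch above.
const ROLE_ORDERING_ERROR = /incorrect role information|roles must alternate/i;

function describeRoleOrderingError(errorText: string): string | null {
  if (!ROLE_ORDERING_ERROR.test(errorText)) return null;
  return (
    "Message ordering conflict - please try again. " +
    "If this persists, use /new to start a fresh session."
  );
}

When the pattern matches, the run returns early with that text as an error payload instead of falling through to classifyFailoverReason and auth-profile rotation.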

@@ -419,14 +419,33 @@ export async function runEmbeddedAttempt(
   try {
     const promptStartedAt = Date.now();
     log.debug(`embedded run prompt start: runId=${params.runId} sessionId=${params.sessionId}`);
-    try {
-      await activeSession.prompt(params.prompt, { images: params.images });
-    } catch (err) {
-      promptError = err;
-    } finally {
-      log.debug(
-        `embedded run prompt end: runId=${params.runId} sessionId=${params.sessionId} durationMs=${Date.now() - promptStartedAt}`,
+    // Check if last message is a user message to prevent consecutive user turns
+    const lastMsg = activeSession.messages[activeSession.messages.length - 1];
+    const lastMsgRole = lastMsg && typeof lastMsg === "object" ? (lastMsg as { role?: unknown }).role : undefined;
+    if (lastMsgRole === "user") {
+      // Last message was a user message. Adding another user message would create
+      // consecutive user turns, violating Anthropic's role ordering requirement.
+      // This can happen when:
+      // 1. A previous heartbeat didn't get a response
+      // 2. A user message errored before getting an assistant response
+      // Skip this prompt to prevent "400 Incorrect role information" error.
+      log.warn(
+        `Skipping prompt because last message is a user message (would create consecutive user turns). ` +
+          `runId=${params.runId} sessionId=${params.sessionId}`
+      );
+      promptError = new Error("Incorrect role information: consecutive user messages would violate role ordering");
+    } else {
+      try {
+        await activeSession.prompt(params.prompt, { images: params.images });
+      } catch (err) {
+        promptError = err;
+      } finally {
+        log.debug(
+          `embedded run prompt end: runId=${params.runId} sessionId=${params.sessionId} durationMs=${Date.now() - promptStartedAt}`,
+        );
+      }
+    }
   try {

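The guard above reduces to a single predicate over the session transcript: if the most recent message is already a user turn, sending another prompt would create consecutive user messages and trigger the provider's 400. A minimal sketch, assuming the { role?: unknown } message shape used in the hunk; wouldRepeatUserTurn is a hypothetical helper, not part of this commit.

// Illustrative sketch, not part of the diff.
type TranscriptMessage = { role?: unknown };

function wouldRepeatUserTurn(messages: TranscriptMessage[]): boolean {
  const last = messages[messages.length - 1];
  return last !== undefined && last.role === "user";
}

When this is true, the run records a synthetic "Incorrect role information" error instead of calling prompt(), which the user-facing handler added in the first file then turns into the retry message.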

@@ -63,6 +63,8 @@ export function buildEmbeddedRunPayloads(params: {
   const normalizedRawErrorText = rawErrorMessage
     ? normalizeTextForComparison(rawErrorMessage)
     : null;
+  const normalizedErrorText = errorText ? normalizeTextForComparison(errorText) : null;
+  const genericErrorText = "The AI service returned an error. Please try again.";
   if (errorText) replyItems.push({ text: errorText, isError: true });
const inlineToolResults =
@@ -102,6 +104,11 @@ export function buildEmbeddedRunPayloads(params: {
     if (!lastAssistantErrored) return false;
     const trimmed = text.trim();
     if (!trimmed) return false;
+    if (errorText) {
+      const normalized = normalizeTextForComparison(trimmed);
+      if (normalized && normalizedErrorText && normalized === normalizedErrorText) return true;
+      if (trimmed === genericErrorText) return true;
+    }
     if (rawErrorMessage && trimmed === rawErrorMessage) return true;
     if (normalizedRawErrorText) {
       const normalized = normalizeTextForComparison(trimmed);
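
The new branch suppresses assistant text that merely echoes the error already surfaced to the user. A minimal sketch of that comparison, assuming normalizeTextForComparison lowercases and collapses whitespace (its actual implementation lives elsewhere in this repo and may differ); isErrorEcho is a hypothetical name, not part of this commit.

// Illustrative sketch, not part of the diff; the real normalizer may differ.
const normalizeTextForComparison = (text: string): string =>
  text.toLowerCase().replace(/\s+/g, " ").trim();

function isErrorEcho(text: string, errorText: string, genericErrorText: string): boolean {
  const trimmed = text.trim();
  if (!trimmed || !errorText) return false;
  if (trimmed === genericErrorText) return true;
  return normalizeTextForComparison(trimmed) === normalizeTextForComparison(errorText);
}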