Merge pull request #687 from evalexpr/fix/usage-limit-fallback

fix: add 'usage limit' to rate limit detection patterns
This commit is contained in:
Peter Steinberger
2026-01-10 22:13:19 +00:00
committed by GitHub
3 changed files with 10 additions and 0 deletions

View File

@@ -13,6 +13,7 @@
- Docker: allow optional home volume + extra bind mounts in `docker-setup.sh`. (#679) — thanks @gabriel-trigo.
### Fixes
- Agents: recognize "usage limit" errors as rate limits for failover. (#687) — thanks @evalexpr.
- CLI: avoid success message when daemon restart is skipped. (#685) — thanks @carlulsoe.
- Gateway: disable the OpenAI-compatible `/v1/chat/completions` endpoint by default; enable via `gateway.http.endpoints.chatCompletions.enabled=true`.
- macOS: stabilize bridge tunnels, guard invoke senders on disconnect, and drain stdout/stderr to avoid deadlocks. (#676) — thanks @ngutman.

View File

@@ -272,6 +272,14 @@ describe("classifyFailoverReason", () => {
);
expect(classifyFailoverReason("bad request")).toBeNull();
});
// Regression test for #687: an error message containing the phrase
// "usage limit" (e.g. ChatGPT plan-limit errors) must be classified
// as "rate_limit" so provider failover is triggered. Backed by the
// "usage limit" entry added to ERROR_PATTERNS in the same change.
it("classifies OpenAI usage limit errors as rate_limit", () => {
expect(
classifyFailoverReason(
"You have hit your ChatGPT usage limit (plus plan)",
),
).toBe("rate_limit");
});
});
describe("isCloudCodeAssistFormatError", () => {

View File

@@ -343,6 +343,7 @@ const ERROR_PATTERNS = {
"resource has been exhausted",
"quota exceeded",
"resource_exhausted",
"usage limit",
],
timeout: [
"timeout",