From 6c451f47f453bd9ec3ec0c95aab829aeb65b2ea8 Mon Sep 17 00:00:00 2001
From: TideFinder <68721273+papago2355@users.noreply.github.com>
Date: Tue, 27 Jan 2026 14:28:04 +0900
Subject: [PATCH] =?UTF-8?q?Fix=20a=20subtle=20bug:=20`modelDefault`=20does?=
 =?UTF-8?q?n=E2=80=99t=20apply=20when=20provider=20=3D=3D=3D=20"auto"=20(#?=
 =?UTF-8?q?2576)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Fix a subtle bug: `modelDefault` doesn’t apply when provider === "auto"

1. Fix a bug where provider === "auto" could leave the resolved model as "".
2. Only include remote when any remote fields are actually configured. (Is this intentional?)

* Refactor memory-search.ts to simplify remote checks

Remove the redundant hasRemote variable and simplify the includeRemote condition.

* oxfmt-friendly version

* fix: local updates for PR #2576

Co-authored-by: papago2355

* fix: memory search auto defaults (#2576) (thanks @papago2355)

---------

Co-authored-by: Gustavo Madeira Santana
Co-authored-by: papago2355
---
 CHANGELOG.md                  |  1 +
 src/agents/memory-search.ts   | 11 ++++++--
 src/memory/embeddings.test.ts | 47 +++++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7ab80bdde..e6bb640bc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -61,6 +61,7 @@ Status: unreleased.
 - **BREAKING:** Gateway auth mode "none" is removed; gateway now requires token/password (Tailscale Serve identity still allowed).
 
 ### Fixes
+- Memory Search: keep auto provider model defaults and only include remote when configured. (#2576) Thanks @papago2355.
 - macOS: auto-scroll to bottom when sending a new message while scrolled up. (#2471) Thanks @kennyklee.
 - Gateway: suppress AbortError and transient network errors in unhandled rejections. (#2451) Thanks @Glucksberg.
 - TTS: keep /tts status replies on text-only commands and avoid duplicate block-stream audio. (#2451) Thanks @Glucksberg.
diff --git a/src/agents/memory-search.ts b/src/agents/memory-search.ts
index c89bad422..9eb35f3ee 100644
--- a/src/agents/memory-search.ts
+++ b/src/agents/memory-search.ts
@@ -119,9 +119,16 @@ function mergeConfig(
   const provider = overrides?.provider ?? defaults?.provider ?? "auto";
   const defaultRemote = defaults?.remote;
   const overrideRemote = overrides?.remote;
-  const hasRemote = Boolean(defaultRemote || overrideRemote);
+  const hasRemoteConfig = Boolean(
+    overrideRemote?.baseUrl ||
+      overrideRemote?.apiKey ||
+      overrideRemote?.headers ||
+      defaultRemote?.baseUrl ||
+      defaultRemote?.apiKey ||
+      defaultRemote?.headers,
+  );
   const includeRemote =
-    hasRemote || provider === "openai" || provider === "gemini" || provider === "auto";
+    hasRemoteConfig || provider === "openai" || provider === "gemini" || provider === "auto";
   const batch = {
     enabled: overrideRemote?.batch?.enabled ?? defaultRemote?.batch?.enabled ?? true,
     wait: overrideRemote?.batch?.wait ?? defaultRemote?.batch?.wait ?? true,
diff --git a/src/memory/embeddings.test.ts b/src/memory/embeddings.test.ts
index e37bca3cd..1809b24b8 100644
--- a/src/memory/embeddings.test.ts
+++ b/src/memory/embeddings.test.ts
@@ -1,5 +1,7 @@
 import { afterEach, describe, expect, it, vi } from "vitest";
 
+import { DEFAULT_GEMINI_EMBEDDING_MODEL } from "./embeddings-gemini.js";
+
 vi.mock("../agents/model-auth.js", () => ({
   resolveApiKeyForProvider: vi.fn(),
   requireApiKey: (auth: { apiKey?: string; mode?: string }, provider: string) => {
@@ -193,6 +195,13 @@ describe("embedding provider auto selection", () => {
   });
 
   it("uses gemini when openai is missing", async () => {
+    const fetchMock = vi.fn(async () => ({
+      ok: true,
+      status: 200,
+      json: async () => ({ embedding: { values: [1, 2, 3] } }),
+    })) as unknown as typeof fetch;
+    vi.stubGlobal("fetch", fetchMock);
+
     const { createEmbeddingProvider } = await import("./embeddings.js");
     const authModule = await import("../agents/model-auth.js");
     vi.mocked(authModule.resolveApiKeyForProvider).mockImplementation(async ({ provider }) => {
@@ -214,6 +223,44 @@
 
     expect(result.requestedProvider).toBe("auto");
     expect(result.provider.id).toBe("gemini");
+    await result.provider.embedQuery("hello");
+    const [url] = fetchMock.mock.calls[0] ?? [];
+    expect(url).toBe(
+      `https://generativelanguage.googleapis.com/v1beta/models/${DEFAULT_GEMINI_EMBEDDING_MODEL}:embedContent`,
+    );
+  });
+
+  it("keeps explicit model when openai is selected", async () => {
+    const fetchMock = vi.fn(async () => ({
+      ok: true,
+      status: 200,
+      json: async () => ({ data: [{ embedding: [1, 2, 3] }] }),
+    })) as unknown as typeof fetch;
+    vi.stubGlobal("fetch", fetchMock);
+
+    const { createEmbeddingProvider } = await import("./embeddings.js");
+    const authModule = await import("../agents/model-auth.js");
+    vi.mocked(authModule.resolveApiKeyForProvider).mockImplementation(async ({ provider }) => {
+      if (provider === "openai") {
+        return { apiKey: "openai-key", source: "env: OPENAI_API_KEY", mode: "api-key" };
+      }
+      throw new Error(`Unexpected provider ${provider}`);
+    });
+
+    const result = await createEmbeddingProvider({
+      config: {} as never,
+      provider: "auto",
+      model: "text-embedding-3-small",
+      fallback: "none",
+    });
+
+    expect(result.requestedProvider).toBe("auto");
+    expect(result.provider.id).toBe("openai");
+    await result.provider.embedQuery("hello");
+    const [url, init] = fetchMock.mock.calls[0] ?? [];
+    expect(url).toBe("https://api.openai.com/v1/embeddings");
+    const payload = JSON.parse(String(init?.body ?? "{}")) as { model?: string };
+    expect(payload.model).toBe("text-embedding-3-small");
   });
 });
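
Note (not part of the patch to apply): a minimal TypeScript sketch of the behavioral difference the mergeConfig hunk introduces, assuming a simplified remote shape. The RemoteLike type and the two helper names below are hypothetical stand-ins; the real config type in src/agents/memory-search.ts carries more fields.

// Sketch only: `RemoteLike` is a simplified stand-in for the real remote config type.
type RemoteLike = { baseUrl?: string; apiKey?: string; headers?: Record<string, string> };

// Old check: any remote object, even `{}`, counted as remote configuration.
function hasRemoteOld(defaultRemote?: RemoteLike, overrideRemote?: RemoteLike): boolean {
  return Boolean(defaultRemote || overrideRemote);
}

// New check (as in the patch): only concrete remote fields count.
function hasRemoteConfigNew(defaultRemote?: RemoteLike, overrideRemote?: RemoteLike): boolean {
  return Boolean(
    overrideRemote?.baseUrl ||
      overrideRemote?.apiKey ||
      overrideRemote?.headers ||
      defaultRemote?.baseUrl ||
      defaultRemote?.apiKey ||
      defaultRemote?.headers,
  );
}

console.log(hasRemoteOld({}, undefined));       // true  — an empty `remote: {}` used to force remote inclusion
console.log(hasRemoteConfigNew({}, undefined)); // false — remote is only included when a field is actually set

With provider left at "auto", includeRemote no longer hinges on an empty remote block, which is what the new vitest cases exercise by stubbing fetch and asserting the Gemini default model URL and the explicitly requested OpenAI model.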