feat(memory): add gemini embeddings + auto-select providers
Co-authored-by: Gustavo Madeira Santana <gumadeiras@gmail.com>
CHANGELOG.md
@@ -2,62 +2,30 @@
|
||||
|
||||
Docs: https://docs.clawd.bot
|
||||
|
||||
## 2026.1.17-7
|
||||
## 2026.1.18-4
|
||||
|
||||
### Changes
|
||||
- Exec approvals: add `clawdbot approvals` CLI for viewing and updating gateway/node allowlists.
|
||||
- CLI: add `clawdbot service` gateway/node management and a `clawdbot node status` alias.
|
||||
- Status: show gateway + node service summaries in `clawdbot status` and `status --all`.
|
||||
- Control UI: add gateway/node target selector for exec approvals.
|
||||
- Docs: add approvals/service references and refresh node/control UI docs.
|
||||
- Dependencies: update core + plugin deps (grammy, vitest, openai, Microsoft agents hosting, etc.).
|
||||
- Memory: add Gemini native embeddings support and auto-select providers when keys are available. (#1167) — thanks @gumadeiras.
|
||||
- Memory: add Gemini batch indexing and atomic reindexing for memory stores. (#1167) — thanks @gumadeiras.
|
||||
- macOS: switch PeekabooBridge integration to the tagged Swift Package Manager release (no submodule).
|
||||
- macOS: stop syncing Peekaboo as a git submodule in postinstall.
|
||||
- Swabble: use the tagged Commander Swift package release.
|
||||
- CLI: add `clawdbot acp client` interactive ACP harness for debugging.
|
||||
- Plugins: route command detection/text chunking helpers through the plugin runtime and drop runtime exports from the SDK.
|
||||
- Memory: add native Gemini embeddings provider for memory search. (#1151)
|
||||
- Agents: add local docs path resolution and include docs/mirror/source/community pointers in the system prompt.
|
||||
- Slack: add HTTP webhook mode via Bolt HTTP receiver for Events API deployments. (#1143) — thanks @jdrhyne.
|
||||
- Nodes: report core/ui versions in node list + presence; surface both in CLI + macOS UI.
|
||||
<<<<<<< HEAD
|
||||
- Agents: add local docs path resolution and include docs/mirror/source/community pointers in the system prompt.
|
||||
- Slack: add HTTP webhook mode via Bolt HTTP receiver for Events API deployments. (#1143) — thanks @jdrhyne.
|
||||
||||||| parent of 903e9be49 (feat: surface node core/ui versions in macOS)
|
||||
- CLI: show memory status/index across agents with richer source detail. (#1167) — thanks @gumadeiras.
|
||||
- Memory: add native Gemini embeddings provider for memory search. (#1151) — thanks @gumadeiras.
|
||||
- Docs: refresh FAQ for node approvals, session resets, and memory provider defaults. (#1167) — thanks @gumadeiras.
|
||||
|
||||
### Fixes
|
||||
- Tools: clamp bash foreground duration using shared helpers.
|
||||
- Tools: clamp bash foreground duration using shared helpers. (#1167) — thanks @gumadeiras.
|
||||
- Auth profiles: keep auto-pinned preference while allowing rotation on failover; user pins stay locked. (#1138) — thanks @cheeeee.
|
||||
- macOS: avoid touching launchd in Remote over SSH so quitting the app no longer disables the remote gateway. (#1105)
|
||||
- Memory: index atomically so failed reindex preserves the previous memory database. (#1151)
|
||||
- Memory: avoid sqlite-vec unique constraint failures when reindexing duplicate chunk ids. (#1151)
|
||||
|
||||
## 2026.1.18-5
|
||||
|
||||
### Changes
|
||||
- Dependencies: update core + plugin deps (grammy, vitest, openai, Microsoft agents hosting, etc.).
|
||||
- Memory: index atomically so failed reindex preserves the previous memory database. (#1151) — thanks @gumadeiras.
|
||||
- Exec approvals: fix command token parsing for PATH resolution. (#1167) — thanks @gumadeiras.
|
||||
|
||||
## 2026.1.18-3
|
||||
|
||||
### Changes
|
||||
=======
|
||||
- Nodes: report core/ui versions in node list + presence; surface both in CLI + macOS UI.
|
||||
|
||||
### Fixes
|
||||
- Auth profiles: keep auto-pinned preference while allowing rotation on failover; user pins stay locked. (#1138) — thanks @cheeeee.
|
||||
- macOS: avoid touching launchd in Remote over SSH so quitting the app no longer disables the remote gateway. (#1105)
|
||||
- Memory: index atomically so failed reindex preserves the previous memory database. (#1151)
|
||||
- Memory: avoid sqlite-vec unique constraint failures when reindexing duplicate chunk ids. (#1151)
|
||||
|
||||
## 2026.1.18-5
|
||||
|
||||
### Changes
|
||||
- Dependencies: update core + plugin deps (grammy, vitest, openai, Microsoft agents hosting, etc.).
|
||||
|
||||
## 2026.1.18-3
|
||||
|
||||
### Changes
|
||||
>>>>>>> 903e9be49 (feat: surface node core/ui versions in macOS)
|
||||
- Exec: add host/security/ask routing for gateway + node exec.
|
||||
- Exec: add `/exec` directive for per-session exec defaults (host/security/ask/node).
|
||||
- macOS: migrate exec approvals to `~/.clawdbot/exec-approvals.json` with per-agent allowlists and skill auto-allow toggle.
|
||||
@@ -71,13 +39,6 @@ Docs: https://docs.clawd.bot
|
||||
- Docs: refresh exec/elevated/exec-approvals docs for the new flow. https://docs.clawd.bot/tools/exec-approvals
|
||||
- Docs: add node host CLI + update exec approvals/bridge protocol docs. https://docs.clawd.bot/cli/node
|
||||
- ACP: add experimental ACP support for IDE integrations (`clawdbot acp`). — thanks @visionik.
|
||||
- Tools: allow `sessions_spawn` to override thinking level for sub-agent runs.
|
||||
- Channels: unify thread/topic allowlist matching + command/mention gating helpers across core providers.
|
||||
- Models: add Qwen Portal OAuth provider support. (#1120) — thanks @mukhtharcm.
|
||||
- Memory: add `--verbose` logging for memory status + batch indexing details.
|
||||
- Memory: allow parallel OpenAI batch indexing jobs (default concurrency: 2).
|
||||
- macOS: add per-agent exec approvals with allowlists, skill CLI auto-allow, and settings UI.
|
||||
- Docs: add exec approvals guide and link from tools index. https://docs.clawd.bot/tools/exec-approvals
|
||||
|
||||
### Fixes
|
||||
- Auth profiles: keep auto-pinned preference while allowing rotation on failover; user pins stay locked. (#1138) — thanks @cheeeee.
|
||||
@@ -90,10 +51,11 @@ Docs: https://docs.clawd.bot
|
||||
- Tools: return a companion-app-required message when node exec is requested with no paired node.
|
||||
- Streaming: emit assistant deltas for OpenAI-compatible SSE chunks. (#1147) — thanks @alauppe.
|
||||
- Model fallback: treat timeout aborts as failover while preserving user aborts. (#1137) — thanks @cheeeee.
|
||||
|
||||
## 2026.1.18-2
|
||||
|
||||
### Fixes
|
||||
- Tests: stabilize plugin SDK resolution and embedded agent timeouts.
|
||||
- Memory: apply OpenAI batch defaults even without explicit remote config.
|
||||
- macOS: bundle Textual resources in packaged app builds to avoid code block crashes. (#1006)
|
||||
- Discord: only emit slow listener warnings after 30s.
|
||||
|
||||
## 2026.1.17-6
|
||||
|
||||
@@ -121,6 +83,22 @@ Docs: https://docs.clawd.bot
|
||||
### Fixes
|
||||
- Voice call: include request query in Twilio webhook verification when publicUrl is set. (#864)
|
||||
|
||||
## 2026.1.18-1
|
||||
|
||||
### Changes
|
||||
- Tools: allow `sessions_spawn` to override thinking level for sub-agent runs.
|
||||
- Channels: unify thread/topic allowlist matching + command/mention gating helpers across core providers.
|
||||
- Models: add Qwen Portal OAuth provider support. (#1120) — thanks @mukhtharcm.
|
||||
- Memory: add `--verbose` logging for memory status + batch indexing details.
|
||||
- Memory: allow parallel OpenAI batch indexing jobs (default concurrency: 2).
|
||||
- macOS: add per-agent exec approvals with allowlists, skill CLI auto-allow, and settings UI.
|
||||
- Docs: add exec approvals guide and link from tools index. https://docs.clawd.bot/tools/exec-approvals
|
||||
|
||||
### Fixes
|
||||
- Memory: apply OpenAI batch defaults even without explicit remote config.
|
||||
- macOS: bundle Textual resources in packaged app builds to avoid code block crashes. (#1006)
|
||||
- Tools: return a companion-app-required message when `system.run` is requested without a supporting node.
|
||||
- Discord: only emit slow listener warnings after 30s.
|
||||
## 2026.1.17-3
|
||||
|
||||
### Changes
|
||||
|
||||
@@ -79,37 +79,46 @@ semantic queries can find related notes even when wording differs.
|
||||
Defaults:
|
||||
- Enabled by default.
|
||||
- Watches memory files for changes (debounced).
|
||||
- Uses remote embeddings (OpenAI) unless configured for local.
|
||||
- Uses remote embeddings by default. If `memorySearch.provider` is not set, Clawdbot auto-selects:
|
||||
1. `local` if a `memorySearch.local.modelPath` is configured and the file exists (see the pinned-provider sketch after this list).
|
||||
2. `openai` if an OpenAI key can be resolved.
|
||||
3. `gemini` if a Gemini key can be resolved.
|
||||
4. Otherwise memory search stays disabled until configured.
|
||||
- Local mode uses node-llama-cpp and may require `pnpm approve-builds`.
|
||||
- Uses sqlite-vec (when available) to accelerate vector search inside SQLite.
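For reference, a minimal sketch of pinning a provider so auto-selection is bypassed entirely (the `modelPath` value is illustrative, not a shipped default):

```json5
agents: {
  defaults: {
    memorySearch: {
      // Naming a provider explicitly skips the auto-selection order above.
      provider: "local",
      local: {
        // Hypothetical path; any readable GGUF file on disk satisfies step 1.
        modelPath: "~/models/embeddinggemma-300M-Q8_0.gguf"
      }
    }
  }
}
```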
|
||||
|
||||
Remote embeddings **require** an API key for the embedding provider. By default
|
||||
this is OpenAI (`OPENAI_API_KEY` or `models.providers.openai.apiKey`). Codex
|
||||
OAuth only covers chat/completions and does **not** satisfy embeddings for
|
||||
memory search. When using a custom OpenAI-compatible endpoint, set
|
||||
`memorySearch.remote.apiKey` (and optional `memorySearch.remote.headers`).
|
||||
Remote embeddings **require** an API key for the embedding provider. Clawdbot
|
||||
resolves keys from auth profiles, `models.providers.*.apiKey`, or environment
|
||||
variables. Codex OAuth only covers chat/completions and does **not** satisfy
|
||||
embeddings for memory search. For Gemini, use `GEMINI_API_KEY` or
|
||||
`models.providers.google.apiKey`. When using a custom OpenAI-compatible endpoint,
|
||||
set `memorySearch.remote.apiKey` (and optional `memorySearch.remote.headers`).
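A minimal sketch of the provider-level keys (values are placeholders; setting the corresponding environment variables works just as well):

```json5
models: {
  providers: {
    // Either provider key also satisfies memory-search embeddings.
    openai: { apiKey: "YOUR_OPENAI_API_KEY" },
    google: { apiKey: "YOUR_GEMINI_API_KEY" }
  }
}
```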
|
||||
|
||||
If you want to use **Gemini embeddings** directly, set the provider to `gemini`:
|
||||
### Gemini embeddings (native)
|
||||
|
||||
Set the provider to `gemini` to use the Gemini embeddings API directly:
|
||||
|
||||
```json5
|
||||
agents: {
|
||||
defaults: {
|
||||
memorySearch: {
|
||||
provider: "gemini",
|
||||
model: "gemini-embedding-001", // default
|
||||
model: "gemini-embedding-001",
|
||||
remote: {
|
||||
apiKey: "${GEMINI_API_KEY}"
|
||||
apiKey: "YOUR_GEMINI_API_KEY"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Gemini uses `GEMINI_API_KEY` (or `models.providers.google.apiKey`). Override
|
||||
`memorySearch.remote.baseUrl` to point at a custom Gemini-compatible endpoint.
|
||||
Notes:
|
||||
- `remote.baseUrl` is optional (defaults to the Gemini API base URL); see the sketch after these notes.
|
||||
- `remote.headers` lets you add extra headers if needed.
|
||||
- Default model: `gemini-embedding-001`.
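A small sketch of those knobs together, assuming a hypothetical Gemini-compatible proxy and header:

```json5
agents: {
  defaults: {
    memorySearch: {
      provider: "gemini",
      remote: {
        // Placeholder values for a Gemini-compatible endpoint.
        baseUrl: "https://gemini-proxy.example/v1beta",
        headers: { "X-Team": "memory" }
      }
    }
  }
}
```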
|
||||
|
||||
If you want to use a **custom OpenAI-compatible endpoint** (like OpenRouter or a proxy),
|
||||
you can use the `remote` configuration:
|
||||
If you want to use a **custom OpenAI-compatible endpoint** (OpenRouter, vLLM, or a proxy),
|
||||
you can use the `remote` configuration with the OpenAI provider:
|
||||
|
||||
```json5
|
||||
agents: {
|
||||
@@ -118,8 +127,8 @@ agents: {
|
||||
provider: "openai",
|
||||
model: "text-embedding-3-small",
|
||||
remote: {
|
||||
baseUrl: "https://proxy.example/v1",
|
||||
apiKey: "YOUR_PROXY_KEY",
|
||||
baseUrl: "https://api.example.com/v1/",
|
||||
apiKey: "YOUR_OPENAI_COMPAT_API_KEY",
|
||||
headers: { "X-Custom-Header": "value" }
|
||||
}
|
||||
}
|
||||
@@ -130,11 +139,16 @@ agents: {
|
||||
If you don't want to set an API key, use `memorySearch.provider = "local"` or set
|
||||
`memorySearch.fallback = "none"`.
|
||||
|
||||
Batch indexing (OpenAI only):
|
||||
- Enabled by default for OpenAI embeddings. Set `agents.defaults.memorySearch.remote.batch.enabled = false` to disable.
|
||||
Fallbacks:
|
||||
- `memorySearch.fallback` can be `openai`, `gemini`, `local`, or `none` (example after this list).
|
||||
- The fallback provider is only used when the primary embedding provider fails.
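For example (a sketch, not a recommended default): Gemini as the primary provider with OpenAI consulted only on failure:

```json5
agents: {
  defaults: {
    memorySearch: {
      provider: "gemini",
      // Used only if the Gemini embeddings call fails.
      fallback: "openai"
    }
  }
}
```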
|
||||
|
||||
Batch indexing (OpenAI + Gemini):
|
||||
- Enabled by default for OpenAI and Gemini embeddings. Set `agents.defaults.memorySearch.remote.batch.enabled = false` to disable.
|
||||
- Default behavior waits for batch completion; tune `remote.batch.wait`, `remote.batch.pollIntervalMs`, and `remote.batch.timeoutMinutes` if needed (see the sketch after this list).
|
||||
- Set `remote.batch.concurrency` to control how many batch jobs we submit in parallel (default: 2).
|
||||
- Batch mode currently applies only when `memorySearch.provider = "openai"` and uses your OpenAI API key.
|
||||
- Batch mode applies when `memorySearch.provider = "openai"` or `"gemini"` and uses the corresponding API key.
|
||||
- Gemini batch jobs use the async embeddings batch endpoint and require Gemini Batch API availability.
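Putting the batch knobs together, a sketch with the documented defaults written out explicitly:

```json5
agents: {
  defaults: {
    memorySearch: {
      remote: {
        batch: {
          enabled: true,        // default: batch indexing on
          wait: true,           // default: block until the batch finishes
          concurrency: 2,       // parallel batch jobs
          pollIntervalMs: 2000, // status polling interval
          timeoutMinutes: 60    // give up after this long
        }
      }
    }
  }
}
```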
|
||||
|
||||
Why OpenAI batch is fast + cheap:
|
||||
- For large backfills, OpenAI is typically the fastest option we support because we can submit many embedding requests in a single batch job and let OpenAI process them asynchronously.
|
||||
|
||||
@@ -22,6 +22,21 @@ describe("memory search config", () => {
|
||||
expect(resolved).toBeNull();
|
||||
});
|
||||
|
||||
it("defaults provider to auto when unspecified", () => {
|
||||
const cfg = {
|
||||
agents: {
|
||||
defaults: {
|
||||
memorySearch: {
|
||||
enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const resolved = resolveMemorySearchConfig(cfg, "main");
|
||||
expect(resolved?.provider).toBe("auto");
|
||||
expect(resolved?.fallback).toBe("none");
|
||||
});
|
||||
|
||||
it("merges defaults and overrides", () => {
|
||||
const cfg = {
|
||||
agents: {
|
||||
@@ -126,6 +141,7 @@ describe("memory search config", () => {
|
||||
agents: {
|
||||
defaults: {
|
||||
memorySearch: {
|
||||
provider: "openai",
|
||||
remote: {
|
||||
baseUrl: "https://default.example/v1",
|
||||
apiKey: "default-key",
|
||||
@@ -166,6 +182,7 @@ describe("memory search config", () => {
|
||||
agents: {
|
||||
defaults: {
|
||||
memorySearch: {
|
||||
provider: "openai",
|
||||
sources: ["memory", "sessions"],
|
||||
},
|
||||
},
|
||||
@@ -189,6 +206,7 @@ describe("memory search config", () => {
|
||||
agents: {
|
||||
defaults: {
|
||||
memorySearch: {
|
||||
provider: "openai",
|
||||
sources: ["memory", "sessions"],
|
||||
experimental: { sessionMemory: true },
|
||||
},
|
||||
|
||||
@@ -3,13 +3,13 @@ import path from "node:path";
|
||||
|
||||
import type { ClawdbotConfig, MemorySearchConfig } from "../config/config.js";
|
||||
import { resolveStateDir } from "../config/paths.js";
|
||||
import { resolveUserPath } from "../utils.js";
|
||||
import { clampInt, clampNumber, resolveUserPath } from "../utils.js";
|
||||
import { resolveAgentConfig } from "./agent-scope.js";
|
||||
|
||||
export type ResolvedMemorySearchConfig = {
|
||||
enabled: boolean;
|
||||
sources: Array<"memory" | "sessions">;
|
||||
provider: "openai" | "gemini" | "local";
|
||||
provider: "openai" | "local" | "gemini" | "auto";
|
||||
remote?: {
|
||||
baseUrl?: string;
|
||||
apiKey?: string;
|
||||
@@ -25,7 +25,7 @@ export type ResolvedMemorySearchConfig = {
|
||||
experimental: {
|
||||
sessionMemory: boolean;
|
||||
};
|
||||
fallback: "openai" | "none";
|
||||
fallback: "openai" | "gemini" | "local" | "none";
|
||||
model: string;
|
||||
local: {
|
||||
modelPath?: string;
|
||||
@@ -110,34 +110,40 @@ function mergeConfig(
|
||||
const enabled = overrides?.enabled ?? defaults?.enabled ?? true;
|
||||
const sessionMemory =
|
||||
overrides?.experimental?.sessionMemory ?? defaults?.experimental?.sessionMemory ?? false;
|
||||
const provider = overrides?.provider ?? defaults?.provider ?? "openai";
|
||||
const hasRemote = Boolean(defaults?.remote || overrides?.remote);
|
||||
const includeRemote = hasRemote || provider === "openai" || provider === "gemini";
|
||||
const provider = overrides?.provider ?? defaults?.provider ?? "auto";
|
||||
const defaultRemote = defaults?.remote;
|
||||
const overrideRemote = overrides?.remote;
|
||||
const hasRemote = Boolean(defaultRemote || overrideRemote);
|
||||
const includeRemote =
|
||||
hasRemote || provider === "openai" || provider === "gemini" || provider === "auto";
|
||||
const batch = {
|
||||
enabled: overrides?.remote?.batch?.enabled ?? defaults?.remote?.batch?.enabled ?? true,
|
||||
wait: overrides?.remote?.batch?.wait ?? defaults?.remote?.batch?.wait ?? true,
|
||||
enabled: overrideRemote?.batch?.enabled ?? defaultRemote?.batch?.enabled ?? true,
|
||||
wait: overrideRemote?.batch?.wait ?? defaultRemote?.batch?.wait ?? true,
|
||||
concurrency: Math.max(
|
||||
1,
|
||||
overrides?.remote?.batch?.concurrency ?? defaults?.remote?.batch?.concurrency ?? 2,
|
||||
overrideRemote?.batch?.concurrency ?? defaultRemote?.batch?.concurrency ?? 2,
|
||||
),
|
||||
pollIntervalMs:
|
||||
overrides?.remote?.batch?.pollIntervalMs ?? defaults?.remote?.batch?.pollIntervalMs ?? 2000,
|
||||
overrideRemote?.batch?.pollIntervalMs ?? defaultRemote?.batch?.pollIntervalMs ?? 2000,
|
||||
timeoutMinutes:
|
||||
overrides?.remote?.batch?.timeoutMinutes ?? defaults?.remote?.batch?.timeoutMinutes ?? 60,
|
||||
overrideRemote?.batch?.timeoutMinutes ?? defaultRemote?.batch?.timeoutMinutes ?? 60,
|
||||
};
|
||||
const remote = includeRemote
|
||||
? {
|
||||
baseUrl: overrides?.remote?.baseUrl ?? defaults?.remote?.baseUrl,
|
||||
apiKey: overrides?.remote?.apiKey ?? defaults?.remote?.apiKey,
|
||||
headers: overrides?.remote?.headers ?? defaults?.remote?.headers,
|
||||
baseUrl: overrideRemote?.baseUrl ?? defaultRemote?.baseUrl,
|
||||
apiKey: overrideRemote?.apiKey ?? defaultRemote?.apiKey,
|
||||
headers: overrideRemote?.headers ?? defaultRemote?.headers,
|
||||
batch,
|
||||
}
|
||||
: undefined;
|
||||
const fallback = overrides?.fallback ?? defaults?.fallback ?? "openai";
|
||||
const model =
|
||||
overrides?.model ??
|
||||
defaults?.model ??
|
||||
(provider === "gemini" ? DEFAULT_GEMINI_MODEL : DEFAULT_OPENAI_MODEL);
|
||||
const fallback = overrides?.fallback ?? defaults?.fallback ?? "none";
|
||||
const modelDefault =
|
||||
provider === "gemini"
|
||||
? DEFAULT_GEMINI_MODEL
|
||||
: provider === "openai"
|
||||
? DEFAULT_OPENAI_MODEL
|
||||
: undefined;
|
||||
const model = overrides?.model ?? defaults?.model ?? modelDefault ?? "";
|
||||
const local = {
|
||||
modelPath: overrides?.local?.modelPath ?? defaults?.local?.modelPath,
|
||||
modelCacheDir: overrides?.local?.modelCacheDir ?? defaults?.local?.modelCacheDir,
|
||||
@@ -194,14 +200,14 @@ function mergeConfig(
|
||||
maxEntries: overrides?.cache?.maxEntries ?? defaults?.cache?.maxEntries,
|
||||
};
|
||||
|
||||
const overlap = Math.max(0, Math.min(chunking.overlap, chunking.tokens - 1));
|
||||
const minScore = Math.max(0, Math.min(1, query.minScore));
|
||||
const vectorWeight = Math.max(0, Math.min(1, hybrid.vectorWeight));
|
||||
const textWeight = Math.max(0, Math.min(1, hybrid.textWeight));
|
||||
const overlap = clampNumber(chunking.overlap, 0, Math.max(0, chunking.tokens - 1));
|
||||
const minScore = clampNumber(query.minScore, 0, 1);
|
||||
const vectorWeight = clampNumber(hybrid.vectorWeight, 0, 1);
|
||||
const textWeight = clampNumber(hybrid.textWeight, 0, 1);
|
||||
const sum = vectorWeight + textWeight;
|
||||
const normalizedVectorWeight = sum > 0 ? vectorWeight / sum : DEFAULT_HYBRID_VECTOR_WEIGHT;
|
||||
const normalizedTextWeight = sum > 0 ? textWeight / sum : DEFAULT_HYBRID_TEXT_WEIGHT;
|
||||
const candidateMultiplier = Math.max(1, Math.min(20, Math.floor(hybrid.candidateMultiplier)));
|
||||
const candidateMultiplier = clampInt(hybrid.candidateMultiplier, 1, 20);
|
||||
return {
|
||||
enabled,
|
||||
sources,
|
||||
|
||||
@@ -381,19 +381,19 @@ const FIELD_HELP: Record<string, string> = {
|
||||
"agents.defaults.memorySearch.remote.headers":
|
||||
"Extra headers for remote embeddings (merged; remote overrides OpenAI headers).",
|
||||
"agents.defaults.memorySearch.remote.batch.enabled":
|
||||
"Enable OpenAI Batch API for memory embeddings (default: true).",
|
||||
"Enable batch API for memory embeddings (OpenAI/Gemini; default: true).",
|
||||
"agents.defaults.memorySearch.remote.batch.wait":
|
||||
"Wait for OpenAI batch completion when indexing (default: true).",
|
||||
"Wait for batch completion when indexing (default: true).",
|
||||
"agents.defaults.memorySearch.remote.batch.concurrency":
|
||||
"Max concurrent OpenAI batch jobs for memory indexing (default: 2).",
|
||||
"Max concurrent embedding batch jobs for memory indexing (default: 2).",
|
||||
"agents.defaults.memorySearch.remote.batch.pollIntervalMs":
|
||||
"Polling interval in ms for OpenAI batch status (default: 2000).",
|
||||
"Polling interval in ms for batch status (default: 2000).",
|
||||
"agents.defaults.memorySearch.remote.batch.timeoutMinutes":
|
||||
"Timeout in minutes for OpenAI batch indexing (default: 60).",
|
||||
"Timeout in minutes for batch indexing (default: 60).",
|
||||
"agents.defaults.memorySearch.local.modelPath":
|
||||
"Local GGUF model path or hf: URI (node-llama-cpp).",
|
||||
"agents.defaults.memorySearch.fallback":
|
||||
'Fallback to OpenAI when local embeddings fail ("openai" or "none").',
|
||||
'Fallback provider when embeddings fail ("openai", "gemini", "local", or "none").',
|
||||
"agents.defaults.memorySearch.store.path":
|
||||
"SQLite index path (default: ~/.clawdbot/memory/{agentId}.sqlite).",
|
||||
"agents.defaults.memorySearch.store.vector.enabled":
|
||||
|
||||
@@ -121,16 +121,30 @@ export type ToolPolicyConfig = {
|
||||
};
|
||||
|
||||
export type ExecToolConfig = {
|
||||
/** Exec host routing (default: sandbox). */
|
||||
host?: "sandbox" | "gateway" | "node";
|
||||
/** Exec security mode (default: deny). */
|
||||
security?: "deny" | "allowlist" | "full";
|
||||
/** Exec ask mode (default: on-miss). */
|
||||
ask?: "off" | "on-miss" | "always";
|
||||
/** Default node binding for exec.host=node (node id/name). */
|
||||
node?: string;
|
||||
/** Default time (ms) before an exec command auto-backgrounds. */
|
||||
backgroundMs?: number;
|
||||
/** Default timeout (seconds) before auto-killing exec commands. */
|
||||
timeoutSec?: number;
|
||||
/** How long to keep finished sessions in memory (ms). */
|
||||
cleanupMs?: number;
|
||||
/** Emit a system event and heartbeat when a backgrounded exec exits. */
|
||||
notifyOnExit?: boolean;
|
||||
/** apply_patch subtool configuration (experimental). */
|
||||
applyPatch?: {
|
||||
/** Enable apply_patch for OpenAI models (default: false). */
|
||||
enabled?: boolean;
|
||||
/**
|
||||
* Optional allowlist of model ids that can use apply_patch.
|
||||
* Accepts either raw ids (e.g. "gpt-5.2") or full ids (e.g. "openai/gpt-5.2").
|
||||
*/
|
||||
allowModels?: string[];
|
||||
};
|
||||
};
|
||||
@@ -176,7 +190,7 @@ export type MemorySearchConfig = {
|
||||
apiKey?: string;
|
||||
headers?: Record<string, string>;
|
||||
batch?: {
|
||||
/** Enable OpenAI Batch API for embedding indexing (default: true). */
|
||||
/** Enable batch API for embedding indexing (OpenAI/Gemini; default: true). */
|
||||
enabled?: boolean;
|
||||
/** Wait for batch completion (default: true). */
|
||||
wait?: boolean;
|
||||
@@ -188,8 +202,8 @@ export type MemorySearchConfig = {
|
||||
timeoutMinutes?: number;
|
||||
};
|
||||
};
|
||||
/** Fallback behavior when local embeddings fail. */
|
||||
fallback?: "openai" | "none";
|
||||
/** Fallback behavior when embeddings fail. */
|
||||
fallback?: "openai" | "gemini" | "local" | "none";
|
||||
/** Embedding model id (remote) or alias (local). */
|
||||
model?: string;
|
||||
/** Local embedding settings (node-llama-cpp). */
|
||||
|
||||
@@ -183,24 +183,6 @@ export const AgentToolsSchema = z
|
||||
allowFrom: ElevatedAllowFromSchema,
|
||||
})
|
||||
.optional(),
|
||||
exec: z
|
||||
.object({
|
||||
host: z.enum(["sandbox", "gateway", "node"]).optional(),
|
||||
security: z.enum(["deny", "allowlist", "full"]).optional(),
|
||||
ask: z.enum(["off", "on-miss", "always"]).optional(),
|
||||
node: z.string().optional(),
|
||||
backgroundMs: z.number().int().positive().optional(),
|
||||
timeoutSec: z.number().int().positive().optional(),
|
||||
cleanupMs: z.number().int().positive().optional(),
|
||||
notifyOnExit: z.boolean().optional(),
|
||||
applyPatch: z
|
||||
.object({
|
||||
enabled: z.boolean().optional(),
|
||||
allowModels: z.array(z.string()).optional(),
|
||||
})
|
||||
.optional(),
|
||||
})
|
||||
.optional(),
|
||||
sandbox: z
|
||||
.object({
|
||||
tools: ToolPolicySchema,
|
||||
@@ -218,7 +200,7 @@ export const MemorySearchSchema = z
|
||||
sessionMemory: z.boolean().optional(),
|
||||
})
|
||||
.optional(),
|
||||
provider: z.union([z.literal("openai"), z.literal("gemini"), z.literal("local")]).optional(),
|
||||
provider: z.union([z.literal("openai"), z.literal("local"), z.literal("gemini")]).optional(),
|
||||
remote: z
|
||||
.object({
|
||||
baseUrl: z.string().optional(),
|
||||
@@ -235,7 +217,9 @@ export const MemorySearchSchema = z
|
||||
.optional(),
|
||||
})
|
||||
.optional(),
|
||||
fallback: z.union([z.literal("openai"), z.literal("none")]).optional(),
|
||||
fallback: z
|
||||
.union([z.literal("openai"), z.literal("gemini"), z.literal("local"), z.literal("none")])
|
||||
.optional(),
|
||||
model: z.string().optional(),
|
||||
local: z
|
||||
.object({
|
||||
|
||||
src/memory/embeddings-gemini.ts (new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
import { resolveApiKeyForProvider } from "../agents/model-auth.js";
|
||||
import { createSubsystemLogger } from "../logging.js";
|
||||
import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js";
|
||||
|
||||
export type GeminiEmbeddingClient = {
|
||||
baseUrl: string;
|
||||
headers: Record<string, string>;
|
||||
model: string;
|
||||
modelPath: string;
|
||||
};
|
||||
|
||||
const DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta";
|
||||
export const DEFAULT_GEMINI_EMBEDDING_MODEL = "gemini-embedding-001";
|
||||
const debugEmbeddings = process.env.CLAWDBOT_DEBUG_MEMORY_EMBEDDINGS === "1";
|
||||
const log = createSubsystemLogger("memory/embeddings");
|
||||
|
||||
const debugLog = (message: string, meta?: Record<string, unknown>) => {
|
||||
if (!debugEmbeddings) return;
|
||||
const suffix = meta ? ` ${JSON.stringify(meta)}` : "";
|
||||
log.raw(`${message}${suffix}`);
|
||||
};
|
||||
|
||||
function resolveRemoteApiKey(remoteApiKey?: string): string | undefined {
|
||||
const trimmed = remoteApiKey?.trim();
|
||||
if (!trimmed) return undefined;
|
||||
if (trimmed === "GOOGLE_API_KEY" || trimmed === "GEMINI_API_KEY") {
|
||||
return process.env[trimmed]?.trim();
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function normalizeGeminiModel(model: string): string {
|
||||
const trimmed = model.trim();
|
||||
if (!trimmed) return DEFAULT_GEMINI_EMBEDDING_MODEL;
|
||||
const withoutPrefix = trimmed.replace(/^models\//, "");
|
||||
if (withoutPrefix.startsWith("gemini/")) return withoutPrefix.slice("gemini/".length);
|
||||
if (withoutPrefix.startsWith("google/")) return withoutPrefix.slice("google/".length);
|
||||
return withoutPrefix;
|
||||
}
|
||||
|
||||
function normalizeGeminiBaseUrl(raw: string): string {
|
||||
const trimmed = raw.replace(/\/+$/, "");
|
||||
const openAiIndex = trimmed.indexOf("/openai");
|
||||
if (openAiIndex > -1) return trimmed.slice(0, openAiIndex);
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function buildGeminiModelPath(model: string): string {
|
||||
return model.startsWith("models/") ? model : `models/${model}`;
|
||||
}
|
||||
|
||||
export async function createGeminiEmbeddingProvider(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<{ provider: EmbeddingProvider; client: GeminiEmbeddingClient }> {
|
||||
const client = await resolveGeminiEmbeddingClient(options);
|
||||
const baseUrl = client.baseUrl.replace(/\/$/, "");
|
||||
const embedUrl = `${baseUrl}/${client.modelPath}:embedContent`;
|
||||
const batchUrl = `${baseUrl}/${client.modelPath}:batchEmbedContents`;
|
||||
|
||||
const embedQuery = async (text: string): Promise<number[]> => {
|
||||
if (!text.trim()) return [];
|
||||
const res = await fetch(embedUrl, {
|
||||
method: "POST",
|
||||
headers: client.headers,
|
||||
body: JSON.stringify({
|
||||
content: { parts: [{ text }] },
|
||||
taskType: "RETRIEVAL_QUERY",
|
||||
}),
|
||||
});
|
||||
if (!res.ok) {
|
||||
const payload = await res.text();
|
||||
throw new Error(`gemini embeddings failed: ${res.status} ${payload}`);
|
||||
}
|
||||
const payload = (await res.json()) as { embedding?: { values?: number[] } };
|
||||
return payload.embedding?.values ?? [];
|
||||
};
|
||||
|
||||
const embedBatch = async (texts: string[]): Promise<number[][]> => {
|
||||
if (texts.length === 0) return [];
|
||||
const requests = texts.map((text) => ({
|
||||
model: client.modelPath,
|
||||
content: { parts: [{ text }] },
|
||||
taskType: "RETRIEVAL_DOCUMENT",
|
||||
}));
|
||||
const res = await fetch(batchUrl, {
|
||||
method: "POST",
|
||||
headers: client.headers,
|
||||
body: JSON.stringify({ requests }),
|
||||
});
|
||||
if (!res.ok) {
|
||||
const payload = await res.text();
|
||||
throw new Error(`gemini embeddings failed: ${res.status} ${payload}`);
|
||||
}
|
||||
const payload = (await res.json()) as { embeddings?: Array<{ values?: number[] }> };
|
||||
const embeddings = Array.isArray(payload.embeddings) ? payload.embeddings : [];
|
||||
return texts.map((_, index) => embeddings[index]?.values ?? []);
|
||||
};
|
||||
|
||||
return {
|
||||
provider: {
|
||||
id: "gemini",
|
||||
model: client.model,
|
||||
embedQuery,
|
||||
embedBatch,
|
||||
},
|
||||
client,
|
||||
};
|
||||
}
|
||||
|
||||
export async function resolveGeminiEmbeddingClient(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<GeminiEmbeddingClient> {
|
||||
const remote = options.remote;
|
||||
const remoteApiKey = resolveRemoteApiKey(remote?.apiKey);
|
||||
const remoteBaseUrl = remote?.baseUrl?.trim();
|
||||
|
||||
const { apiKey } = remoteApiKey
|
||||
? { apiKey: remoteApiKey }
|
||||
: await resolveApiKeyForProvider({
|
||||
provider: "google",
|
||||
cfg: options.config,
|
||||
agentDir: options.agentDir,
|
||||
});
|
||||
|
||||
const providerConfig = options.config.models?.providers?.google;
|
||||
const rawBaseUrl = remoteBaseUrl || providerConfig?.baseUrl?.trim() || DEFAULT_GEMINI_BASE_URL;
|
||||
const baseUrl = normalizeGeminiBaseUrl(rawBaseUrl);
|
||||
const headerOverrides = Object.assign({}, providerConfig?.headers, remote?.headers);
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json",
|
||||
"x-goog-api-key": apiKey,
|
||||
...headerOverrides,
|
||||
};
|
||||
const model = normalizeGeminiModel(options.model);
|
||||
const modelPath = buildGeminiModelPath(model);
|
||||
debugLog("memory embeddings: gemini client", {
|
||||
rawBaseUrl,
|
||||
baseUrl,
|
||||
model,
|
||||
modelPath,
|
||||
embedEndpoint: `${baseUrl}/${modelPath}:embedContent`,
|
||||
batchEndpoint: `${baseUrl}/${modelPath}:batchEmbedContents`,
|
||||
});
|
||||
return { baseUrl, headers, model, modelPath };
|
||||
}
|
||||
src/memory/embeddings-openai.ts (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
import { resolveApiKeyForProvider } from "../agents/model-auth.js";
|
||||
import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js";
|
||||
|
||||
export type OpenAiEmbeddingClient = {
|
||||
baseUrl: string;
|
||||
headers: Record<string, string>;
|
||||
model: string;
|
||||
};
|
||||
|
||||
export const DEFAULT_OPENAI_EMBEDDING_MODEL = "text-embedding-3-small";
|
||||
const DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1";
|
||||
|
||||
export function normalizeOpenAiModel(model: string): string {
|
||||
const trimmed = model.trim();
|
||||
if (!trimmed) return DEFAULT_OPENAI_EMBEDDING_MODEL;
|
||||
if (trimmed.startsWith("openai/")) return trimmed.slice("openai/".length);
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
export async function createOpenAiEmbeddingProvider(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<{ provider: EmbeddingProvider; client: OpenAiEmbeddingClient }> {
|
||||
const client = await resolveOpenAiEmbeddingClient(options);
|
||||
const url = `${client.baseUrl.replace(/\/$/, "")}/embeddings`;
|
||||
|
||||
const embed = async (input: string[]): Promise<number[][]> => {
|
||||
if (input.length === 0) return [];
|
||||
const res = await fetch(url, {
|
||||
method: "POST",
|
||||
headers: client.headers,
|
||||
body: JSON.stringify({ model: client.model, input }),
|
||||
});
|
||||
if (!res.ok) {
|
||||
const text = await res.text();
|
||||
throw new Error(`openai embeddings failed: ${res.status} ${text}`);
|
||||
}
|
||||
const payload = (await res.json()) as {
|
||||
data?: Array<{ embedding?: number[] }>;
|
||||
};
|
||||
const data = payload.data ?? [];
|
||||
return data.map((entry) => entry.embedding ?? []);
|
||||
};
|
||||
|
||||
return {
|
||||
provider: {
|
||||
id: "openai",
|
||||
model: client.model,
|
||||
embedQuery: async (text) => {
|
||||
const [vec] = await embed([text]);
|
||||
return vec ?? [];
|
||||
},
|
||||
embedBatch: embed,
|
||||
},
|
||||
client,
|
||||
};
|
||||
}
|
||||
|
||||
export async function resolveOpenAiEmbeddingClient(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<OpenAiEmbeddingClient> {
|
||||
const remote = options.remote;
|
||||
const remoteApiKey = remote?.apiKey?.trim();
|
||||
const remoteBaseUrl = remote?.baseUrl?.trim();
|
||||
|
||||
const { apiKey } = remoteApiKey
|
||||
? { apiKey: remoteApiKey }
|
||||
: await resolveApiKeyForProvider({
|
||||
provider: "openai",
|
||||
cfg: options.config,
|
||||
agentDir: options.agentDir,
|
||||
});
|
||||
|
||||
const providerConfig = options.config.models?.providers?.openai;
|
||||
const baseUrl = remoteBaseUrl || providerConfig?.baseUrl?.trim() || DEFAULT_OPENAI_BASE_URL;
|
||||
const headerOverrides = Object.assign({}, providerConfig?.headers, remote?.headers);
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json",
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
...headerOverrides,
|
||||
};
|
||||
const model = normalizeOpenAiModel(options.model);
|
||||
return { baseUrl, headers, model };
|
||||
}
|
||||
@@ -108,7 +108,7 @@ describe("embedding provider remote overrides", () => {
|
||||
expect(headers.Authorization).toBe("Bearer provider-key");
|
||||
});
|
||||
|
||||
it("uses gemini embedContent endpoint with x-goog-api-key", async () => {
|
||||
it("builds Gemini embeddings requests with api key header", async () => {
|
||||
const fetchMock = vi.fn(async () => ({
|
||||
ok: true,
|
||||
status: 200,
|
||||
@@ -119,29 +119,94 @@ describe("embedding provider remote overrides", () => {
|
||||
const { createEmbeddingProvider } = await import("./embeddings.js");
|
||||
const authModule = await import("../agents/model-auth.js");
|
||||
vi.mocked(authModule.resolveApiKeyForProvider).mockResolvedValue({
|
||||
apiKey: "gemini-key",
|
||||
apiKey: "provider-key",
|
||||
});
|
||||
|
||||
const cfg = {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = await createEmbeddingProvider({
|
||||
config: {} as never,
|
||||
config: cfg as never,
|
||||
provider: "gemini",
|
||||
remote: {
|
||||
baseUrl: "https://gemini.example/v1beta",
|
||||
apiKey: "gemini-key",
|
||||
},
|
||||
model: "gemini-embedding-001",
|
||||
model: "text-embedding-004",
|
||||
fallback: "openai",
|
||||
});
|
||||
|
||||
await result.provider.embedQuery("hello");
|
||||
|
||||
const [url, init] = fetchMock.mock.calls[0] ?? [];
|
||||
expect(url).toBe("https://gemini.example/v1beta/models/gemini-embedding-001:embedContent");
|
||||
expect(url).toBe(
|
||||
"https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:embedContent",
|
||||
);
|
||||
const headers = (init?.headers ?? {}) as Record<string, string>;
|
||||
expect(headers["x-goog-api-key"]).toBe("gemini-key");
|
||||
expect(headers["Content-Type"]).toBe("application/json");
|
||||
});
|
||||
});
|
||||
|
||||
describe("embedding provider auto selection", () => {
|
||||
afterEach(() => {
|
||||
vi.resetAllMocks();
|
||||
vi.resetModules();
|
||||
vi.unstubAllGlobals();
|
||||
});
|
||||
|
||||
it("prefers openai when a key resolves", async () => {
|
||||
const { createEmbeddingProvider } = await import("./embeddings.js");
|
||||
const authModule = await import("../agents/model-auth.js");
|
||||
vi.mocked(authModule.resolveApiKeyForProvider).mockImplementation(async ({ provider }) => {
|
||||
if (provider === "openai") {
|
||||
return { apiKey: "openai-key", source: "env: OPENAI_API_KEY" };
|
||||
}
|
||||
throw new Error(`No API key found for provider "${provider}".`);
|
||||
});
|
||||
|
||||
const result = await createEmbeddingProvider({
|
||||
config: {} as never,
|
||||
provider: "auto",
|
||||
model: "",
|
||||
fallback: "none",
|
||||
});
|
||||
|
||||
expect(result.requestedProvider).toBe("auto");
|
||||
expect(result.provider.id).toBe("openai");
|
||||
});
|
||||
|
||||
it("uses gemini when openai is missing", async () => {
|
||||
const { createEmbeddingProvider } = await import("./embeddings.js");
|
||||
const authModule = await import("../agents/model-auth.js");
|
||||
vi.mocked(authModule.resolveApiKeyForProvider).mockImplementation(async ({ provider }) => {
|
||||
if (provider === "openai") {
|
||||
throw new Error('No API key found for provider "openai".');
|
||||
}
|
||||
if (provider === "google") {
|
||||
return { apiKey: "gemini-key", source: "env: GEMINI_API_KEY" };
|
||||
}
|
||||
throw new Error(`Unexpected provider ${provider}`);
|
||||
});
|
||||
|
||||
const result = await createEmbeddingProvider({
|
||||
config: {} as never,
|
||||
provider: "auto",
|
||||
model: "",
|
||||
fallback: "none",
|
||||
});
|
||||
|
||||
expect(result.requestedProvider).toBe("auto");
|
||||
expect(result.provider.id).toBe("gemini");
|
||||
});
|
||||
});
|
||||
|
||||
describe("embedding provider local fallback", () => {
|
||||
afterEach(() => {
|
||||
vi.resetAllMocks();
|
||||
|
||||
@@ -1,8 +1,21 @@
|
||||
import fsSync from "node:fs";
|
||||
|
||||
import type { Llama, LlamaEmbeddingContext, LlamaModel } from "node-llama-cpp";
|
||||
import { resolveApiKeyForProvider } from "../agents/model-auth.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { resolveUserPath } from "../utils.js";
|
||||
import {
|
||||
createGeminiEmbeddingProvider,
|
||||
type GeminiEmbeddingClient,
|
||||
} from "./embeddings-gemini.js";
|
||||
import {
|
||||
createOpenAiEmbeddingProvider,
|
||||
type OpenAiEmbeddingClient,
|
||||
} from "./embeddings-openai.js";
|
||||
import { importNodeLlamaCpp } from "./node-llama.js";
|
||||
|
||||
export type { GeminiEmbeddingClient } from "./embeddings-gemini.js";
|
||||
export type { OpenAiEmbeddingClient } from "./embeddings-openai.js";
|
||||
|
||||
export type EmbeddingProvider = {
|
||||
id: string;
|
||||
model: string;
|
||||
@@ -12,230 +25,49 @@ export type EmbeddingProvider = {
|
||||
|
||||
export type EmbeddingProviderResult = {
|
||||
provider: EmbeddingProvider;
|
||||
requestedProvider: "openai" | "gemini" | "local";
|
||||
fallbackFrom?: "local";
|
||||
requestedProvider: "openai" | "local" | "gemini" | "auto";
|
||||
fallbackFrom?: "openai" | "local" | "gemini";
|
||||
fallbackReason?: string;
|
||||
openAi?: OpenAiEmbeddingClient;
|
||||
gemini?: GeminiEmbeddingClient;
|
||||
};
|
||||
|
||||
export type OpenAiEmbeddingClient = {
|
||||
baseUrl: string;
|
||||
headers: Record<string, string>;
|
||||
model: string;
|
||||
};
|
||||
|
||||
export type GeminiEmbeddingClient = {
|
||||
baseUrl: string;
|
||||
headers: Record<string, string>;
|
||||
model: string;
|
||||
};
|
||||
|
||||
export type EmbeddingProviderOptions = {
|
||||
config: ClawdbotConfig;
|
||||
agentDir?: string;
|
||||
provider: "openai" | "gemini" | "local";
|
||||
provider: "openai" | "local" | "gemini" | "auto";
|
||||
remote?: {
|
||||
baseUrl?: string;
|
||||
apiKey?: string;
|
||||
headers?: Record<string, string>;
|
||||
};
|
||||
model: string;
|
||||
fallback: "openai" | "none";
|
||||
fallback: "openai" | "gemini" | "local" | "none";
|
||||
local?: {
|
||||
modelPath?: string;
|
||||
modelCacheDir?: string;
|
||||
};
|
||||
};
|
||||
|
||||
const DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1";
|
||||
const DEFAULT_LOCAL_MODEL = "hf:ggml-org/embeddinggemma-300M-GGUF/embeddinggemma-300M-Q8_0.gguf";
|
||||
const DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta";
|
||||
const DEFAULT_GEMINI_MODEL = "gemini-embedding-001";
|
||||
|
||||
function normalizeOpenAiModel(model: string): string {
|
||||
const trimmed = model.trim();
|
||||
if (!trimmed) return "text-embedding-3-small";
|
||||
if (trimmed.startsWith("openai/")) return trimmed.slice("openai/".length);
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function normalizeGeminiModel(model: string): string {
|
||||
const trimmed = model.trim();
|
||||
if (!trimmed) return DEFAULT_GEMINI_MODEL;
|
||||
if (trimmed.startsWith("models/")) return trimmed.slice("models/".length);
|
||||
if (trimmed.startsWith("google/")) return trimmed.slice("google/".length);
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
async function createOpenAiEmbeddingProvider(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<{ provider: EmbeddingProvider; client: OpenAiEmbeddingClient }> {
|
||||
const client = await resolveOpenAiEmbeddingClient(options);
|
||||
const url = `${client.baseUrl.replace(/\/$/, "")}/embeddings`;
|
||||
|
||||
const embed = async (input: string[]): Promise<number[][]> => {
|
||||
if (input.length === 0) return [];
|
||||
const res = await fetch(url, {
|
||||
method: "POST",
|
||||
headers: client.headers,
|
||||
body: JSON.stringify({ model: client.model, input }),
|
||||
});
|
||||
if (!res.ok) {
|
||||
const text = await res.text();
|
||||
throw new Error(`openai embeddings failed: ${res.status} ${text}`);
|
||||
}
|
||||
const payload = (await res.json()) as {
|
||||
data?: Array<{ embedding?: number[] }>;
|
||||
};
|
||||
const data = payload.data ?? [];
|
||||
return data.map((entry) => entry.embedding ?? []);
|
||||
};
|
||||
|
||||
return {
|
||||
provider: {
|
||||
id: "openai",
|
||||
model: client.model,
|
||||
embedQuery: async (text) => {
|
||||
const [vec] = await embed([text]);
|
||||
return vec ?? [];
|
||||
},
|
||||
embedBatch: embed,
|
||||
},
|
||||
client,
|
||||
};
|
||||
}
|
||||
|
||||
function extractGeminiEmbeddingValues(entry: unknown): number[] {
|
||||
if (!entry || typeof entry !== "object") return [];
|
||||
const record = entry as { values?: unknown; embedding?: { values?: unknown } };
|
||||
const values = record.values ?? record.embedding?.values;
|
||||
if (!Array.isArray(values)) return [];
|
||||
return values.filter((value): value is number => typeof value === "number");
|
||||
}
|
||||
|
||||
function parseGeminiEmbeddings(payload: unknown): number[][] {
|
||||
if (!payload || typeof payload !== "object") return [];
|
||||
const data = payload as { embedding?: unknown; embeddings?: unknown[] };
|
||||
if (Array.isArray(data.embeddings)) {
|
||||
return data.embeddings.map((entry) => extractGeminiEmbeddingValues(entry));
|
||||
function canAutoSelectLocal(options: EmbeddingProviderOptions): boolean {
|
||||
const modelPath = options.local?.modelPath?.trim();
|
||||
if (!modelPath) return false;
|
||||
if (/^(hf:|https?:)/i.test(modelPath)) return false;
|
||||
const resolved = resolveUserPath(modelPath);
|
||||
try {
|
||||
return fsSync.statSync(resolved).isFile();
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
if (data.embedding) {
|
||||
return [extractGeminiEmbeddingValues(data.embedding)];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
async function createGeminiEmbeddingProvider(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<{ provider: EmbeddingProvider; client: GeminiEmbeddingClient }> {
|
||||
const client = await resolveGeminiEmbeddingClient(options);
|
||||
const baseUrl = client.baseUrl.replace(/\/$/, "");
|
||||
const model = `models/${client.model}`;
|
||||
|
||||
const embedContent = async (input: string): Promise<number[]> => {
|
||||
const res = await fetch(`${baseUrl}/${model}:embedContent`, {
|
||||
method: "POST",
|
||||
headers: client.headers,
|
||||
body: JSON.stringify({
|
||||
model,
|
||||
content: { parts: [{ text: input }] },
|
||||
}),
|
||||
});
|
||||
if (!res.ok) {
|
||||
const text = await res.text();
|
||||
throw new Error(`gemini embeddings failed: ${res.status} ${text}`);
|
||||
}
|
||||
const payload = await res.json();
|
||||
const embeddings = parseGeminiEmbeddings(payload);
|
||||
return embeddings[0] ?? [];
|
||||
};
|
||||
|
||||
const embedBatch = async (input: string[]): Promise<number[][]> => {
|
||||
if (input.length === 0) return [];
|
||||
const res = await fetch(`${baseUrl}/${model}:batchEmbedContents`, {
|
||||
method: "POST",
|
||||
headers: client.headers,
|
||||
body: JSON.stringify({
|
||||
requests: input.map((text) => ({
|
||||
model,
|
||||
content: { parts: [{ text }] },
|
||||
})),
|
||||
}),
|
||||
});
|
||||
if (!res.ok) {
|
||||
const text = await res.text();
|
||||
throw new Error(`gemini embeddings failed: ${res.status} ${text}`);
|
||||
}
|
||||
const payload = await res.json();
|
||||
const embeddings = parseGeminiEmbeddings(payload);
|
||||
return embeddings;
|
||||
};
|
||||
|
||||
return {
|
||||
provider: {
|
||||
id: "gemini",
|
||||
model: client.model,
|
||||
embedQuery: embedContent,
|
||||
embedBatch,
|
||||
},
|
||||
client,
|
||||
};
|
||||
function isMissingApiKeyError(err: unknown): boolean {
|
||||
const message = formatError(err);
|
||||
return message.includes("No API key found for provider");
|
||||
}
|
||||
|
||||
async function resolveOpenAiEmbeddingClient(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<OpenAiEmbeddingClient> {
|
||||
const remote = options.remote;
|
||||
const remoteApiKey = remote?.apiKey?.trim();
|
||||
const remoteBaseUrl = remote?.baseUrl?.trim();
|
||||
|
||||
const { apiKey } = remoteApiKey
|
||||
? { apiKey: remoteApiKey }
|
||||
: await resolveApiKeyForProvider({
|
||||
provider: "openai",
|
||||
cfg: options.config,
|
||||
agentDir: options.agentDir,
|
||||
});
|
||||
|
||||
const providerConfig = options.config.models?.providers?.openai;
|
||||
const baseUrl = remoteBaseUrl || providerConfig?.baseUrl?.trim() || DEFAULT_OPENAI_BASE_URL;
|
||||
const headerOverrides = Object.assign({}, providerConfig?.headers, remote?.headers);
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json",
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
...headerOverrides,
|
||||
};
|
||||
const model = normalizeOpenAiModel(options.model);
|
||||
return { baseUrl, headers, model };
|
||||
}
|
||||
|
||||
async function resolveGeminiEmbeddingClient(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<GeminiEmbeddingClient> {
|
||||
const remote = options.remote;
|
||||
const remoteApiKey = remote?.apiKey?.trim();
|
||||
const remoteBaseUrl = remote?.baseUrl?.trim();
|
||||
|
||||
const { apiKey } = remoteApiKey
|
||||
? { apiKey: remoteApiKey }
|
||||
: await resolveApiKeyForProvider({
|
||||
provider: "google",
|
||||
cfg: options.config,
|
||||
agentDir: options.agentDir,
|
||||
});
|
||||
|
||||
const providerConfig = options.config.models?.providers?.google;
|
||||
const baseUrl = remoteBaseUrl || providerConfig?.baseUrl?.trim() || DEFAULT_GEMINI_BASE_URL;
|
||||
const headerOverrides = Object.assign({}, providerConfig?.headers, remote?.headers);
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json",
|
||||
"x-goog-api-key": apiKey,
|
||||
...headerOverrides,
|
||||
};
|
||||
const model = normalizeGeminiModel(options.model);
|
||||
return { baseUrl, headers, model };
|
||||
}
|
||||
|
||||
async function createLocalEmbeddingProvider(
|
||||
options: EmbeddingProviderOptions,
|
||||
@@ -289,35 +121,80 @@ export async function createEmbeddingProvider(
|
||||
options: EmbeddingProviderOptions,
|
||||
): Promise<EmbeddingProviderResult> {
|
||||
const requestedProvider = options.provider;
|
||||
if (options.provider === "gemini") {
|
||||
const { provider, client } = await createGeminiEmbeddingProvider(options);
|
||||
return { provider, requestedProvider, gemini: client };
|
||||
}
|
||||
if (options.provider === "local") {
|
||||
try {
|
||||
const fallback = options.fallback;
|
||||
|
||||
const createProvider = async (id: "openai" | "local" | "gemini") => {
|
||||
if (id === "local") {
|
||||
const provider = await createLocalEmbeddingProvider(options);
|
||||
return { provider, requestedProvider };
|
||||
} catch (err) {
|
||||
const reason = formatLocalSetupError(err);
|
||||
if (options.fallback === "openai") {
|
||||
try {
|
||||
const { provider, client } = await createOpenAiEmbeddingProvider(options);
|
||||
return {
|
||||
provider,
|
||||
requestedProvider,
|
||||
fallbackFrom: "local",
|
||||
fallbackReason: reason,
|
||||
openAi: client,
|
||||
};
|
||||
} catch (fallbackErr) {
|
||||
throw new Error(`${reason}\n\nFallback to OpenAI failed: ${formatError(fallbackErr)}`);
|
||||
}
|
||||
}
|
||||
throw new Error(reason);
|
||||
return { provider };
|
||||
}
|
||||
if (id === "gemini") {
|
||||
const { provider, client } = await createGeminiEmbeddingProvider(options);
|
||||
return { provider, gemini: client };
|
||||
}
|
||||
const { provider, client } = await createOpenAiEmbeddingProvider(options);
|
||||
return { provider, openAi: client };
|
||||
};
|
||||
|
||||
const formatPrimaryError = (err: unknown, provider: "openai" | "local" | "gemini") =>
|
||||
provider === "local" ? formatLocalSetupError(err) : formatError(err);
|
||||
|
||||
if (requestedProvider === "auto") {
|
||||
const missingKeyErrors: string[] = [];
|
||||
let localError: string | null = null;
|
||||
|
||||
if (canAutoSelectLocal(options)) {
|
||||
try {
|
||||
const local = await createProvider("local");
|
||||
return { ...local, requestedProvider };
|
||||
} catch (err) {
|
||||
localError = formatLocalSetupError(err);
|
||||
}
|
||||
}
|
||||
|
||||
for (const provider of ["openai", "gemini"] as const) {
|
||||
try {
|
||||
const result = await createProvider(provider);
|
||||
return { ...result, requestedProvider };
|
||||
} catch (err) {
|
||||
const message = formatPrimaryError(err, provider);
|
||||
if (isMissingApiKeyError(err)) {
|
||||
missingKeyErrors.push(message);
|
||||
continue;
|
||||
}
|
||||
throw new Error(message);
|
||||
}
|
||||
}
|
||||
|
||||
const details = [...missingKeyErrors, localError].filter(Boolean) as string[];
|
||||
if (details.length > 0) {
|
||||
throw new Error(details.join("\n\n"));
|
||||
}
|
||||
throw new Error("No embeddings provider available.");
|
||||
}
|
||||
|
||||
try {
|
||||
const primary = await createProvider(requestedProvider);
|
||||
return { ...primary, requestedProvider };
|
||||
} catch (primaryErr) {
|
||||
const reason = formatPrimaryError(primaryErr, requestedProvider);
|
||||
if (fallback && fallback !== "none" && fallback !== requestedProvider) {
|
||||
try {
|
||||
const fallbackResult = await createProvider(fallback);
|
||||
return {
|
||||
...fallbackResult,
|
||||
requestedProvider,
|
||||
fallbackFrom: requestedProvider,
|
||||
fallbackReason: reason,
|
||||
};
|
||||
} catch (fallbackErr) {
|
||||
throw new Error(
|
||||
`${reason}\n\nFallback to ${fallback} failed: ${formatError(fallbackErr)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
throw new Error(reason);
|
||||
}
|
||||
const { provider, client } = await createOpenAiEmbeddingProvider(options);
|
||||
return { provider, requestedProvider, openAi: client };
|
||||
}
|
||||
|
||||
function formatError(err: unknown): string {
|
||||