feat: add OpenAI batch memory indexing
@@ -2,6 +2,11 @@

Docs: https://docs.clawd.bot

## 2026.1.17-3

### Changes

- Memory: add OpenAI Batch API indexing for embeddings when configured.

## 2026.1.17-2

### Changes
@@ -108,6 +108,11 @@ agents: {

If you don't want to set an API key, use `memorySearch.provider = "local"` or set `memorySearch.fallback = "none"`.
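
For instance, a fully offline setup might look like this (a minimal sketch using only the two keys named above):

```json5
// Hypothetical snippet: run embeddings locally and never fall back to OpenAI.
agents: {
  defaults: {
    memorySearch: {
      provider: "local",
      fallback: "none",
    },
  },
}
```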

Batch indexing (OpenAI only):
- Set `agents.defaults.memorySearch.remote.batch.enabled = true` to submit embeddings via the OpenAI Batch API.
- Default behavior waits for batch completion; tune `remote.batch.wait`, `remote.batch.pollIntervalMs`, and `remote.batch.timeoutMinutes` if needed.
- Batch mode currently applies only when `memorySearch.provider = "openai"` and uses your OpenAI API key; the sketch below shows the request flow it relies on.
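
Under the hood this is the standard OpenAI Batch API round trip: upload a JSONL file of `/v1/embeddings` requests, create a batch, poll it, then download the output file. The sketch below is illustrative rather than the exact implementation; the endpoints are OpenAI's public ones, and the poll interval mirrors the `remote.batch.pollIntervalMs` default.

```ts
// Illustrative Batch API round trip for embeddings (not the shipped code).
const headers = { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` };

// 1. Upload a JSONL file where each line is one /v1/embeddings request.
const jsonl = JSON.stringify({
  custom_id: "chunk-0",
  method: "POST",
  url: "/v1/embeddings",
  body: { model: "text-embedding-3-small", input: "hello" },
});
const form = new FormData();
form.append("purpose", "batch");
form.append("file", new Blob([jsonl]), "requests.jsonl");
const file = await (
  await fetch("https://api.openai.com/v1/files", { method: "POST", headers, body: form })
).json();

// 2. Create the batch against the embeddings endpoint.
const batch = await (
  await fetch("https://api.openai.com/v1/batches", {
    method: "POST",
    headers: { ...headers, "Content-Type": "application/json" },
    body: JSON.stringify({
      input_file_id: file.id,
      endpoint: "/v1/embeddings",
      completion_window: "24h",
    }),
  })
).json();

// 3. Poll until completion (cf. remote.batch.pollIntervalMs, default 5000ms).
let status = batch;
while (status.status !== "completed") {
  await new Promise((r) => setTimeout(r, 5000));
  status = await (
    await fetch(`https://api.openai.com/v1/batches/${batch.id}`, { headers })
  ).json();
}

// 4. Download the JSONL output: one embedding response per custom_id.
const output = await (
  await fetch(`https://api.openai.com/v1/files/${status.output_file_id}/content`, { headers })
).text();
```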

Config example:

```json5
@@ -117,6 +122,9 @@ agents: {
      provider: "openai",
      model: "text-embedding-3-small",
      fallback: "openai",
      remote: {
        batch: { enabled: true }
      },
      sync: { watch: true }
    }
  }

@@ -97,6 +97,12 @@ describe("memory search config", () => {
        baseUrl: "https://agent.example/v1",
        apiKey: "default-key",
        headers: { "X-Default": "on" },
        batch: {
          enabled: false,
          wait: true,
          pollIntervalMs: 5000,
          timeoutMinutes: 60,
        },
      });
    });

@@ -14,6 +14,12 @@ export type ResolvedMemorySearchConfig = {
    baseUrl?: string;
    apiKey?: string;
    headers?: Record<string, string>;
    batch?: {
      enabled: boolean;
      wait: boolean;
      pollIntervalMs: number;
      timeoutMinutes: number;
    };
  };
  experimental: {
    sessionMemory: boolean;
@@ -89,11 +95,24 @@ function mergeConfig(
    overrides?.experimental?.sessionMemory ?? defaults?.experimental?.sessionMemory ?? false;
  const provider = overrides?.provider ?? defaults?.provider ?? "openai";
  const hasRemote = Boolean(defaults?.remote || overrides?.remote);
  const batch = {
    enabled: overrides?.remote?.batch?.enabled ?? defaults?.remote?.batch?.enabled ?? false,
    wait: overrides?.remote?.batch?.wait ?? defaults?.remote?.batch?.wait ?? true,
    pollIntervalMs:
      overrides?.remote?.batch?.pollIntervalMs ?? defaults?.remote?.batch?.pollIntervalMs ?? 5000,
    timeoutMinutes:
      overrides?.remote?.batch?.timeoutMinutes ?? defaults?.remote?.batch?.timeoutMinutes ?? 60,
  };
  const remote = hasRemote
    ? {
        baseUrl: overrides?.remote?.baseUrl ?? defaults?.remote?.baseUrl,
        apiKey: overrides?.remote?.apiKey ?? defaults?.remote?.apiKey,
        headers: overrides?.remote?.headers ?? defaults?.remote?.headers,
        batch,
      }
    : undefined;
  const fallback = overrides?.fallback ?? defaults?.fallback ?? "openai";

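The net effect of these `??` chains is plain precedence: per-agent overrides win over global defaults, which win over the built-in batch defaults (`enabled: false`, `wait: true`, `pollIntervalMs: 5000`, `timeoutMinutes: 60`). A hypothetical illustration — not a real call, since `mergeConfig` is module-private:

```ts
type BatchOverride = {
  enabled?: boolean;
  wait?: boolean;
  pollIntervalMs?: number;
  timeoutMinutes?: number;
};

// Hypothetical inputs illustrating merge precedence.
const defaults: { remote?: { batch?: BatchOverride } } = {
  remote: { batch: { enabled: true, pollIntervalMs: 2000 } },
};
const overrides: { remote?: { batch?: BatchOverride } } = {
  remote: { batch: { pollIntervalMs: 10000 } },
};

// Resolved with the same ?? chains mergeConfig uses:
const batch = {
  enabled: overrides.remote?.batch?.enabled ?? defaults.remote?.batch?.enabled ?? false, // true
  wait: overrides.remote?.batch?.wait ?? defaults.remote?.batch?.wait ?? true, // true (built-in)
  pollIntervalMs:
    overrides.remote?.batch?.pollIntervalMs ?? defaults.remote?.batch?.pollIntervalMs ?? 5000, // 10000
  timeoutMinutes:
    overrides.remote?.batch?.timeoutMinutes ?? defaults.remote?.batch?.timeoutMinutes ?? 60, // 60
};
```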
@@ -366,6 +366,14 @@ const FIELD_HELP: Record<string, string> = {
  "agents.defaults.memorySearch.remote.apiKey": "Custom API key for the remote embedding provider.",
  "agents.defaults.memorySearch.remote.headers":
    "Extra headers for remote embeddings (merged; remote overrides OpenAI headers).",
  "agents.defaults.memorySearch.remote.batch.enabled":
    "Enable OpenAI Batch API for memory embeddings (default: false).",
  "agents.defaults.memorySearch.remote.batch.wait":
    "Wait for OpenAI batch completion when indexing (default: true).",
  "agents.defaults.memorySearch.remote.batch.pollIntervalMs":
    "Polling interval in ms for OpenAI batch status (default: 5000).",
  "agents.defaults.memorySearch.remote.batch.timeoutMinutes":
    "Timeout in minutes for OpenAI batch indexing (default: 60).",
  "agents.defaults.memorySearch.local.modelPath":
    "Local GGUF model path or hf: URI (node-llama-cpp).",
  "agents.defaults.memorySearch.fallback":

@@ -158,6 +158,16 @@ export type MemorySearchConfig = {
    baseUrl?: string;
    apiKey?: string;
    headers?: Record<string, string>;
    batch?: {
      /** Enable OpenAI Batch API for embedding indexing (default: false). */
      enabled?: boolean;
      /** Wait for batch completion (default: true). */
      wait?: boolean;
      /** Poll interval in ms (default: 5000). */
      pollIntervalMs?: number;
      /** Timeout in minutes (default: 60). */
      timeoutMinutes?: number;
    };
  };
  /** Fallback behavior when local embeddings fail. */
  fallback?: "openai" | "none";

@@ -206,6 +206,14 @@ export const MemorySearchSchema = z
      baseUrl: z.string().optional(),
      apiKey: z.string().optional(),
      headers: z.record(z.string(), z.string()).optional(),
      batch: z
        .object({
          enabled: z.boolean().optional(),
          wait: z.boolean().optional(),
          pollIntervalMs: z.number().int().nonnegative().optional(),
          timeoutMinutes: z.number().int().positive().optional(),
        })
        .optional(),
    })
    .optional(),
  fallback: z.union([z.literal("openai"), z.literal("none")]).optional(),

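Since every batch field is optional, configs that omit the whole object still validate. A small standalone sketch that restates the batch sub-schema from the hunk above to show what it accepts and rejects:

```ts
import { z } from "zod";

// Re-statement of the batch sub-schema above, for illustration only.
const BatchSchema = z
  .object({
    enabled: z.boolean().optional(),
    wait: z.boolean().optional(),
    pollIntervalMs: z.number().int().nonnegative().optional(),
    timeoutMinutes: z.number().int().positive().optional(),
  })
  .optional();

console.log(BatchSchema.safeParse({ enabled: true }).success); // true
console.log(BatchSchema.safeParse(undefined).success); // true (whole object optional)
console.log(BatchSchema.safeParse({ pollIntervalMs: -1 }).success); // false (nonnegative)
console.log(BatchSchema.safeParse({ timeoutMinutes: 0 }).success); // false (positive)
```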
@@ -15,6 +15,13 @@ export type EmbeddingProviderResult = {
  requestedProvider: "openai" | "local";
  fallbackFrom?: "local";
  fallbackReason?: string;
  openAi?: OpenAiEmbeddingClient;
};

export type OpenAiEmbeddingClient = {
  baseUrl: string;
  headers: Record<string, string>;
  model: string;
};

export type EmbeddingProviderOptions = {
@@ -46,7 +53,45 @@ function normalizeOpenAiModel(model: string): string {

async function createOpenAiEmbeddingProvider(
  options: EmbeddingProviderOptions,
-): Promise<EmbeddingProvider> {
): Promise<{ provider: EmbeddingProvider; client: OpenAiEmbeddingClient }> {
  const client = await resolveOpenAiEmbeddingClient(options);
  const url = `${client.baseUrl.replace(/\/$/, "")}/embeddings`;

  const embed = async (input: string[]): Promise<number[][]> => {
    if (input.length === 0) return [];
    const res = await fetch(url, {
      method: "POST",
      headers: client.headers,
      body: JSON.stringify({ model: client.model, input }),
    });
    if (!res.ok) {
      const text = await res.text();
      throw new Error(`openai embeddings failed: ${res.status} ${text}`);
    }
    const payload = (await res.json()) as {
      data?: Array<{ embedding?: number[] }>;
    };
    const data = payload.data ?? [];
    return data.map((entry) => entry.embedding ?? []);
  };

  return {
    provider: {
      id: "openai",
      model: client.model,
      embedQuery: async (text) => {
        const [vec] = await embed([text]);
        return vec ?? [];
      },
      embedBatch: embed,
    },
    client,
  };
}

async function resolveOpenAiEmbeddingClient(
  options: EmbeddingProviderOptions,
): Promise<OpenAiEmbeddingClient> {
  const remote = options.remote;
  const remoteApiKey = remote?.apiKey?.trim();
  const remoteBaseUrl = remote?.baseUrl?.trim();
@@ -61,7 +106,6 @@ async function createOpenAiEmbeddingProvider(

  const providerConfig = options.config.models?.providers?.openai;
  const baseUrl = remoteBaseUrl || providerConfig?.baseUrl?.trim() || DEFAULT_OPENAI_BASE_URL;
-  const url = `${baseUrl.replace(/\/$/, "")}/embeddings`;
  const headerOverrides = Object.assign({}, providerConfig?.headers, remote?.headers);
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
@@ -69,34 +113,7 @@ async function createOpenAiEmbeddingProvider(
    ...headerOverrides,
  };
  const model = normalizeOpenAiModel(options.model);

-  const embed = async (input: string[]): Promise<number[][]> => {
-    if (input.length === 0) return [];
-    const res = await fetch(url, {
-      method: "POST",
-      headers,
-      body: JSON.stringify({ model, input }),
-    });
-    if (!res.ok) {
-      const text = await res.text();
-      throw new Error(`openai embeddings failed: ${res.status} ${text}`);
-    }
-    const payload = (await res.json()) as {
-      data?: Array<{ embedding?: number[] }>;
-    };
-    const data = payload.data ?? [];
-    return data.map((entry) => entry.embedding ?? []);
-  };
-
-  return {
-    id: "openai",
-    model,
-    embedQuery: async (text) => {
-      const [vec] = await embed([text]);
-      return vec ?? [];
-    },
-    embedBatch: embed,
-  };
  return { baseUrl, headers, model };
}

async function createLocalEmbeddingProvider(
@@ -159,12 +176,13 @@ export async function createEmbeddingProvider(
      const reason = formatLocalSetupError(err);
      if (options.fallback === "openai") {
        try {
-          const provider = await createOpenAiEmbeddingProvider(options);
          const { provider, client } = await createOpenAiEmbeddingProvider(options);
          return {
            provider,
            requestedProvider,
            fallbackFrom: "local",
            fallbackReason: reason,
            openAi: client,
          };
        } catch (fallbackErr) {
          throw new Error(`${reason}\n\nFallback to OpenAI failed: ${formatError(fallbackErr)}`);
@@ -173,8 +191,8 @@ export async function createEmbeddingProvider(
      throw new Error(reason);
    }
  }
-  const provider = await createOpenAiEmbeddingProvider(options);
-  return { provider, requestedProvider };
  const { provider, client } = await createOpenAiEmbeddingProvider(options);
  return { provider, requestedProvider, openAi: client };
}

function formatError(err: unknown): string {
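With this refactor, `createEmbeddingProvider` surfaces the resolved OpenAI client alongside the provider, which is what lets the index manager reuse the same base URL, headers, and model for Batch API calls. A hedged consumption sketch (the options object is a placeholder; field names come from the diff above):

```ts
import {
  createEmbeddingProvider,
  type EmbeddingProviderOptions,
} from "./embeddings.js";

// Placeholder options; the real fields come from resolved memorySearch config.
const options = { /* model, remote, fallback, config, ... */ } as EmbeddingProviderOptions;

const result = await createEmbeddingProvider(options);

// Per-request embedding path, unchanged:
const vectors = await result.provider.embedBatch(["some chunk text"]);

// New: the resolved OpenAI client is exposed whenever OpenAI ends up serving
// embeddings (directly or via local fallback), enabling Batch API reuse.
if (result.openAi) {
  const { baseUrl, headers, model } = result.openAi;
  console.log(`batch-capable: ${model} @ ${baseUrl}`, Object.keys(headers));
}
```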
148 src/memory/manager.batch.test.ts Normal file
@@ -0,0 +1,148 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";

import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";

import { getMemorySearchManager, type MemoryIndexManager } from "./index.js";

const embedBatch = vi.fn(async () => []);
const embedQuery = vi.fn(async () => [0.5, 0.5, 0.5]);

vi.mock("./embeddings.js", () => ({
  createEmbeddingProvider: async () => ({
    requestedProvider: "openai",
    provider: {
      id: "openai",
      model: "text-embedding-3-small",
      embedQuery,
      embedBatch,
    },
    openAi: {
      baseUrl: "https://api.openai.com/v1",
      headers: { Authorization: "Bearer test", "Content-Type": "application/json" },
      model: "text-embedding-3-small",
    },
  }),
}));

describe("memory indexing with OpenAI batches", () => {
  let workspaceDir: string;
  let indexPath: string;
  let manager: MemoryIndexManager | null = null;

  beforeEach(async () => {
    embedBatch.mockClear();
    embedQuery.mockClear();
    workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-mem-batch-"));
    indexPath = path.join(workspaceDir, "index.sqlite");
    await fs.mkdir(path.join(workspaceDir, "memory"));
  });

  afterEach(async () => {
    vi.unstubAllGlobals();
    if (manager) {
      await manager.close();
      manager = null;
    }
    await fs.rm(workspaceDir, { recursive: true, force: true });
  });

it("uses OpenAI batch uploads when enabled", async () => {
|
||||
const content = ["hello", "from", "batch"].join("\n\n");
|
||||
await fs.writeFile(path.join(workspaceDir, "memory", "2026-01-07.md"), content);
|
||||
|
||||
let uploadedRequests: Array<{ custom_id?: string }> = [];
|
||||
const fetchMock = vi.fn(async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
const url =
|
||||
typeof input === "string"
|
||||
? input
|
||||
: input instanceof URL
|
||||
? input.toString()
|
||||
: input.url;
|
||||
if (url.endsWith("/files")) {
|
||||
const body = init?.body;
|
||||
if (!(body instanceof FormData)) {
|
||||
throw new Error("expected FormData upload");
|
||||
}
|
||||
for (const [key, value] of body.entries()) {
|
||||
if (key !== "file") continue;
|
||||
if (typeof value === "string") {
|
||||
uploadedRequests = value
|
||||
.split("\n")
|
||||
.filter(Boolean)
|
||||
.map((line) => JSON.parse(line) as { custom_id?: string });
|
||||
} else {
|
||||
const text = await value.text();
|
||||
uploadedRequests = text
|
||||
.split("\n")
|
||||
.filter(Boolean)
|
||||
.map((line) => JSON.parse(line) as { custom_id?: string });
|
||||
}
|
||||
}
|
||||
return new Response(JSON.stringify({ id: "file_1" }), {
|
||||
status: 200,
|
||||
headers: { "Content-Type": "application/json" },
|
||||
});
|
||||
}
|
||||
if (url.endsWith("/batches")) {
|
||||
return new Response(JSON.stringify({ id: "batch_1", status: "in_progress" }), {
|
||||
status: 200,
|
||||
headers: { "Content-Type": "application/json" },
|
||||
});
|
||||
}
|
||||
if (url.endsWith("/batches/batch_1")) {
|
||||
return new Response(
|
||||
JSON.stringify({ id: "batch_1", status: "completed", output_file_id: "file_out" }),
|
||||
{ status: 200, headers: { "Content-Type": "application/json" } },
|
||||
);
|
||||
}
|
||||
if (url.endsWith("/files/file_out/content")) {
|
||||
const lines = uploadedRequests.map((request, index) =>
|
||||
JSON.stringify({
|
||||
custom_id: request.custom_id,
|
||||
response: {
|
||||
status_code: 200,
|
||||
body: { data: [{ embedding: [index + 1, 0, 0], index: 0 }] },
|
||||
},
|
||||
}),
|
||||
);
|
||||
return new Response(lines.join("\n"), {
|
||||
status: 200,
|
||||
headers: { "Content-Type": "application/jsonl" },
|
||||
});
|
||||
}
|
||||
throw new Error(`unexpected fetch ${url}`);
|
||||
});
|
||||
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
    const cfg = {
      agents: {
        defaults: {
          workspace: workspaceDir,
          memorySearch: {
            provider: "openai",
            model: "text-embedding-3-small",
            store: { path: indexPath },
            sync: { watch: false, onSessionStart: false, onSearch: false },
            query: { minScore: 0 },
            remote: { batch: { enabled: true, wait: true } },
          },
        },
        list: [{ id: "main", default: true }],
      },
    };

    const result = await getMemorySearchManager({ cfg, agentId: "main" });
    expect(result.manager).not.toBeNull();
    if (!result.manager) throw new Error("manager missing");
    manager = result.manager;
    await manager.sync({ force: true });

    const status = manager.status();
    expect(status.chunks).toBeGreaterThan(0);
    expect(embedBatch).not.toHaveBeenCalled();
    expect(fetchMock).toHaveBeenCalled();
  });
});
@@ -24,7 +24,7 @@ vi.mock("./embeddings.js", () => {
      model: "mock-embed",
      embedQuery: async () => [0, 0, 0],
      embedBatch: async () => {
-        throw new Error("openai embeddings failed: 429 insufficient_quota");
        throw new Error("openai embeddings failed: 400 bad request");
      },
    },
  }),

@@ -16,6 +16,7 @@ import {
  createEmbeddingProvider,
  type EmbeddingProvider,
  type EmbeddingProviderResult,
  type OpenAiEmbeddingClient,
} from "./embeddings.js";
import {
  buildFileEntry,
@@ -73,6 +74,35 @@ type MemorySyncProgressState = {
  report: (update: MemorySyncProgressUpdate) => void;
};

type OpenAiBatchRequest = {
  custom_id: string;
  method: "POST";
  url: "/v1/embeddings";
  body: {
    model: string;
    input: string;
  };
};

type OpenAiBatchStatus = {
  id?: string;
  status?: string;
  output_file_id?: string | null;
  error_file_id?: string | null;
};

type OpenAiBatchOutputLine = {
  custom_id?: string;
  response?: {
    status_code?: number;
    body?: {
      data?: Array<{ embedding?: number[]; index?: number }>;
      error?: { message?: string };
    };
  };
  error?: { message?: string };
};

const META_KEY = "memory_index_meta_v1";
const SNIPPET_MAX_CHARS = 700;
const VECTOR_TABLE = "chunks_vec";
@@ -83,6 +113,9 @@ const EMBEDDING_INDEX_CONCURRENCY = 4;
const EMBEDDING_RETRY_MAX_ATTEMPTS = 3;
const EMBEDDING_RETRY_BASE_DELAY_MS = 500;
const EMBEDDING_RETRY_MAX_DELAY_MS = 8000;
const OPENAI_BATCH_ENDPOINT = "/v1/embeddings";
const OPENAI_BATCH_COMPLETION_WINDOW = "24h";
const OPENAI_BATCH_MAX_REQUESTS = 50000;

const log = createSubsystemLogger("memory");

@@ -100,6 +133,13 @@ export class MemoryIndexManager {
  private readonly provider: EmbeddingProvider;
  private readonly requestedProvider: "openai" | "local";
  private readonly fallbackReason?: string;
  private readonly openAi?: OpenAiEmbeddingClient;
  private readonly batch: {
    enabled: boolean;
    wait: boolean;
    pollIntervalMs: number;
    timeoutMs: number;
  };
  private readonly db: DatabaseSync;
  private readonly sources: Set<MemorySource>;
  private readonly vector: {
@@ -170,6 +210,7 @@ export class MemoryIndexManager {
    this.provider = params.providerResult.provider;
    this.requestedProvider = params.providerResult.requestedProvider;
    this.fallbackReason = params.providerResult.fallbackReason;
    this.openAi = params.providerResult.openAi;
    this.sources = new Set(params.settings.sources);
    this.db = this.openDatabase();
    this.ensureSchema();
@@ -189,6 +230,13 @@ export class MemoryIndexManager {
    if (this.sources.has("sessions")) {
      this.sessionsDirty = true;
    }
    const batch = params.settings.remote?.batch;
    this.batch = {
      enabled: Boolean(batch?.enabled && this.openAi && this.provider.id === "openai"),
      wait: batch?.wait ?? true,
      pollIntervalMs: batch?.pollIntervalMs ?? 5000,
      timeoutMs: (batch?.timeoutMinutes ?? 60) * 60 * 1000,
    };
  }

  async warmSession(sessionKey?: string): Promise<void> {
@@ -712,7 +760,7 @@ export class MemoryIndexManager {
          });
        }
      });
-    await this.runWithConcurrency(tasks, EMBEDDING_INDEX_CONCURRENCY);
    await this.runWithConcurrency(tasks, this.getIndexConcurrency());

    const staleRows = this.db
      .prepare(`SELECT path FROM files WHERE source = ?`)
@@ -784,7 +832,7 @@ export class MemoryIndexManager {
          });
        }
      });
-    await this.runWithConcurrency(tasks, EMBEDDING_INDEX_CONCURRENCY);
    await this.runWithConcurrency(tasks, this.getIndexConcurrency());

    const staleRows = this.db
      .prepare(`SELECT path FROM files WHERE source = ?`)
@@ -1035,6 +1083,271 @@ export class MemoryIndexManager {
    return embeddings;
  }

  private getOpenAiBaseUrl(): string {
    return this.openAi?.baseUrl?.replace(/\/$/, "") ?? "";
  }

  private getOpenAiHeaders(params: { json: boolean }): Record<string, string> {
    const headers = this.openAi?.headers ? { ...this.openAi.headers } : {};
    if (params.json) {
      if (!headers["Content-Type"] && !headers["content-type"]) {
        headers["Content-Type"] = "application/json";
      }
    } else {
      delete headers["Content-Type"];
      delete headers["content-type"];
    }
    return headers;
  }

  private buildOpenAiBatchRequests(
    chunks: MemoryChunk[],
    entry: MemoryFileEntry | SessionFileEntry,
    source: MemorySource,
  ): { requests: OpenAiBatchRequest[]; mapping: Map<string, number> } {
    const requests: OpenAiBatchRequest[] = [];
    const mapping = new Map<string, number>();
    for (let i = 0; i < chunks.length; i += 1) {
      const chunk = chunks[i];
      const customId = hashText(
        `${source}:${entry.path}:${chunk.startLine}:${chunk.endLine}:${chunk.hash}:${i}`,
      );
      mapping.set(customId, i);
      requests.push({
        custom_id: customId,
        method: "POST",
        url: OPENAI_BATCH_ENDPOINT,
        body: {
          model: this.openAi?.model ?? this.provider.model,
          input: chunk.text,
        },
      });
    }
    return { requests, mapping };
  }

  private splitOpenAiBatchRequests(requests: OpenAiBatchRequest[]): OpenAiBatchRequest[][] {
    if (requests.length <= OPENAI_BATCH_MAX_REQUESTS) return [requests];
    const groups: OpenAiBatchRequest[][] = [];
    for (let i = 0; i < requests.length; i += OPENAI_BATCH_MAX_REQUESTS) {
      groups.push(requests.slice(i, i + OPENAI_BATCH_MAX_REQUESTS));
    }
    return groups;
  }

  private async submitOpenAiBatch(requests: OpenAiBatchRequest[]): Promise<OpenAiBatchStatus> {
    if (!this.openAi) {
      throw new Error("OpenAI batch requested without an OpenAI embedding client.");
    }
    const baseUrl = this.getOpenAiBaseUrl();
    const jsonl = requests.map((request) => JSON.stringify(request)).join("\n");
    const form = new FormData();
    form.append("purpose", "batch");
    form.append(
      "file",
      new Blob([jsonl], { type: "application/jsonl" }),
      "memory-embeddings.jsonl",
    );

    const fileRes = await fetch(`${baseUrl}/files`, {
      method: "POST",
      headers: this.getOpenAiHeaders({ json: false }),
      body: form,
    });
    if (!fileRes.ok) {
      const text = await fileRes.text();
      throw new Error(`openai batch file upload failed: ${fileRes.status} ${text}`);
    }
    const filePayload = (await fileRes.json()) as { id?: string };
    if (!filePayload.id) {
      throw new Error("openai batch file upload failed: missing file id");
    }

    const batchRes = await fetch(`${baseUrl}/batches`, {
      method: "POST",
      headers: this.getOpenAiHeaders({ json: true }),
      body: JSON.stringify({
        input_file_id: filePayload.id,
        endpoint: OPENAI_BATCH_ENDPOINT,
        completion_window: OPENAI_BATCH_COMPLETION_WINDOW,
        metadata: {
          source: "clawdbot-memory",
          agent: this.agentId,
        },
      }),
    });
    if (!batchRes.ok) {
      const text = await batchRes.text();
      throw new Error(`openai batch create failed: ${batchRes.status} ${text}`);
    }
    return (await batchRes.json()) as OpenAiBatchStatus;
  }

  private async fetchOpenAiBatchStatus(batchId: string): Promise<OpenAiBatchStatus> {
    const baseUrl = this.getOpenAiBaseUrl();
    const res = await fetch(`${baseUrl}/batches/${batchId}`, {
      headers: this.getOpenAiHeaders({ json: true }),
    });
    if (!res.ok) {
      const text = await res.text();
      throw new Error(`openai batch status failed: ${res.status} ${text}`);
    }
    return (await res.json()) as OpenAiBatchStatus;
  }

  private async fetchOpenAiFileContent(fileId: string): Promise<string> {
    const baseUrl = this.getOpenAiBaseUrl();
    const res = await fetch(`${baseUrl}/files/${fileId}/content`, {
      headers: this.getOpenAiHeaders({ json: true }),
    });
    if (!res.ok) {
      const text = await res.text();
      throw new Error(`openai batch file content failed: ${res.status} ${text}`);
    }
    return await res.text();
  }

  private parseOpenAiBatchOutput(text: string): OpenAiBatchOutputLine[] {
    if (!text.trim()) return [];
    return text
      .split("\n")
      .map((line) => line.trim())
      .filter(Boolean)
      .map((line) => JSON.parse(line) as OpenAiBatchOutputLine);
  }

  private async readOpenAiBatchError(errorFileId: string): Promise<string | undefined> {
    try {
      const content = await this.fetchOpenAiFileContent(errorFileId);
      const lines = this.parseOpenAiBatchOutput(content);
      const first = lines.find((line) => line.error?.message || line.response?.body?.error);
      const message =
        first?.error?.message ??
        (typeof first?.response?.body?.error?.message === "string"
          ? first?.response?.body?.error?.message
          : undefined);
      return message;
    } catch (err) {
      const message = err instanceof Error ? err.message : String(err);
      return message ? `error file unavailable: ${message}` : undefined;
    }
  }

  private async waitForOpenAiBatch(
    batchId: string,
    initial?: OpenAiBatchStatus,
  ): Promise<{ outputFileId: string; errorFileId?: string }> {
    const start = Date.now();
    let current: OpenAiBatchStatus | undefined = initial;
    while (true) {
      const status = current ?? (await this.fetchOpenAiBatchStatus(batchId));
      const state = status.status ?? "unknown";
      if (state === "completed") {
        if (!status.output_file_id) {
          throw new Error(`openai batch ${batchId} completed without output file`);
        }
        return {
          outputFileId: status.output_file_id,
          errorFileId: status.error_file_id ?? undefined,
        };
      }
      if (["failed", "expired", "cancelled", "canceled"].includes(state)) {
        const detail = status.error_file_id
          ? await this.readOpenAiBatchError(status.error_file_id)
          : undefined;
        const suffix = detail ? `: ${detail}` : "";
        throw new Error(`openai batch ${batchId} ${state}${suffix}`);
      }
      if (!this.batch.wait) {
        throw new Error(`openai batch ${batchId} still ${state}; wait disabled`);
      }
      if (Date.now() - start > this.batch.timeoutMs) {
        throw new Error(`openai batch ${batchId} timed out after ${this.batch.timeoutMs}ms`);
      }
      await new Promise((resolve) => setTimeout(resolve, this.batch.pollIntervalMs));
      current = undefined;
    }
  }

  private async embedChunksWithBatch(
    chunks: MemoryChunk[],
    entry: MemoryFileEntry | SessionFileEntry,
    source: MemorySource,
  ): Promise<number[][]> {
    if (!this.openAi) {
      return this.embedChunksInBatches(chunks);
    }
    if (chunks.length === 0) return [];

    const { requests, mapping } = this.buildOpenAiBatchRequests(chunks, entry, source);
    const groups = this.splitOpenAiBatchRequests(requests);
    const embeddings: number[][] = Array.from({ length: chunks.length }, () => []);

    for (const group of groups) {
      const batchInfo = await this.submitOpenAiBatch(group);
      if (!batchInfo.id) {
        throw new Error("openai batch create failed: missing batch id");
      }
      if (!this.batch.wait && batchInfo.status !== "completed") {
        throw new Error(
          `openai batch ${batchInfo.id} submitted; enable remote.batch.wait to await completion`,
        );
      }
      const completed =
        batchInfo.status === "completed"
          ? {
              outputFileId: batchInfo.output_file_id ?? "",
              errorFileId: batchInfo.error_file_id ?? undefined,
            }
          : await this.waitForOpenAiBatch(batchInfo.id, batchInfo);
      if (!completed.outputFileId) {
        throw new Error(`openai batch ${batchInfo.id} completed without output file`);
      }
      const content = await this.fetchOpenAiFileContent(completed.outputFileId);
      const outputLines = this.parseOpenAiBatchOutput(content);
      const errors: string[] = [];
      const remaining = new Set(group.map((request) => request.custom_id));
      for (const line of outputLines) {
        const customId = line.custom_id;
        if (!customId) continue;
        const index = mapping.get(customId);
        if (index === undefined) continue;
        remaining.delete(customId);
        if (line.error?.message) {
          errors.push(`${customId}: ${line.error.message}`);
          continue;
        }
        const response = line.response;
        const statusCode = response?.status_code ?? 0;
        if (statusCode >= 400) {
          const message =
            response?.body?.error?.message ??
            (typeof response?.body === "string" ? response.body : undefined) ??
            "unknown error";
          errors.push(`${customId}: ${message}`);
          continue;
        }
        const data = response?.body?.data ?? [];
        const embedding = data[0]?.embedding ?? [];
        if (embedding.length === 0) {
          errors.push(`${customId}: empty embedding`);
          continue;
        }
        embeddings[index] = embedding;
      }
      if (errors.length > 0) {
        throw new Error(`openai batch ${batchInfo.id} failed: ${errors.join("; ")}`);
      }
      if (remaining.size > 0) {
        throw new Error(
          `openai batch ${batchInfo.id} missing ${remaining.size} embedding responses`,
        );
      }
    }

    return embeddings;
  }

  private async embedBatchWithRetry(texts: string[]): Promise<number[][]> {
    if (texts.length === 0) return [];
    let attempt = 0;
@@ -1068,7 +1381,7 @@ export class MemoryIndexManager {
    const resolvedLimit = Math.max(1, Math.min(limit, tasks.length));
    const results: T[] = Array.from({ length: tasks.length });
    let next = 0;
-    let firstError: unknown | null = null;
    let firstError: unknown = null;

    const workers = Array.from({ length: resolvedLimit }, async () => {
      while (true) {
@@ -1090,6 +1403,10 @@ export class MemoryIndexManager {
    return results;
  }

  private getIndexConcurrency(): number {
    return this.batch.enabled ? 1 : EMBEDDING_INDEX_CONCURRENCY;
  }

  private async indexFile(
    entry: MemoryFileEntry | SessionFileEntry,
    options: { source: MemorySource; content?: string },
@@ -1098,7 +1415,9 @@ export class MemoryIndexManager {
    const chunks = chunkMarkdown(content, this.settings.chunking).filter(
      (chunk) => chunk.text.trim().length > 0,
    );
-    const embeddings = await this.embedChunksInBatches(chunks);
    const embeddings = this.batch.enabled
      ? await this.embedChunksWithBatch(chunks, entry, options.source)
      : await this.embedChunksInBatches(chunks);
    const sample = embeddings.find((embedding) => embedding.length > 0);
    const vectorReady = sample ? await this.ensureVectorReady(sample.length) : false;
    const now = Date.now();