feat: add memory vector search
@@ -115,6 +115,23 @@ const FIELD_LABELS: Record<string, string> = {
   "gateway.reload.mode": "Config Reload Mode",
   "gateway.reload.debounceMs": "Config Reload Debounce (ms)",
   "agents.defaults.workspace": "Workspace",
+  "agents.defaults.memorySearch": "Memory Search",
+  "agents.defaults.memorySearch.enabled": "Enable Memory Search",
+  "agents.defaults.memorySearch.provider": "Memory Search Provider",
+  "agents.defaults.memorySearch.model": "Memory Search Model",
+  "agents.defaults.memorySearch.fallback": "Memory Search Fallback",
+  "agents.defaults.memorySearch.local.modelPath": "Local Embedding Model Path",
+  "agents.defaults.memorySearch.store.path": "Memory Search Index Path",
+  "agents.defaults.memorySearch.chunking.tokens": "Memory Chunk Tokens",
+  "agents.defaults.memorySearch.chunking.overlap":
+    "Memory Chunk Overlap Tokens",
+  "agents.defaults.memorySearch.sync.onSessionStart": "Index on Session Start",
+  "agents.defaults.memorySearch.sync.onSearch": "Index on Search (Lazy)",
+  "agents.defaults.memorySearch.sync.watch": "Watch Memory Files",
+  "agents.defaults.memorySearch.sync.watchDebounceMs":
+    "Memory Watch Debounce (ms)",
+  "agents.defaults.memorySearch.query.maxResults": "Memory Search Max Results",
+  "agents.defaults.memorySearch.query.minScore": "Memory Search Min Score",
   "auth.profiles": "Auth Profiles",
   "auth.order": "Auth Profile Order",
   "auth.cooldowns.billingBackoffHours": "Billing Backoff (hours)",
@@ -215,6 +232,20 @@ const FIELD_HELP: Record<string, string> = {
     "Failure window (hours) for backoff counters (default: 24).",
   "agents.defaults.models":
     "Configured model catalog (keys are full provider/model IDs).",
+  "agents.defaults.memorySearch":
+    "Vector search over MEMORY.md and memory/*.md (per-agent overrides supported).",
+  "agents.defaults.memorySearch.provider":
+    'Embedding provider ("openai" or "local").',
+  "agents.defaults.memorySearch.local.modelPath":
+    "Local GGUF model path or hf: URI (node-llama-cpp).",
+  "agents.defaults.memorySearch.fallback":
+    'Fallback to OpenAI when local embeddings fail ("openai" or "none").',
+  "agents.defaults.memorySearch.store.path":
+    "SQLite index path (default: ~/.clawdbot/memory/{agentId}.sqlite).",
+  "agents.defaults.memorySearch.sync.onSearch":
+    "Lazy sync: reindex on first search after a change.",
+  "agents.defaults.memorySearch.sync.watch":
+    "Watch memory files for changes (chokidar).",
   "plugins.enabled": "Enable plugin/extension loading (default: true).",
   "plugins.allow":
     "Optional allowlist of plugin ids; when set, only listed plugins load.",
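For orientation, a minimal sketch of how these two tables might be consumed by a settings UI. The trimmed-down maps below copy a few entries from the hunk above; the `describeField` helper is hypothetical and not part of this commit.

```ts
// Trimmed copies of entries from the diff; the real maps live in the
// FIELD_LABELS / FIELD_HELP records patched above.
const FIELD_LABELS: Record<string, string> = {
  "agents.defaults.memorySearch.provider": "Memory Search Provider",
  "agents.defaults.memorySearch.query.minScore": "Memory Search Min Score",
};

const FIELD_HELP: Record<string, string> = {
  "agents.defaults.memorySearch.provider":
    'Embedding provider ("openai" or "local").',
};

// Hypothetical helper: label plus optional help text, falling back to the
// raw dotted key when no label is registered.
function describeField(key: string): string {
  const label = FIELD_LABELS[key] ?? key;
  const help = FIELD_HELP[key];
  return help ? `${label}: ${help}` : label;
}

console.log(describeField("agents.defaults.memorySearch.provider"));
// Memory Search Provider: Embedding provider ("openai" or "local").
```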
@@ -996,6 +996,47 @@ export type AgentToolsConfig = {
   };
 };
 
+export type MemorySearchConfig = {
+  /** Enable vector memory search (default: true). */
+  enabled?: boolean;
+  /** Embedding provider mode. */
+  provider?: "openai" | "local";
+  /** Fallback behavior when local embeddings fail. */
+  fallback?: "openai" | "none";
+  /** Embedding model id (remote) or alias (local). */
+  model?: string;
+  /** Local embedding settings (node-llama-cpp). */
+  local?: {
+    /** GGUF model path or hf: URI. */
+    modelPath?: string;
+    /** Optional cache directory for local models. */
+    modelCacheDir?: string;
+  };
+  /** Index storage configuration. */
+  store?: {
+    driver?: "sqlite";
+    path?: string;
+  };
+  /** Chunking configuration. */
+  chunking?: {
+    tokens?: number;
+    overlap?: number;
+  };
+  /** Sync behavior. */
+  sync?: {
+    onSessionStart?: boolean;
+    onSearch?: boolean;
+    watch?: boolean;
+    watchDebounceMs?: number;
+    intervalMinutes?: number;
+  };
+  /** Query behavior. */
+  query?: {
+    maxResults?: number;
+    minScore?: number;
+  };
+};
+
 export type ToolsConfig = {
   allow?: string[];
   deny?: string[];
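To make the shape above concrete, here is a value that should type-check against `MemorySearchConfig`. It is illustrative only: the import path, model URI, and file paths are placeholders, and the defaults noted in comments come from the field help strings earlier in the diff.

```ts
import type { MemorySearchConfig } from "./config"; // assumed module path

// Illustrative local-embeddings setup with an OpenAI fallback.
const memorySearch: MemorySearchConfig = {
  enabled: true, // default is true per the field help
  provider: "local",
  fallback: "openai", // use OpenAI embeddings if the local model fails
  local: {
    modelPath: "hf:example/embedding-model-GGUF", // placeholder hf: URI
  },
  store: {
    driver: "sqlite",
    // default path per the help text: ~/.clawdbot/memory/{agentId}.sqlite
    path: "~/.clawdbot/memory/main.sqlite",
  },
  chunking: { tokens: 400, overlap: 40 },
  sync: {
    onSessionStart: true,
    onSearch: true, // lazy sync: reindex on first search after a change
    watch: true,
    watchDebounceMs: 500,
  },
  query: { maxResults: 8, minScore: 0.35 },
};
```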
@@ -1070,6 +1111,7 @@ export type AgentConfig = {
   workspace?: string;
   agentDir?: string;
   model?: string;
+  memorySearch?: MemorySearchConfig;
   /** Human-like delay between block replies for this agent. */
   humanDelay?: HumanDelayConfig;
   identity?: IdentityConfig;
@@ -1534,6 +1576,8 @@ export type AgentDefaultsConfig = {
   contextPruning?: AgentContextPruningConfig;
   /** Compaction tuning and pre-compaction memory flush behavior. */
   compaction?: AgentCompactionConfig;
+  /** Vector memory search configuration (per-agent overrides supported). */
+  memorySearch?: MemorySearchConfig;
   /** Default thinking level when no /think directive is present. */
   thinkingDefault?: "off" | "minimal" | "low" | "medium" | "high";
   /** Default verbose level when no /verbose directive is present. */
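Both `AgentDefaultsConfig` and per-agent `AgentConfig` entries now carry a `memorySearch` field, so an agent can override the fleet-wide defaults. A small sketch of that split; the values are placeholders and the exact merge semantics are not visible in this diff.

```ts
import type { MemorySearchConfig } from "./config"; // assumed module path

// Fleet-wide value, i.e. agents.defaults.memorySearch.
const defaultMemorySearch: MemorySearchConfig = {
  provider: "local",
  fallback: "openai",
};

// Per-agent value (AgentConfig.memorySearch). How deeply this merges with the
// defaults is an implementation detail not shown here.
const agentMemorySearch: MemorySearchConfig = {
  provider: "openai",
  model: "text-embedding-3-small", // placeholder embedding model id
  query: { maxResults: 12 },
};
```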
@@ -867,6 +867,48 @@ const AgentToolsSchema = z
   })
   .optional();
 
+const MemorySearchSchema = z
+  .object({
+    enabled: z.boolean().optional(),
+    provider: z.union([z.literal("openai"), z.literal("local")]).optional(),
+    fallback: z.union([z.literal("openai"), z.literal("none")]).optional(),
+    model: z.string().optional(),
+    local: z
+      .object({
+        modelPath: z.string().optional(),
+        modelCacheDir: z.string().optional(),
+      })
+      .optional(),
+    store: z
+      .object({
+        driver: z.literal("sqlite").optional(),
+        path: z.string().optional(),
+      })
+      .optional(),
+    chunking: z
+      .object({
+        tokens: z.number().int().positive().optional(),
+        overlap: z.number().int().nonnegative().optional(),
+      })
+      .optional(),
+    sync: z
+      .object({
+        onSessionStart: z.boolean().optional(),
+        onSearch: z.boolean().optional(),
+        watch: z.boolean().optional(),
+        watchDebounceMs: z.number().int().nonnegative().optional(),
+        intervalMinutes: z.number().int().nonnegative().optional(),
+      })
+      .optional(),
+    query: z
+      .object({
+        maxResults: z.number().int().positive().optional(),
+        minScore: z.number().min(0).max(1).optional(),
+      })
+      .optional(),
+  })
+  .optional();
+
 const AgentEntrySchema = z.object({
   id: z.string(),
   default: z.boolean().optional(),
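Because `MemorySearchSchema` ends in `.optional()`, it accepts `undefined` as well as a partial object. A quick sketch of validating input with zod; the schema is re-declared here (trimmed to a few fields) only so the snippet is self-contained.

```ts
import { z } from "zod";

// Trimmed re-declaration of MemorySearchSchema for a self-contained example.
const MemorySearchSchema = z
  .object({
    enabled: z.boolean().optional(),
    provider: z.union([z.literal("openai"), z.literal("local")]).optional(),
    query: z
      .object({
        maxResults: z.number().int().positive().optional(),
        minScore: z.number().min(0).max(1).optional(),
      })
      .optional(),
  })
  .optional();

const result = MemorySearchSchema.safeParse({
  provider: "local",
  query: { maxResults: 5, minScore: 0.4 },
});

if (result.success) {
  console.log(result.data?.provider); // "local"
} else {
  console.error(result.error.issues); // e.g. minScore outside [0, 1]
}
```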
@@ -874,6 +916,7 @@ const AgentEntrySchema = z.object({
   workspace: z.string().optional(),
   agentDir: z.string().optional(),
   model: z.string().optional(),
+  memorySearch: MemorySearchSchema,
   humanDelay: HumanDelaySchema.optional(),
   identity: IdentitySchema,
   groupChat: GroupChatSchema,
@@ -1098,6 +1141,7 @@ const AgentDefaultsSchema = z
     userTimezone: z.string().optional(),
     contextTokens: z.number().int().positive().optional(),
     cliBackends: z.record(z.string(), CliBackendSchema).optional(),
+    memorySearch: MemorySearchSchema,
     contextPruning: z
       .object({
         mode: z