From 8b4bdaa8a473e6e14cab866a916a407e86ab861a Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Mon, 12 Jan 2026 03:42:49 +0000 Subject: [PATCH] feat: add apply_patch tool (exec-gated) --- CHANGELOG.md | 1 + docs/concepts/agent.md | 5 +- docs/concepts/multi-agent.md | 2 +- docs/docs.json | 1 + docs/gateway/configuration-examples.md | 2 +- docs/gateway/configuration.md | 13 +- docs/gateway/sandboxing.md | 4 +- docs/gateway/security.md | 8 +- docs/install/docker.md | 4 +- docs/multi-agent-sandbox-tools.md | 14 +- docs/tools/apply-patch.md | 49 ++ docs/tools/exec.md | 20 + docs/tools/index.md | 4 + src/agents/apply-patch.test.ts | 74 +++ src/agents/apply-patch.ts | 689 +++++++++++++++++++++++ src/agents/pi-embedded-runner.ts | 2 + src/agents/pi-tools-agent-config.test.ts | 31 + src/agents/pi-tools.test.ts | 54 +- src/agents/pi-tools.ts | 88 ++- src/agents/sandbox.ts | 1 + src/agents/system-prompt.ts | 3 + src/agents/tool-display.json | 5 + src/config/schema.ts | 6 + src/config/types.ts | 10 + src/config/zod-schema.ts | 6 + 25 files changed, 1055 insertions(+), 41 deletions(-) create mode 100644 docs/tools/apply-patch.md create mode 100644 src/agents/apply-patch.test.ts create mode 100644 src/agents/apply-patch.ts diff --git a/CHANGELOG.md b/CHANGELOG.md index f1a6b23ef..6e9361047 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - Docs: add beginner-friendly plugin quick start + expand Voice Call plugin docs. - Tests: add Docker plugin loader + tgz-install smoke test. - Tests: extend Docker plugin E2E to cover installing from local folders (`plugins.load.paths`) and `file:` npm specs. +- Agents/Tools: add `apply_patch` tool for multi-file edits (experimental; gated by tools.exec.applyPatch; OpenAI-only). - Agents/Tools: rename the bash tool to exec (config alias maintained). (#748) — thanks @myfunc. - Config: add `$include` directive for modular config files. (#731) — thanks @pasogott. - Build: set pnpm minimum release age to 2880 minutes (2 days). (#718) — thanks @dan-dr. diff --git a/docs/concepts/agent.md b/docs/concepts/agent.md index 9a0db312a..4b08d6e89 100644 --- a/docs/concepts/agent.md +++ b/docs/concepts/agent.md @@ -45,7 +45,10 @@ To disable bootstrap file creation entirely (for pre-seeded workspaces), set: ## Built-in tools -Core tools (read/exec/edit/write and related system tools) are always available. `TOOLS.md` does **not** control which tools exist; it’s guidance for how *you* want them used. +Core tools (read/exec/edit/write and related system tools) are always available, +subject to tool policy. `apply_patch` is optional and gated by +`tools.exec.applyPatch`. `TOOLS.md` does **not** control which tools exist; it’s +guidance for how *you* want them used. 
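As a quick illustration, a minimal config sketch (assuming the usual `~/.clawdbot/clawdbot.json`) that opts in to the optional tool:

```json
{
  "tools": {
    "exec": {
      "applyPatch": { "enabled": true }
    }
  }
}
```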
## Skills diff --git a/docs/concepts/multi-agent.md b/docs/concepts/multi-agent.md index 2fed872f8..e1ec74e64 100644 --- a/docs/concepts/multi-agent.md +++ b/docs/concepts/multi-agent.md @@ -217,7 +217,7 @@ Starting with v2026.1.6, each agent can have its own sandbox and tool restrictio }, tools: { allow: ["read"], // Only read tool - deny: ["exec", "write", "edit"], // Deny others + deny: ["exec", "write", "edit", "apply_patch"], // Deny others }, }, ], diff --git a/docs/docs.json b/docs/docs.json index 5f963b54e..fba1f321e 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -701,6 +701,7 @@ "tools", "plugin", "tools/exec", + "tools/apply-patch", "tools/elevated", "tools/browser", "tools/browser-linux-troubleshooting", diff --git a/docs/gateway/configuration-examples.md b/docs/gateway/configuration-examples.md index 54cdcfac6..e86e3a41d 100644 --- a/docs/gateway/configuration-examples.md +++ b/docs/gateway/configuration-examples.md @@ -259,7 +259,7 @@ Save to `~/.clawdbot/clawdbot.json` and you can DM the bot from that number. }, tools: { - allow: ["exec", "process", "read", "write", "edit"], + allow: ["exec", "process", "read", "write", "edit", "apply_patch"], deny: ["browser", "canvas"], exec: { backgroundMs: 10000, diff --git a/docs/gateway/configuration.md b/docs/gateway/configuration.md index a83cc6353..17781dd03 100644 --- a/docs/gateway/configuration.md +++ b/docs/gateway/configuration.md @@ -638,7 +638,7 @@ Read-only tools + read-only workspace: }, tools: { allow: ["read", "sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status"], - deny: ["write", "edit", "exec", "process", "browser"] + deny: ["write", "edit", "apply_patch", "exec", "process", "browser"] } } ] @@ -661,7 +661,7 @@ No filesystem access (messaging/session tools enabled): }, tools: { allow: ["sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status", "whatsapp", "telegram", "slack", "discord", "gateway"], - deny: ["read", "write", "edit", "exec", "process", "browser", "canvas", "nodes", "cron", "gateway", "image"] + deny: ["read", "write", "edit", "apply_patch", "exec", "process", "browser", "canvas", "nodes", "cron", "gateway", "image"] } } ] @@ -1431,6 +1431,9 @@ of `every`, keep `HEARTBEAT.md` tiny, and/or choose a cheaper `model`. - `backgroundMs`: time before auto-background (ms, default 10000) - `timeoutSec`: auto-kill after this runtime (seconds, default 1800) - `cleanupMs`: how long to keep finished sessions in memory (ms, default 1800000) +- `applyPatch.enabled`: enable experimental `apply_patch` (OpenAI/OpenAI Codex only; default false) +- `applyPatch.allowModels`: optional allowlist of model ids (e.g. `gpt-5.2` or `openai/gpt-5.2`) +Note: `applyPatch` is only under `tools.exec` (no `tools.bash` alias). Legacy: `tools.bash` is still accepted as an alias. 
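For reference, a sketch combining these exec settings. The numeric values shown are the documented defaults; `applyPatch` is off unless explicitly enabled, and `allowModels` is optional:

```json5
{
  tools: {
    exec: {
      backgroundMs: 10000,   // auto-background after 10s
      timeoutSec: 1800,      // auto-kill after 30 minutes
      cleanupMs: 1800000,    // keep finished sessions for 30 minutes
      applyPatch: {
        enabled: true,             // experimental; default false
        allowModels: ["gpt-5.2"]   // optional; omit to allow any OpenAI model
      }
    }
  }
}
```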
`agents.defaults.subagents` configures sub-agent defaults: @@ -1511,10 +1514,10 @@ Defaults (if enabled): - Debian bookworm-slim based image - agent workspace access: `workspaceAccess: "none"` (default) - `"none"`: use a per-scope sandbox workspace under `~/.clawdbot/sandboxes` - - `"ro"`: keep the sandbox workspace at `/workspace`, and mount the agent workspace read-only at `/agent` (disables `write`/`edit`) +- `"ro"`: keep the sandbox workspace at `/workspace`, and mount the agent workspace read-only at `/agent` (disables `write`/`edit`/`apply_patch`) - `"rw"`: mount the agent workspace read/write at `/workspace` - auto-prune: idle > 24h OR age > 7d -- tool policy: allow only `exec`, `process`, `read`, `write`, `edit`, `sessions_list`, `sessions_history`, `sessions_send`, `sessions_spawn`, `session_status` (deny wins) +- tool policy: allow only `exec`, `process`, `read`, `write`, `edit`, `apply_patch`, `sessions_list`, `sessions_history`, `sessions_send`, `sessions_spawn`, `session_status` (deny wins) - configure via `tools.sandbox.tools`, override per-agent via `agents.list[].tools.sandbox.tools` - optional sandboxed browser (Chromium + CDP, noVNC observer) - hardening knobs: `network`, `user`, `pidsLimit`, `memory`, `cpus`, `ulimits`, `seccompProfile`, `apparmorProfile` @@ -1585,7 +1588,7 @@ Legacy: `perSession` is still supported (`true` → `scope: "session"`, tools: { sandbox: { tools: { - allow: ["exec", "process", "read", "write", "edit", "sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status"], + allow: ["exec", "process", "read", "write", "edit", "apply_patch", "sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status"], deny: ["browser", "canvas", "nodes", "cron", "discord", "gateway"] } } diff --git a/docs/gateway/sandboxing.md b/docs/gateway/sandboxing.md index 94f7aaa83..8aec4a747 100644 --- a/docs/gateway/sandboxing.md +++ b/docs/gateway/sandboxing.md @@ -17,7 +17,7 @@ This is not a perfect security boundary, but it materially limits filesystem and process access when the model does something dumb. ## What gets sandboxed -- Tool execution (`exec`, `read`, `write`, `edit`, `process`, etc.). +- Tool execution (`exec`, `read`, `write`, `edit`, `apply_patch`, `process`, etc.). - Optional sandboxed browser (`agents.defaults.sandbox.browser`). - By default, the sandbox browser auto-starts (ensures CDP is reachable) when the browser tool needs it. Configure via `agents.defaults.sandbox.browser.autoStart` and `agents.defaults.sandbox.browser.autoStartTimeoutMs`. @@ -47,7 +47,7 @@ Group/channel sessions use their own keys, so they count as non-main and will be ## Workspace access `agents.defaults.sandbox.workspaceAccess` controls **what the sandbox can see**: - `"none"` (default): tools see a sandbox workspace under `~/.clawdbot/sandboxes`. -- `"ro"`: mounts the agent workspace read-only at `/agent` (disables `write`/`edit`). +- `"ro"`: mounts the agent workspace read-only at `/agent` (disables `write`/`edit`/`apply_patch`). - `"rw"`: mounts the agent workspace read/write at `/workspace`. Inbound media is copied into the active sandbox workspace (`media/inbound/*`). 
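A minimal sketch of the three workspace-access modes (only `workspaceAccess` shown; other sandbox settings omitted):

```json5
{
  agents: {
    defaults: {
      sandbox: {
        // "none" (default): per-scope sandbox workspace under ~/.clawdbot/sandboxes
        // "ro": agent workspace mounted read-only at /agent (write/edit/apply_patch disabled)
        // "rw": agent workspace mounted read/write at /workspace
        workspaceAccess: "ro"
      }
    }
  }
}
```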
diff --git a/docs/gateway/security.md b/docs/gateway/security.md index 0ba634dff..a32899afc 100644 --- a/docs/gateway/security.md +++ b/docs/gateway/security.md @@ -184,7 +184,7 @@ Consider running your AI on a separate phone number from your personal one: You can already build a read-only profile by combining: - `agents.defaults.sandbox.workspaceAccess: "ro"` (or `"none"` for no workspace access) -- tool allow/deny lists that block `write`, `edit`, `exec`, `process`, etc. +- tool allow/deny lists that block `write`, `edit`, `apply_patch`, `exec`, `process`, etc. We may add a single `readOnlyMode` flag later to simplify this configuration. @@ -203,7 +203,7 @@ single container/workspace. Also consider agent workspace access inside the sandbox: - `agents.defaults.sandbox.workspaceAccess: "none"` (default) keeps the agent workspace off-limits; tools run against a sandbox workspace under `~/.clawdbot/sandboxes` -- `agents.defaults.sandbox.workspaceAccess: "ro"` mounts the agent workspace read-only at `/agent` (disables `write`/`edit`) +- `agents.defaults.sandbox.workspaceAccess: "ro"` mounts the agent workspace read-only at `/agent` (disables `write`/`edit`/`apply_patch`) - `agents.defaults.sandbox.workspaceAccess: "rw"` mounts the agent workspace read/write at `/workspace` Important: `tools.elevated` is the global baseline escape hatch that runs exec on the host. Keep `tools.elevated.allowFrom` tight and don’t enable it for strangers. You can further restrict elevated per agent via `agents.list[].tools.elevated`. See [Elevated Mode](/tools/elevated). @@ -261,7 +261,7 @@ Common use cases: }, tools: { allow: ["read"], - deny: ["write", "edit", "exec", "process", "browser"] + deny: ["write", "edit", "apply_patch", "exec", "process", "browser"] } } ] @@ -285,7 +285,7 @@ Common use cases: }, tools: { allow: ["sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status", "whatsapp", "telegram", "slack", "discord", "gateway"], - deny: ["read", "write", "edit", "exec", "process", "browser", "canvas", "nodes", "cron", "gateway", "image"] + deny: ["read", "write", "edit", "apply_patch", "exec", "process", "browser", "canvas", "nodes", "cron", "gateway", "image"] } } ] diff --git a/docs/install/docker.md b/docs/install/docker.md index 73e5dcbfc..fe9736710 100644 --- a/docs/install/docker.md +++ b/docs/install/docker.md @@ -246,7 +246,7 @@ precedence, and troubleshooting. - Image: `clawdbot-sandbox:bookworm-slim` - One container per agent - Agent workspace access: `workspaceAccess: "none"` (default) uses `~/.clawdbot/sandboxes` - - `"ro"` keeps the sandbox workspace at `/workspace` and mounts the agent workspace read-only at `/agent` (disables `write`/`edit`) + - `"ro"` keeps the sandbox workspace at `/workspace` and mounts the agent workspace read-only at `/agent` (disables `write`/`edit`/`apply_patch`) - `"rw"` mounts the agent workspace read/write at `/workspace` - Auto-prune: idle > 24h OR age > 7d - Network: `none` by default (explicitly opt-in if you need egress) @@ -424,7 +424,7 @@ Example: ### Security notes -- Hard wall only applies to **tools** (exec/read/write/edit). +- Hard wall only applies to **tools** (exec/read/write/edit/apply_patch). - Host-only tools like browser/camera/canvas are blocked by default. - Allowing `browser` in sandbox **breaks isolation** (browser runs on host). 
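To keep host-only tools out of sandboxed sessions explicitly, a sketch of the sandbox tool policy (tool names taken from the note above; deny always wins over allow):

```json5
{
  tools: {
    sandbox: {
      tools: {
        deny: ["browser", "camera", "canvas"]   // host-only tools; allowing browser breaks isolation
      }
    }
  }
}
```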
diff --git a/docs/multi-agent-sandbox-tools.md b/docs/multi-agent-sandbox-tools.md index fcf92f75d..e11c9a07b 100644 --- a/docs/multi-agent-sandbox-tools.md +++ b/docs/multi-agent-sandbox-tools.md @@ -48,7 +48,7 @@ For debugging “why is this blocked?”, see [Sandbox vs Tool Policy vs Elevate }, "tools": { "allow": ["read"], - "deny": ["exec", "write", "edit", "process", "browser"] + "deny": ["exec", "write", "edit", "apply_patch", "process", "browser"] } } ] @@ -95,7 +95,7 @@ For debugging “why is this blocked?”, see [Sandbox vs Tool Policy vs Elevate "workspaceRoot": "/tmp/work-sandboxes" }, "tools": { - "allow": ["read", "write", "exec"], + "allow": ["read", "write", "apply_patch", "exec"], "deny": ["browser", "gateway", "discord"] } } @@ -134,7 +134,7 @@ For debugging “why is this blocked?”, see [Sandbox vs Tool Policy vs Elevate }, "tools": { "allow": ["read"], - "deny": ["exec", "write", "edit"] + "deny": ["exec", "write", "edit", "apply_patch"] } } ] @@ -200,7 +200,7 @@ Mitigation patterns: "tools": { "sandbox": { "tools": { - "allow": ["read", "write", "exec"], + "allow": ["read", "write", "apply_patch", "exec"], "deny": [] } } @@ -235,7 +235,7 @@ Legacy `agent.*` configs are migrated by `clawdbot doctor`; prefer `agents.defau { "tools": { "allow": ["read"], - "deny": ["exec", "write", "edit", "process"] + "deny": ["exec", "write", "edit", "apply_patch", "process"] } } ``` @@ -245,7 +245,7 @@ Legacy `agent.*` configs are migrated by `clawdbot doctor`; prefer `agents.defau { "tools": { "allow": ["read", "exec", "process"], - "deny": ["write", "edit", "browser", "gateway"] + "deny": ["write", "edit", "apply_patch", "browser", "gateway"] } } ``` @@ -255,7 +255,7 @@ Legacy `agent.*` configs are migrated by `clawdbot doctor`; prefer `agents.defau { "tools": { "allow": ["sessions_list", "sessions_send", "sessions_history", "session_status"], - "deny": ["exec", "write", "edit", "read", "browser"] + "deny": ["exec", "write", "edit", "apply_patch", "read", "browser"] } } ``` diff --git a/docs/tools/apply-patch.md b/docs/tools/apply-patch.md new file mode 100644 index 000000000..45e53d070 --- /dev/null +++ b/docs/tools/apply-patch.md @@ -0,0 +1,49 @@ +--- +summary: "Apply multi-file patches with the apply_patch tool" +read_when: + - You need structured file edits across multiple files + - You want to document or debug patch-based edits +--- + +# apply_patch tool + +Apply file changes using a structured patch format. This is ideal for multi-file +or multi-hunk edits where a single `edit` call would be brittle. + +The tool accepts a single `input` string that wraps one or more file operations: + +``` +*** Begin Patch +*** Add File: path/to/file.txt ++line 1 ++line 2 +*** Update File: src/app.ts +@@ +-old line ++new line +*** Delete File: obsolete.txt +*** End Patch +``` + +## Parameters + +- `input` (required): Full patch contents including `*** Begin Patch` and `*** End Patch`. + +## Notes + +- Paths are resolved relative to the workspace root. +- Use `*** Move to:` within an `*** Update File:` hunk to rename files. +- `*** End of File` marks an EOF-only insert when needed. +- Experimental and disabled by default. Enable with `tools.exec.applyPatch.enabled`. +- OpenAI-only (including OpenAI Codex). Optionally gate by model via + `tools.exec.applyPatch.allowModels`. +- Config is only under `tools.exec` (no `tools.bash` alias). 
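A slightly larger sketch (file paths and contents are illustrative) showing a rename plus an end-of-file insert in a single patch:

```
*** Begin Patch
*** Update File: src/old-name.ts
*** Move to: src/new-name.ts
@@
-export const version = "1.0.0";
+export const version = "1.1.0";
@@
+// appended as a new last line
*** End of File
*** End Patch
```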
+ +## Example + +```json +{ + "tool": "apply_patch", + "input": "*** Begin Patch\n*** Update File: src/index.ts\n@@\n-const foo = 1\n+const foo = 2\n*** End Patch" +} +``` diff --git a/docs/tools/exec.md b/docs/tools/exec.md index 0b0d8e776..2ef030b95 100644 --- a/docs/tools/exec.md +++ b/docs/tools/exec.md @@ -33,3 +33,23 @@ Background + poll: {"tool":"exec","command":"npm run build","yieldMs":1000} {"tool":"process","action":"poll","sessionId":""} ``` + +## apply_patch (experimental) + +`apply_patch` is a subtool of `exec` for structured multi-file edits. +Enable it explicitly: + +```json5 +{ + tools: { + exec: { + applyPatch: { enabled: true, allowModels: ["gpt-5.2"] } + } + } +} +``` + +Notes: +- Only available for OpenAI/OpenAI Codex models. +- Tool policy still applies; `allow: ["exec"]` implicitly allows `apply_patch`. +- Config lives under `tools.exec.applyPatch` (no `tools.bash` alias). diff --git a/docs/tools/index.md b/docs/tools/index.md index 8427c98b1..13853a79e 100644 --- a/docs/tools/index.md +++ b/docs/tools/index.md @@ -31,6 +31,10 @@ alongside tools (for example, the voice-call plugin). ## Tool inventory +### `apply_patch` +Apply structured patches across one or more files. Use for multi-hunk edits. +Experimental: enable via `tools.exec.applyPatch.enabled` (OpenAI models only). + ### `exec` Run shell commands in the workspace. diff --git a/src/agents/apply-patch.test.ts b/src/agents/apply-patch.test.ts new file mode 100644 index 000000000..16f45a1e9 --- /dev/null +++ b/src/agents/apply-patch.test.ts @@ -0,0 +1,74 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; + +import { applyPatch } from "./apply-patch.js"; + +async function withTempDir(fn: (dir: string) => Promise) { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-patch-")); + try { + return await fn(dir); + } finally { + await fs.rm(dir, { recursive: true, force: true }); + } +} + +describe("applyPatch", () => { + it("adds a file", async () => { + await withTempDir(async (dir) => { + const patch = `*** Begin Patch +*** Add File: hello.txt ++hello +*** End Patch`; + + const result = await applyPatch(patch, { cwd: dir }); + const contents = await fs.readFile(path.join(dir, "hello.txt"), "utf8"); + + expect(contents).toBe("hello\n"); + expect(result.summary.added).toEqual(["hello.txt"]); + }); + }); + + it("updates and moves a file", async () => { + await withTempDir(async (dir) => { + const source = path.join(dir, "source.txt"); + await fs.writeFile(source, "foo\nbar\n", "utf8"); + + const patch = `*** Begin Patch +*** Update File: source.txt +*** Move to: dest.txt +@@ + foo +-bar ++baz +*** End Patch`; + + const result = await applyPatch(patch, { cwd: dir }); + const dest = path.join(dir, "dest.txt"); + const contents = await fs.readFile(dest, "utf8"); + + expect(contents).toBe("foo\nbaz\n"); + await expect(fs.stat(source)).rejects.toBeDefined(); + expect(result.summary.modified).toEqual(["dest.txt"]); + }); + }); + + it("supports end-of-file inserts", async () => { + await withTempDir(async (dir) => { + const target = path.join(dir, "end.txt"); + await fs.writeFile(target, "line1\n", "utf8"); + + const patch = `*** Begin Patch +*** Update File: end.txt +@@ ++line2 +*** End of File +*** End Patch`; + + await applyPatch(patch, { cwd: dir }); + const contents = await fs.readFile(target, "utf8"); + expect(contents).toBe("line1\nline2\n"); + }); + }); +}); diff --git a/src/agents/apply-patch.ts 
b/src/agents/apply-patch.ts new file mode 100644 index 000000000..111dfcaf8 --- /dev/null +++ b/src/agents/apply-patch.ts @@ -0,0 +1,689 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import type { AgentTool } from "@mariozechner/pi-agent-core"; +import { Type } from "@sinclair/typebox"; + +import { assertSandboxPath } from "./sandbox-paths.js"; + +const BEGIN_PATCH_MARKER = "*** Begin Patch"; +const END_PATCH_MARKER = "*** End Patch"; +const ADD_FILE_MARKER = "*** Add File: "; +const DELETE_FILE_MARKER = "*** Delete File: "; +const UPDATE_FILE_MARKER = "*** Update File: "; +const MOVE_TO_MARKER = "*** Move to: "; +const EOF_MARKER = "*** End of File"; +const CHANGE_CONTEXT_MARKER = "@@ "; +const EMPTY_CHANGE_CONTEXT_MARKER = "@@"; +const UNICODE_SPACES = /[\u00A0\u2000-\u200A\u202F\u205F\u3000]/g; + +type AddFileHunk = { + kind: "add"; + path: string; + contents: string; +}; + +type DeleteFileHunk = { + kind: "delete"; + path: string; +}; + +type UpdateFileChunk = { + changeContext?: string; + oldLines: string[]; + newLines: string[]; + isEndOfFile: boolean; +}; + +type UpdateFileHunk = { + kind: "update"; + path: string; + movePath?: string; + chunks: UpdateFileChunk[]; +}; + +type Hunk = AddFileHunk | DeleteFileHunk | UpdateFileHunk; + +export type ApplyPatchSummary = { + added: string[]; + modified: string[]; + deleted: string[]; +}; + +export type ApplyPatchResult = { + summary: ApplyPatchSummary; + text: string; +}; + +export type ApplyPatchToolDetails = { + summary: ApplyPatchSummary; +}; + +type ApplyPatchOptions = { + cwd: string; + sandboxRoot?: string; + signal?: AbortSignal; +}; + +const applyPatchSchema = Type.Object({ + input: Type.String({ + description: "Patch content using the *** Begin Patch/End Patch format.", + }), +}); + +export function createApplyPatchTool( + options: { cwd?: string; sandboxRoot?: string } = {}, + // biome-ignore lint/suspicious/noExplicitAny: TypeBox schema type from pi-agent-core uses a different module instance. +): AgentTool { + const cwd = options.cwd ?? process.cwd(); + const sandboxRoot = options.sandboxRoot; + + return { + name: "apply_patch", + label: "apply_patch", + description: + "Apply a patch to one or more files using the apply_patch format. The input should include *** Begin Patch and *** End Patch markers.", + parameters: applyPatchSchema, + execute: async (_toolCallId, args, signal) => { + const params = args as { input?: string }; + const input = typeof params.input === "string" ? 
params.input : ""; + if (!input.trim()) { + throw new Error("Provide a patch input."); + } + if (signal?.aborted) { + const err = new Error("Aborted"); + err.name = "AbortError"; + throw err; + } + + const result = await applyPatch(input, { + cwd, + sandboxRoot, + signal, + }); + + return { + content: [{ type: "text", text: result.text }], + details: { summary: result.summary }, + }; + }, + }; +} + +export async function applyPatch( + input: string, + options: ApplyPatchOptions, +): Promise { + const parsed = parsePatchText(input); + if (parsed.hunks.length === 0) { + throw new Error("No files were modified."); + } + + const summary: ApplyPatchSummary = { + added: [], + modified: [], + deleted: [], + }; + const seen = { + added: new Set(), + modified: new Set(), + deleted: new Set(), + }; + + for (const hunk of parsed.hunks) { + if (options.signal?.aborted) { + const err = new Error("Aborted"); + err.name = "AbortError"; + throw err; + } + + if (hunk.kind === "add") { + const target = await resolvePatchPath(hunk.path, options); + await ensureDir(target.resolved); + await fs.writeFile(target.resolved, hunk.contents, "utf8"); + recordSummary(summary, seen, "added", target.display); + continue; + } + + if (hunk.kind === "delete") { + const target = await resolvePatchPath(hunk.path, options); + await fs.rm(target.resolved); + recordSummary(summary, seen, "deleted", target.display); + continue; + } + + const target = await resolvePatchPath(hunk.path, options); + const applied = await applyUpdateHunk(target.resolved, hunk.chunks); + + if (hunk.movePath) { + const moveTarget = await resolvePatchPath(hunk.movePath, options); + await ensureDir(moveTarget.resolved); + await fs.writeFile(moveTarget.resolved, applied, "utf8"); + await fs.rm(target.resolved); + recordSummary(summary, seen, "modified", moveTarget.display); + } else { + await fs.writeFile(target.resolved, applied, "utf8"); + recordSummary(summary, seen, "modified", target.display); + } + } + + return { + summary, + text: formatSummary(summary), + }; +} + +function recordSummary( + summary: ApplyPatchSummary, + seen: { + added: Set; + modified: Set; + deleted: Set; + }, + bucket: keyof ApplyPatchSummary, + value: string, +) { + if (seen[bucket].has(value)) return; + seen[bucket].add(value); + summary[bucket].push(value); +} + +function formatSummary(summary: ApplyPatchSummary): string { + const lines = ["Success. 
Updated the following files:"]; + for (const file of summary.added) lines.push(`A ${file}`); + for (const file of summary.modified) lines.push(`M ${file}`); + for (const file of summary.deleted) lines.push(`D ${file}`); + return lines.join("\n"); +} + +async function ensureDir(filePath: string) { + const parent = path.dirname(filePath); + if (!parent || parent === ".") return; + await fs.mkdir(parent, { recursive: true }); +} + +async function resolvePatchPath( + filePath: string, + options: ApplyPatchOptions, +): Promise<{ resolved: string; display: string }> { + if (options.sandboxRoot) { + const resolved = await assertSandboxPath({ + filePath, + cwd: options.cwd, + root: options.sandboxRoot, + }); + return { + resolved: resolved.resolved, + display: resolved.relative || resolved.resolved, + }; + } + + const resolved = resolvePathFromCwd(filePath, options.cwd); + return { + resolved, + display: toDisplayPath(resolved, options.cwd), + }; +} + +function normalizeUnicodeSpaces(value: string): string { + return value.replace(UNICODE_SPACES, " "); +} + +function expandPath(filePath: string): string { + const normalized = normalizeUnicodeSpaces(filePath); + if (normalized === "~") return os.homedir(); + if (normalized.startsWith("~/")) return os.homedir() + normalized.slice(1); + return normalized; +} + +function resolvePathFromCwd(filePath: string, cwd: string): string { + const expanded = expandPath(filePath); + if (path.isAbsolute(expanded)) return path.normalize(expanded); + return path.resolve(cwd, expanded); +} + +function toDisplayPath(resolved: string, cwd: string): string { + const relative = path.relative(cwd, resolved); + if (!relative || relative === "") return path.basename(resolved); + if (relative.startsWith("..") || path.isAbsolute(relative)) return resolved; + return relative; +} + +function parsePatchText(input: string): { hunks: Hunk[]; patch: string } { + const trimmed = input.trim(); + if (!trimmed) { + throw new Error("Invalid patch: input is empty."); + } + + const lines = trimmed.split(/\r?\n/); + const validated = checkPatchBoundariesLenient(lines); + const hunks: Hunk[] = []; + + const lastLineIndex = validated.length - 1; + let remaining = validated.slice(1, lastLineIndex); + let lineNumber = 2; + + while (remaining.length > 0) { + const { hunk, consumed } = parseOneHunk(remaining, lineNumber); + hunks.push(hunk); + lineNumber += consumed; + remaining = remaining.slice(consumed); + } + + return { hunks, patch: validated.join("\n") }; +} + +function checkPatchBoundariesLenient(lines: string[]): string[] { + const strictError = checkPatchBoundariesStrict(lines); + if (!strictError) return lines; + + if (lines.length < 4) { + throw new Error(strictError); + } + const first = lines[0]; + const last = lines[lines.length - 1]; + if ( + (first === "< 0) { + if (remaining[0].trim() === "") { + remaining = remaining.slice(1); + consumed += 1; + continue; + } + if (remaining[0].startsWith("***")) { + break; + } + const { chunk, consumed: chunkLines } = parseUpdateFileChunk( + remaining, + lineNumber + consumed, + chunks.length === 0, + ); + chunks.push(chunk); + remaining = remaining.slice(chunkLines); + consumed += chunkLines; + } + + if (chunks.length === 0) { + throw new Error( + `Invalid patch hunk at line ${lineNumber}: Update file hunk for path '${targetPath}' is empty`, + ); + } + + return { + hunk: { + kind: "update", + path: targetPath, + movePath, + chunks, + }, + consumed, + }; + } + + throw new Error( + `Invalid patch hunk at line ${lineNumber}: '${lines[0]}' is 
not a valid hunk header. Valid hunk headers: '*** Add File: {path}', '*** Delete File: {path}', '*** Update File: {path}'`, + ); +} + +function parseUpdateFileChunk( + lines: string[], + lineNumber: number, + allowMissingContext: boolean, +): { chunk: UpdateFileChunk; consumed: number } { + if (lines.length === 0) { + throw new Error( + `Invalid patch hunk at line ${lineNumber}: Update hunk does not contain any lines`, + ); + } + + let changeContext: string | undefined; + let startIndex = 0; + if (lines[0] === EMPTY_CHANGE_CONTEXT_MARKER) { + startIndex = 1; + } else if (lines[0].startsWith(CHANGE_CONTEXT_MARKER)) { + changeContext = lines[0].slice(CHANGE_CONTEXT_MARKER.length); + startIndex = 1; + } else if (!allowMissingContext) { + throw new Error( + `Invalid patch hunk at line ${lineNumber}: Expected update hunk to start with a @@ context marker, got: '${lines[0]}'`, + ); + } + + if (startIndex >= lines.length) { + throw new Error( + `Invalid patch hunk at line ${lineNumber + 1}: Update hunk does not contain any lines`, + ); + } + + const chunk: UpdateFileChunk = { + changeContext, + oldLines: [], + newLines: [], + isEndOfFile: false, + }; + + let parsedLines = 0; + for (const line of lines.slice(startIndex)) { + if (line === EOF_MARKER) { + if (parsedLines === 0) { + throw new Error( + `Invalid patch hunk at line ${lineNumber + 1}: Update hunk does not contain any lines`, + ); + } + chunk.isEndOfFile = true; + parsedLines += 1; + break; + } + + const marker = line[0]; + if (!marker) { + chunk.oldLines.push(""); + chunk.newLines.push(""); + parsedLines += 1; + continue; + } + + if (marker === " ") { + const content = line.slice(1); + chunk.oldLines.push(content); + chunk.newLines.push(content); + parsedLines += 1; + continue; + } + if (marker === "+") { + chunk.newLines.push(line.slice(1)); + parsedLines += 1; + continue; + } + if (marker === "-") { + chunk.oldLines.push(line.slice(1)); + parsedLines += 1; + continue; + } + + if (parsedLines === 0) { + throw new Error( + `Invalid patch hunk at line ${lineNumber + 1}: Unexpected line found in update hunk: '${line}'. 
Every line should start with ' ' (context line), '+' (added line), or '-' (removed line)`, + ); + } + break; + } + + return { chunk, consumed: parsedLines + startIndex }; +} + +async function applyUpdateHunk( + filePath: string, + chunks: UpdateFileChunk[], +): Promise { + const originalContents = await fs.readFile(filePath, "utf8").catch((err) => { + throw new Error(`Failed to read file to update ${filePath}: ${err}`); + }); + + const originalLines = originalContents.split("\n"); + if ( + originalLines.length > 0 && + originalLines[originalLines.length - 1] === "" + ) { + originalLines.pop(); + } + + const replacements = computeReplacements(originalLines, filePath, chunks); + let newLines = applyReplacements(originalLines, replacements); + if (newLines.length === 0 || newLines[newLines.length - 1] !== "") { + newLines = [...newLines, ""]; + } + return newLines.join("\n"); +} + +function computeReplacements( + originalLines: string[], + filePath: string, + chunks: UpdateFileChunk[], +): Array<[number, number, string[]]> { + const replacements: Array<[number, number, string[]]> = []; + let lineIndex = 0; + + for (const chunk of chunks) { + if (chunk.changeContext) { + const ctxIndex = seekSequence( + originalLines, + [chunk.changeContext], + lineIndex, + false, + ); + if (ctxIndex === null) { + throw new Error( + `Failed to find context '${chunk.changeContext}' in ${filePath}`, + ); + } + lineIndex = ctxIndex + 1; + } + + if (chunk.oldLines.length === 0) { + const insertionIndex = + originalLines.length > 0 && + originalLines[originalLines.length - 1] === "" + ? originalLines.length - 1 + : originalLines.length; + replacements.push([insertionIndex, 0, chunk.newLines]); + continue; + } + + let pattern = chunk.oldLines; + let newSlice = chunk.newLines; + let found = seekSequence( + originalLines, + pattern, + lineIndex, + chunk.isEndOfFile, + ); + + if (found === null && pattern[pattern.length - 1] === "") { + pattern = pattern.slice(0, -1); + if (newSlice.length > 0 && newSlice[newSlice.length - 1] === "") { + newSlice = newSlice.slice(0, -1); + } + found = seekSequence( + originalLines, + pattern, + lineIndex, + chunk.isEndOfFile, + ); + } + + if (found === null) { + throw new Error( + `Failed to find expected lines in ${filePath}:\n${chunk.oldLines.join("\n")}`, + ); + } + + replacements.push([found, pattern.length, newSlice]); + lineIndex = found + pattern.length; + } + + replacements.sort((a, b) => a[0] - b[0]); + return replacements; +} + +function applyReplacements( + lines: string[], + replacements: Array<[number, number, string[]]>, +): string[] { + const result = [...lines]; + for (const [startIndex, oldLen, newLines] of [...replacements].reverse()) { + for (let i = 0; i < oldLen; i += 1) { + if (startIndex < result.length) { + result.splice(startIndex, 1); + } + } + for (let i = 0; i < newLines.length; i += 1) { + result.splice(startIndex + i, 0, newLines[i]); + } + } + return result; +} + +function seekSequence( + lines: string[], + pattern: string[], + start: number, + eof: boolean, +): number | null { + if (pattern.length === 0) return start; + if (pattern.length > lines.length) return null; + + const maxStart = lines.length - pattern.length; + const searchStart = eof && lines.length >= pattern.length ? 
maxStart : start; + if (searchStart > maxStart) return null; + + for (let i = searchStart; i <= maxStart; i += 1) { + if (linesMatch(lines, pattern, i, (value) => value)) return i; + } + for (let i = searchStart; i <= maxStart; i += 1) { + if (linesMatch(lines, pattern, i, (value) => value.trimEnd())) return i; + } + for (let i = searchStart; i <= maxStart; i += 1) { + if (linesMatch(lines, pattern, i, (value) => value.trim())) return i; + } + for (let i = searchStart; i <= maxStart; i += 1) { + if ( + linesMatch(lines, pattern, i, (value) => + normalizePunctuation(value.trim()), + ) + ) { + return i; + } + } + + return null; +} + +function linesMatch( + lines: string[], + pattern: string[], + start: number, + normalize: (value: string) => string, +): boolean { + for (let idx = 0; idx < pattern.length; idx += 1) { + if (normalize(lines[start + idx]) !== normalize(pattern[idx])) { + return false; + } + } + return true; +} + +function normalizePunctuation(value: string): string { + return Array.from(value) + .map((char) => { + switch (char) { + case "\u2010": + case "\u2011": + case "\u2012": + case "\u2013": + case "\u2014": + case "\u2015": + case "\u2212": + return "-"; + case "\u2018": + case "\u2019": + case "\u201A": + case "\u201B": + return "'"; + case "\u201C": + case "\u201D": + case "\u201E": + case "\u201F": + return '"'; + case "\u00A0": + case "\u2002": + case "\u2003": + case "\u2004": + case "\u2005": + case "\u2006": + case "\u2007": + case "\u2008": + case "\u2009": + case "\u200A": + case "\u202F": + case "\u205F": + case "\u3000": + return " "; + default: + return char; + } + }) + .join(""); +} diff --git a/src/agents/pi-embedded-runner.ts b/src/agents/pi-embedded-runner.ts index 83678ac4e..1f04636f6 100644 --- a/src/agents/pi-embedded-runner.ts +++ b/src/agents/pi-embedded-runner.ts @@ -1110,6 +1110,7 @@ export async function compactEmbeddedPiSession(params: { config: params.config, abortSignal: runAbortController.signal, modelProvider: model.provider, + modelId, modelAuthMode: resolveModelAuthMode(model.provider, params.config), // No currentChannelId/currentThreadTs for compaction - not in message context }); @@ -1524,6 +1525,7 @@ export async function runEmbeddedPiAgent(params: { config: params.config, abortSignal: runAbortController.signal, modelProvider: model.provider, + modelId, modelAuthMode: resolveModelAuthMode(model.provider, params.config), currentChannelId: params.currentChannelId, currentThreadTs: params.currentThreadTs, diff --git a/src/agents/pi-tools-agent-config.test.ts b/src/agents/pi-tools-agent-config.test.ts index 0b6553447..a716c89ad 100644 --- a/src/agents/pi-tools-agent-config.test.ts +++ b/src/agents/pi-tools-agent-config.test.ts @@ -31,6 +31,7 @@ describe("Agent-specific tool filtering", () => { expect(toolNames).toContain("read"); expect(toolNames).toContain("write"); expect(toolNames).not.toContain("exec"); + expect(toolNames).not.toContain("apply_patch"); }); it("should keep global tool policy when agent only sets tools.elevated", () => { @@ -65,6 +66,32 @@ describe("Agent-specific tool filtering", () => { expect(toolNames).toContain("exec"); expect(toolNames).toContain("read"); expect(toolNames).not.toContain("write"); + expect(toolNames).not.toContain("apply_patch"); + }); + + it("should allow apply_patch when exec is allow-listed and applyPatch is enabled", () => { + const cfg: ClawdbotConfig = { + tools: { + allow: ["read", "exec"], + exec: { + applyPatch: { enabled: true }, + }, + }, + }; + + const tools = createClawdbotCodingTools({ + 
config: cfg, + sessionKey: "agent:main:main", + workspaceDir: "/tmp/test", + agentDir: "/tmp/agent", + modelProvider: "openai", + modelId: "gpt-5.2", + }); + + const toolNames = tools.map((t) => t.name); + expect(toolNames).toContain("read"); + expect(toolNames).toContain("exec"); + expect(toolNames).toContain("apply_patch"); }); it("should apply agent-specific tool policy", () => { @@ -98,6 +125,7 @@ describe("Agent-specific tool filtering", () => { expect(toolNames).toContain("read"); expect(toolNames).not.toContain("exec"); expect(toolNames).not.toContain("write"); + expect(toolNames).not.toContain("apply_patch"); expect(toolNames).not.toContain("edit"); }); @@ -133,6 +161,7 @@ describe("Agent-specific tool filtering", () => { expect(mainToolNames).toContain("exec"); expect(mainToolNames).toContain("write"); expect(mainToolNames).toContain("edit"); + expect(mainToolNames).not.toContain("apply_patch"); // family agent: restricted const familyTools = createClawdbotCodingTools({ @@ -146,6 +175,7 @@ describe("Agent-specific tool filtering", () => { expect(familyToolNames).not.toContain("exec"); expect(familyToolNames).not.toContain("write"); expect(familyToolNames).not.toContain("edit"); + expect(familyToolNames).not.toContain("apply_patch"); }); it("should prefer agent-specific tool policy over global", () => { @@ -178,6 +208,7 @@ describe("Agent-specific tool filtering", () => { expect(toolNames).toContain("browser"); expect(toolNames).not.toContain("exec"); expect(toolNames).not.toContain("process"); + expect(toolNames).not.toContain("apply_patch"); }); it("should work with sandbox tools filtering", () => { diff --git a/src/agents/pi-tools.test.ts b/src/agents/pi-tools.test.ts index 600fbddac..089b10926 100644 --- a/src/agents/pi-tools.test.ts +++ b/src/agents/pi-tools.test.ts @@ -4,6 +4,7 @@ import path from "node:path"; import sharp from "sharp"; import { describe, expect, it } from "vitest"; +import type { ClawdbotConfig } from "../config/config.js"; import { __testing, createClawdbotCodingTools } from "./pi-tools.js"; import { createBrowserTool } from "./tools/browser-tool.js"; @@ -153,10 +154,59 @@ describe("createClawdbotCodingTools", () => { } }); - it("includes exec and process tools", () => { + it("includes exec and process tools by default", () => { const tools = createClawdbotCodingTools(); expect(tools.some((tool) => tool.name === "exec")).toBe(true); expect(tools.some((tool) => tool.name === "process")).toBe(true); + expect(tools.some((tool) => tool.name === "apply_patch")).toBe(false); + }); + + it("gates apply_patch behind tools.exec.applyPatch for OpenAI models", () => { + const config: ClawdbotConfig = { + tools: { + exec: { + applyPatch: { enabled: true }, + }, + }, + }; + const openAiTools = createClawdbotCodingTools({ + config, + modelProvider: "openai", + modelId: "gpt-5.2", + }); + expect(openAiTools.some((tool) => tool.name === "apply_patch")).toBe(true); + + const anthropicTools = createClawdbotCodingTools({ + config, + modelProvider: "anthropic", + modelId: "claude-opus-4-5", + }); + expect(anthropicTools.some((tool) => tool.name === "apply_patch")).toBe( + false, + ); + }); + + it("respects apply_patch allowModels", () => { + const config: ClawdbotConfig = { + tools: { + exec: { + applyPatch: { enabled: true, allowModels: ["gpt-5.2"] }, + }, + }, + }; + const allowed = createClawdbotCodingTools({ + config, + modelProvider: "openai", + modelId: "gpt-5.2", + }); + expect(allowed.some((tool) => tool.name === "apply_patch")).toBe(true); + + const denied = 
createClawdbotCodingTools({ + config, + modelProvider: "openai", + modelId: "gpt-5-mini", + }); + expect(denied.some((tool) => tool.name === "apply_patch")).toBe(false); }); it("keeps canonical tool names for Anthropic OAuth (pi-ai remaps on the wire)", () => { @@ -169,6 +219,7 @@ describe("createClawdbotCodingTools", () => { expect(names.has("read")).toBe(true); expect(names.has("write")).toBe(true); expect(names.has("edit")).toBe(true); + expect(names.has("apply_patch")).toBe(false); }); it("provides top-level object schemas for all tools", () => { @@ -212,6 +263,7 @@ describe("createClawdbotCodingTools", () => { expect(names.has("read")).toBe(true); expect(names.has("exec")).toBe(true); expect(names.has("process")).toBe(true); + expect(names.has("apply_patch")).toBe(false); }); it("supports allow-only sub-agent tool policy", () => { diff --git a/src/agents/pi-tools.ts b/src/agents/pi-tools.ts index 854801996..ed2549c32 100644 --- a/src/agents/pi-tools.ts +++ b/src/agents/pi-tools.ts @@ -14,6 +14,7 @@ import { resolveAgentConfig, resolveAgentIdFromSessionKey, } from "./agent-scope.js"; +import { createApplyPatchTool } from "./apply-patch.js"; import { createExecTool, createProcessTool, @@ -292,6 +293,7 @@ function cleanToolSchemaForGemini(schema: Record): unknown { const TOOL_NAME_ALIASES: Record = { bash: "exec", + "apply-patch": "apply_patch", }; function normalizeToolName(name: string) { @@ -304,6 +306,35 @@ function normalizeToolNames(list?: string[]) { return list.map(normalizeToolName).filter(Boolean); } +function isOpenAIProvider(provider?: string) { + const normalized = provider?.trim().toLowerCase(); + return normalized === "openai" || normalized === "openai-codex"; +} + +function isApplyPatchAllowedForModel(params: { + modelProvider?: string; + modelId?: string; + allowModels?: string[]; +}) { + const allowModels = Array.isArray(params.allowModels) + ? params.allowModels + : []; + if (allowModels.length === 0) return true; + const modelId = params.modelId?.trim(); + if (!modelId) return false; + const normalizedModelId = modelId.toLowerCase(); + const provider = params.modelProvider?.trim().toLowerCase(); + const normalizedFull = + provider && !normalizedModelId.includes("/") + ? `${provider}/${normalizedModelId}` + : normalizedModelId; + return allowModels.some((entry) => { + const normalized = entry.trim().toLowerCase(); + if (!normalized) return false; + return normalized === normalizedModelId || normalized === normalizedFull; + }); +} + const DEFAULT_SUBAGENT_TOOL_DENY = [ "sessions_list", "sessions_history", @@ -321,20 +352,30 @@ function resolveSubagentToolPolicy(cfg?: ClawdbotConfig): SandboxToolPolicy { return { allow, deny }; } +function isToolAllowedByPolicyName( + name: string, + policy?: SandboxToolPolicy, +): boolean { + if (!policy) return true; + const deny = new Set(normalizeToolNames(policy.deny)); + const allowRaw = normalizeToolNames(policy.allow); + const allow = allowRaw.length > 0 ? new Set(allowRaw) : null; + const normalized = normalizeToolName(name); + if (deny.has(normalized)) return false; + if (allow) { + if (allow.has(normalized)) return true; + if (normalized === "apply_patch" && allow.has("exec")) return true; + return false; + } + return true; +} + function filterToolsByPolicy( tools: AnyAgentTool[], policy?: SandboxToolPolicy, ) { if (!policy) return tools; - const deny = new Set(normalizeToolNames(policy.deny)); - const allowRaw = normalizeToolNames(policy.allow); - const allow = allowRaw.length > 0 ? 
new Set(allowRaw) : null; - return tools.filter((tool) => { - const name = tool.name.toLowerCase(); - if (deny.has(name)) return false; - if (allow) return allow.has(name); - return true; - }); + return tools.filter((tool) => isToolAllowedByPolicyName(tool.name, policy)); } function resolveEffectiveToolPolicy(params: { @@ -359,14 +400,7 @@ function resolveEffectiveToolPolicy(params: { } function isToolAllowedByPolicy(name: string, policy?: SandboxToolPolicy) { - if (!policy) return true; - const deny = new Set(normalizeToolNames(policy.deny)); - const allowRaw = normalizeToolNames(policy.allow); - const allow = allowRaw.length > 0 ? new Set(allowRaw) : null; - const normalized = normalizeToolName(name); - if (deny.has(normalized)) return false; - if (allow) return allow.has(normalized); - return true; + return isToolAllowedByPolicyName(name, policy); } function isToolAllowedByPolicies( @@ -490,6 +524,8 @@ export function createClawdbotCodingTools(options?: { * Example: "anthropic", "openai", "google", "openai-codex". */ modelProvider?: string; + /** Model id for the current provider (used for model-specific tool gating). */ + modelId?: string; /** * Auth mode for the current provider. We only need this for Anthropic OAuth * tool-name blocking quirks. @@ -524,6 +560,15 @@ export function createClawdbotCodingTools(options?: { const sandboxRoot = sandbox?.workspaceDir; const allowWorkspaceWrites = sandbox?.workspaceAccess !== "ro"; const workspaceRoot = options?.workspaceDir ?? process.cwd(); + const applyPatchConfig = options?.config?.tools?.exec?.applyPatch; + const applyPatchEnabled = + !!applyPatchConfig?.enabled && + isOpenAIProvider(options?.modelProvider) && + isApplyPatchAllowedForModel({ + modelProvider: options?.modelProvider, + modelId: options?.modelId, + allowModels: applyPatchConfig?.allowModels, + }); const base = (codingTools as unknown as AnyAgentTool[]).flatMap((tool) => { if (tool.name === readTool.name) { @@ -562,6 +607,14 @@ export function createClawdbotCodingTools(options?: { cleanupMs: options?.exec?.cleanupMs, scopeKey, }); + const applyPatchTool = + !applyPatchEnabled || (sandboxRoot && !allowWorkspaceWrites) + ? null + : createApplyPatchTool({ + cwd: sandboxRoot ?? workspaceRoot, + sandboxRoot: + sandboxRoot && allowWorkspaceWrites ? sandboxRoot : undefined, + }); const tools: AnyAgentTool[] = [ ...base, ...(sandboxRoot @@ -572,6 +625,7 @@ export function createClawdbotCodingTools(options?: { ] : [] : []), + ...(applyPatchTool ? [applyPatchTool as unknown as AnyAgentTool] : []), execTool as unknown as AnyAgentTool, processTool as unknown as AnyAgentTool, // Provider docking: include provider-defined agent tools (login, etc.). 
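For context, a usage sketch mirroring the tests above (model id and config values are illustrative assumptions):

```ts
import { createClawdbotCodingTools } from "./pi-tools.js";

const tools = createClawdbotCodingTools({
  config: {
    tools: {
      exec: { applyPatch: { enabled: true, allowModels: ["gpt-5.2"] } },
    },
  },
  modelProvider: "openai",
  modelId: "gpt-5.2",
});

// apply_patch only shows up when the provider is OpenAI/OpenAI Codex, the
// model passes allowModels, and tool policy does not deny it
// (allow: ["exec"] implicitly allows apply_patch).
console.log(tools.some((tool) => tool.name === "apply_patch")); // true here
```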
diff --git a/src/agents/sandbox.ts b/src/agents/sandbox.ts index 11b6aeba5..c0f94ea2d 100644 --- a/src/agents/sandbox.ts +++ b/src/agents/sandbox.ts @@ -171,6 +171,7 @@ const DEFAULT_TOOL_ALLOW = [ "read", "write", "edit", + "apply_patch", "sessions_list", "sessions_history", "sessions_send", diff --git a/src/agents/system-prompt.ts b/src/agents/system-prompt.ts index 8d2756e97..d71fad490 100644 --- a/src/agents/system-prompt.ts +++ b/src/agents/system-prompt.ts @@ -50,6 +50,7 @@ export function buildAgentSystemPrompt(params: { read: "Read file contents", write: "Create or overwrite files", edit: "Make precise edits to files", + apply_patch: "Apply multi-file patches", grep: "Search file contents for patterns", find: "Find files by glob pattern", ls: "List directory contents", @@ -77,6 +78,7 @@ export function buildAgentSystemPrompt(params: { "read", "write", "edit", + "apply_patch", "grep", "find", "ls", @@ -195,6 +197,7 @@ export function buildAgentSystemPrompt(params: { "- grep: search file contents for patterns", "- find: find files by glob pattern", "- ls: list directory contents", + "- apply_patch: apply multi-file patches", `- ${execToolName}: run shell commands (supports background via yieldMs/background)`, `- ${processToolName}: manage background exec sessions`, "- browser: control clawd's dedicated browser", diff --git a/src/agents/tool-display.json b/src/agents/tool-display.json index c3174c31d..d008eadca 100644 --- a/src/agents/tool-display.json +++ b/src/agents/tool-display.json @@ -50,6 +50,11 @@ "title": "Edit", "detailKeys": ["path"] }, + "apply_patch": { + "emoji": "🩹", + "title": "Apply Patch", + "detailKeys": [] + }, "attach": { "emoji": "📎", "title": "Attach", diff --git a/src/config/schema.ts b/src/config/schema.ts index 0875fce47..ebc388643 100644 --- a/src/config/schema.ts +++ b/src/config/schema.ts @@ -107,6 +107,8 @@ const FIELD_LABELS: Record = { "tools.audio.transcription.args": "Audio Transcription Args", "tools.audio.transcription.timeoutSeconds": "Audio Transcription Timeout (sec)", + "tools.exec.applyPatch.enabled": "Enable apply_patch", + "tools.exec.applyPatch.allowModels": "apply_patch Model Allowlist", "gateway.controlUi.basePath": "Control UI Base Path", "gateway.http.endpoints.chatCompletions.enabled": "OpenAI Chat Completions Endpoint", @@ -194,6 +196,10 @@ const FIELD_HELP: Record = { 'Hot reload strategy for config changes ("hybrid" recommended).', "gateway.reload.debounceMs": "Debounce window (ms) before applying config changes.", + "tools.exec.applyPatch.enabled": + "Experimental. Enables apply_patch for OpenAI models when allowed by tool policy.", + "tools.exec.applyPatch.allowModels": + 'Optional allowlist of model ids (e.g. "gpt-5.2" or "openai/gpt-5.2").', "slack.allowBots": "Allow bot-authored messages to trigger Slack replies (default: false).", "auth.profiles": "Named auth profiles (provider + mode + optional email).", diff --git a/src/config/types.ts b/src/config/types.ts index 37fdfeb04..7600d0104 100644 --- a/src/config/types.ts +++ b/src/config/types.ts @@ -1018,6 +1018,16 @@ export type ToolsConfig = { timeoutSec?: number; /** How long to keep finished sessions in memory (ms). */ cleanupMs?: number; + /** apply_patch subtool configuration (experimental). */ + applyPatch?: { + /** Enable apply_patch for OpenAI models (default: false). */ + enabled?: boolean; + /** + * Optional allowlist of model ids that can use apply_patch. + * Accepts either raw ids (e.g. "gpt-5.2") or full ids (e.g. "openai/gpt-5.2"). 
+ */ + allowModels?: string[]; + }; }; /** @deprecated Use tools.exec. */ bash?: { diff --git a/src/config/zod-schema.ts b/src/config/zod-schema.ts index 66e842130..6a5665f03 100644 --- a/src/config/zod-schema.ts +++ b/src/config/zod-schema.ts @@ -910,6 +910,12 @@ const ToolsSchema = z backgroundMs: z.number().int().positive().optional(), timeoutSec: z.number().int().positive().optional(), cleanupMs: z.number().int().positive().optional(), + applyPatch: z + .object({ + enabled: z.boolean().optional(), + allowModels: z.array(z.string()).optional(), + }) + .optional(), }) .optional(), bash: z