feat: add apply_patch tool (exec-gated)
This commit is contained in:
@@ -10,6 +10,7 @@
|
|||||||
- Docs: add beginner-friendly plugin quick start + expand Voice Call plugin docs.
|
- Docs: add beginner-friendly plugin quick start + expand Voice Call plugin docs.
|
||||||
- Tests: add Docker plugin loader + tgz-install smoke test.
|
- Tests: add Docker plugin loader + tgz-install smoke test.
|
||||||
- Tests: extend Docker plugin E2E to cover installing from local folders (`plugins.load.paths`) and `file:` npm specs.
|
- Tests: extend Docker plugin E2E to cover installing from local folders (`plugins.load.paths`) and `file:` npm specs.
|
||||||
|
- Agents/Tools: add `apply_patch` tool for multi-file edits (experimental; gated by tools.exec.applyPatch; OpenAI-only).
|
||||||
- Agents/Tools: rename the bash tool to exec (config alias maintained). (#748) — thanks @myfunc.
|
- Agents/Tools: rename the bash tool to exec (config alias maintained). (#748) — thanks @myfunc.
|
||||||
- Config: add `$include` directive for modular config files. (#731) — thanks @pasogott.
|
- Config: add `$include` directive for modular config files. (#731) — thanks @pasogott.
|
||||||
- Build: set pnpm minimum release age to 2880 minutes (2 days). (#718) — thanks @dan-dr.
|
- Build: set pnpm minimum release age to 2880 minutes (2 days). (#718) — thanks @dan-dr.
|
||||||
|
|||||||
@@ -45,7 +45,10 @@ To disable bootstrap file creation entirely (for pre-seeded workspaces), set:
|
|||||||
|
|
||||||
## Built-in tools
|
## Built-in tools
|
||||||
|
|
||||||
Core tools (read/exec/edit/write and related system tools) are always available. `TOOLS.md` does **not** control which tools exist; it’s guidance for how *you* want them used.
|
Core tools (read/exec/edit/write and related system tools) are always available,
|
||||||
|
subject to tool policy. `apply_patch` is optional and gated by
|
||||||
|
`tools.exec.applyPatch`. `TOOLS.md` does **not** control which tools exist; it’s
|
||||||
|
guidance for how *you* want them used.
|
||||||
|
|
||||||
## Skills
|
## Skills
|
||||||
|
|
||||||
|
|||||||
@@ -217,7 +217,7 @@ Starting with v2026.1.6, each agent can have its own sandbox and tool restrictio
|
|||||||
},
|
},
|
||||||
tools: {
|
tools: {
|
||||||
allow: ["read"], // Only read tool
|
allow: ["read"], // Only read tool
|
||||||
deny: ["exec", "write", "edit"], // Deny others
|
deny: ["exec", "write", "edit", "apply_patch"], // Deny others
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -701,6 +701,7 @@
|
|||||||
"tools",
|
"tools",
|
||||||
"plugin",
|
"plugin",
|
||||||
"tools/exec",
|
"tools/exec",
|
||||||
|
"tools/apply-patch",
|
||||||
"tools/elevated",
|
"tools/elevated",
|
||||||
"tools/browser",
|
"tools/browser",
|
||||||
"tools/browser-linux-troubleshooting",
|
"tools/browser-linux-troubleshooting",
|
||||||
|
|||||||
@@ -259,7 +259,7 @@ Save to `~/.clawdbot/clawdbot.json` and you can DM the bot from that number.
|
|||||||
},
|
},
|
||||||
|
|
||||||
tools: {
|
tools: {
|
||||||
allow: ["exec", "process", "read", "write", "edit"],
|
allow: ["exec", "process", "read", "write", "edit", "apply_patch"],
|
||||||
deny: ["browser", "canvas"],
|
deny: ["browser", "canvas"],
|
||||||
exec: {
|
exec: {
|
||||||
backgroundMs: 10000,
|
backgroundMs: 10000,
|
||||||
|
|||||||
@@ -638,7 +638,7 @@ Read-only tools + read-only workspace:
|
|||||||
},
|
},
|
||||||
tools: {
|
tools: {
|
||||||
allow: ["read", "sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status"],
|
allow: ["read", "sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status"],
|
||||||
deny: ["write", "edit", "exec", "process", "browser"]
|
deny: ["write", "edit", "apply_patch", "exec", "process", "browser"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@@ -661,7 +661,7 @@ No filesystem access (messaging/session tools enabled):
|
|||||||
},
|
},
|
||||||
tools: {
|
tools: {
|
||||||
allow: ["sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status", "whatsapp", "telegram", "slack", "discord", "gateway"],
|
allow: ["sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status", "whatsapp", "telegram", "slack", "discord", "gateway"],
|
||||||
deny: ["read", "write", "edit", "exec", "process", "browser", "canvas", "nodes", "cron", "gateway", "image"]
|
deny: ["read", "write", "edit", "apply_patch", "exec", "process", "browser", "canvas", "nodes", "cron", "gateway", "image"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@@ -1431,6 +1431,9 @@ of `every`, keep `HEARTBEAT.md` tiny, and/or choose a cheaper `model`.
|
|||||||
- `backgroundMs`: time before auto-background (ms, default 10000)
|
- `backgroundMs`: time before auto-background (ms, default 10000)
|
||||||
- `timeoutSec`: auto-kill after this runtime (seconds, default 1800)
|
- `timeoutSec`: auto-kill after this runtime (seconds, default 1800)
|
||||||
- `cleanupMs`: how long to keep finished sessions in memory (ms, default 1800000)
|
- `cleanupMs`: how long to keep finished sessions in memory (ms, default 1800000)
|
||||||
|
- `applyPatch.enabled`: enable experimental `apply_patch` (OpenAI/OpenAI Codex only; default false)
|
||||||
|
- `applyPatch.allowModels`: optional allowlist of model ids (e.g. `gpt-5.2` or `openai/gpt-5.2`)
|
||||||
|
Note: `applyPatch` is only under `tools.exec` (no `tools.bash` alias).
|
||||||
Legacy: `tools.bash` is still accepted as an alias.
|
Legacy: `tools.bash` is still accepted as an alias.
|
||||||
|
|
||||||
`agents.defaults.subagents` configures sub-agent defaults:
|
`agents.defaults.subagents` configures sub-agent defaults:
|
||||||
@@ -1511,10 +1514,10 @@ Defaults (if enabled):
|
|||||||
- Debian bookworm-slim based image
|
- Debian bookworm-slim based image
|
||||||
- agent workspace access: `workspaceAccess: "none"` (default)
|
- agent workspace access: `workspaceAccess: "none"` (default)
|
||||||
- `"none"`: use a per-scope sandbox workspace under `~/.clawdbot/sandboxes`
|
- `"none"`: use a per-scope sandbox workspace under `~/.clawdbot/sandboxes`
|
||||||
- `"ro"`: keep the sandbox workspace at `/workspace`, and mount the agent workspace read-only at `/agent` (disables `write`/`edit`)
|
- `"ro"`: keep the sandbox workspace at `/workspace`, and mount the agent workspace read-only at `/agent` (disables `write`/`edit`/`apply_patch`)
|
||||||
- `"rw"`: mount the agent workspace read/write at `/workspace`
|
- `"rw"`: mount the agent workspace read/write at `/workspace`
|
||||||
- auto-prune: idle > 24h OR age > 7d
|
- auto-prune: idle > 24h OR age > 7d
|
||||||
- tool policy: allow only `exec`, `process`, `read`, `write`, `edit`, `sessions_list`, `sessions_history`, `sessions_send`, `sessions_spawn`, `session_status` (deny wins)
|
- tool policy: allow only `exec`, `process`, `read`, `write`, `edit`, `apply_patch`, `sessions_list`, `sessions_history`, `sessions_send`, `sessions_spawn`, `session_status` (deny wins)
|
||||||
- configure via `tools.sandbox.tools`, override per-agent via `agents.list[].tools.sandbox.tools`
|
- configure via `tools.sandbox.tools`, override per-agent via `agents.list[].tools.sandbox.tools`
|
||||||
- optional sandboxed browser (Chromium + CDP, noVNC observer)
|
- optional sandboxed browser (Chromium + CDP, noVNC observer)
|
||||||
- hardening knobs: `network`, `user`, `pidsLimit`, `memory`, `cpus`, `ulimits`, `seccompProfile`, `apparmorProfile`
|
- hardening knobs: `network`, `user`, `pidsLimit`, `memory`, `cpus`, `ulimits`, `seccompProfile`, `apparmorProfile`
|
||||||
@@ -1585,7 +1588,7 @@ Legacy: `perSession` is still supported (`true` → `scope: "session"`,
|
|||||||
tools: {
|
tools: {
|
||||||
sandbox: {
|
sandbox: {
|
||||||
tools: {
|
tools: {
|
||||||
allow: ["exec", "process", "read", "write", "edit", "sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status"],
|
allow: ["exec", "process", "read", "write", "edit", "apply_patch", "sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status"],
|
||||||
deny: ["browser", "canvas", "nodes", "cron", "discord", "gateway"]
|
deny: ["browser", "canvas", "nodes", "cron", "discord", "gateway"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ This is not a perfect security boundary, but it materially limits filesystem
|
|||||||
and process access when the model does something dumb.
|
and process access when the model does something dumb.
|
||||||
|
|
||||||
## What gets sandboxed
|
## What gets sandboxed
|
||||||
- Tool execution (`exec`, `read`, `write`, `edit`, `process`, etc.).
|
- Tool execution (`exec`, `read`, `write`, `edit`, `apply_patch`, `process`, etc.).
|
||||||
- Optional sandboxed browser (`agents.defaults.sandbox.browser`).
|
- Optional sandboxed browser (`agents.defaults.sandbox.browser`).
|
||||||
- By default, the sandbox browser auto-starts (ensures CDP is reachable) when the browser tool needs it.
|
- By default, the sandbox browser auto-starts (ensures CDP is reachable) when the browser tool needs it.
|
||||||
Configure via `agents.defaults.sandbox.browser.autoStart` and `agents.defaults.sandbox.browser.autoStartTimeoutMs`.
|
Configure via `agents.defaults.sandbox.browser.autoStart` and `agents.defaults.sandbox.browser.autoStartTimeoutMs`.
|
||||||
@@ -47,7 +47,7 @@ Group/channel sessions use their own keys, so they count as non-main and will be
|
|||||||
## Workspace access
|
## Workspace access
|
||||||
`agents.defaults.sandbox.workspaceAccess` controls **what the sandbox can see**:
|
`agents.defaults.sandbox.workspaceAccess` controls **what the sandbox can see**:
|
||||||
- `"none"` (default): tools see a sandbox workspace under `~/.clawdbot/sandboxes`.
|
- `"none"` (default): tools see a sandbox workspace under `~/.clawdbot/sandboxes`.
|
||||||
- `"ro"`: mounts the agent workspace read-only at `/agent` (disables `write`/`edit`).
|
- `"ro"`: mounts the agent workspace read-only at `/agent` (disables `write`/`edit`/`apply_patch`).
|
||||||
- `"rw"`: mounts the agent workspace read/write at `/workspace`.
|
- `"rw"`: mounts the agent workspace read/write at `/workspace`.
|
||||||
|
|
||||||
Inbound media is copied into the active sandbox workspace (`media/inbound/*`).
|
Inbound media is copied into the active sandbox workspace (`media/inbound/*`).
|
||||||
|
|||||||
@@ -184,7 +184,7 @@ Consider running your AI on a separate phone number from your personal one:
|
|||||||
|
|
||||||
You can already build a read-only profile by combining:
|
You can already build a read-only profile by combining:
|
||||||
- `agents.defaults.sandbox.workspaceAccess: "ro"` (or `"none"` for no workspace access)
|
- `agents.defaults.sandbox.workspaceAccess: "ro"` (or `"none"` for no workspace access)
|
||||||
- tool allow/deny lists that block `write`, `edit`, `exec`, `process`, etc.
|
- tool allow/deny lists that block `write`, `edit`, `apply_patch`, `exec`, `process`, etc.
|
||||||
|
|
||||||
We may add a single `readOnlyMode` flag later to simplify this configuration.
|
We may add a single `readOnlyMode` flag later to simplify this configuration.
|
||||||
|
|
||||||
@@ -203,7 +203,7 @@ single container/workspace.
|
|||||||
|
|
||||||
Also consider agent workspace access inside the sandbox:
|
Also consider agent workspace access inside the sandbox:
|
||||||
- `agents.defaults.sandbox.workspaceAccess: "none"` (default) keeps the agent workspace off-limits; tools run against a sandbox workspace under `~/.clawdbot/sandboxes`
|
- `agents.defaults.sandbox.workspaceAccess: "none"` (default) keeps the agent workspace off-limits; tools run against a sandbox workspace under `~/.clawdbot/sandboxes`
|
||||||
- `agents.defaults.sandbox.workspaceAccess: "ro"` mounts the agent workspace read-only at `/agent` (disables `write`/`edit`)
|
- `agents.defaults.sandbox.workspaceAccess: "ro"` mounts the agent workspace read-only at `/agent` (disables `write`/`edit`/`apply_patch`)
|
||||||
- `agents.defaults.sandbox.workspaceAccess: "rw"` mounts the agent workspace read/write at `/workspace`
|
- `agents.defaults.sandbox.workspaceAccess: "rw"` mounts the agent workspace read/write at `/workspace`
|
||||||
|
|
||||||
Important: `tools.elevated` is the global baseline escape hatch that runs exec on the host. Keep `tools.elevated.allowFrom` tight and don’t enable it for strangers. You can further restrict elevated per agent via `agents.list[].tools.elevated`. See [Elevated Mode](/tools/elevated).
|
Important: `tools.elevated` is the global baseline escape hatch that runs exec on the host. Keep `tools.elevated.allowFrom` tight and don’t enable it for strangers. You can further restrict elevated per agent via `agents.list[].tools.elevated`. See [Elevated Mode](/tools/elevated).
|
||||||
@@ -261,7 +261,7 @@ Common use cases:
|
|||||||
},
|
},
|
||||||
tools: {
|
tools: {
|
||||||
allow: ["read"],
|
allow: ["read"],
|
||||||
deny: ["write", "edit", "exec", "process", "browser"]
|
deny: ["write", "edit", "apply_patch", "exec", "process", "browser"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@@ -285,7 +285,7 @@ Common use cases:
|
|||||||
},
|
},
|
||||||
tools: {
|
tools: {
|
||||||
allow: ["sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status", "whatsapp", "telegram", "slack", "discord", "gateway"],
|
allow: ["sessions_list", "sessions_history", "sessions_send", "sessions_spawn", "session_status", "whatsapp", "telegram", "slack", "discord", "gateway"],
|
||||||
deny: ["read", "write", "edit", "exec", "process", "browser", "canvas", "nodes", "cron", "gateway", "image"]
|
deny: ["read", "write", "edit", "apply_patch", "exec", "process", "browser", "canvas", "nodes", "cron", "gateway", "image"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -246,7 +246,7 @@ precedence, and troubleshooting.
|
|||||||
- Image: `clawdbot-sandbox:bookworm-slim`
|
- Image: `clawdbot-sandbox:bookworm-slim`
|
||||||
- One container per agent
|
- One container per agent
|
||||||
- Agent workspace access: `workspaceAccess: "none"` (default) uses `~/.clawdbot/sandboxes`
|
- Agent workspace access: `workspaceAccess: "none"` (default) uses `~/.clawdbot/sandboxes`
|
||||||
- `"ro"` keeps the sandbox workspace at `/workspace` and mounts the agent workspace read-only at `/agent` (disables `write`/`edit`)
|
- `"ro"` keeps the sandbox workspace at `/workspace` and mounts the agent workspace read-only at `/agent` (disables `write`/`edit`/`apply_patch`)
|
||||||
- `"rw"` mounts the agent workspace read/write at `/workspace`
|
- `"rw"` mounts the agent workspace read/write at `/workspace`
|
||||||
- Auto-prune: idle > 24h OR age > 7d
|
- Auto-prune: idle > 24h OR age > 7d
|
||||||
- Network: `none` by default (explicitly opt-in if you need egress)
|
- Network: `none` by default (explicitly opt-in if you need egress)
|
||||||
@@ -424,7 +424,7 @@ Example:
|
|||||||
|
|
||||||
### Security notes
|
### Security notes
|
||||||
|
|
||||||
- Hard wall only applies to **tools** (exec/read/write/edit).
|
- Hard wall only applies to **tools** (exec/read/write/edit/apply_patch).
|
||||||
- Host-only tools like browser/camera/canvas are blocked by default.
|
- Host-only tools like browser/camera/canvas are blocked by default.
|
||||||
- Allowing `browser` in sandbox **breaks isolation** (browser runs on host).
|
- Allowing `browser` in sandbox **breaks isolation** (browser runs on host).
|
||||||
|
|
||||||
|
|||||||
@@ -48,7 +48,7 @@ For debugging “why is this blocked?”, see [Sandbox vs Tool Policy vs Elevate
|
|||||||
},
|
},
|
||||||
"tools": {
|
"tools": {
|
||||||
"allow": ["read"],
|
"allow": ["read"],
|
||||||
"deny": ["exec", "write", "edit", "process", "browser"]
|
"deny": ["exec", "write", "edit", "apply_patch", "process", "browser"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@@ -95,7 +95,7 @@ For debugging “why is this blocked?”, see [Sandbox vs Tool Policy vs Elevate
|
|||||||
"workspaceRoot": "/tmp/work-sandboxes"
|
"workspaceRoot": "/tmp/work-sandboxes"
|
||||||
},
|
},
|
||||||
"tools": {
|
"tools": {
|
||||||
"allow": ["read", "write", "exec"],
|
"allow": ["read", "write", "apply_patch", "exec"],
|
||||||
"deny": ["browser", "gateway", "discord"]
|
"deny": ["browser", "gateway", "discord"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -134,7 +134,7 @@ For debugging “why is this blocked?”, see [Sandbox vs Tool Policy vs Elevate
|
|||||||
},
|
},
|
||||||
"tools": {
|
"tools": {
|
||||||
"allow": ["read"],
|
"allow": ["read"],
|
||||||
"deny": ["exec", "write", "edit"]
|
"deny": ["exec", "write", "edit", "apply_patch"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@@ -200,7 +200,7 @@ Mitigation patterns:
|
|||||||
"tools": {
|
"tools": {
|
||||||
"sandbox": {
|
"sandbox": {
|
||||||
"tools": {
|
"tools": {
|
||||||
"allow": ["read", "write", "exec"],
|
"allow": ["read", "write", "apply_patch", "exec"],
|
||||||
"deny": []
|
"deny": []
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -235,7 +235,7 @@ Legacy `agent.*` configs are migrated by `clawdbot doctor`; prefer `agents.defau
|
|||||||
{
|
{
|
||||||
"tools": {
|
"tools": {
|
||||||
"allow": ["read"],
|
"allow": ["read"],
|
||||||
"deny": ["exec", "write", "edit", "process"]
|
"deny": ["exec", "write", "edit", "apply_patch", "process"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -245,7 +245,7 @@ Legacy `agent.*` configs are migrated by `clawdbot doctor`; prefer `agents.defau
|
|||||||
{
|
{
|
||||||
"tools": {
|
"tools": {
|
||||||
"allow": ["read", "exec", "process"],
|
"allow": ["read", "exec", "process"],
|
||||||
"deny": ["write", "edit", "browser", "gateway"]
|
"deny": ["write", "edit", "apply_patch", "browser", "gateway"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -255,7 +255,7 @@ Legacy `agent.*` configs are migrated by `clawdbot doctor`; prefer `agents.defau
|
|||||||
{
|
{
|
||||||
"tools": {
|
"tools": {
|
||||||
"allow": ["sessions_list", "sessions_send", "sessions_history", "session_status"],
|
"allow": ["sessions_list", "sessions_send", "sessions_history", "session_status"],
|
||||||
"deny": ["exec", "write", "edit", "read", "browser"]
|
"deny": ["exec", "write", "edit", "apply_patch", "read", "browser"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|||||||
49
docs/tools/apply-patch.md
Normal file
49
docs/tools/apply-patch.md
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
---
|
||||||
|
summary: "Apply multi-file patches with the apply_patch tool"
|
||||||
|
read_when:
|
||||||
|
- You need structured file edits across multiple files
|
||||||
|
- You want to document or debug patch-based edits
|
||||||
|
---
|
||||||
|
|
||||||
|
# apply_patch tool
|
||||||
|
|
||||||
|
Apply file changes using a structured patch format. This is ideal for multi-file
|
||||||
|
or multi-hunk edits where a single `edit` call would be brittle.
|
||||||
|
|
||||||
|
The tool accepts a single `input` string that wraps one or more file operations:
|
||||||
|
|
||||||
|
```
|
||||||
|
*** Begin Patch
|
||||||
|
*** Add File: path/to/file.txt
|
||||||
|
+line 1
|
||||||
|
+line 2
|
||||||
|
*** Update File: src/app.ts
|
||||||
|
@@
|
||||||
|
-old line
|
||||||
|
+new line
|
||||||
|
*** Delete File: obsolete.txt
|
||||||
|
*** End Patch
|
||||||
|
```
|
||||||
|
|
||||||
|
## Parameters
|
||||||
|
|
||||||
|
- `input` (required): Full patch contents including `*** Begin Patch` and `*** End Patch`.
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Paths are resolved relative to the workspace root.
|
||||||
|
- Use `*** Move to:` within an `*** Update File:` hunk to rename files.
|
||||||
|
- `*** End of File` marks an EOF-only insert when needed.
|
||||||
|
- Experimental and disabled by default. Enable with `tools.exec.applyPatch.enabled`.
|
||||||
|
- OpenAI-only (including OpenAI Codex). Optionally gate by model via
|
||||||
|
`tools.exec.applyPatch.allowModels`.
|
||||||
|
- Config is only under `tools.exec` (no `tools.bash` alias).
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"tool": "apply_patch",
|
||||||
|
"input": "*** Begin Patch\n*** Update File: src/index.ts\n@@\n-const foo = 1\n+const foo = 2\n*** End Patch"
|
||||||
|
}
|
||||||
|
```
|
||||||
@@ -33,3 +33,23 @@ Background + poll:
|
|||||||
{"tool":"exec","command":"npm run build","yieldMs":1000}
|
{"tool":"exec","command":"npm run build","yieldMs":1000}
|
||||||
{"tool":"process","action":"poll","sessionId":"<id>"}
|
{"tool":"process","action":"poll","sessionId":"<id>"}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## apply_patch (experimental)
|
||||||
|
|
||||||
|
`apply_patch` is a subtool of `exec` for structured multi-file edits.
|
||||||
|
Enable it explicitly:
|
||||||
|
|
||||||
|
```json5
|
||||||
|
{
|
||||||
|
tools: {
|
||||||
|
exec: {
|
||||||
|
applyPatch: { enabled: true, allowModels: ["gpt-5.2"] }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- Only available for OpenAI/OpenAI Codex models.
|
||||||
|
- Tool policy still applies; `allow: ["exec"]` implicitly allows `apply_patch`.
|
||||||
|
- Config lives under `tools.exec.applyPatch` (no `tools.bash` alias).
|
||||||
|
|||||||
@@ -31,6 +31,10 @@ alongside tools (for example, the voice-call plugin).
|
|||||||
|
|
||||||
## Tool inventory
|
## Tool inventory
|
||||||
|
|
||||||
|
### `apply_patch`
|
||||||
|
Apply structured patches across one or more files. Use for multi-hunk edits.
|
||||||
|
Experimental: enable via `tools.exec.applyPatch.enabled` (OpenAI models only).
|
||||||
|
|
||||||
### `exec`
|
### `exec`
|
||||||
Run shell commands in the workspace.
|
Run shell commands in the workspace.
|
||||||
|
|
||||||
|
|||||||
74
src/agents/apply-patch.test.ts
Normal file
74
src/agents/apply-patch.test.ts
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
import fs from "node:fs/promises";
|
||||||
|
import os from "node:os";
|
||||||
|
import path from "node:path";
|
||||||
|
import { describe, expect, it } from "vitest";
|
||||||
|
|
||||||
|
import { applyPatch } from "./apply-patch.js";
|
||||||
|
|
||||||
|
async function withTempDir<T>(fn: (dir: string) => Promise<T>) {
|
||||||
|
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-patch-"));
|
||||||
|
try {
|
||||||
|
return await fn(dir);
|
||||||
|
} finally {
|
||||||
|
await fs.rm(dir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
describe("applyPatch", () => {
|
||||||
|
it("adds a file", async () => {
|
||||||
|
await withTempDir(async (dir) => {
|
||||||
|
const patch = `*** Begin Patch
|
||||||
|
*** Add File: hello.txt
|
||||||
|
+hello
|
||||||
|
*** End Patch`;
|
||||||
|
|
||||||
|
const result = await applyPatch(patch, { cwd: dir });
|
||||||
|
const contents = await fs.readFile(path.join(dir, "hello.txt"), "utf8");
|
||||||
|
|
||||||
|
expect(contents).toBe("hello\n");
|
||||||
|
expect(result.summary.added).toEqual(["hello.txt"]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("updates and moves a file", async () => {
|
||||||
|
await withTempDir(async (dir) => {
|
||||||
|
const source = path.join(dir, "source.txt");
|
||||||
|
await fs.writeFile(source, "foo\nbar\n", "utf8");
|
||||||
|
|
||||||
|
const patch = `*** Begin Patch
|
||||||
|
*** Update File: source.txt
|
||||||
|
*** Move to: dest.txt
|
||||||
|
@@
|
||||||
|
foo
|
||||||
|
-bar
|
||||||
|
+baz
|
||||||
|
*** End Patch`;
|
||||||
|
|
||||||
|
const result = await applyPatch(patch, { cwd: dir });
|
||||||
|
const dest = path.join(dir, "dest.txt");
|
||||||
|
const contents = await fs.readFile(dest, "utf8");
|
||||||
|
|
||||||
|
expect(contents).toBe("foo\nbaz\n");
|
||||||
|
await expect(fs.stat(source)).rejects.toBeDefined();
|
||||||
|
expect(result.summary.modified).toEqual(["dest.txt"]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("supports end-of-file inserts", async () => {
|
||||||
|
await withTempDir(async (dir) => {
|
||||||
|
const target = path.join(dir, "end.txt");
|
||||||
|
await fs.writeFile(target, "line1\n", "utf8");
|
||||||
|
|
||||||
|
const patch = `*** Begin Patch
|
||||||
|
*** Update File: end.txt
|
||||||
|
@@
|
||||||
|
+line2
|
||||||
|
*** End of File
|
||||||
|
*** End Patch`;
|
||||||
|
|
||||||
|
await applyPatch(patch, { cwd: dir });
|
||||||
|
const contents = await fs.readFile(target, "utf8");
|
||||||
|
expect(contents).toBe("line1\nline2\n");
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
689
src/agents/apply-patch.ts
Normal file
689
src/agents/apply-patch.ts
Normal file
@@ -0,0 +1,689 @@
|
|||||||
|
import fs from "node:fs/promises";
|
||||||
|
import os from "node:os";
|
||||||
|
import path from "node:path";
|
||||||
|
import type { AgentTool } from "@mariozechner/pi-agent-core";
|
||||||
|
import { Type } from "@sinclair/typebox";
|
||||||
|
|
||||||
|
import { assertSandboxPath } from "./sandbox-paths.js";
|
||||||
|
|
||||||
|
const BEGIN_PATCH_MARKER = "*** Begin Patch";
|
||||||
|
const END_PATCH_MARKER = "*** End Patch";
|
||||||
|
const ADD_FILE_MARKER = "*** Add File: ";
|
||||||
|
const DELETE_FILE_MARKER = "*** Delete File: ";
|
||||||
|
const UPDATE_FILE_MARKER = "*** Update File: ";
|
||||||
|
const MOVE_TO_MARKER = "*** Move to: ";
|
||||||
|
const EOF_MARKER = "*** End of File";
|
||||||
|
const CHANGE_CONTEXT_MARKER = "@@ ";
|
||||||
|
const EMPTY_CHANGE_CONTEXT_MARKER = "@@";
|
||||||
|
const UNICODE_SPACES = /[\u00A0\u2000-\u200A\u202F\u205F\u3000]/g;
|
||||||
|
|
||||||
|
type AddFileHunk = {
|
||||||
|
kind: "add";
|
||||||
|
path: string;
|
||||||
|
contents: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
type DeleteFileHunk = {
|
||||||
|
kind: "delete";
|
||||||
|
path: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
type UpdateFileChunk = {
|
||||||
|
changeContext?: string;
|
||||||
|
oldLines: string[];
|
||||||
|
newLines: string[];
|
||||||
|
isEndOfFile: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
type UpdateFileHunk = {
|
||||||
|
kind: "update";
|
||||||
|
path: string;
|
||||||
|
movePath?: string;
|
||||||
|
chunks: UpdateFileChunk[];
|
||||||
|
};
|
||||||
|
|
||||||
|
type Hunk = AddFileHunk | DeleteFileHunk | UpdateFileHunk;
|
||||||
|
|
||||||
|
export type ApplyPatchSummary = {
|
||||||
|
added: string[];
|
||||||
|
modified: string[];
|
||||||
|
deleted: string[];
|
||||||
|
};
|
||||||
|
|
||||||
|
export type ApplyPatchResult = {
|
||||||
|
summary: ApplyPatchSummary;
|
||||||
|
text: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
export type ApplyPatchToolDetails = {
|
||||||
|
summary: ApplyPatchSummary;
|
||||||
|
};
|
||||||
|
|
||||||
|
type ApplyPatchOptions = {
|
||||||
|
cwd: string;
|
||||||
|
sandboxRoot?: string;
|
||||||
|
signal?: AbortSignal;
|
||||||
|
};
|
||||||
|
|
||||||
|
const applyPatchSchema = Type.Object({
|
||||||
|
input: Type.String({
|
||||||
|
description: "Patch content using the *** Begin Patch/End Patch format.",
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
|
||||||
|
export function createApplyPatchTool(
|
||||||
|
options: { cwd?: string; sandboxRoot?: string } = {},
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: TypeBox schema type from pi-agent-core uses a different module instance.
|
||||||
|
): AgentTool<any, ApplyPatchToolDetails> {
|
||||||
|
const cwd = options.cwd ?? process.cwd();
|
||||||
|
const sandboxRoot = options.sandboxRoot;
|
||||||
|
|
||||||
|
return {
|
||||||
|
name: "apply_patch",
|
||||||
|
label: "apply_patch",
|
||||||
|
description:
|
||||||
|
"Apply a patch to one or more files using the apply_patch format. The input should include *** Begin Patch and *** End Patch markers.",
|
||||||
|
parameters: applyPatchSchema,
|
||||||
|
execute: async (_toolCallId, args, signal) => {
|
||||||
|
const params = args as { input?: string };
|
||||||
|
const input = typeof params.input === "string" ? params.input : "";
|
||||||
|
if (!input.trim()) {
|
||||||
|
throw new Error("Provide a patch input.");
|
||||||
|
}
|
||||||
|
if (signal?.aborted) {
|
||||||
|
const err = new Error("Aborted");
|
||||||
|
err.name = "AbortError";
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = await applyPatch(input, {
|
||||||
|
cwd,
|
||||||
|
sandboxRoot,
|
||||||
|
signal,
|
||||||
|
});
|
||||||
|
|
||||||
|
return {
|
||||||
|
content: [{ type: "text", text: result.text }],
|
||||||
|
details: { summary: result.summary },
|
||||||
|
};
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function applyPatch(
|
||||||
|
input: string,
|
||||||
|
options: ApplyPatchOptions,
|
||||||
|
): Promise<ApplyPatchResult> {
|
||||||
|
const parsed = parsePatchText(input);
|
||||||
|
if (parsed.hunks.length === 0) {
|
||||||
|
throw new Error("No files were modified.");
|
||||||
|
}
|
||||||
|
|
||||||
|
const summary: ApplyPatchSummary = {
|
||||||
|
added: [],
|
||||||
|
modified: [],
|
||||||
|
deleted: [],
|
||||||
|
};
|
||||||
|
const seen = {
|
||||||
|
added: new Set<string>(),
|
||||||
|
modified: new Set<string>(),
|
||||||
|
deleted: new Set<string>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
for (const hunk of parsed.hunks) {
|
||||||
|
if (options.signal?.aborted) {
|
||||||
|
const err = new Error("Aborted");
|
||||||
|
err.name = "AbortError";
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hunk.kind === "add") {
|
||||||
|
const target = await resolvePatchPath(hunk.path, options);
|
||||||
|
await ensureDir(target.resolved);
|
||||||
|
await fs.writeFile(target.resolved, hunk.contents, "utf8");
|
||||||
|
recordSummary(summary, seen, "added", target.display);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hunk.kind === "delete") {
|
||||||
|
const target = await resolvePatchPath(hunk.path, options);
|
||||||
|
await fs.rm(target.resolved);
|
||||||
|
recordSummary(summary, seen, "deleted", target.display);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const target = await resolvePatchPath(hunk.path, options);
|
||||||
|
const applied = await applyUpdateHunk(target.resolved, hunk.chunks);
|
||||||
|
|
||||||
|
if (hunk.movePath) {
|
||||||
|
const moveTarget = await resolvePatchPath(hunk.movePath, options);
|
||||||
|
await ensureDir(moveTarget.resolved);
|
||||||
|
await fs.writeFile(moveTarget.resolved, applied, "utf8");
|
||||||
|
await fs.rm(target.resolved);
|
||||||
|
recordSummary(summary, seen, "modified", moveTarget.display);
|
||||||
|
} else {
|
||||||
|
await fs.writeFile(target.resolved, applied, "utf8");
|
||||||
|
recordSummary(summary, seen, "modified", target.display);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
summary,
|
||||||
|
text: formatSummary(summary),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function recordSummary(
|
||||||
|
summary: ApplyPatchSummary,
|
||||||
|
seen: {
|
||||||
|
added: Set<string>;
|
||||||
|
modified: Set<string>;
|
||||||
|
deleted: Set<string>;
|
||||||
|
},
|
||||||
|
bucket: keyof ApplyPatchSummary,
|
||||||
|
value: string,
|
||||||
|
) {
|
||||||
|
if (seen[bucket].has(value)) return;
|
||||||
|
seen[bucket].add(value);
|
||||||
|
summary[bucket].push(value);
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatSummary(summary: ApplyPatchSummary): string {
|
||||||
|
const lines = ["Success. Updated the following files:"];
|
||||||
|
for (const file of summary.added) lines.push(`A ${file}`);
|
||||||
|
for (const file of summary.modified) lines.push(`M ${file}`);
|
||||||
|
for (const file of summary.deleted) lines.push(`D ${file}`);
|
||||||
|
return lines.join("\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
async function ensureDir(filePath: string) {
|
||||||
|
const parent = path.dirname(filePath);
|
||||||
|
if (!parent || parent === ".") return;
|
||||||
|
await fs.mkdir(parent, { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
async function resolvePatchPath(
|
||||||
|
filePath: string,
|
||||||
|
options: ApplyPatchOptions,
|
||||||
|
): Promise<{ resolved: string; display: string }> {
|
||||||
|
if (options.sandboxRoot) {
|
||||||
|
const resolved = await assertSandboxPath({
|
||||||
|
filePath,
|
||||||
|
cwd: options.cwd,
|
||||||
|
root: options.sandboxRoot,
|
||||||
|
});
|
||||||
|
return {
|
||||||
|
resolved: resolved.resolved,
|
||||||
|
display: resolved.relative || resolved.resolved,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
const resolved = resolvePathFromCwd(filePath, options.cwd);
|
||||||
|
return {
|
||||||
|
resolved,
|
||||||
|
display: toDisplayPath(resolved, options.cwd),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collapse exotic Unicode whitespace (whatever UNICODE_SPACES matches) into
// plain ASCII spaces so model-emitted paths compare equal to on-disk paths.
// NOTE(review): assumes UNICODE_SPACES is declared with the /g flag —
// otherwise only the first occurrence is replaced; confirm at its definition.
function normalizeUnicodeSpaces(value: string): string {
  return value.replace(UNICODE_SPACES, " ");
}
|
||||||
|
|
||||||
|
function expandPath(filePath: string): string {
|
||||||
|
const normalized = normalizeUnicodeSpaces(filePath);
|
||||||
|
if (normalized === "~") return os.homedir();
|
||||||
|
if (normalized.startsWith("~/")) return os.homedir() + normalized.slice(1);
|
||||||
|
return normalized;
|
||||||
|
}
|
||||||
|
|
||||||
|
function resolvePathFromCwd(filePath: string, cwd: string): string {
|
||||||
|
const expanded = expandPath(filePath);
|
||||||
|
if (path.isAbsolute(expanded)) return path.normalize(expanded);
|
||||||
|
return path.resolve(cwd, expanded);
|
||||||
|
}
|
||||||
|
|
||||||
|
function toDisplayPath(resolved: string, cwd: string): string {
|
||||||
|
const relative = path.relative(cwd, resolved);
|
||||||
|
if (!relative || relative === "") return path.basename(resolved);
|
||||||
|
if (relative.startsWith("..") || path.isAbsolute(relative)) return resolved;
|
||||||
|
return relative;
|
||||||
|
}
|
||||||
|
|
||||||
|
function parsePatchText(input: string): { hunks: Hunk[]; patch: string } {
|
||||||
|
const trimmed = input.trim();
|
||||||
|
if (!trimmed) {
|
||||||
|
throw new Error("Invalid patch: input is empty.");
|
||||||
|
}
|
||||||
|
|
||||||
|
const lines = trimmed.split(/\r?\n/);
|
||||||
|
const validated = checkPatchBoundariesLenient(lines);
|
||||||
|
const hunks: Hunk[] = [];
|
||||||
|
|
||||||
|
const lastLineIndex = validated.length - 1;
|
||||||
|
let remaining = validated.slice(1, lastLineIndex);
|
||||||
|
let lineNumber = 2;
|
||||||
|
|
||||||
|
while (remaining.length > 0) {
|
||||||
|
const { hunk, consumed } = parseOneHunk(remaining, lineNumber);
|
||||||
|
hunks.push(hunk);
|
||||||
|
lineNumber += consumed;
|
||||||
|
remaining = remaining.slice(consumed);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { hunks, patch: validated.join("\n") };
|
||||||
|
}
|
||||||
|
|
||||||
|
function checkPatchBoundariesLenient(lines: string[]): string[] {
|
||||||
|
const strictError = checkPatchBoundariesStrict(lines);
|
||||||
|
if (!strictError) return lines;
|
||||||
|
|
||||||
|
if (lines.length < 4) {
|
||||||
|
throw new Error(strictError);
|
||||||
|
}
|
||||||
|
const first = lines[0];
|
||||||
|
const last = lines[lines.length - 1];
|
||||||
|
if (
|
||||||
|
(first === "<<EOF" || first === "<<'EOF'" || first === '<<"EOF"') &&
|
||||||
|
last.endsWith("EOF")
|
||||||
|
) {
|
||||||
|
const inner = lines.slice(1, lines.length - 1);
|
||||||
|
const innerError = checkPatchBoundariesStrict(inner);
|
||||||
|
if (!innerError) return inner;
|
||||||
|
throw new Error(innerError);
|
||||||
|
}
|
||||||
|
|
||||||
|
throw new Error(strictError);
|
||||||
|
}
|
||||||
|
|
||||||
|
function checkPatchBoundariesStrict(lines: string[]): string | null {
|
||||||
|
const firstLine = lines[0]?.trim();
|
||||||
|
const lastLine = lines[lines.length - 1]?.trim();
|
||||||
|
|
||||||
|
if (firstLine === BEGIN_PATCH_MARKER && lastLine === END_PATCH_MARKER) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
if (firstLine !== BEGIN_PATCH_MARKER) {
|
||||||
|
return "The first line of the patch must be '*** Begin Patch'";
|
||||||
|
}
|
||||||
|
return "The last line of the patch must be '*** End Patch'";
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Parse a single hunk from the front of `lines`.
 *
 * A hunk starts with one of three headers: '*** Add File: {path}',
 * '*** Delete File: {path}', or '*** Update File: {path}'.
 *
 * @param lines Remaining patch body lines (Begin/End sentinels stripped).
 * @param lineNumber 1-based line number of `lines[0]` within the full
 *   patch, used only to make error messages point at the right spot.
 * @returns The parsed hunk plus the number of lines it consumed, so the
 *   caller can advance past it.
 * @throws Error for an unknown header or an empty update hunk.
 */
function parseOneHunk(
  lines: string[],
  lineNumber: number,
): { hunk: Hunk; consumed: number } {
  if (lines.length === 0) {
    throw new Error(`Invalid patch hunk at line ${lineNumber}: empty hunk`);
  }
  const firstLine = lines[0].trim();
  if (firstLine.startsWith(ADD_FILE_MARKER)) {
    const targetPath = firstLine.slice(ADD_FILE_MARKER.length);
    let contents = "";
    let consumed = 1;
    // Every content line of an added file is prefixed with '+'; the first
    // non-'+' line ends the hunk.
    for (const addLine of lines.slice(1)) {
      if (addLine.startsWith("+")) {
        contents += `${addLine.slice(1)}\n`;
        consumed += 1;
      } else {
        break;
      }
    }
    return {
      hunk: { kind: "add", path: targetPath, contents },
      consumed,
    };
  }

  if (firstLine.startsWith(DELETE_FILE_MARKER)) {
    // Delete hunks are a single header line; no body follows.
    const targetPath = firstLine.slice(DELETE_FILE_MARKER.length);
    return {
      hunk: { kind: "delete", path: targetPath },
      consumed: 1,
    };
  }

  if (firstLine.startsWith(UPDATE_FILE_MARKER)) {
    const targetPath = firstLine.slice(UPDATE_FILE_MARKER.length);
    let remaining = lines.slice(1);
    let consumed = 1;
    let movePath: string | undefined;

    // Optional '*** Move to: {path}' directive immediately after the header
    // renames the file as part of the update.
    const moveCandidate = remaining[0]?.trim();
    if (moveCandidate?.startsWith(MOVE_TO_MARKER)) {
      movePath = moveCandidate.slice(MOVE_TO_MARKER.length);
      remaining = remaining.slice(1);
      consumed += 1;
    }

    // Collect '@@'-delimited change chunks until the next '***' header
    // (start of the following hunk) or end of input. Blank separator lines
    // between chunks are skipped but still counted as consumed.
    const chunks: UpdateFileChunk[] = [];
    while (remaining.length > 0) {
      if (remaining[0].trim() === "") {
        remaining = remaining.slice(1);
        consumed += 1;
        continue;
      }
      if (remaining[0].startsWith("***")) {
        break;
      }
      // Only the first chunk may omit its '@@' context marker.
      const { chunk, consumed: chunkLines } = parseUpdateFileChunk(
        remaining,
        lineNumber + consumed,
        chunks.length === 0,
      );
      chunks.push(chunk);
      remaining = remaining.slice(chunkLines);
      consumed += chunkLines;
    }

    if (chunks.length === 0) {
      throw new Error(
        `Invalid patch hunk at line ${lineNumber}: Update file hunk for path '${targetPath}' is empty`,
      );
    }

    return {
      hunk: {
        kind: "update",
        path: targetPath,
        movePath,
        chunks,
      },
      consumed,
    };
  }

  // None of the three known headers matched: fail loudly with the options.
  throw new Error(
    `Invalid patch hunk at line ${lineNumber}: '${lines[0]}' is not a valid hunk header. Valid hunk headers: '*** Add File: {path}', '*** Delete File: {path}', '*** Update File: {path}'`,
  );
}
|
||||||
|
|
||||||
|
/**
 * Parse one change chunk of an Update File hunk into parallel
 * oldLines/newLines arrays.
 *
 * A chunk optionally starts with a '@@' context marker (bare '@@' or
 * '@@ {context}'), followed by body lines prefixed with ' ' (context,
 * present in both old and new), '+' (added) or '-' (removed). A trailing
 * EOF marker pins the chunk to the end of the file.
 *
 * @param lines Remaining lines of the update hunk (chunk starts at index 0).
 * @param lineNumber 1-based patch line number of `lines[0]`, for errors.
 * @param allowMissingContext True only for the first chunk of a hunk, which
 *   may start directly with body lines.
 * @returns The parsed chunk and how many of `lines` it consumed.
 * @throws Error for an empty chunk or a malformed body line.
 */
function parseUpdateFileChunk(
  lines: string[],
  lineNumber: number,
  allowMissingContext: boolean,
): { chunk: UpdateFileChunk; consumed: number } {
  if (lines.length === 0) {
    throw new Error(
      `Invalid patch hunk at line ${lineNumber}: Update hunk does not contain any lines`,
    );
  }

  // Consume the optional '@@' header; startIndex marks where the body begins.
  let changeContext: string | undefined;
  let startIndex = 0;
  if (lines[0] === EMPTY_CHANGE_CONTEXT_MARKER) {
    startIndex = 1;
  } else if (lines[0].startsWith(CHANGE_CONTEXT_MARKER)) {
    changeContext = lines[0].slice(CHANGE_CONTEXT_MARKER.length);
    startIndex = 1;
  } else if (!allowMissingContext) {
    throw new Error(
      `Invalid patch hunk at line ${lineNumber}: Expected update hunk to start with a @@ context marker, got: '${lines[0]}'`,
    );
  }

  // A header with no body is invalid.
  if (startIndex >= lines.length) {
    throw new Error(
      `Invalid patch hunk at line ${lineNumber + 1}: Update hunk does not contain any lines`,
    );
  }

  const chunk: UpdateFileChunk = {
    changeContext,
    oldLines: [],
    newLines: [],
    isEndOfFile: false,
  };

  // Walk body lines, dispatching on the first character. `parsedLines`
  // counts consumed body lines and feeds the returned `consumed` total.
  let parsedLines = 0;
  for (const line of lines.slice(startIndex)) {
    if (line === EOF_MARKER) {
      // EOF marker must follow at least one body line; it anchors the
      // chunk match to the end of the target file.
      if (parsedLines === 0) {
        throw new Error(
          `Invalid patch hunk at line ${lineNumber + 1}: Update hunk does not contain any lines`,
        );
      }
      chunk.isEndOfFile = true;
      parsedLines += 1;
      break;
    }

    const marker = line[0];
    if (!marker) {
      // A completely empty line is treated as an empty context line.
      chunk.oldLines.push("");
      chunk.newLines.push("");
      parsedLines += 1;
      continue;
    }

    if (marker === " ") {
      // Context line: present in both the old and new versions.
      const content = line.slice(1);
      chunk.oldLines.push(content);
      chunk.newLines.push(content);
      parsedLines += 1;
      continue;
    }
    if (marker === "+") {
      chunk.newLines.push(line.slice(1));
      parsedLines += 1;
      continue;
    }
    if (marker === "-") {
      chunk.oldLines.push(line.slice(1));
      parsedLines += 1;
      continue;
    }

    // Unknown prefix: an error only if it is the very first body line;
    // otherwise it simply terminates this chunk (next chunk/hunk begins).
    if (parsedLines === 0) {
      throw new Error(
        `Invalid patch hunk at line ${lineNumber + 1}: Unexpected line found in update hunk: '${line}'. Every line should start with ' ' (context line), '+' (added line), or '-' (removed line)`,
      );
    }
    break;
  }

  return { chunk, consumed: parsedLines + startIndex };
}
|
||||||
|
|
||||||
|
async function applyUpdateHunk(
|
||||||
|
filePath: string,
|
||||||
|
chunks: UpdateFileChunk[],
|
||||||
|
): Promise<string> {
|
||||||
|
const originalContents = await fs.readFile(filePath, "utf8").catch((err) => {
|
||||||
|
throw new Error(`Failed to read file to update ${filePath}: ${err}`);
|
||||||
|
});
|
||||||
|
|
||||||
|
const originalLines = originalContents.split("\n");
|
||||||
|
if (
|
||||||
|
originalLines.length > 0 &&
|
||||||
|
originalLines[originalLines.length - 1] === ""
|
||||||
|
) {
|
||||||
|
originalLines.pop();
|
||||||
|
}
|
||||||
|
|
||||||
|
const replacements = computeReplacements(originalLines, filePath, chunks);
|
||||||
|
let newLines = applyReplacements(originalLines, replacements);
|
||||||
|
if (newLines.length === 0 || newLines[newLines.length - 1] !== "") {
|
||||||
|
newLines = [...newLines, ""];
|
||||||
|
}
|
||||||
|
return newLines.join("\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Locate each chunk's old lines inside `originalLines` and produce
 * `[startIndex, deleteCount, replacementLines]` triples, sorted by index,
 * for applyReplacements to consume.
 *
 * @param originalLines File contents split into lines (no trailing "").
 * @param filePath Used only in error messages.
 * @param chunks Parsed update chunks, in patch order.
 * @throws Error when a '@@' context line or a chunk's expected old lines
 *   cannot be found in the file.
 */
function computeReplacements(
  originalLines: string[],
  filePath: string,
  chunks: UpdateFileChunk[],
): Array<[number, number, string[]]> {
  const replacements: Array<[number, number, string[]]> = [];
  // Search cursor: chunks are matched in order, each starting after the
  // previous match so repeated patterns bind to successive occurrences.
  let lineIndex = 0;

  for (const chunk of chunks) {
    if (chunk.changeContext) {
      // First anchor on the '@@' context line, then search after it.
      const ctxIndex = seekSequence(
        originalLines,
        [chunk.changeContext],
        lineIndex,
        false,
      );
      if (ctxIndex === null) {
        throw new Error(
          `Failed to find context '${chunk.changeContext}' in ${filePath}`,
        );
      }
      lineIndex = ctxIndex + 1;
    }

    if (chunk.oldLines.length === 0) {
      // Pure-insertion chunk: append at the end of the file, but before a
      // trailing empty line if one exists.
      const insertionIndex =
        originalLines.length > 0 &&
        originalLines[originalLines.length - 1] === ""
          ? originalLines.length - 1
          : originalLines.length;
      replacements.push([insertionIndex, 0, chunk.newLines]);
      continue;
    }

    let pattern = chunk.oldLines;
    let newSlice = chunk.newLines;
    let found = seekSequence(
      originalLines,
      pattern,
      lineIndex,
      chunk.isEndOfFile,
    );

    // Retry without a trailing empty line on both sides: patches often
    // include a final blank line the file does not actually have.
    if (found === null && pattern[pattern.length - 1] === "") {
      pattern = pattern.slice(0, -1);
      if (newSlice.length > 0 && newSlice[newSlice.length - 1] === "") {
        newSlice = newSlice.slice(0, -1);
      }
      found = seekSequence(
        originalLines,
        pattern,
        lineIndex,
        chunk.isEndOfFile,
      );
    }

    if (found === null) {
      throw new Error(
        `Failed to find expected lines in ${filePath}:\n${chunk.oldLines.join("\n")}`,
      );
    }

    replacements.push([found, pattern.length, newSlice]);
    // Continue searching after the span we just matched.
    lineIndex = found + pattern.length;
  }

  // Sort ascending so the applier can process them back-to-front safely.
  replacements.sort((a, b) => a[0] - b[0]);
  return replacements;
}
|
||||||
|
|
||||||
|
function applyReplacements(
|
||||||
|
lines: string[],
|
||||||
|
replacements: Array<[number, number, string[]]>,
|
||||||
|
): string[] {
|
||||||
|
const result = [...lines];
|
||||||
|
for (const [startIndex, oldLen, newLines] of [...replacements].reverse()) {
|
||||||
|
for (let i = 0; i < oldLen; i += 1) {
|
||||||
|
if (startIndex < result.length) {
|
||||||
|
result.splice(startIndex, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (let i = 0; i < newLines.length; i += 1) {
|
||||||
|
result.splice(startIndex + i, 0, newLines[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
function seekSequence(
|
||||||
|
lines: string[],
|
||||||
|
pattern: string[],
|
||||||
|
start: number,
|
||||||
|
eof: boolean,
|
||||||
|
): number | null {
|
||||||
|
if (pattern.length === 0) return start;
|
||||||
|
if (pattern.length > lines.length) return null;
|
||||||
|
|
||||||
|
const maxStart = lines.length - pattern.length;
|
||||||
|
const searchStart = eof && lines.length >= pattern.length ? maxStart : start;
|
||||||
|
if (searchStart > maxStart) return null;
|
||||||
|
|
||||||
|
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||||
|
if (linesMatch(lines, pattern, i, (value) => value)) return i;
|
||||||
|
}
|
||||||
|
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||||
|
if (linesMatch(lines, pattern, i, (value) => value.trimEnd())) return i;
|
||||||
|
}
|
||||||
|
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||||
|
if (linesMatch(lines, pattern, i, (value) => value.trim())) return i;
|
||||||
|
}
|
||||||
|
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||||
|
if (
|
||||||
|
linesMatch(lines, pattern, i, (value) =>
|
||||||
|
normalizePunctuation(value.trim()),
|
||||||
|
)
|
||||||
|
) {
|
||||||
|
return i;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
function linesMatch(
|
||||||
|
lines: string[],
|
||||||
|
pattern: string[],
|
||||||
|
start: number,
|
||||||
|
normalize: (value: string) => string,
|
||||||
|
): boolean {
|
||||||
|
for (let idx = 0; idx < pattern.length; idx += 1) {
|
||||||
|
if (normalize(lines[start + idx]) !== normalize(pattern[idx])) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizePunctuation(value: string): string {
|
||||||
|
return Array.from(value)
|
||||||
|
.map((char) => {
|
||||||
|
switch (char) {
|
||||||
|
case "\u2010":
|
||||||
|
case "\u2011":
|
||||||
|
case "\u2012":
|
||||||
|
case "\u2013":
|
||||||
|
case "\u2014":
|
||||||
|
case "\u2015":
|
||||||
|
case "\u2212":
|
||||||
|
return "-";
|
||||||
|
case "\u2018":
|
||||||
|
case "\u2019":
|
||||||
|
case "\u201A":
|
||||||
|
case "\u201B":
|
||||||
|
return "'";
|
||||||
|
case "\u201C":
|
||||||
|
case "\u201D":
|
||||||
|
case "\u201E":
|
||||||
|
case "\u201F":
|
||||||
|
return '"';
|
||||||
|
case "\u00A0":
|
||||||
|
case "\u2002":
|
||||||
|
case "\u2003":
|
||||||
|
case "\u2004":
|
||||||
|
case "\u2005":
|
||||||
|
case "\u2006":
|
||||||
|
case "\u2007":
|
||||||
|
case "\u2008":
|
||||||
|
case "\u2009":
|
||||||
|
case "\u200A":
|
||||||
|
case "\u202F":
|
||||||
|
case "\u205F":
|
||||||
|
case "\u3000":
|
||||||
|
return " ";
|
||||||
|
default:
|
||||||
|
return char;
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.join("");
|
||||||
|
}
|
||||||
@@ -1110,6 +1110,7 @@ export async function compactEmbeddedPiSession(params: {
|
|||||||
config: params.config,
|
config: params.config,
|
||||||
abortSignal: runAbortController.signal,
|
abortSignal: runAbortController.signal,
|
||||||
modelProvider: model.provider,
|
modelProvider: model.provider,
|
||||||
|
modelId,
|
||||||
modelAuthMode: resolveModelAuthMode(model.provider, params.config),
|
modelAuthMode: resolveModelAuthMode(model.provider, params.config),
|
||||||
// No currentChannelId/currentThreadTs for compaction - not in message context
|
// No currentChannelId/currentThreadTs for compaction - not in message context
|
||||||
});
|
});
|
||||||
@@ -1524,6 +1525,7 @@ export async function runEmbeddedPiAgent(params: {
|
|||||||
config: params.config,
|
config: params.config,
|
||||||
abortSignal: runAbortController.signal,
|
abortSignal: runAbortController.signal,
|
||||||
modelProvider: model.provider,
|
modelProvider: model.provider,
|
||||||
|
modelId,
|
||||||
modelAuthMode: resolveModelAuthMode(model.provider, params.config),
|
modelAuthMode: resolveModelAuthMode(model.provider, params.config),
|
||||||
currentChannelId: params.currentChannelId,
|
currentChannelId: params.currentChannelId,
|
||||||
currentThreadTs: params.currentThreadTs,
|
currentThreadTs: params.currentThreadTs,
|
||||||
|
|||||||
@@ -31,6 +31,7 @@ describe("Agent-specific tool filtering", () => {
|
|||||||
expect(toolNames).toContain("read");
|
expect(toolNames).toContain("read");
|
||||||
expect(toolNames).toContain("write");
|
expect(toolNames).toContain("write");
|
||||||
expect(toolNames).not.toContain("exec");
|
expect(toolNames).not.toContain("exec");
|
||||||
|
expect(toolNames).not.toContain("apply_patch");
|
||||||
});
|
});
|
||||||
|
|
||||||
it("should keep global tool policy when agent only sets tools.elevated", () => {
|
it("should keep global tool policy when agent only sets tools.elevated", () => {
|
||||||
@@ -65,6 +66,32 @@ describe("Agent-specific tool filtering", () => {
|
|||||||
expect(toolNames).toContain("exec");
|
expect(toolNames).toContain("exec");
|
||||||
expect(toolNames).toContain("read");
|
expect(toolNames).toContain("read");
|
||||||
expect(toolNames).not.toContain("write");
|
expect(toolNames).not.toContain("write");
|
||||||
|
expect(toolNames).not.toContain("apply_patch");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should allow apply_patch when exec is allow-listed and applyPatch is enabled", () => {
|
||||||
|
const cfg: ClawdbotConfig = {
|
||||||
|
tools: {
|
||||||
|
allow: ["read", "exec"],
|
||||||
|
exec: {
|
||||||
|
applyPatch: { enabled: true },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const tools = createClawdbotCodingTools({
|
||||||
|
config: cfg,
|
||||||
|
sessionKey: "agent:main:main",
|
||||||
|
workspaceDir: "/tmp/test",
|
||||||
|
agentDir: "/tmp/agent",
|
||||||
|
modelProvider: "openai",
|
||||||
|
modelId: "gpt-5.2",
|
||||||
|
});
|
||||||
|
|
||||||
|
const toolNames = tools.map((t) => t.name);
|
||||||
|
expect(toolNames).toContain("read");
|
||||||
|
expect(toolNames).toContain("exec");
|
||||||
|
expect(toolNames).toContain("apply_patch");
|
||||||
});
|
});
|
||||||
|
|
||||||
it("should apply agent-specific tool policy", () => {
|
it("should apply agent-specific tool policy", () => {
|
||||||
@@ -98,6 +125,7 @@ describe("Agent-specific tool filtering", () => {
|
|||||||
expect(toolNames).toContain("read");
|
expect(toolNames).toContain("read");
|
||||||
expect(toolNames).not.toContain("exec");
|
expect(toolNames).not.toContain("exec");
|
||||||
expect(toolNames).not.toContain("write");
|
expect(toolNames).not.toContain("write");
|
||||||
|
expect(toolNames).not.toContain("apply_patch");
|
||||||
expect(toolNames).not.toContain("edit");
|
expect(toolNames).not.toContain("edit");
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -133,6 +161,7 @@ describe("Agent-specific tool filtering", () => {
|
|||||||
expect(mainToolNames).toContain("exec");
|
expect(mainToolNames).toContain("exec");
|
||||||
expect(mainToolNames).toContain("write");
|
expect(mainToolNames).toContain("write");
|
||||||
expect(mainToolNames).toContain("edit");
|
expect(mainToolNames).toContain("edit");
|
||||||
|
expect(mainToolNames).not.toContain("apply_patch");
|
||||||
|
|
||||||
// family agent: restricted
|
// family agent: restricted
|
||||||
const familyTools = createClawdbotCodingTools({
|
const familyTools = createClawdbotCodingTools({
|
||||||
@@ -146,6 +175,7 @@ describe("Agent-specific tool filtering", () => {
|
|||||||
expect(familyToolNames).not.toContain("exec");
|
expect(familyToolNames).not.toContain("exec");
|
||||||
expect(familyToolNames).not.toContain("write");
|
expect(familyToolNames).not.toContain("write");
|
||||||
expect(familyToolNames).not.toContain("edit");
|
expect(familyToolNames).not.toContain("edit");
|
||||||
|
expect(familyToolNames).not.toContain("apply_patch");
|
||||||
});
|
});
|
||||||
|
|
||||||
it("should prefer agent-specific tool policy over global", () => {
|
it("should prefer agent-specific tool policy over global", () => {
|
||||||
@@ -178,6 +208,7 @@ describe("Agent-specific tool filtering", () => {
|
|||||||
expect(toolNames).toContain("browser");
|
expect(toolNames).toContain("browser");
|
||||||
expect(toolNames).not.toContain("exec");
|
expect(toolNames).not.toContain("exec");
|
||||||
expect(toolNames).not.toContain("process");
|
expect(toolNames).not.toContain("process");
|
||||||
|
expect(toolNames).not.toContain("apply_patch");
|
||||||
});
|
});
|
||||||
|
|
||||||
it("should work with sandbox tools filtering", () => {
|
it("should work with sandbox tools filtering", () => {
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import path from "node:path";
|
|||||||
|
|
||||||
import sharp from "sharp";
|
import sharp from "sharp";
|
||||||
import { describe, expect, it } from "vitest";
|
import { describe, expect, it } from "vitest";
|
||||||
|
import type { ClawdbotConfig } from "../config/config.js";
|
||||||
import { __testing, createClawdbotCodingTools } from "./pi-tools.js";
|
import { __testing, createClawdbotCodingTools } from "./pi-tools.js";
|
||||||
import { createBrowserTool } from "./tools/browser-tool.js";
|
import { createBrowserTool } from "./tools/browser-tool.js";
|
||||||
|
|
||||||
@@ -153,10 +154,59 @@ describe("createClawdbotCodingTools", () => {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
it("includes exec and process tools", () => {
|
it("includes exec and process tools by default", () => {
|
||||||
const tools = createClawdbotCodingTools();
|
const tools = createClawdbotCodingTools();
|
||||||
expect(tools.some((tool) => tool.name === "exec")).toBe(true);
|
expect(tools.some((tool) => tool.name === "exec")).toBe(true);
|
||||||
expect(tools.some((tool) => tool.name === "process")).toBe(true);
|
expect(tools.some((tool) => tool.name === "process")).toBe(true);
|
||||||
|
expect(tools.some((tool) => tool.name === "apply_patch")).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("gates apply_patch behind tools.exec.applyPatch for OpenAI models", () => {
|
||||||
|
const config: ClawdbotConfig = {
|
||||||
|
tools: {
|
||||||
|
exec: {
|
||||||
|
applyPatch: { enabled: true },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
const openAiTools = createClawdbotCodingTools({
|
||||||
|
config,
|
||||||
|
modelProvider: "openai",
|
||||||
|
modelId: "gpt-5.2",
|
||||||
|
});
|
||||||
|
expect(openAiTools.some((tool) => tool.name === "apply_patch")).toBe(true);
|
||||||
|
|
||||||
|
const anthropicTools = createClawdbotCodingTools({
|
||||||
|
config,
|
||||||
|
modelProvider: "anthropic",
|
||||||
|
modelId: "claude-opus-4-5",
|
||||||
|
});
|
||||||
|
expect(anthropicTools.some((tool) => tool.name === "apply_patch")).toBe(
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("respects apply_patch allowModels", () => {
|
||||||
|
const config: ClawdbotConfig = {
|
||||||
|
tools: {
|
||||||
|
exec: {
|
||||||
|
applyPatch: { enabled: true, allowModels: ["gpt-5.2"] },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
const allowed = createClawdbotCodingTools({
|
||||||
|
config,
|
||||||
|
modelProvider: "openai",
|
||||||
|
modelId: "gpt-5.2",
|
||||||
|
});
|
||||||
|
expect(allowed.some((tool) => tool.name === "apply_patch")).toBe(true);
|
||||||
|
|
||||||
|
const denied = createClawdbotCodingTools({
|
||||||
|
config,
|
||||||
|
modelProvider: "openai",
|
||||||
|
modelId: "gpt-5-mini",
|
||||||
|
});
|
||||||
|
expect(denied.some((tool) => tool.name === "apply_patch")).toBe(false);
|
||||||
});
|
});
|
||||||
|
|
||||||
it("keeps canonical tool names for Anthropic OAuth (pi-ai remaps on the wire)", () => {
|
it("keeps canonical tool names for Anthropic OAuth (pi-ai remaps on the wire)", () => {
|
||||||
@@ -169,6 +219,7 @@ describe("createClawdbotCodingTools", () => {
|
|||||||
expect(names.has("read")).toBe(true);
|
expect(names.has("read")).toBe(true);
|
||||||
expect(names.has("write")).toBe(true);
|
expect(names.has("write")).toBe(true);
|
||||||
expect(names.has("edit")).toBe(true);
|
expect(names.has("edit")).toBe(true);
|
||||||
|
expect(names.has("apply_patch")).toBe(false);
|
||||||
});
|
});
|
||||||
|
|
||||||
it("provides top-level object schemas for all tools", () => {
|
it("provides top-level object schemas for all tools", () => {
|
||||||
@@ -212,6 +263,7 @@ describe("createClawdbotCodingTools", () => {
|
|||||||
expect(names.has("read")).toBe(true);
|
expect(names.has("read")).toBe(true);
|
||||||
expect(names.has("exec")).toBe(true);
|
expect(names.has("exec")).toBe(true);
|
||||||
expect(names.has("process")).toBe(true);
|
expect(names.has("process")).toBe(true);
|
||||||
|
expect(names.has("apply_patch")).toBe(false);
|
||||||
});
|
});
|
||||||
|
|
||||||
it("supports allow-only sub-agent tool policy", () => {
|
it("supports allow-only sub-agent tool policy", () => {
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import {
|
|||||||
resolveAgentConfig,
|
resolveAgentConfig,
|
||||||
resolveAgentIdFromSessionKey,
|
resolveAgentIdFromSessionKey,
|
||||||
} from "./agent-scope.js";
|
} from "./agent-scope.js";
|
||||||
|
import { createApplyPatchTool } from "./apply-patch.js";
|
||||||
import {
|
import {
|
||||||
createExecTool,
|
createExecTool,
|
||||||
createProcessTool,
|
createProcessTool,
|
||||||
@@ -292,6 +293,7 @@ function cleanToolSchemaForGemini(schema: Record<string, unknown>): unknown {
|
|||||||
|
|
||||||
const TOOL_NAME_ALIASES: Record<string, string> = {
|
const TOOL_NAME_ALIASES: Record<string, string> = {
|
||||||
bash: "exec",
|
bash: "exec",
|
||||||
|
"apply-patch": "apply_patch",
|
||||||
};
|
};
|
||||||
|
|
||||||
function normalizeToolName(name: string) {
|
function normalizeToolName(name: string) {
|
||||||
@@ -304,6 +306,35 @@ function normalizeToolNames(list?: string[]) {
|
|||||||
return list.map(normalizeToolName).filter(Boolean);
|
return list.map(normalizeToolName).filter(Boolean);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function isOpenAIProvider(provider?: string) {
|
||||||
|
const normalized = provider?.trim().toLowerCase();
|
||||||
|
return normalized === "openai" || normalized === "openai-codex";
|
||||||
|
}
|
||||||
|
|
||||||
|
function isApplyPatchAllowedForModel(params: {
|
||||||
|
modelProvider?: string;
|
||||||
|
modelId?: string;
|
||||||
|
allowModels?: string[];
|
||||||
|
}) {
|
||||||
|
const allowModels = Array.isArray(params.allowModels)
|
||||||
|
? params.allowModels
|
||||||
|
: [];
|
||||||
|
if (allowModels.length === 0) return true;
|
||||||
|
const modelId = params.modelId?.trim();
|
||||||
|
if (!modelId) return false;
|
||||||
|
const normalizedModelId = modelId.toLowerCase();
|
||||||
|
const provider = params.modelProvider?.trim().toLowerCase();
|
||||||
|
const normalizedFull =
|
||||||
|
provider && !normalizedModelId.includes("/")
|
||||||
|
? `${provider}/${normalizedModelId}`
|
||||||
|
: normalizedModelId;
|
||||||
|
return allowModels.some((entry) => {
|
||||||
|
const normalized = entry.trim().toLowerCase();
|
||||||
|
if (!normalized) return false;
|
||||||
|
return normalized === normalizedModelId || normalized === normalizedFull;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
const DEFAULT_SUBAGENT_TOOL_DENY = [
|
const DEFAULT_SUBAGENT_TOOL_DENY = [
|
||||||
"sessions_list",
|
"sessions_list",
|
||||||
"sessions_history",
|
"sessions_history",
|
||||||
@@ -321,20 +352,30 @@ function resolveSubagentToolPolicy(cfg?: ClawdbotConfig): SandboxToolPolicy {
|
|||||||
return { allow, deny };
|
return { allow, deny };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function isToolAllowedByPolicyName(
|
||||||
|
name: string,
|
||||||
|
policy?: SandboxToolPolicy,
|
||||||
|
): boolean {
|
||||||
|
if (!policy) return true;
|
||||||
|
const deny = new Set(normalizeToolNames(policy.deny));
|
||||||
|
const allowRaw = normalizeToolNames(policy.allow);
|
||||||
|
const allow = allowRaw.length > 0 ? new Set(allowRaw) : null;
|
||||||
|
const normalized = normalizeToolName(name);
|
||||||
|
if (deny.has(normalized)) return false;
|
||||||
|
if (allow) {
|
||||||
|
if (allow.has(normalized)) return true;
|
||||||
|
if (normalized === "apply_patch" && allow.has("exec")) return true;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
function filterToolsByPolicy(
|
function filterToolsByPolicy(
|
||||||
tools: AnyAgentTool[],
|
tools: AnyAgentTool[],
|
||||||
policy?: SandboxToolPolicy,
|
policy?: SandboxToolPolicy,
|
||||||
) {
|
) {
|
||||||
if (!policy) return tools;
|
if (!policy) return tools;
|
||||||
const deny = new Set(normalizeToolNames(policy.deny));
|
return tools.filter((tool) => isToolAllowedByPolicyName(tool.name, policy));
|
||||||
const allowRaw = normalizeToolNames(policy.allow);
|
|
||||||
const allow = allowRaw.length > 0 ? new Set(allowRaw) : null;
|
|
||||||
return tools.filter((tool) => {
|
|
||||||
const name = tool.name.toLowerCase();
|
|
||||||
if (deny.has(name)) return false;
|
|
||||||
if (allow) return allow.has(name);
|
|
||||||
return true;
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function resolveEffectiveToolPolicy(params: {
|
function resolveEffectiveToolPolicy(params: {
|
||||||
@@ -359,14 +400,7 @@ function resolveEffectiveToolPolicy(params: {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function isToolAllowedByPolicy(name: string, policy?: SandboxToolPolicy) {
|
function isToolAllowedByPolicy(name: string, policy?: SandboxToolPolicy) {
|
||||||
if (!policy) return true;
|
return isToolAllowedByPolicyName(name, policy);
|
||||||
const deny = new Set(normalizeToolNames(policy.deny));
|
|
||||||
const allowRaw = normalizeToolNames(policy.allow);
|
|
||||||
const allow = allowRaw.length > 0 ? new Set(allowRaw) : null;
|
|
||||||
const normalized = normalizeToolName(name);
|
|
||||||
if (deny.has(normalized)) return false;
|
|
||||||
if (allow) return allow.has(normalized);
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function isToolAllowedByPolicies(
|
function isToolAllowedByPolicies(
|
||||||
@@ -490,6 +524,8 @@ export function createClawdbotCodingTools(options?: {
|
|||||||
* Example: "anthropic", "openai", "google", "openai-codex".
|
* Example: "anthropic", "openai", "google", "openai-codex".
|
||||||
*/
|
*/
|
||||||
modelProvider?: string;
|
modelProvider?: string;
|
||||||
|
/** Model id for the current provider (used for model-specific tool gating). */
|
||||||
|
modelId?: string;
|
||||||
/**
|
/**
|
||||||
* Auth mode for the current provider. We only need this for Anthropic OAuth
|
* Auth mode for the current provider. We only need this for Anthropic OAuth
|
||||||
* tool-name blocking quirks.
|
* tool-name blocking quirks.
|
||||||
@@ -524,6 +560,15 @@ export function createClawdbotCodingTools(options?: {
|
|||||||
const sandboxRoot = sandbox?.workspaceDir;
|
const sandboxRoot = sandbox?.workspaceDir;
|
||||||
const allowWorkspaceWrites = sandbox?.workspaceAccess !== "ro";
|
const allowWorkspaceWrites = sandbox?.workspaceAccess !== "ro";
|
||||||
const workspaceRoot = options?.workspaceDir ?? process.cwd();
|
const workspaceRoot = options?.workspaceDir ?? process.cwd();
|
||||||
|
const applyPatchConfig = options?.config?.tools?.exec?.applyPatch;
|
||||||
|
const applyPatchEnabled =
|
||||||
|
!!applyPatchConfig?.enabled &&
|
||||||
|
isOpenAIProvider(options?.modelProvider) &&
|
||||||
|
isApplyPatchAllowedForModel({
|
||||||
|
modelProvider: options?.modelProvider,
|
||||||
|
modelId: options?.modelId,
|
||||||
|
allowModels: applyPatchConfig?.allowModels,
|
||||||
|
});
|
||||||
|
|
||||||
const base = (codingTools as unknown as AnyAgentTool[]).flatMap((tool) => {
|
const base = (codingTools as unknown as AnyAgentTool[]).flatMap((tool) => {
|
||||||
if (tool.name === readTool.name) {
|
if (tool.name === readTool.name) {
|
||||||
@@ -562,6 +607,14 @@ export function createClawdbotCodingTools(options?: {
|
|||||||
cleanupMs: options?.exec?.cleanupMs,
|
cleanupMs: options?.exec?.cleanupMs,
|
||||||
scopeKey,
|
scopeKey,
|
||||||
});
|
});
|
||||||
|
const applyPatchTool =
|
||||||
|
!applyPatchEnabled || (sandboxRoot && !allowWorkspaceWrites)
|
||||||
|
? null
|
||||||
|
: createApplyPatchTool({
|
||||||
|
cwd: sandboxRoot ?? workspaceRoot,
|
||||||
|
sandboxRoot:
|
||||||
|
sandboxRoot && allowWorkspaceWrites ? sandboxRoot : undefined,
|
||||||
|
});
|
||||||
const tools: AnyAgentTool[] = [
|
const tools: AnyAgentTool[] = [
|
||||||
...base,
|
...base,
|
||||||
...(sandboxRoot
|
...(sandboxRoot
|
||||||
@@ -572,6 +625,7 @@ export function createClawdbotCodingTools(options?: {
|
|||||||
]
|
]
|
||||||
: []
|
: []
|
||||||
: []),
|
: []),
|
||||||
|
...(applyPatchTool ? [applyPatchTool as unknown as AnyAgentTool] : []),
|
||||||
execTool as unknown as AnyAgentTool,
|
execTool as unknown as AnyAgentTool,
|
||||||
processTool as unknown as AnyAgentTool,
|
processTool as unknown as AnyAgentTool,
|
||||||
// Provider docking: include provider-defined agent tools (login, etc.).
|
// Provider docking: include provider-defined agent tools (login, etc.).
|
||||||
|
|||||||
@@ -171,6 +171,7 @@ const DEFAULT_TOOL_ALLOW = [
|
|||||||
"read",
|
"read",
|
||||||
"write",
|
"write",
|
||||||
"edit",
|
"edit",
|
||||||
|
"apply_patch",
|
||||||
"sessions_list",
|
"sessions_list",
|
||||||
"sessions_history",
|
"sessions_history",
|
||||||
"sessions_send",
|
"sessions_send",
|
||||||
|
|||||||
@@ -50,6 +50,7 @@ export function buildAgentSystemPrompt(params: {
|
|||||||
read: "Read file contents",
|
read: "Read file contents",
|
||||||
write: "Create or overwrite files",
|
write: "Create or overwrite files",
|
||||||
edit: "Make precise edits to files",
|
edit: "Make precise edits to files",
|
||||||
|
apply_patch: "Apply multi-file patches",
|
||||||
grep: "Search file contents for patterns",
|
grep: "Search file contents for patterns",
|
||||||
find: "Find files by glob pattern",
|
find: "Find files by glob pattern",
|
||||||
ls: "List directory contents",
|
ls: "List directory contents",
|
||||||
@@ -77,6 +78,7 @@ export function buildAgentSystemPrompt(params: {
|
|||||||
"read",
|
"read",
|
||||||
"write",
|
"write",
|
||||||
"edit",
|
"edit",
|
||||||
|
"apply_patch",
|
||||||
"grep",
|
"grep",
|
||||||
"find",
|
"find",
|
||||||
"ls",
|
"ls",
|
||||||
@@ -195,6 +197,7 @@ export function buildAgentSystemPrompt(params: {
|
|||||||
"- grep: search file contents for patterns",
|
"- grep: search file contents for patterns",
|
||||||
"- find: find files by glob pattern",
|
"- find: find files by glob pattern",
|
||||||
"- ls: list directory contents",
|
"- ls: list directory contents",
|
||||||
|
"- apply_patch: apply multi-file patches",
|
||||||
`- ${execToolName}: run shell commands (supports background via yieldMs/background)`,
|
`- ${execToolName}: run shell commands (supports background via yieldMs/background)`,
|
||||||
`- ${processToolName}: manage background exec sessions`,
|
`- ${processToolName}: manage background exec sessions`,
|
||||||
"- browser: control clawd's dedicated browser",
|
"- browser: control clawd's dedicated browser",
|
||||||
|
|||||||
@@ -50,6 +50,11 @@
|
|||||||
"title": "Edit",
|
"title": "Edit",
|
||||||
"detailKeys": ["path"]
|
"detailKeys": ["path"]
|
||||||
},
|
},
|
||||||
|
"apply_patch": {
|
||||||
|
"emoji": "🩹",
|
||||||
|
"title": "Apply Patch",
|
||||||
|
"detailKeys": []
|
||||||
|
},
|
||||||
"attach": {
|
"attach": {
|
||||||
"emoji": "📎",
|
"emoji": "📎",
|
||||||
"title": "Attach",
|
"title": "Attach",
|
||||||
|
|||||||
@@ -107,6 +107,8 @@ const FIELD_LABELS: Record<string, string> = {
|
|||||||
"tools.audio.transcription.args": "Audio Transcription Args",
|
"tools.audio.transcription.args": "Audio Transcription Args",
|
||||||
"tools.audio.transcription.timeoutSeconds":
|
"tools.audio.transcription.timeoutSeconds":
|
||||||
"Audio Transcription Timeout (sec)",
|
"Audio Transcription Timeout (sec)",
|
||||||
|
"tools.exec.applyPatch.enabled": "Enable apply_patch",
|
||||||
|
"tools.exec.applyPatch.allowModels": "apply_patch Model Allowlist",
|
||||||
"gateway.controlUi.basePath": "Control UI Base Path",
|
"gateway.controlUi.basePath": "Control UI Base Path",
|
||||||
"gateway.http.endpoints.chatCompletions.enabled":
|
"gateway.http.endpoints.chatCompletions.enabled":
|
||||||
"OpenAI Chat Completions Endpoint",
|
"OpenAI Chat Completions Endpoint",
|
||||||
@@ -194,6 +196,10 @@ const FIELD_HELP: Record<string, string> = {
|
|||||||
'Hot reload strategy for config changes ("hybrid" recommended).',
|
'Hot reload strategy for config changes ("hybrid" recommended).',
|
||||||
"gateway.reload.debounceMs":
|
"gateway.reload.debounceMs":
|
||||||
"Debounce window (ms) before applying config changes.",
|
"Debounce window (ms) before applying config changes.",
|
||||||
|
"tools.exec.applyPatch.enabled":
|
||||||
|
"Experimental. Enables apply_patch for OpenAI models when allowed by tool policy.",
|
||||||
|
"tools.exec.applyPatch.allowModels":
|
||||||
|
'Optional allowlist of model ids (e.g. "gpt-5.2" or "openai/gpt-5.2").',
|
||||||
"slack.allowBots":
|
"slack.allowBots":
|
||||||
"Allow bot-authored messages to trigger Slack replies (default: false).",
|
"Allow bot-authored messages to trigger Slack replies (default: false).",
|
||||||
"auth.profiles": "Named auth profiles (provider + mode + optional email).",
|
"auth.profiles": "Named auth profiles (provider + mode + optional email).",
|
||||||
|
|||||||
@@ -1018,6 +1018,16 @@ export type ToolsConfig = {
|
|||||||
timeoutSec?: number;
|
timeoutSec?: number;
|
||||||
/** How long to keep finished sessions in memory (ms). */
|
/** How long to keep finished sessions in memory (ms). */
|
||||||
cleanupMs?: number;
|
cleanupMs?: number;
|
||||||
|
/** apply_patch subtool configuration (experimental). */
|
||||||
|
applyPatch?: {
|
||||||
|
/** Enable apply_patch for OpenAI models (default: false). */
|
||||||
|
enabled?: boolean;
|
||||||
|
/**
|
||||||
|
* Optional allowlist of model ids that can use apply_patch.
|
||||||
|
* Accepts either raw ids (e.g. "gpt-5.2") or full ids (e.g. "openai/gpt-5.2").
|
||||||
|
*/
|
||||||
|
allowModels?: string[];
|
||||||
|
};
|
||||||
};
|
};
|
||||||
/** @deprecated Use tools.exec. */
|
/** @deprecated Use tools.exec. */
|
||||||
bash?: {
|
bash?: {
|
||||||
|
|||||||
@@ -910,6 +910,12 @@ const ToolsSchema = z
|
|||||||
backgroundMs: z.number().int().positive().optional(),
|
backgroundMs: z.number().int().positive().optional(),
|
||||||
timeoutSec: z.number().int().positive().optional(),
|
timeoutSec: z.number().int().positive().optional(),
|
||||||
cleanupMs: z.number().int().positive().optional(),
|
cleanupMs: z.number().int().positive().optional(),
|
||||||
|
applyPatch: z
|
||||||
|
.object({
|
||||||
|
enabled: z.boolean().optional(),
|
||||||
|
allowModels: z.array(z.string()).optional(),
|
||||||
|
})
|
||||||
|
.optional(),
|
||||||
})
|
})
|
||||||
.optional(),
|
.optional(),
|
||||||
bash: z
|
bash: z
|
||||||
|
|||||||
Reference in New Issue
Block a user