Gateway: disable OpenAI HTTP chat completions by default (#686)

* feat(gateway): disable OpenAI chat completions HTTP by default

* test(gateway): deflake mock OpenAI tool-calling

* docs(changelog): note OpenAI HTTP endpoint default-off
Peter Steinberger
2026-01-10 21:55:54 +00:00
committed by GitHub
parent 06052640e8
commit 1c257f170a
8 changed files with 52 additions and 9 deletions

View File

@@ -14,6 +14,7 @@
### Fixes
- CLI: avoid success message when daemon restart is skipped. (#685) — thanks @carlulsoe.
- Gateway: disable the OpenAI-compatible `/v1/chat/completions` endpoint by default; enable via `gateway.http.endpoints.chatCompletions.enabled=true`.
- macOS: stabilize bridge tunnels, guard invoke senders on disconnect, and drain stdout/stderr to avoid deadlocks. (#676) — thanks @ngutman.
- Agents/System: clarify sandboxed runtime in system prompt and surface elevated availability when sandboxed.
- Auto-reply: prefer `RawBody` for command/directive parsing (WhatsApp + Discord) and prevent fallback runs from clobbering concurrent session updates. (#643) — thanks @mcinteerj.

View File

@@ -1803,7 +1803,7 @@ Related docs:
Notes:
- `clawdbot gateway` refuses to start unless `gateway.mode` is set to `local` (or you pass the override flag).
- `gateway.port` controls the single multiplexed port used for WebSocket + HTTP (control UI, hooks, A2UI).
- Disable the OpenAI-compatible endpoint with `gateway.http.endpoints.chatCompletions.enabled: false`.
- OpenAI Chat Completions endpoint: **disabled by default**; enable with `gateway.http.endpoints.chatCompletions.enabled: true` (see the config sketch after these notes).
- Precedence: `--port` > `CLAWDBOT_GATEWAY_PORT` > `gateway.port` > default `18789`.
- Non-loopback binds (`lan`/`tailnet`/`auto`) require auth. Use `gateway.auth.token` (or `CLAWDBOT_GATEWAY_TOKEN`).
- `gateway.remote.token` is **only** for remote CLI calls; it does not enable local gateway auth. `gateway.token` is ignored.
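
A minimal config sketch pulling these notes together (the port, token value, and surrounding file layout are illustrative; only the key names come from the notes above):

```json5
{
  gateway: {
    mode: "local",                // required before `clawdbot gateway` will start
    port: 18789,                  // single multiplexed port for WS + HTTP
    auth: { token: "change-me" }, // required for non-loopback binds (lan/tailnet/auto)
    http: {
      endpoints: {
        // off by default; flip to true to serve POST /v1/chat/completions
        chatCompletions: { enabled: true }
      }
    }
  }
}
```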

View File

@@ -5,7 +5,9 @@ read_when:
---
# OpenAI Chat Completions (HTTP)
Clawdbot's Gateway can serve a small OpenAI-compatible Chat Completions endpoint:
Clawdbot's Gateway can serve a small OpenAI-compatible Chat Completions endpoint.
This endpoint is **disabled by default**. Enable it in config first.
- `POST /v1/chat/completions`
- Same port as the Gateway (WS + HTTP multiplex): `http://<gateway-host>:<port>/v1/chat/completions`
@@ -36,6 +38,22 @@ Or target a specific Clawdbot agent by header:
Advanced:
- `x-clawdbot-session-key: <sessionKey>` to fully control session routing.
## Enabling the endpoint
Set `gateway.http.endpoints.chatCompletions.enabled` to `true`:
```json5
{
  gateway: {
    http: {
      endpoints: {
        chatCompletions: { enabled: true }
      }
    }
  }
}
```
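
Once enabled, the endpoint accepts a standard Chat Completions request body. A minimal sketch of a request payload (the model name and message are illustrative; whether auth headers are needed depends on your bind and `gateway.auth` settings):

```json5
// POST http://<gateway-host>:<port>/v1/chat/completions
{
  model: "clawdbot",
  messages: [
    { role: "user", content: "hi" }
  ]
}
```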
## Disabling the endpoint
Set `gateway.http.endpoints.chatCompletions.enabled` to `false`:

View File

@@ -161,7 +161,7 @@ const FIELD_HELP: Record<string, string> = {
"gateway.controlUi.basePath":
"Optional URL prefix where the Control UI is served (e.g. /clawdbot).",
"gateway.http.endpoints.chatCompletions.enabled":
"Enable the OpenAI-compatible `POST /v1/chat/completions` endpoint (default: true).",
"Enable the OpenAI-compatible `POST /v1/chat/completions` endpoint (default: false).",
"gateway.reload.mode":
'Hot reload strategy for config changes ("hybrid" recommended).',
"gateway.reload.debounceMs":

View File

@@ -1158,7 +1158,7 @@ export type GatewayReloadConfig = {
export type GatewayHttpChatCompletionsConfig = {
  /**
   * If false, the Gateway will not serve `POST /v1/chat/completions`.
   * Default: true when absent.
   * Default: false when absent.
   */
  enabled?: boolean;
};

View File

@@ -6,6 +6,9 @@ import path from "node:path";
import { describe, expect, it } from "vitest";
import { GatewayClient } from "./client.js";
import { startGatewayServer } from "./server.js";
type OpenAIResponsesParams = {
  input?: unknown[];
};
@@ -227,7 +230,6 @@ function extractPayloadText(result: unknown): string {
}
async function connectClient(params: { url: string; token: string }) {
  const { GatewayClient } = await import("./client.js");
  return await new Promise<InstanceType<typeof GatewayClient>>(
    (resolve, reject) => {
      let settled = false;
@@ -368,7 +370,6 @@ describe("gateway (mock openai): tool calling", () => {
    process.env.CLAWDBOT_CONFIG_PATH = configPath;
    const port = await getFreeGatewayPort();
    const { startGatewayServer } = await import("./server.js");
    const server = await startGatewayServer(port, {
      bind: "loopback",
      auth: { mode: "token", token },

View File

@@ -9,6 +9,15 @@ import {
installGatewayTestHooks();
async function startServerWithDefaultConfig(port: number) {
  const { startGatewayServer } = await import("./server.js");
  return await startGatewayServer(port, {
    host: "127.0.0.1",
    auth: { mode: "token", token: "secret" },
    controlUiEnabled: false,
  });
}
async function startServer(
  port: number,
  opts?: { openAiChatCompletionsEnabled?: boolean },
@@ -18,7 +27,7 @@ async function startServer(
host: "127.0.0.1",
auth: { mode: "token", token: "secret" },
controlUiEnabled: false,
openAiChatCompletionsEnabled: opts?.openAiChatCompletionsEnabled,
openAiChatCompletionsEnabled: opts?.openAiChatCompletionsEnabled ?? true,
});
}
@@ -48,6 +57,20 @@ function parseSseDataLines(text: string): string[] {
}
describe("OpenAI-compatible HTTP API (e2e)", () => {
it("is disabled by default (requires config)", async () => {
const port = await getFreePort();
const server = await startServerWithDefaultConfig(port);
try {
const res = await postChatCompletions(port, {
model: "clawdbot",
messages: [{ role: "user", content: "hi" }],
});
expect(res.status).toBe(404);
} finally {
await server.close({ reason: "test done" });
}
});
it("can be disabled via config (404)", async () => {
const port = await getFreePort();
const server = await startServer(port, {

View File

@@ -330,7 +330,7 @@ export type GatewayServerOptions = {
  controlUiEnabled?: boolean;
  /**
   * If false, do not serve `POST /v1/chat/completions`.
   * Default: config `gateway.http.endpoints.chatCompletions.enabled` (or true when absent).
   * Default: config `gateway.http.endpoints.chatCompletions.enabled` (or false when absent).
   */
  openAiChatCompletionsEnabled?: boolean;
  /**
@@ -440,7 +440,7 @@ export async function startGatewayServer(
  const openAiChatCompletionsEnabled =
    opts.openAiChatCompletionsEnabled ??
    cfgAtStart.gateway?.http?.endpoints?.chatCompletions?.enabled ??
    true;
    false;
  const controlUiBasePath = normalizeControlUiBasePath(
    cfgAtStart.gateway?.controlUi?.basePath,
  );