From 1c257f170aa2b99a1f3533eac148ec895578245d Mon Sep 17 00:00:00 2001
From: Peter Steinberger
Date: Sat, 10 Jan 2026 21:55:54 +0000
Subject: [PATCH] Gateway: disable OpenAI HTTP chat completions by default (#686)

* feat(gateway): disable OpenAI chat completions HTTP by default
* test(gateway): deflake mock OpenAI tool-calling
* docs(changelog): note OpenAI HTTP endpoint default-off
---
 CHANGELOG.md                                  |  1 +
 docs/gateway/configuration.md                 |  2 +-
 docs/gateway/openai-http-api.md               | 20 ++++++++++++++-
 src/config/schema.ts                          |  2 +-
 src/config/types.ts                           |  2 +-
 .../gateway.tool-calling.mock-openai.test.ts  |  5 ++--
 src/gateway/openai-http.e2e.test.ts           | 25 ++++++++++++++++++-
 src/gateway/server.ts                         |  4 +--
 8 files changed, 52 insertions(+), 9 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 749dc7faf..21d417dba 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@
 ### Fixes
 
 - CLI: avoid success message when daemon restart is skipped. (#685) — thanks @carlulsoe.
+- Gateway: disable the OpenAI-compatible `/v1/chat/completions` endpoint by default; enable via `gateway.http.endpoints.chatCompletions.enabled=true`.
 - macOS: stabilize bridge tunnels, guard invoke senders on disconnect, and drain stdout/stderr to avoid deadlocks. (#676) — thanks @ngutman.
 - Agents/System: clarify sandboxed runtime in system prompt and surface elevated availability when sandboxed.
 - Auto-reply: prefer `RawBody` for command/directive parsing (WhatsApp + Discord) and prevent fallback runs from clobbering concurrent session updates. (#643) — thanks @mcinteerj.
diff --git a/docs/gateway/configuration.md b/docs/gateway/configuration.md
index 4040a671d..dca0921c3 100644
--- a/docs/gateway/configuration.md
+++ b/docs/gateway/configuration.md
@@ -1803,7 +1803,7 @@ Related docs:
 Notes:
 - `clawdbot gateway` refuses to start unless `gateway.mode` is set to `local` (or you pass the override flag).
 - `gateway.port` controls the single multiplexed port used for WebSocket + HTTP (control UI, hooks, A2UI).
-- Disable the OpenAI-compatible endpoint with `gateway.http.endpoints.chatCompletions.enabled: false`.
+- OpenAI Chat Completions endpoint: **disabled by default**; enable with `gateway.http.endpoints.chatCompletions.enabled: true`.
 - Precedence: `--port` > `CLAWDBOT_GATEWAY_PORT` > `gateway.port` > default `18789`.
 - Non-loopback binds (`lan`/`tailnet`/`auto`) require auth. Use `gateway.auth.token` (or `CLAWDBOT_GATEWAY_TOKEN`).
 - `gateway.remote.token` is **only** for remote CLI calls; it does not enable local gateway auth. `gateway.token` is ignored.
diff --git a/docs/gateway/openai-http-api.md b/docs/gateway/openai-http-api.md
index d6b2e0021..8d6941a22 100644
--- a/docs/gateway/openai-http-api.md
+++ b/docs/gateway/openai-http-api.md
@@ -5,7 +5,9 @@ read_when:
 ---
 # OpenAI Chat Completions (HTTP)
 
-Clawdbot’s Gateway can serve a small OpenAI-compatible Chat Completions endpoint:
+Clawdbot’s Gateway can serve a small OpenAI-compatible Chat Completions endpoint.
+
+This endpoint is **disabled by default**. Enable it in config first.
 
 - `POST /v1/chat/completions`
 - Same port as the Gateway (WS + HTTP multiplex): `http://<host>:<port>/v1/chat/completions`
@@ -36,6 +38,22 @@ Or target a specific Clawdbot agent by header:
 
 Advanced:
 - `x-clawdbot-session-key: <key>` to fully control session routing.
+
+## Enabling the endpoint
+
+Set `gateway.http.endpoints.chatCompletions.enabled` to `true`:
+
+```json5
+{
+  gateway: {
+    http: {
+      endpoints: {
+        chatCompletions: { enabled: true }
+      }
+    }
+  }
+}
+```
+
 ## Disabling the endpoint
 
 Set `gateway.http.endpoints.chatCompletions.enabled` to `false`:
diff --git a/src/config/schema.ts b/src/config/schema.ts
index 1a9a9b597..05840ac7f 100644
--- a/src/config/schema.ts
+++ b/src/config/schema.ts
@@ -161,7 +161,7 @@ const FIELD_HELP: Record = {
   "gateway.controlUi.basePath":
     "Optional URL prefix where the Control UI is served (e.g. /clawdbot).",
   "gateway.http.endpoints.chatCompletions.enabled":
-    "Enable the OpenAI-compatible `POST /v1/chat/completions` endpoint (default: true).",
+    "Enable the OpenAI-compatible `POST /v1/chat/completions` endpoint (default: false).",
   "gateway.reload.mode":
     'Hot reload strategy for config changes ("hybrid" recommended).',
   "gateway.reload.debounceMs":
diff --git a/src/config/types.ts b/src/config/types.ts
index 5dde3a4bb..4de23e5d9 100644
--- a/src/config/types.ts
+++ b/src/config/types.ts
@@ -1158,7 +1158,7 @@ export type GatewayReloadConfig = {
 export type GatewayHttpChatCompletionsConfig = {
   /**
    * If false, the Gateway will not serve `POST /v1/chat/completions`.
-   * Default: true when absent.
+   * Default: false when absent.
    */
   enabled?: boolean;
 };
diff --git a/src/gateway/gateway.tool-calling.mock-openai.test.ts b/src/gateway/gateway.tool-calling.mock-openai.test.ts
index f512dffee..28ca42f3a 100644
--- a/src/gateway/gateway.tool-calling.mock-openai.test.ts
+++ b/src/gateway/gateway.tool-calling.mock-openai.test.ts
@@ -6,6 +6,9 @@ import path from "node:path";
 
 import { describe, expect, it } from "vitest";
 
+import { GatewayClient } from "./client.js";
+import { startGatewayServer } from "./server.js";
+
 type OpenAIResponsesParams = {
   input?: unknown[];
 };
@@ -227,7 +230,6 @@ function extractPayloadText(result: unknown): string {
 }
 
 async function connectClient(params: { url: string; token: string }) {
-  const { GatewayClient } = await import("./client.js");
   return await new Promise>(
     (resolve, reject) => {
       let settled = false;
@@ -368,7 +370,6 @@ describe("gateway (mock openai): tool calling", () => {
     process.env.CLAWDBOT_CONFIG_PATH = configPath;
 
     const port = await getFreeGatewayPort();
-    const { startGatewayServer } = await import("./server.js");
     const server = await startGatewayServer(port, {
       bind: "loopback",
       auth: { mode: "token", token },
diff --git a/src/gateway/openai-http.e2e.test.ts b/src/gateway/openai-http.e2e.test.ts
index 2a1b76a79..d8fa34134 100644
--- a/src/gateway/openai-http.e2e.test.ts
+++ b/src/gateway/openai-http.e2e.test.ts
@@ -9,6 +9,15 @@ import {
 
 installGatewayTestHooks();
 
+async function startServerWithDefaultConfig(port: number) {
+  const { startGatewayServer } = await import("./server.js");
+  return await startGatewayServer(port, {
+    host: "127.0.0.1",
+    auth: { mode: "token", token: "secret" },
+    controlUiEnabled: false,
+  });
+}
+
 async function startServer(
   port: number,
   opts?: { openAiChatCompletionsEnabled?: boolean },
@@ -18,7 +27,7 @@
     host: "127.0.0.1",
     auth: { mode: "token", token: "secret" },
     controlUiEnabled: false,
-    openAiChatCompletionsEnabled: opts?.openAiChatCompletionsEnabled,
+    openAiChatCompletionsEnabled: opts?.openAiChatCompletionsEnabled ?? true,
   });
 }
@@ -48,6 +57,20 @@ function parseSseDataLines(text: string): string[] {
 }
 
 describe("OpenAI-compatible HTTP API (e2e)", () => {
+  it("is disabled by default (requires config)", async () => {
+    const port = await getFreePort();
+    const server = await startServerWithDefaultConfig(port);
+    try {
+      const res = await postChatCompletions(port, {
+        model: "clawdbot",
+        messages: [{ role: "user", content: "hi" }],
+      });
+      expect(res.status).toBe(404);
+    } finally {
+      await server.close({ reason: "test done" });
+    }
+  });
+
   it("can be disabled via config (404)", async () => {
     const port = await getFreePort();
     const server = await startServer(port, {
diff --git a/src/gateway/server.ts b/src/gateway/server.ts
index 4fc2008a8..cbe94ba2a 100644
--- a/src/gateway/server.ts
+++ b/src/gateway/server.ts
@@ -330,7 +330,7 @@ export type GatewayServerOptions = {
   controlUiEnabled?: boolean;
   /**
    * If false, do not serve `POST /v1/chat/completions`.
-   * Default: config `gateway.http.endpoints.chatCompletions.enabled` (or true when absent).
+   * Default: config `gateway.http.endpoints.chatCompletions.enabled` (or false when absent).
    */
   openAiChatCompletionsEnabled?: boolean;
   /**
@@ -440,7 +440,7 @@ export async function startGatewayServer(
   const openAiChatCompletionsEnabled =
     opts.openAiChatCompletionsEnabled ??
     cfgAtStart.gateway?.http?.endpoints?.chatCompletions?.enabled ??
-    true;
+    false;
   const controlUiBasePath = normalizeControlUiBasePath(
     cfgAtStart.gateway?.controlUi?.basePath,
   );
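
To verify the new default from a client, a minimal probe can POST the same request shape the e2e test uses and check for the 404. A sketch in TypeScript follows, assuming the gateway's default port `18789` and assuming the endpoint accepts the gateway token as an `Authorization: Bearer` header; both are assumptions, so adjust to your config:

```ts
// Minimal probe for the OpenAI-compatible endpoint on a local gateway.
// Assumptions (not taken from this patch): default port 18789 and
// bearer-token Authorization for gateway auth. Requires Node 18+ (global fetch).
const GATEWAY_URL = "http://127.0.0.1:18789/v1/chat/completions";

async function probeChatCompletions(token: string): Promise<void> {
  const res = await fetch(GATEWAY_URL, {
    method: "POST",
    headers: {
      "content-type": "application/json",
      authorization: `Bearer ${token}`, // assumption: bearer-token auth
    },
    // Same request shape as the e2e test in this patch.
    body: JSON.stringify({
      model: "clawdbot",
      messages: [{ role: "user", content: "hi" }],
    }),
  });

  if (res.status === 404) {
    // New default behavior: the endpoint stays off until
    // gateway.http.endpoints.chatCompletions.enabled is set to true.
    console.log("chat completions endpoint is disabled (404)");
    return;
  }
  console.log("chat completions response:", res.status, await res.json());
}

probeChatCompletions(process.env.CLAWDBOT_GATEWAY_TOKEN ?? "secret").catch(console.error);
```

Once `gateway.http.endpoints.chatCompletions.enabled` is set to `true` and the gateway has picked up the config change, the same request should return a normal Chat Completions response instead of the 404.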