diff --git a/src/gateway/openai-http.e2e.test.ts b/src/gateway/openai-http.e2e.test.ts
index f44b18dce..08dc11bbc 100644
--- a/src/gateway/openai-http.e2e.test.ts
+++ b/src/gateway/openai-http.e2e.test.ts
@@ -1,5 +1,7 @@
 import { describe, expect, it } from "vitest";
 
+import { HISTORY_CONTEXT_MARKER } from "../auto-reply/reply/history.js";
+import { CURRENT_MESSAGE_MARKER } from "../auto-reply/reply/mentions.js";
 import { emitAgentEvent } from "../infra/agent-events.js";
 import { agentCommand, getFreePort, installGatewayTestHooks } from "./test-helpers.js";
 
@@ -262,6 +264,121 @@ describe("OpenAI-compatible HTTP API (e2e)", () => {
     }
   });
 
+  it("includes conversation history when multiple messages are provided", async () => {
+    agentCommand.mockResolvedValueOnce({
+      payloads: [{ text: "I am Claude" }],
+    } as never);
+
+    const port = await getFreePort();
+    const server = await startServer(port);
+    try {
+      const res = await postChatCompletions(port, {
+        model: "clawdbot",
+        messages: [
+          { role: "system", content: "You are a helpful assistant." },
+          { role: "user", content: "Hello, who are you?" },
+          { role: "assistant", content: "I am Claude." },
+          { role: "user", content: "What did I just ask you?" },
+        ],
+      });
+      expect(res.status).toBe(200);
+
+      const [opts] = agentCommand.mock.calls[0] ?? [];
+      const message = (opts as { message?: string } | undefined)?.message ?? "";
+      expect(message).toContain(HISTORY_CONTEXT_MARKER);
+      expect(message).toContain("User: Hello, who are you?");
+      expect(message).toContain("Assistant: I am Claude.");
+      expect(message).toContain(CURRENT_MESSAGE_MARKER);
+      expect(message).toContain("User: What did I just ask you?");
+    } finally {
+      await server.close({ reason: "test done" });
+    }
+  });
+
+  it("does not include history markers for single message", async () => {
+    agentCommand.mockResolvedValueOnce({
+      payloads: [{ text: "hello" }],
+    } as never);
+
+    const port = await getFreePort();
+    const server = await startServer(port);
+    try {
+      const res = await postChatCompletions(port, {
+        model: "clawdbot",
+        messages: [
+          { role: "system", content: "You are a helpful assistant." },
+          { role: "user", content: "Hello" },
+        ],
+      });
+      expect(res.status).toBe(200);
+
+      const [opts] = agentCommand.mock.calls[0] ?? [];
+      const message = (opts as { message?: string } | undefined)?.message ?? "";
+      expect(message).not.toContain(HISTORY_CONTEXT_MARKER);
+      expect(message).not.toContain(CURRENT_MESSAGE_MARKER);
+      expect(message).toBe("Hello");
+    } finally {
+      await server.close({ reason: "test done" });
+    }
+  });
+
+  it("treats developer role same as system role", async () => {
+    agentCommand.mockResolvedValueOnce({
+      payloads: [{ text: "hello" }],
+    } as never);
+
+    const port = await getFreePort();
+    const server = await startServer(port);
+    try {
+      const res = await postChatCompletions(port, {
+        model: "clawdbot",
+        messages: [
+          { role: "developer", content: "You are a helpful assistant." },
+          { role: "user", content: "Hello" },
+        ],
+      });
+      expect(res.status).toBe(200);
+
+      const [opts] = agentCommand.mock.calls[0] ?? [];
+      const extraSystemPrompt = (opts as { extraSystemPrompt?: string } | undefined)
+        ?.extraSystemPrompt ?? "";
+      expect(extraSystemPrompt).toBe("You are a helpful assistant.");
+    } finally {
+      await server.close({ reason: "test done" });
+    }
+  });
+
+  it("includes tool output when it is the latest message", async () => {
+    agentCommand.mockResolvedValueOnce({
+      payloads: [{ text: "ok" }],
+    } as never);
+
+    const port = await getFreePort();
+    const server = await startServer(port);
+    try {
+      const res = await postChatCompletions(port, {
+        model: "clawdbot",
+        messages: [
+          { role: "system", content: "You are a helpful assistant." },
+          { role: "user", content: "What's the weather?" },
+          { role: "assistant", content: "Checking the weather." },
+          { role: "tool", content: "Sunny, 70F." },
+        ],
+      });
+      expect(res.status).toBe(200);
+
+      const [opts] = agentCommand.mock.calls[0] ?? [];
+      const message = (opts as { message?: string } | undefined)?.message ?? "";
+      expect(message).toContain(HISTORY_CONTEXT_MARKER);
+      expect(message).toContain("User: What's the weather?");
+      expect(message).toContain("Assistant: Checking the weather.");
+      expect(message).toContain(CURRENT_MESSAGE_MARKER);
+      expect(message).toContain("Tool: Sunny, 70F.");
+    } finally {
+      await server.close({ reason: "test done" });
+    }
+  });
+
   it("returns a non-streaming OpenAI chat.completion response", async () => {
     agentCommand.mockResolvedValueOnce({
       payloads: [{ text: "hello" }],
diff --git a/src/gateway/openai-http.ts b/src/gateway/openai-http.ts
index 5b475f4af..64709c4df 100644
--- a/src/gateway/openai-http.ts
+++ b/src/gateway/openai-http.ts
@@ -1,6 +1,7 @@
 import { randomUUID } from "node:crypto";
 import type { IncomingMessage, ServerResponse } from "node:http";
 
+import { buildHistoryContextFromEntries, type HistoryEntry } from "../auto-reply/reply/history.js";
 import { createDefaultDeps } from "../cli/deps.js";
 import { agentCommand } from "../commands/agent.js";
 import { emitAgentEvent, onAgentEvent } from "../infra/agent-events.js";
@@ -17,6 +18,7 @@ type OpenAiHttpOptions = {
 type OpenAiChatMessage = {
   role?: unknown;
   content?: unknown;
+  name?: unknown;
 };
 
 type OpenAiChatCompletionRequest = {
@@ -85,24 +87,69 @@ function buildAgentPrompt(messagesUnknown: unknown): {
   const messages = asMessages(messagesUnknown);
 
   const systemParts: string[] = [];
-  let lastUser = "";
+  const conversationEntries: Array<{ role: "user" | "assistant" | "tool"; entry: HistoryEntry }> =
+    [];
 
   for (const msg of messages) {
     if (!msg || typeof msg !== "object") continue;
     const role = typeof msg.role === "string" ? msg.role.trim() : "";
     const content = extractTextContent(msg.content).trim();
     if (!role || !content) continue;
-    if (role === "system") {
+    if (role === "system" || role === "developer") {
       systemParts.push(content);
       continue;
     }
-    if (role === "user") {
-      lastUser = content;
+
+    const normalizedRole = role === "function" ? "tool" : role;
+    if (normalizedRole !== "user" && normalizedRole !== "assistant" && normalizedRole !== "tool") {
+      continue;
+    }
+
+    const name = typeof msg.name === "string" ? msg.name.trim() : "";
+    const sender =
+      normalizedRole === "assistant"
+        ? "Assistant"
+        : normalizedRole === "user"
+          ? "User"
+          : name
+            ? `Tool:${name}`
+            : "Tool";
+
+    conversationEntries.push({
+      role: normalizedRole,
+      entry: { sender, body: content },
+    });
+  }
+
+  let message = "";
+  if (conversationEntries.length > 0) {
+    let currentIndex = -1;
+    for (let i = conversationEntries.length - 1; i >= 0; i -= 1) {
+      const entryRole = conversationEntries[i]?.role;
+      if (entryRole === "user" || entryRole === "tool") {
+        currentIndex = i;
+        break;
+      }
+    }
+    if (currentIndex < 0) currentIndex = conversationEntries.length - 1;
+    const currentEntry = conversationEntries[currentIndex]?.entry;
+    if (currentEntry) {
+      const historyEntries = conversationEntries.slice(0, currentIndex).map((entry) => entry.entry);
+      if (historyEntries.length === 0) {
+        message = currentEntry.body;
+      } else {
+        const formatEntry = (entry: HistoryEntry) => `${entry.sender}: ${entry.body}`;
+        message = buildHistoryContextFromEntries({
+          entries: [...historyEntries, currentEntry],
+          currentMessage: formatEntry(currentEntry),
+          formatEntry,
+        });
+      }
     }
   }
 
   return {
-    message: lastUser,
+    message,
     extraSystemPrompt: systemParts.length > 0 ? systemParts.join("\n\n") : undefined,
   };
 }