clawdbot/src/agents/pi-embedded-runner.limithistoryturns.test.ts
Abhay 51e3d16be9 feat: Add Ollama provider with automatic model discovery (#1606)
* feat: Add Ollama provider with automatic model discovery

- Add Ollama provider builder with automatic model detection
- Discover available models from a local Ollama instance via the /api/tags API (see the sketch after this list)
- Make resolveImplicitProviders async to support dynamic model discovery
- Add comprehensive Ollama documentation with setup and usage guide
- Add tests for Ollama provider integration
- Update provider index and model providers documentation
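
As a rough illustration of the discovery step: the endpoint and the `{ models: [{ name }] }` response shape are Ollama's documented /api/tags contract, but the function name, default base URL, and return shape here are assumptions, not the actual code.

```ts
// Hypothetical sketch: list models from a local Ollama instance.
// Only GET /api/tags and its response shape come from Ollama's API;
// everything else is illustrative.
async function discoverOllamaModels(
  baseUrl = "http://localhost:11434",
): Promise<{ id: string; name: string }[]> {
  const res = await fetch(`${baseUrl}/api/tags`);
  if (!res.ok) {
    throw new Error(`Ollama discovery failed: ${res.status}`);
  }
  const body = (await res.json()) as { models?: { name: string }[] };
  // Each installed tag (e.g. "llama3:latest") becomes one model entry.
  return (body.models ?? []).map((m) => ({ id: m.name, name: m.name }));
}
```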

Closes #1531

* fix: Correct Ollama provider type definitions and error handling

- Fix input property type to match ModelDefinitionConfig
- Import ModelDefinitionConfig type properly
- Fix error template literal to use String() for type safety
- Simplify return type signature of discoverOllamaModels

* fix: Suppress unhandled promise warnings from ensureClawdbotModelsJson in tests

- Cast unused promise returns to 'unknown' to suppress TypeScript warnings
- Tests that leave the promise unawaited do so intentionally
- This fixes the failing test suite caused by unawaited async calls

* fix: Skip Ollama model discovery during tests

- Check for VITEST or NODE_ENV=test before making HTTP requests (a sketch follows this list)
- Prevents test timeouts and hangs from network calls
- Ollama discovery will still work in production/normal usage
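
A plausible shape for that guard, illustrative only; the real check may live elsewhere and be structured differently. VITEST and NODE_ENV are the variables named in the commit message.

```ts
// Sketch: skip network-backed model discovery under a test runner.
function isTestEnvironment(env: NodeJS.ProcessEnv = process.env): boolean {
  return env.VITEST !== undefined || env.NODE_ENV === "test";
}
```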

* fix: Set VITEST environment variable in test setup

- Ensures Ollama discovery is skipped in all test runs
- Prevents network calls during tests that could cause timeouts

* test: Temporarily skip Ollama provider tests to diagnose CI failures

* fix: Make Ollama provider opt-in to avoid breaking existing tests

**Root Cause:**
The Ollama provider was being added to ALL configurations by default
(with a fallback API key of 'ollama-local'), which broke tests that
expected NO providers when no API keys were configured.

**Solution:**
- Removed the default fallback API key for Ollama
- Ollama provider now requires explicit configuration (sketched after this list) via:
  - OLLAMA_API_KEY environment variable, OR
  - Ollama profile in auth store
- Updated documentation to reflect the explicit configuration requirement
- Added a test to verify Ollama is not added by default
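
A sketch of the resulting opt-in rule: OLLAMA_API_KEY comes from the commit message, while the function name and the auth-store plumbing are assumptions.

```ts
// Sketch: Ollama is only enabled when explicitly configured.
// No "ollama-local" fallback key is supplied any more.
function shouldEnableOllama(
  env: NodeJS.ProcessEnv,
  hasOllamaAuthProfile: boolean,
): boolean {
  return env.OLLAMA_API_KEY !== undefined || hasOllamaAuthProfile;
}
```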

This fixes all 4 failing test suites:
- checks (node, test, pnpm test)
- checks (bun, test, bunx vitest run)
- checks-windows (node, test, pnpm test)
- checks-macos (test, pnpm test)

Closes #1531
2026-01-24 22:38:52 +00:00

import fs from "node:fs/promises";
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import { describe, expect, it, vi } from "vitest";
import type { ClawdbotConfig } from "../config/config.js";
import { ensureClawdbotModelsJson } from "./models-config.js";
import { limitHistoryTurns } from "./pi-embedded-runner.js";

// Mock streamSimple so tests never hit a real model API: it either throws
// (for the "mock-error" model) or emits a single canned assistant message.
vi.mock("@mariozechner/pi-ai", async () => {
  const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>("@mariozechner/pi-ai");
  return {
    ...actual,
    streamSimple: (model: { api: string; provider: string; id: string }) => {
      if (model.id === "mock-error") {
        throw new Error("boom");
      }
      const stream = new actual.AssistantMessageEventStream();
      queueMicrotask(() => {
        stream.push({
          type: "done",
          reason: "stop",
          message: {
            role: "assistant",
            content: [{ type: "text", text: "ok" }],
            stopReason: "stop",
            api: model.api,
            provider: model.provider,
            model: model.id,
            usage: {
              input: 1,
              output: 1,
              cacheRead: 0,
              cacheWrite: 0,
              totalTokens: 2,
              cost: {
                input: 0,
                output: 0,
                cacheRead: 0,
                cacheWrite: 0,
                total: 0,
              },
            },
            timestamp: Date.now(),
          },
        });
      });
      return stream;
    },
  };
});

const _makeOpenAiConfig = (modelIds: string[]) =>
  ({
    models: {
      providers: {
        openai: {
          api: "openai-responses",
          apiKey: "sk-test",
          baseUrl: "https://example.com",
          models: modelIds.map((id) => ({
            id,
            name: `Mock ${id}`,
            reasoning: false,
            input: ["text"],
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 16_000,
            maxTokens: 2048,
          })),
        },
      },
    },
  }) satisfies ClawdbotConfig;

// Cast to `unknown` so callers can intentionally skip awaiting the promise
// without tripping the unhandled-promise lint (see commit message above).
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
  ensureClawdbotModelsJson(cfg, agentDir) as unknown;

const _textFromContent = (content: unknown) => {
  if (typeof content === "string") return content;
  if (Array.isArray(content) && content[0]?.type === "text") {
    return (content[0] as { text?: string }).text;
  }
  return undefined;
};

// Parse a JSONL session file into its "message" entries.
const _readSessionMessages = async (sessionFile: string) => {
  const raw = await fs.readFile(sessionFile, "utf-8");
  return raw
    .split(/\r?\n/)
    .filter(Boolean)
    .map(
      (line) =>
        JSON.parse(line) as {
          type?: string;
          message?: { role?: string; content?: unknown };
        },
    )
    .filter((entry) => entry.type === "message")
    .map((entry) => entry.message as { role?: string; content?: unknown });
};

describe("limitHistoryTurns", () => {
  const makeMessages = (roles: ("user" | "assistant")[]): AgentMessage[] =>
    roles.map((role, i) => ({
      role,
      content: [{ type: "text", text: `message ${i}` }],
    }));

  it("returns all messages when limit is undefined", () => {
    const messages = makeMessages(["user", "assistant", "user", "assistant"]);
    expect(limitHistoryTurns(messages, undefined)).toBe(messages);
  });

  it("returns all messages when limit is 0", () => {
    const messages = makeMessages(["user", "assistant", "user", "assistant"]);
    expect(limitHistoryTurns(messages, 0)).toBe(messages);
  });

  it("returns all messages when limit is negative", () => {
    const messages = makeMessages(["user", "assistant", "user", "assistant"]);
    expect(limitHistoryTurns(messages, -1)).toBe(messages);
  });

  it("returns empty array when messages is empty", () => {
    expect(limitHistoryTurns([], 5)).toEqual([]);
  });

  it("keeps all messages when fewer user turns than limit", () => {
    const messages = makeMessages(["user", "assistant", "user", "assistant"]);
    expect(limitHistoryTurns(messages, 10)).toBe(messages);
  });

  it("limits to last N user turns", () => {
    const messages = makeMessages(["user", "assistant", "user", "assistant", "user", "assistant"]);
    const limited = limitHistoryTurns(messages, 2);
    expect(limited.length).toBe(4);
    expect(limited[0].content).toEqual([{ type: "text", text: "message 2" }]);
  });

  it("handles single user turn limit", () => {
    const messages = makeMessages(["user", "assistant", "user", "assistant", "user", "assistant"]);
    const limited = limitHistoryTurns(messages, 1);
    expect(limited.length).toBe(2);
    expect(limited[0].content).toEqual([{ type: "text", text: "message 4" }]);
    expect(limited[1].content).toEqual([{ type: "text", text: "message 5" }]);
  });

  it("handles messages with multiple assistant responses per user turn", () => {
    const messages = makeMessages(["user", "assistant", "assistant", "user", "assistant"]);
    const limited = limitHistoryTurns(messages, 1);
    expect(limited.length).toBe(2);
    expect(limited[0].role).toBe("user");
    expect(limited[1].role).toBe("assistant");
  });

  it("preserves message content integrity", () => {
    const messages: AgentMessage[] = [
      { role: "user", content: [{ type: "text", text: "first" }] },
      {
        role: "assistant",
        content: [{ type: "toolCall", id: "1", name: "exec", arguments: {} }],
      },
      { role: "user", content: [{ type: "text", text: "second" }] },
      { role: "assistant", content: [{ type: "text", text: "response" }] },
    ];
    const limited = limitHistoryTurns(messages, 1);
    expect(limited[0].content).toEqual([{ type: "text", text: "second" }]);
    expect(limited[1].content).toEqual([{ type: "text", text: "response" }]);
  });
});
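
// For orientation, a minimal implementation consistent with the behavior the
// tests above assert. This is a sketch only, not necessarily the real
// limitHistoryTurns in pi-embedded-runner.ts.
function _limitHistoryTurnsSketch(
  messages: AgentMessage[],
  maxTurns: number | undefined,
): AgentMessage[] {
  // undefined, zero, or negative limits disable trimming entirely.
  if (!maxTurns || maxTurns <= 0) return messages;
  // Walk backwards counting user messages; cut at the Nth-from-last one.
  let remaining = maxTurns;
  for (let i = messages.length - 1; i >= 0; i--) {
    if (messages[i].role === "user" && --remaining === 0) {
      return i === 0 ? messages : messages.slice(i);
    }
  }
  return messages; // fewer user turns than the limit: keep everything
}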