diff --git a/src/tts/tts.test.ts b/src/tts/tts.test.ts
index a8c9dce9c..8462cba01 100644
--- a/src/tts/tts.test.ts
+++ b/src/tts/tts.test.ts
@@ -109,13 +109,13 @@ describe("tts", () => {
   });
 
   describe("isValidOpenAIModel", () => {
-    it("accepts gpt-4o-mini-tts model", () => {
+    it("accepts supported models", () => {
       expect(isValidOpenAIModel("gpt-4o-mini-tts")).toBe(true);
+      expect(isValidOpenAIModel("tts-1")).toBe(true);
+      expect(isValidOpenAIModel("tts-1-hd")).toBe(true);
     });
 
-    it("rejects other models", () => {
-      expect(isValidOpenAIModel("tts-1")).toBe(false);
-      expect(isValidOpenAIModel("tts-1-hd")).toBe(false);
+    it("rejects unsupported models", () => {
       expect(isValidOpenAIModel("invalid")).toBe(false);
       expect(isValidOpenAIModel("")).toBe(false);
       expect(isValidOpenAIModel("gpt-4")).toBe(false);
@@ -123,9 +123,11 @@ describe("tts", () => {
   });
 
   describe("OPENAI_TTS_MODELS", () => {
-    it("contains only gpt-4o-mini-tts", () => {
+    it("contains supported models", () => {
       expect(OPENAI_TTS_MODELS).toContain("gpt-4o-mini-tts");
-      expect(OPENAI_TTS_MODELS).toHaveLength(1);
+      expect(OPENAI_TTS_MODELS).toContain("tts-1");
+      expect(OPENAI_TTS_MODELS).toContain("tts-1-hd");
+      expect(OPENAI_TTS_MODELS).toHaveLength(3);
     });
 
     it("is a non-empty array", () => {
diff --git a/src/tts/tts.ts b/src/tts/tts.ts
index 5fa06f8d4..5f911ec14 100644
--- a/src/tts/tts.ts
+++ b/src/tts/tts.ts
@@ -736,7 +736,17 @@ function parseTtsDirectives(
   };
 }
 
-export const OPENAI_TTS_MODELS = ["gpt-4o-mini-tts"] as const;
+export const OPENAI_TTS_MODELS = ["gpt-4o-mini-tts", "tts-1", "tts-1-hd"] as const;
+
+/**
+ * Custom OpenAI-compatible TTS endpoint.
+ * When set, model/voice validation is relaxed to allow non-OpenAI models.
+ * Example: OPENAI_TTS_BASE_URL=http://localhost:8880/v1
+ */
+const OPENAI_TTS_BASE_URL = (
+  process.env.OPENAI_TTS_BASE_URL?.trim() || "https://api.openai.com/v1"
+).replace(/\/+$/, "");
+const isCustomOpenAIEndpoint = OPENAI_TTS_BASE_URL !== "https://api.openai.com/v1";
 export const OPENAI_TTS_VOICES = [
   "alloy",
   "ash",
@@ -752,10 +762,14 @@ export const OPENAI_TTS_VOICES = [
 type OpenAiTtsVoice = (typeof OPENAI_TTS_VOICES)[number];
 
 function isValidOpenAIModel(model: string): boolean {
+  // Allow any model when using custom endpoint (e.g., Kokoro, LocalAI)
+  if (isCustomOpenAIEndpoint) return true;
   return OPENAI_TTS_MODELS.includes(model as (typeof OPENAI_TTS_MODELS)[number]);
 }
 
 function isValidOpenAIVoice(voice: string): voice is OpenAiTtsVoice {
+  // Allow any voice when using custom endpoint (e.g., Kokoro Chinese voices)
+  if (isCustomOpenAIEndpoint) return true;
   return OPENAI_TTS_VOICES.includes(voice as OpenAiTtsVoice);
 }
 
@@ -982,7 +996,7 @@ async function openaiTTS(params: {
   const timeout = setTimeout(() => controller.abort(), timeoutMs);
 
   try {
-    const response = await fetch("https://api.openai.com/v1/audio/speech", {
+    const response = await fetch(`${OPENAI_TTS_BASE_URL}/audio/speech`, {
       method: "POST",
       headers: {
         Authorization: `Bearer ${apiKey}`,
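
For context, a minimal standalone sketch of the base-URL normalization and custom-endpoint toggle introduced in the patch. The helper name `resolveBaseUrl` is illustrative only; the logic is re-implemented inline here rather than imported from `src/tts/tts.ts`, so no assumptions are made about the module's exports.

```ts
// Sketch only: mirrors the normalization used in the patch above.
const DEFAULT_OPENAI_BASE = "https://api.openai.com/v1";

// Hypothetical helper, not part of the patch: trims the env value,
// falls back to the official endpoint, and strips trailing slashes,
// matching the `.replace(/\/+$/, "")` in tts.ts.
function resolveBaseUrl(raw: string | undefined): string {
  return (raw?.trim() || DEFAULT_OPENAI_BASE).replace(/\/+$/, "");
}

// e.g. a local Kokoro/LocalAI server, as in the doc comment above
const base = resolveBaseUrl("http://localhost:8880/v1/");
const isCustom = base !== DEFAULT_OPENAI_BASE;

console.log(base);     // "http://localhost:8880/v1"
console.log(isCustom); // true, so model/voice validation is skipped
```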