# Changelog:
# - Add LLM_API_KEY_HEADER for providers using non-standard auth headers
# - Support Xiaomi MiMo (uses "api-key" header instead of "x-api-key")
# - Add extra_headers to LiteLLM calls when custom header is configured
# - Document LITELLM_ANTHROPIC_DISABLE_URL_SUFFIX for full path URLs
# LLM Configuration for The Island Backend
# Copy this file to .env and fill in your values

# =============================================================================
# Option 1: OpenAI (default)
# =============================================================================
# OPENAI_API_KEY=sk-xxx
# LLM_MODEL=gpt-3.5-turbo

# =============================================================================
# Option 2: Anthropic Claude
# =============================================================================
# ANTHROPIC_API_KEY=sk-ant-xxx
# LLM_MODEL=claude-3-haiku-20240307

# =============================================================================
# Option 3: Google Gemini
# =============================================================================
# GEMINI_API_KEY=xxx
# LLM_MODEL=gemini/gemini-pro

# =============================================================================
# Option 4: Azure OpenAI
# =============================================================================
# AZURE_API_KEY=xxx
# AZURE_API_BASE=https://your-resource.openai.azure.com
# LLM_MODEL=azure/your-deployment-name

# =============================================================================
# Option 5: OpenRouter (access multiple models)
# =============================================================================
# OPENROUTER_API_KEY=sk-or-xxx
# LLM_MODEL=openrouter/anthropic/claude-3-haiku

# =============================================================================
# Option 6: Local Ollama
# =============================================================================
# OLLAMA_API_BASE=http://localhost:11434
# LLM_MODEL=ollama/llama2

# =============================================================================
# Option 7: Custom/Self-hosted endpoint
# See: https://docs.litellm.ai/docs/providers
# =============================================================================
# LLM_API_BASE=http://localhost:8000/v1
# LLM_API_KEY=your-key
# LLM_API_KEY_HEADER=api-key  # Optional: custom header name for API key
#
# For OpenAI-compatible API:
# LLM_MODEL=openai/your-model-name
#
# For Anthropic-compatible API:
# LLM_MODEL=anthropic/your-model-name

# =============================================================================
# Example: Xiaomi MiMo (Anthropic-compatible)
# =============================================================================
# LLM_API_BASE=https://api.xiaomimomo.com/anthropic/v1/messages
# LLM_API_KEY=your-mimo-api-key
# LLM_API_KEY_HEADER=api-key
# LLM_MODEL=anthropic/mimo-v2-flash
# LITELLM_ANTHROPIC_DISABLE_URL_SUFFIX=true  # Prevent appending /v1/messages

# =============================================================================
# Force mock mode (no API calls, uses predefined responses)
# =============================================================================
# LLM_MOCK_MODE=true