# LLM Configuration for The Island Backend
# Copy this file to .env and fill in your values

# =============================================================================
# Option 1: OpenAI (default)
# =============================================================================
# OPENAI_API_KEY=sk-xxx
# LLM_MODEL=gpt-3.5-turbo

# =============================================================================
# Option 2: Anthropic Claude
# =============================================================================
# ANTHROPIC_API_KEY=sk-ant-xxx
# LLM_MODEL=claude-3-haiku-20240307

# =============================================================================
# Option 3: Google Gemini
# =============================================================================
# GEMINI_API_KEY=xxx
# LLM_MODEL=gemini/gemini-pro

# =============================================================================
# Option 4: Azure OpenAI
# =============================================================================
# AZURE_API_KEY=xxx
# AZURE_API_BASE=https://your-resource.openai.azure.com
# LLM_MODEL=azure/your-deployment-name

# =============================================================================
# Option 5: OpenRouter (access multiple models)
# =============================================================================
# OPENROUTER_API_KEY=sk-or-xxx
# LLM_MODEL=openrouter/anthropic/claude-3-haiku

# =============================================================================
# Option 6: Local Ollama
# =============================================================================
# OLLAMA_API_BASE=http://localhost:11434
# LLM_MODEL=ollama/llama2

# =============================================================================
# Option 7: Custom/Self-hosted (OpenAI-compatible endpoint)
# =============================================================================
# LLM_API_BASE=http://localhost:8000/v1
# LLM_API_KEY=your-key
# LLM_MODEL=qwen2.5
# Note: When LLM_API_BASE is set,
#   model is auto-prefixed as "openai/qwen2.5"

# =============================================================================
# Model naming convention (LiteLLM requires provider prefix)
# See: https://docs.litellm.ai/docs/providers
# =============================================================================
# Format: provider/model-name
# Examples:
#   openai/gpt-4
#   anthropic/claude-3-haiku-20240307
#   gemini/gemini-pro
#   ollama/llama2
#   huggingface/starcoder
#   azure/your-deployment-name

# =============================================================================
# Force mock mode (no API calls, uses predefined responses)
# =============================================================================
# LLM_MOCK_MODE=true