feat: Add VLM config to config.example.yaml with config.yaml support

Author: empty
Date:   2026-01-07 09:32:16 +08:00
Parent: 92183b083b
Commit: 9675b9c23b
2 changed files with 25 additions and 5 deletions


@@ -17,6 +17,20 @@ llm:
# DeepSeek: base_url: "https://api.deepseek.com" model: "deepseek-chat"
# Ollama (Local): base_url: "http://localhost:11434/v1" model: "llama3.2"
# ==================== VLM Configuration (Vision Language Model) ====================
# Used for character analysis and image understanding.
# If not configured, falls back to the LLM config with vision-model auto-detection.
vlm:
  provider: "qwen"   # Options: qwen, glm, openai
  api_key: ""        # Leave empty to use the DASHSCOPE_API_KEY or VLM_API_KEY env var
  base_url: ""       # Leave empty for auto-detection based on provider
  model: ""          # Leave empty for the provider's default model
# VLM provider presets:
#   Qwen (通义千问): provider: "qwen"   model: "qwen-vl-plus", "qwen-vl-max", or "qwen3-vl-plus"
#   GLM (智谱):      provider: "glm"    model: "glm-4v-flash" or "glm-4v"
#   OpenAI:          provider: "openai" model: "gpt-4-vision-preview" or "gpt-4o"

# ==================== ComfyUI Configuration ====================
comfyui:
  # Global ComfyUI settings
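
For illustration, a filled-in vlm entry in config.yaml might look like the sketch below. The model names come from the presets above; the GLM API key value is a hypothetical placeholder, and the empty api_key/base_url rely on the environment-variable and provider auto-detection fallbacks described in the comments.

# Qwen, relying on the documented fallbacks
vlm:
  provider: "qwen"
  api_key: ""              # resolved from DASHSCOPE_API_KEY (or VLM_API_KEY) at runtime
  base_url: ""             # auto-detected for the qwen provider
  model: "qwen-vl-max"     # one of the preset Qwen vision models

# Or, for GLM with an explicit key (placeholder value):
# vlm:
#   provider: "glm"
#   api_key: "your-zhipu-api-key"
#   model: "glm-4v-flash"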